diff --git a/configure.ml b/configure.ml index 382904eceb5..55d8a53a825 100644 --- a/configure.ml +++ b/configure.ml @@ -3,11 +3,11 @@ let config_mk = "config.mk" (* Configure script *) open Cmdliner -let dir name default docv doc = +let dir name default docv doc = let doc = Printf.sprintf "Set the directory for installing %s" doc in Arg.(value & opt string default & info [name] ~docv ~doc) -let path name default docv doc = +let path name default docv doc = let doc = Printf.sprintf "Set the path for %s" doc in Arg.(value & opt string default & info [name] ~docv ~doc) @@ -34,7 +34,7 @@ let udevdir = dir "udevdir" "/etc/udev" "UDEVDIR" "udev scripts" let info = let doc = "Configures a package" in - Term.info "configure" ~version:"0.1" ~doc + Term.info "configure" ~version:"0.1" ~doc let output_file filename lines = let oc = open_out filename in @@ -46,7 +46,7 @@ let configure disable_warn_error varpatchdir etcdir optdir plugindir extensiondi Printf.printf "Configuring with the following params:\n\tdisable_warn_error=%b\n\tvarpatchdir=%s\n\tetcdir=%s\n\toptdir=%s\n\tplugindir=%s\n\textensiondir=%s\n\thooksdir=%s\n\tinventory=%s\n\txapiconf=%s\n\tlibexecdir=%s\n\tscriptsdir=%s\n\tsharedir=%s\n\twebdir=%s\n\tcluster_stack_root=%s\n\tbindir=%s\n\tsbindir=%s\n\tudevdir=%s\n\n" disable_warn_error varpatchdir etcdir optdir plugindir extensiondir hooksdir inventory xapiconf libexecdir scriptsdir sharedir webdir cluster_stack_root bindir sbindir udevdir; (* Write config.mk *) - let lines = + let lines = [ "# Warning - this file is autogenerated by the configure script"; "# Do not edit"; Printf.sprintf "DISABLE_WARN_ERROR=%b" disable_warn_error; @@ -56,7 +56,7 @@ let configure disable_warn_error varpatchdir etcdir optdir plugindir extensiondi Printf.sprintf "PLUGINDIR=%s" plugindir; Printf.sprintf "EXTENSIONDIR=%s" extensiondir; Printf.sprintf "HOOKSDIR=%s" hooksdir; - Printf.sprintf "INVENTORY=%s" inventory; + Printf.sprintf "INVENTORY=%s" inventory; Printf.sprintf 
"XAPICONF=%s" xapiconf; Printf.sprintf "LIBEXECDIR=%s" libexecdir; Printf.sprintf "SCRIPTSDIR=%s" scriptsdir; @@ -71,9 +71,9 @@ let configure disable_warn_error varpatchdir etcdir optdir plugindir extensiondi let configure_t = Term.(pure configure $ disable_warn_error $ varpatchdir $ etcdir $ optdir $ plugindir $ extensiondir $ hooksdir $ inventory $ xapiconf $ libexecdir $ scriptsdir $ sharedir $ webdir $ cluster_stack_root $ bindir $ sbindir $ udevdir ) -let () = - match - Term.eval (configure_t, info) +let () = + match + Term.eval (configure_t, info) with - | `Error _ -> exit 1 + | `Error _ -> exit 1 | _ -> exit 0 diff --git a/ocaml/auth/auth_signature.ml b/ocaml/auth/auth_signature.ml index 813e7813e51..28999179873 100644 --- a/ocaml/auth/auth_signature.ml +++ b/ocaml/auth/auth_signature.ml @@ -14,8 +14,8 @@ (** * Interface for External Authentication Plugin component * @group Access Control - *) - +*) + (* * v1 22Oct08 * @@ -27,114 +27,114 @@ exception Auth_service_error of auth_service_error_tag * string exception Subject_cannot_be_resolved let suffix_of_tag errtag = - match errtag with - | E_GENERIC -> "" - | E_LOOKUP -> Api_errors.auth_suffix_domain_lookup_failed - | E_DENIED -> Api_errors.auth_suffix_permission_denied - | E_CREDENTIALS -> Api_errors.auth_suffix_wrong_credentials - | E_UNAVAILABLE -> Api_errors.auth_suffix_unavailable - | E_INVALID_OU -> Api_errors.auth_suffix_invalid_ou + match errtag with + | E_GENERIC -> "" + | E_LOOKUP -> Api_errors.auth_suffix_domain_lookup_failed + | E_DENIED -> Api_errors.auth_suffix_permission_denied + | E_CREDENTIALS -> Api_errors.auth_suffix_wrong_credentials + | E_UNAVAILABLE -> Api_errors.auth_suffix_unavailable + | E_INVALID_OU -> Api_errors.auth_suffix_invalid_ou (* required fields in subject.other_config *) let subject_information_field_subject_name = "subject-name" type t = - { - - (* subject_id Authenticate_username_password(string username, string password) - - Takes a username and password, and tries to 
authenticate against an already configured - auth service (see XenAPI requirements Wiki page for details of how auth service configuration - takes place and the appropriate values are stored within the XenServer Metadata). - If authentication is successful then a subject_id is returned representing the account - corresponding to the supplied credentials (where the subject_id is in a namespace managed by - the auth module/service itself -- e.g. maybe a SID or something in the AD case). - Raises auth_failure if authentication is not successful - *) - authenticate_username_password : string -> string -> string; + { + + (* subject_id Authenticate_username_password(string username, string password) + + Takes a username and password, and tries to authenticate against an already configured + auth service (see XenAPI requirements Wiki page for details of how auth service configuration + takes place and the appropriate values are stored within the XenServer Metadata). + If authentication is successful then a subject_id is returned representing the account + corresponding to the supplied credentials (where the subject_id is in a namespace managed by + the auth module/service itself -- e.g. maybe a SID or something in the AD case). + Raises auth_failure if authentication is not successful + *) + authenticate_username_password : string -> string -> string; + + (* subject_id Authenticate_ticket(string ticket) + + As above but uses a ticket as credentials (i.e. for single sign-on) + *) + authenticate_ticket : string -> string; + + (* subject_id get_subject_identifier(string subject_name) + + Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- + see Access Control wiki page); and resolves it to a subject_id against the external + auth/directory service. + Raises Not_found if authentication is not succesful. 
+ *) + get_subject_identifier : string -> string; + + (* ((string*string) list) query_subject_information(string subject_identifier) + + Takes a subject_identifier and returns the user record from the directory service as + key/value pairs. In the returned string*string map, there _must_ be a key called + subject_name that refers to the name of the account (e.g. the user or group name as may + be displayed in XenCenter). There is no other requirements to include fields from the user + record -- initially I'd imagine that we wouldn't bother adding anything else here, but + it's a string*string list anyway for possible future expansion. + Raises Not_found if subject_id cannot be resolved by external auth service + *) + query_subject_information : string -> ((string*string) list); + + (* (string list) query_group_membership(string subject_identifier) + + Takes a subject_identifier and returns its group membership (i.e. a list of subject + identifiers of the groups that the subject passed in belongs to). The set of groups returned + _must_ be transitively closed wrt the is_member_of relation if the external directory service + supports nested groups (as AD does for example) + *) + query_group_membership : string -> (string list); - (* subject_id Authenticate_ticket(string ticket) - - As above but uses a ticket as credentials (i.e. for single sign-on) - *) - authenticate_ticket : string -> string; - - (* subject_id get_subject_identifier(string subject_name) - - Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- - see Access Control wiki page); and resolves it to a subject_id against the external - auth/directory service. - Raises Not_found if authentication is not succesful. - *) - get_subject_identifier : string -> string; - - (* ((string*string) list) query_subject_information(string subject_identifier) - - Takes a subject_identifier and returns the user record from the directory service as - key/value pairs. 
In the returned string*string map, there _must_ be a key called - subject_name that refers to the name of the account (e.g. the user or group name as may - be displayed in XenCenter). There is no other requirements to include fields from the user - record -- initially I'd imagine that we wouldn't bother adding anything else here, but - it's a string*string list anyway for possible future expansion. - Raises Not_found if subject_id cannot be resolved by external auth service - *) - query_subject_information : string -> ((string*string) list); - - (* (string list) query_group_membership(string subject_identifier) - - Takes a subject_identifier and returns its group membership (i.e. a list of subject - identifiers of the groups that the subject passed in belongs to). The set of groups returned - _must_ be transitively closed wrt the is_member_of relation if the external directory service - supports nested groups (as AD does for example) - *) - query_group_membership : string -> (string list); - (* In addition, there are some event hooks that auth modules implement as follows: *) - - (* unit on_enable(((string*string) list) config_params) - - Called internally by xapi _on each host_ when a client enables an external auth service for the - pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed - by the client as part of the corresponding XenAPI call. - On receiving this hook, the auth module should: - (i) do whatever it needs to do (if anything) to register with the external auth/directory - service [using the config params supplied to get access] - (ii) Write the config_params that it needs to store persistently in the XenServer metadata - into the Pool.external_auth_configuration field. 
[Note - the rationale for making the plugin - write the config params it needs long-term into the XenServer metadata itself is so it can - explicitly filter any one-time credentials [like AD username/password for example] that it - does not need long-term.] - *) - on_enable : ((string*string) list) -> unit; - - (* unit on_disable() - - Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. - The hook will be called _before_ the Pool configuration fields relating to the external-auth - service are cleared (i.e. so you can access the config params you need from the pool metadata - within the body of the on_disable method) - *) - on_disable : ((string*string) list) -> unit; - - (* unit on_xapi_initialize(bool system_boot) - - Called internally by xapi whenever it starts up. The system_boot flag is true iff xapi is - starting for the first time after a host boot - *) - on_xapi_initialize : bool -> unit; - - (* unit on_xapi_exit() - - Called internally when xapi is doing a clean exit. - *) - on_xapi_exit : unit -> unit; - } + (* unit on_enable(((string*string) list) config_params) + + Called internally by xapi _on each host_ when a client enables an external auth service for the + pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed + by the client as part of the corresponding XenAPI call. + On receiving this hook, the auth module should: + (i) do whatever it needs to do (if anything) to register with the external auth/directory + service [using the config params supplied to get access] + (ii) Write the config_params that it needs to store persistently in the XenServer metadata + into the Pool.external_auth_configuration field. [Note - the rationale for making the plugin + write the config params it needs long-term into the XenServer metadata itself is so it can + explicitly filter any one-time credentials [like AD username/password for example] that it + does not need long-term.] 
+ *) + on_enable : ((string*string) list) -> unit; + + (* unit on_disable() + + Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. + The hook will be called _before_ the Pool configuration fields relating to the external-auth + service are cleared (i.e. so you can access the config params you need from the pool metadata + within the body of the on_disable method) + *) + on_disable : ((string*string) list) -> unit; + + (* unit on_xapi_initialize(bool system_boot) + + Called internally by xapi whenever it starts up. The system_boot flag is true iff xapi is + starting for the first time after a host boot + *) + on_xapi_initialize : bool -> unit; + + (* unit on_xapi_exit() + + Called internally when xapi is doing a clean exit. + *) + on_xapi_exit : unit -> unit; + + } (* Auth modules must implement this signature:*) module type AUTH_MODULE = sig - val methods : t + val methods : t end diff --git a/ocaml/auth/authx.ml b/ocaml/auth/authx.ml index 43f6923a4ca..886d6ecb15a 100644 --- a/ocaml/auth/authx.ml +++ b/ocaml/auth/authx.ml @@ -13,331 +13,331 @@ *) (** * @group Access Control - *) - +*) + module D = Debug.Make(struct let name="extauth_plugin_PAM_NSS" end) open D module AuthX : Auth_signature.AUTH_MODULE = struct -(* +(* * External Authentication Plugin component * using Unix PAM/NSS as a backend * v1 22Oct08 marcusg@eu.citrix.com * *) -(* This implementation is supposed to only use local names in the NSS and PAM databases *) -(* => PAM: is used for authentication *) -(* => NSS: is used as a database for groups, list of users etc *) -(* Both, by default, use only local information from /etc/passwd and /etc/group *) -(* PAM can be extended to use Kerberos by using xs-documents.hg/technical/howto/howto-dom0-ad-krb5.txt *) -(* NSS can be extended to use LDAP by using xs-documents.hg/technical/howto/howto-dom0-ad-nss-ldap.txt *) - - -let with_cmd cmd params_list fn = - let debug_cmd = (cmd^" "^(List.fold_right (fun p 
pp->"\""^p^"\" "^pp) params_list "")) in - debug "Executing cmd [%s]" debug_cmd; - let output_str, _ = - try - Forkhelpers.execute_command_get_output cmd params_list - with e -> begin - let errmsg = Printf.sprintf "[%s]: %s" debug_cmd (Printexc.to_string e) in - debug "Error executing cmd %s" errmsg; - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,errmsg)) - end - in - let output_lines = Stdext.Xstringext.String.split '\n' output_str in - fn output_lines - -let getent_common nss_database fn = - with_cmd "/usr/bin/getent" [nss_database] - (fun lines -> - try - (* getent passwd returns several lines *) - let rec get_next_line lines = - (match lines with - | [] -> raise Not_found - | line::lines -> - let recs = Stdext.Xstringext.String.split ':' line in - let username = List.nth recs 0 in - let uid = List.nth recs 2 in - (match fn username uid recs with - | None -> get_next_line lines - | Some x -> x - ) - ) - in - get_next_line lines - with e -> - begin - debug "error looking up nss_database=%s: %s" - nss_database (Printexc.to_string e); - raise Not_found - end - ) - -(* Verifies if a subject_name is in one of the NSS databases *) -(* Useful databases are: *) -(* 'passwd', for the list of users *) -(* 'group', for the list of groups *) -(* Returns the id corresponding to the subject_name *) -(* Raises Not_found if subject_name not in NSS database *) -let getent_idbyname nss_database subject_name = - getent_common nss_database - (fun username uid recs -> if username = subject_name then Some uid else None) - -let getent_namebyid nss_database subject_id = - getent_common nss_database - (fun username uid recs -> if uid = subject_id then Some username else None) - -let getent_idbyid nss_database subject_id = - getent_common nss_database - (fun username uid recs -> if uid = subject_id then Some uid else None) - -let getent_allbyid nss_database subject_id = - getent_common nss_database - (fun username uid recs -> if uid = subject_id then Some recs else 
None) - -(* subject_id get_subject_identifier(string subject_name) - - Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- - see Access Control wiki page); and resolves it to a subject_id against the external - auth/directory service. - Raises Not_found if authentication is not succesful. -*) -let get_subject_identifier subject_name = - - try - (* looks up list of users*) - "u"^(getent_idbyname "passwd" subject_name) - with Not_found -> - (* looks up list of groups*) - "g"^(getent_idbyname "group" subject_name) - - -(* subject_id Authenticate_username_password(string username, string password) - - Takes a username and password, and tries to authenticate against an already configured - auth service (see XenAPI requirements Wiki page for details of how auth service configuration - takes place and the appropriate values are stored within the XenServer Metadata). - If authentication is successful then a subject_id is returned representing the account - corresponding to the supplied credentials (where the subject_id is in a namespace managed by - the auth module/service itself -- e.g. maybe a SID or something in the AD case). - Raises auth_failure if authentication is not successful -*) - -(* call already existing pam.ml *) -let authenticate_username_password username password = - - (* first, we try to authenticated against our user database using PAM *) - try - Pam.authenticate username password; - - (* no exception raised, then authentication succeeded, *) - (* now we return the authenticated user's id *) - get_subject_identifier username - - with (Failure msg) -> - (*debug "Failed to authenticate user %s: %s" uname msg;*) - raise (Auth_signature.Auth_failure msg) - -(* subject_id Authenticate_ticket(string ticket) - - As above but uses a ticket as credentials (i.e. 
for single sign-on) -*) - (* not implemented now, not needed for our tests, only for a *) - (* future single sign-on feature *) -let authenticate_ticket tgt = - failwith "authx authenticate_ticket not implemented" - -(* ((string*string) list) query_subject_information(string subject_identifier) - - Takes a subject_identifier and returns the user record from the directory service as - key/value pairs. In the returned string*string map, there _must_ be a key called - subject_name that refers to the name of the account (e.g. the user or group name as may - be displayed in XenCenter). There is no other requirements to include fields from the user - record -- initially I'd imagine that we wouldn't bother adding anything else here, but - it's a string*string list anyway for possible future expansion. - Raises Not_found if subject_id cannot be resolved by external auth service -*) -let query_subject_information subject_identifier = - - (* we are expecting an id such as u0, g0, u123 etc *) - if String.length subject_identifier < 2 then raise Not_found; - - match (String.get subject_identifier 0) with - | 'u' -> begin - (* 1. first look up the list of users *) - - (* here we remove the prefix u or g *) - let subject_identifier = String.sub subject_identifier 1 (String.length subject_identifier-1) in - - let infolist = getent_allbyid "passwd" subject_identifier in - let passwd = List.nth infolist 1 in - let account_disabled = - if (String.length passwd < 1) - then true (* no password *) - else - passwd.[0] = '*' (* disabled account *)|| passwd.[0] = '!' 
(* disabled password *) - in - [ ("subject-name", List.nth infolist 0); - (*("subject-pwd", List.nth infolist 1);*) - ("subject-uid", "u"^(List.nth infolist 2)); - ("subject-gid", "g"^(List.nth infolist 3)); - ("subject-gecos", List.nth infolist 4); - ("subject-displayname", - let n = (List.nth infolist 4) in - if n <> "" - then n (* gecos *) - else List.nth infolist 0 (* name *) - ); - (*("subject-homedir", List.nth infolist 5);*) - (*("subject-shell", List.nth infolist 6);*) - (* comma-separated list of subjects that are contained in this subject *) - (*("contains-byname", ""); (*in this case, no element *)*) - (* fields required in xen_center: *) - ("subject-is-group", "false"); - (* fields required in xapi_session: *) - ("subject-account-disabled", string_of_bool account_disabled); - ("subject-account-expired", "false"); - ("subject-account-locked", "false"); - ("subject-password-expired", "false"); - ] - end - | 'g' -> begin - (* 2. only then we look up the list of groups *) - - (* here we remove the prefix u or g *) - let subject_identifier = String.sub subject_identifier 1 (String.length subject_identifier-1) in - - let infolist = getent_allbyid "group" subject_identifier in - [ ("subject-name", List.nth infolist 0); - (*("subject-pwd", List.nth infolist 1);*) - ("subject-uid", "g"^(List.nth infolist 2)); - ("subject-gid", "g"^(List.nth infolist 2)); - (*("subject-homedir", "");*) - (*("subject-shell", "");*) - (* comma-separated list of subjects that are contained in this subject *) - (*("contains-byname", List.nth infolist 3);*) - (* fields required in xen_center: *) - ("subject-is-group", "true"); - (* fields required in xapi_session: *) - ] - end - | _ -> raise Not_found - -(* (string list) query_group_membership(string subject_identifier) - - Takes a subject_identifier and returns its group membership (i.e. a list of subject - identifiers of the groups that the subject passed in belongs to). 
The set of groups returned - _must_ be transitively closed wrt the is_member_of relation if the external directory service - supports nested groups (as AD does for example) -*) - (* in unix, groups cannot contain groups, so we just verify the groups a user *) - (* belongs to and, if that fails, if some group has the required identifier *) -let query_group_membership subject_identifier = - - (* 1. first we try to see if our subject identifier is a user id...*) - let sanitized_subject_id = String.escaped subject_identifier in - - (* we are expecting an id such as u0, g0, u123 etc *) - if String.length sanitized_subject_id < 2 then raise Not_found; - - (* here we remove the prefix u or g *) - let sanitized_subject_id = String.sub sanitized_subject_id 1 (String.length sanitized_subject_id-1) in - - match (String.get subject_identifier 0) with - | 'u' -> begin - (* looks up list of users*) - let subject_name = getent_namebyid "passwd" sanitized_subject_id in - - (* Not necessary to escape subject_name because we call execv in forkhelpers *) - (* Also, escaping will break unicode chars in usernames *) - with_cmd "/usr/bin/id" ["-G";subject_name] - (fun lines -> - (* id -G always returns at most one line in stdout *) - match lines with - | [] -> raise Not_found - | gidline::_ -> - let gids = Stdext.Xstringext.String.split ' ' gidline in - debug "Resolved %i group ids for subject %s (%s): %s" - (List.length gids) - subject_name - subject_identifier - (List.fold_left (fun p pp->if p="" then pp else p^","^pp) "" gids); - List.map (fun gid -> "g"^gid) gids - ) - end - - | 'g' -> begin - (* 2. 
if (1) fails, we try to see if our subject identifier is a group id...*) - (* in Unix, a group cannot contain other groups, so no need to go recursively *) - ("g"^(getent_idbyid "group" sanitized_subject_id))::[] - end - - | _ -> raise Not_found + (* This implementation is supposed to only use local names in the NSS and PAM databases *) + (* => PAM: is used for authentication *) + (* => NSS: is used as a database for groups, list of users etc *) + (* Both, by default, use only local information from /etc/passwd and /etc/group *) + (* PAM can be extended to use Kerberos by using xs-documents.hg/technical/howto/howto-dom0-ad-krb5.txt *) + (* NSS can be extended to use LDAP by using xs-documents.hg/technical/howto/howto-dom0-ad-nss-ldap.txt *) + + + let with_cmd cmd params_list fn = + let debug_cmd = (cmd^" "^(List.fold_right (fun p pp->"\""^p^"\" "^pp) params_list "")) in + debug "Executing cmd [%s]" debug_cmd; + let output_str, _ = + try + Forkhelpers.execute_command_get_output cmd params_list + with e -> begin + let errmsg = Printf.sprintf "[%s]: %s" debug_cmd (Printexc.to_string e) in + debug "Error executing cmd %s" errmsg; + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,errmsg)) + end + in + let output_lines = Stdext.Xstringext.String.split '\n' output_str in + fn output_lines + + let getent_common nss_database fn = + with_cmd "/usr/bin/getent" [nss_database] + (fun lines -> + try + (* getent passwd returns several lines *) + let rec get_next_line lines = + (match lines with + | [] -> raise Not_found + | line::lines -> + let recs = Stdext.Xstringext.String.split ':' line in + let username = List.nth recs 0 in + let uid = List.nth recs 2 in + (match fn username uid recs with + | None -> get_next_line lines + | Some x -> x + ) + ) + in + get_next_line lines + with e -> + begin + debug "error looking up nss_database=%s: %s" + nss_database (Printexc.to_string e); + raise Not_found + end + ) + + (* Verifies if a subject_name is in one of the NSS 
databases *) + (* Useful databases are: *) + (* 'passwd', for the list of users *) + (* 'group', for the list of groups *) + (* Returns the id corresponding to the subject_name *) + (* Raises Not_found if subject_name not in NSS database *) + let getent_idbyname nss_database subject_name = + getent_common nss_database + (fun username uid recs -> if username = subject_name then Some uid else None) + + let getent_namebyid nss_database subject_id = + getent_common nss_database + (fun username uid recs -> if uid = subject_id then Some username else None) + + let getent_idbyid nss_database subject_id = + getent_common nss_database + (fun username uid recs -> if uid = subject_id then Some uid else None) + + let getent_allbyid nss_database subject_id = + getent_common nss_database + (fun username uid recs -> if uid = subject_id then Some recs else None) + + (* subject_id get_subject_identifier(string subject_name) + + Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- + see Access Control wiki page); and resolves it to a subject_id against the external + auth/directory service. + Raises Not_found if authentication is not succesful. + *) + let get_subject_identifier subject_name = + + try + (* looks up list of users*) + "u"^(getent_idbyname "passwd" subject_name) + with Not_found -> + (* looks up list of groups*) + "g"^(getent_idbyname "group" subject_name) + + + (* subject_id Authenticate_username_password(string username, string password) + + Takes a username and password, and tries to authenticate against an already configured + auth service (see XenAPI requirements Wiki page for details of how auth service configuration + takes place and the appropriate values are stored within the XenServer Metadata). + If authentication is successful then a subject_id is returned representing the account + corresponding to the supplied credentials (where the subject_id is in a namespace managed by + the auth module/service itself -- e.g. 
maybe a SID or something in the AD case). + Raises auth_failure if authentication is not successful + *) + + (* call already existing pam.ml *) + let authenticate_username_password username password = + + (* first, we try to authenticated against our user database using PAM *) + try + Pam.authenticate username password; + + (* no exception raised, then authentication succeeded, *) + (* now we return the authenticated user's id *) + get_subject_identifier username + + with (Failure msg) -> + (*debug "Failed to authenticate user %s: %s" uname msg;*) + raise (Auth_signature.Auth_failure msg) + + (* subject_id Authenticate_ticket(string ticket) + + As above but uses a ticket as credentials (i.e. for single sign-on) + *) + (* not implemented now, not needed for our tests, only for a *) + (* future single sign-on feature *) + let authenticate_ticket tgt = + failwith "authx authenticate_ticket not implemented" + + (* ((string*string) list) query_subject_information(string subject_identifier) + + Takes a subject_identifier and returns the user record from the directory service as + key/value pairs. In the returned string*string map, there _must_ be a key called + subject_name that refers to the name of the account (e.g. the user or group name as may + be displayed in XenCenter). There is no other requirements to include fields from the user + record -- initially I'd imagine that we wouldn't bother adding anything else here, but + it's a string*string list anyway for possible future expansion. + Raises Not_found if subject_id cannot be resolved by external auth service + *) + let query_subject_information subject_identifier = + + (* we are expecting an id such as u0, g0, u123 etc *) + if String.length subject_identifier < 2 then raise Not_found; + + match (String.get subject_identifier 0) with + | 'u' -> begin + (* 1. 
first look up the list of users *) + + (* here we remove the prefix u or g *) + let subject_identifier = String.sub subject_identifier 1 (String.length subject_identifier-1) in + + let infolist = getent_allbyid "passwd" subject_identifier in + let passwd = List.nth infolist 1 in + let account_disabled = + if (String.length passwd < 1) + then true (* no password *) + else + passwd.[0] = '*' (* disabled account *)|| passwd.[0] = '!' (* disabled password *) + in + [ ("subject-name", List.nth infolist 0); + (*("subject-pwd", List.nth infolist 1);*) + ("subject-uid", "u"^(List.nth infolist 2)); + ("subject-gid", "g"^(List.nth infolist 3)); + ("subject-gecos", List.nth infolist 4); + ("subject-displayname", + let n = (List.nth infolist 4) in + if n <> "" + then n (* gecos *) + else List.nth infolist 0 (* name *) + ); + (*("subject-homedir", List.nth infolist 5);*) + (*("subject-shell", List.nth infolist 6);*) + (* comma-separated list of subjects that are contained in this subject *) + (*("contains-byname", ""); (*in this case, no element *)*) + (* fields required in xen_center: *) + ("subject-is-group", "false"); + (* fields required in xapi_session: *) + ("subject-account-disabled", string_of_bool account_disabled); + ("subject-account-expired", "false"); + ("subject-account-locked", "false"); + ("subject-password-expired", "false"); + ] + end + | 'g' -> begin + (* 2. 
only then we look up the list of groups *) + + (* here we remove the prefix u or g *) + let subject_identifier = String.sub subject_identifier 1 (String.length subject_identifier-1) in + + let infolist = getent_allbyid "group" subject_identifier in + [ ("subject-name", List.nth infolist 0); + (*("subject-pwd", List.nth infolist 1);*) + ("subject-uid", "g"^(List.nth infolist 2)); + ("subject-gid", "g"^(List.nth infolist 2)); + (*("subject-homedir", "");*) + (*("subject-shell", "");*) + (* comma-separated list of subjects that are contained in this subject *) + (*("contains-byname", List.nth infolist 3);*) + (* fields required in xen_center: *) + ("subject-is-group", "true"); + (* fields required in xapi_session: *) + ] + end + | _ -> raise Not_found + + (* (string list) query_group_membership(string subject_identifier) + + Takes a subject_identifier and returns its group membership (i.e. a list of subject + identifiers of the groups that the subject passed in belongs to). The set of groups returned + _must_ be transitively closed wrt the is_member_of relation if the external directory service + supports nested groups (as AD does for example) + *) + (* in unix, groups cannot contain groups, so we just verify the groups a user *) + (* belongs to and, if that fails, if some group has the required identifier *) + let query_group_membership subject_identifier = + + (* 1. 
first we try to see if our subject identifier is a user id...*) + let sanitized_subject_id = String.escaped subject_identifier in + + (* we are expecting an id such as u0, g0, u123 etc *) + if String.length sanitized_subject_id < 2 then raise Not_found; + + (* here we remove the prefix u or g *) + let sanitized_subject_id = String.sub sanitized_subject_id 1 (String.length sanitized_subject_id-1) in + + match (String.get subject_identifier 0) with + | 'u' -> begin + (* looks up list of users*) + let subject_name = getent_namebyid "passwd" sanitized_subject_id in + + (* Not necessary to escape subject_name because we call execv in forkhelpers *) + (* Also, escaping will break unicode chars in usernames *) + with_cmd "/usr/bin/id" ["-G";subject_name] + (fun lines -> + (* id -G always returns at most one line in stdout *) + match lines with + | [] -> raise Not_found + | gidline::_ -> + let gids = Stdext.Xstringext.String.split ' ' gidline in + debug "Resolved %i group ids for subject %s (%s): %s" + (List.length gids) + subject_name + subject_identifier + (List.fold_left (fun p pp->if p="" then pp else p^","^pp) "" gids); + List.map (fun gid -> "g"^gid) gids + ) + end + + | 'g' -> begin + (* 2. if (1) fails, we try to see if our subject identifier is a group id...*) + (* in Unix, a group cannot contain other groups, so no need to go recursively *) + ("g"^(getent_idbyid "group" sanitized_subject_id))::[] + end + + | _ -> raise Not_found (* In addition, there are some event hooks that auth modules implement as follows: *) -(* unit on_enable(((string*string) list) config_params) - - Called internally by xapi _on each host_ when a client enables an external auth service for the - pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed - by the client as part of the corresponding XenAPI call. 
- On receiving this hook, the auth module should: - (i) do whatever it needs to do (if anything) to register with the external auth/directory - service [using the config params supplied to get access] - (ii) Write the config_params that it needs to store persistently in the XenServer metadata - into the Pool.external_auth_configuration field. [Note - the rationale for making the plugin - write the config params it needs long-term into the XenServer metadata itself is so it can - explicitly filter any one-time credentials [like AD username/password for example] that it - does not need long-term.] -*) -let on_enable config_params = - (* nothing to do in this unix plugin, we always have /etc/passwd and /etc/group *) - () - -(* unit on_disable() - - Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. - The hook will be called _before_ the Pool configuration fields relating to the external-auth - service are cleared (i.e. so you can access the config params you need from the pool metadata - within the body of the on_disable method) -*) -let on_disable config_params = - (* nothing to disable in this unix plugin, we should not disable /etc/passwd and /etc/group:) *) - () - -(* unit on_xapi_initialize(bool system_boot) - - Called internally by xapi whenever it starts up. The system_boot flag is true iff xapi is - starting for the first time after a host boot -*) -let on_xapi_initialize system_boot = - (* again, nothing to be initialized here in this unix plugin *) - () - -(* unit on_xapi_exit() - - Called internally when xapi is doing a clean exit. 
-*) -let on_xapi_exit () = - (* nothing to do here in this unix plugin *) - () - -(* Implement the single value required for the module signature *) -let methods = {Auth_signature.authenticate_username_password = authenticate_username_password; - Auth_signature.authenticate_ticket = authenticate_ticket; - Auth_signature.get_subject_identifier = get_subject_identifier; - Auth_signature.query_subject_information = query_subject_information; - Auth_signature.query_group_membership = query_group_membership; - Auth_signature.on_enable = on_enable; - Auth_signature.on_disable = on_disable; - Auth_signature.on_xapi_initialize = on_xapi_initialize; - Auth_signature.on_xapi_exit = on_xapi_exit} + (* unit on_enable(((string*string) list) config_params) + + Called internally by xapi _on each host_ when a client enables an external auth service for the + pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed + by the client as part of the corresponding XenAPI call. + On receiving this hook, the auth module should: + (i) do whatever it needs to do (if anything) to register with the external auth/directory + service [using the config params supplied to get access] + (ii) Write the config_params that it needs to store persistently in the XenServer metadata + into the Pool.external_auth_configuration field. [Note - the rationale for making the plugin + write the config params it needs long-term into the XenServer metadata itself is so it can + explicitly filter any one-time credentials [like AD username/password for example] that it + does not need long-term.] + *) + let on_enable config_params = + (* nothing to do in this unix plugin, we always have /etc/passwd and /etc/group *) + () + + (* unit on_disable() + + Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. + The hook will be called _before_ the Pool configuration fields relating to the external-auth + service are cleared (i.e. 
so you can access the config params you need from the pool metadata + within the body of the on_disable method) + *) + let on_disable config_params = + (* nothing to disable in this unix plugin, we should not disable /etc/passwd and /etc/group:) *) + () + + (* unit on_xapi_initialize(bool system_boot) + + Called internally by xapi whenever it starts up. The system_boot flag is true iff xapi is + starting for the first time after a host boot + *) + let on_xapi_initialize system_boot = + (* again, nothing to be initialized here in this unix plugin *) + () + + (* unit on_xapi_exit() + + Called internally when xapi is doing a clean exit. + *) + let on_xapi_exit () = + (* nothing to do here in this unix plugin *) + () + + (* Implement the single value required for the module signature *) + let methods = {Auth_signature.authenticate_username_password = authenticate_username_password; + Auth_signature.authenticate_ticket = authenticate_ticket; + Auth_signature.get_subject_identifier = get_subject_identifier; + Auth_signature.query_subject_information = query_subject_information; + Auth_signature.query_group_membership = query_group_membership; + Auth_signature.on_enable = on_enable; + Auth_signature.on_disable = on_disable; + Auth_signature.on_xapi_initialize = on_xapi_initialize; + Auth_signature.on_xapi_exit = on_xapi_exit} end diff --git a/ocaml/auth/extauth.ml b/ocaml/auth/extauth.ml index 21eb125fe8a..0138b8b4621 100644 --- a/ocaml/auth/extauth.ml +++ b/ocaml/auth/extauth.ml @@ -13,8 +13,8 @@ *) (** * @group Access Control - *) - +*) + open Db_actions open Auth_signature @@ -31,46 +31,46 @@ let auth_type_PAM = "PAM" module Ext_auth = struct -(* CP-781: Implement Ext_auth as a multiplexer for all the authentication plugins *) -(* Will call the dispatch to the appropriate xapi-auth-module's functions, - depending on the 'auth_type' field of this host *) + (* CP-781: Implement Ext_auth as a multiplexer for all the authentication plugins *) + (* Will call the dispatch to 
the appropriate xapi-auth-module's functions, + depending on the 'auth_type' field of this host *) (* this 'named dispatcher' should not be used in general by other xapi modules, only during *) (* xapi_host.enable_extauth, when we do not yet have access here to the global variable host.external_auth_type *) let nd auth_type = - debug "using external auth plugin %s" auth_type; - match auth_type with - | "" -> (* ext auth is disabled, no plugin available*) - begin - debug "External authentication is disabled."; - raise Extauth_is_disabled - end - (* our "local" authentication plugin *) - | "PAM" -> (*pam/nss unix services*) - Authx.AuthX.methods - (* the PBIS authentication plugin *) - | "AD" -> (*windows active directory*) - Extauth_plugin_ADpbis.AuthADlw.methods - (* if no other auth_type fits, then we don't know what to do *) - | _ as uat -> (*error*) - begin - debug "Unknown external authentication type: %s" uat; - raise (Unknown_extauth_type uat) - end + debug "using external auth plugin %s" auth_type; + match auth_type with + | "" -> (* ext auth is disabled, no plugin available*) + begin + debug "External authentication is disabled."; + raise Extauth_is_disabled + end + (* our "local" authentication plugin *) + | "PAM" -> (*pam/nss unix services*) + Authx.AuthX.methods + (* the PBIS authentication plugin *) + | "AD" -> (*windows active directory*) + Extauth_plugin_ADpbis.AuthADlw.methods + (* if no other auth_type fits, then we don't know what to do *) + | _ as uat -> (*error*) + begin + debug "Unknown external authentication type: %s" uat; + raise (Unknown_extauth_type uat) + end (* this is the generic dispatcher that should be used by any function in other xapi modules *) let d() = (* this function reads auth_type field for this host and returns appropriate methods record for each implemented xapi-auth-module plugin *) - let auth_type = - Server_helpers.exec_with_new_task "obtaining auth_type" - (fun __context -> - let host = Helpers.get_localhost ~__context 
in - Db.Host.get_external_auth_type ~__context ~self:host - ) - in - nd auth_type - + let auth_type = + Server_helpers.exec_with_new_task "obtaining auth_type" + (fun __context -> + let host = Helpers.get_localhost ~__context in + Db.Host.get_external_auth_type ~__context ~self:host + ) + in + nd auth_type + end (* some constants *) @@ -82,106 +82,106 @@ let event_name_before_extauth_disable = "before-extauth-disable" let event_name_after_extauth_enable = "after-extauth-enable" let event_name_after_roles_update = "after-roles-update" let get_event_params ~__context host = - let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - let service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in - [("auth_type",auth_type);("service_name",service_name)] + let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + let service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in + [("auth_type",auth_type);("service_name",service_name)] -(* allows extauth hook script to be called only under specific conditions *) +(* allows extauth hook script to be called only under specific conditions *) let can_execute_extauth_hook_script ~__context host event_name = - let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - (* if extauth is enabled, we call the hook-script for any event *) - ((auth_type<>"") || - (* otherwise, if extauth is disabled, we call the hook-script only when enabling extauth or initializing xapi *) - List.mem event_name [ - event_name_after_extauth_enable; - event_name_after_xapi_initialize; - ] - ) + let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + (* if extauth is enabled, we call the hook-script for any event *) + ((auth_type<>"") || + (* otherwise, if extauth is disabled, we call the hook-script only when enabling extauth or initializing xapi *) + List.mem event_name [ + event_name_after_extauth_enable; + event_name_after_xapi_initialize; + ] + ) 
(* this function should only be used directly by host.{enable,disable}_extauth *) (* use the generic call below to avoid concurrency problems between the script and host.{enable,disable}_extauth *) let call_extauth_hook_script_in_host_wrapper ~__context host event_name ~call_plugin_fn = - (* CP-709: call extauth-hook-script *) - (* Forkhelpers.execute_command_get_output hook-script "@PLUGINDIR@/extauth-hook" *) - (* fork a new thread and call new xapi.host.call-subject-add-hook-script method *) - (* see xapi_sync.ml *) - (* host.call-plugins scriptname (calls @PLUGINDIR@/scriptname*) - - if can_execute_extauth_hook_script ~__context host event_name - then begin - try - let result = call_plugin_fn () in - debug "Result of Extauth-hook: '%s'" result; - - (* extauth-hook can return three values: - "True" -> OK - "ERROR_0: rewrite_etc_pamd_ssh failed" - "ERROR_1: revert_etc_pamd_ssh failed" - *) - begin match result with - | "True" -> begin - (host,result) (* OK *) - end - | "ERROR_0: rewrite_etc_pamd_ssh failed" as errmsg -> begin - failwith errmsg - end - | "ERROR_1: revert_etc_pamd_ssh failed" as errmsg -> begin - failwith errmsg - end - | _ as errmsg -> begin (* unexpected result *) - failwith errmsg - end - end - with e -> - let msg = (ExnHelper.string_of_exn e) in - warn "Extauth-hook failed: exception: %s" msg; - raise e (* FAILED *) - end - else begin - debug "Extauth-hook event %s not called in this host because external authentication is disabled." 
event_name; - (host,"") (* hook script was not called, no result to return *) - end + (* CP-709: call extauth-hook-script *) + (* Forkhelpers.execute_command_get_output hook-script "@PLUGINDIR@/extauth-hook" *) + (* fork a new thread and call new xapi.host.call-subject-add-hook-script method *) + (* see xapi_sync.ml *) + (* host.call-plugins scriptname (calls @PLUGINDIR@/scriptname*) + + if can_execute_extauth_hook_script ~__context host event_name + then begin + try + let result = call_plugin_fn () in + debug "Result of Extauth-hook: '%s'" result; + + (* extauth-hook can return three values: + "True" -> OK + "ERROR_0: rewrite_etc_pamd_ssh failed" + "ERROR_1: revert_etc_pamd_ssh failed" + *) + begin match result with + | "True" -> begin + (host,result) (* OK *) + end + | "ERROR_0: rewrite_etc_pamd_ssh failed" as errmsg -> begin + failwith errmsg + end + | "ERROR_1: revert_etc_pamd_ssh failed" as errmsg -> begin + failwith errmsg + end + | _ as errmsg -> begin (* unexpected result *) + failwith errmsg + end + end + with e -> + let msg = (ExnHelper.string_of_exn e) in + warn "Extauth-hook failed: exception: %s" msg; + raise e (* FAILED *) + end + else begin + debug "Extauth-hook event %s not called in this host because external authentication is disabled." 
event_name; + (host,"") (* hook script was not called, no result to return *) + end (* this is the generic call to be used by anyone who wants to call the extauth-hook script *) let call_extauth_hook_script_in_host ~__context host event_name = - let event_params = get_event_params ~__context host in - let call_plugin_fn () = - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Client.Host.call_plugin rpc session_id host (* will call extauth plugin with mutex *) - extauth_hook_script_name (* script name in @PLUGINDIR@/ *) - event_name (* event name sent to script *) - event_params (* parameters sent to event name *) - ) - in - call_extauth_hook_script_in_host_wrapper ~__context host event_name ~call_plugin_fn - -type hook_script_result = - | Hook_Script_Success of string - | Hook_Script_Failure of string - + let event_params = get_event_params ~__context host in + let call_plugin_fn () = + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Client.Host.call_plugin rpc session_id host (* will call extauth plugin with mutex *) + extauth_hook_script_name (* script name in @PLUGINDIR@/ *) + event_name (* event name sent to script *) + event_params (* parameters sent to event name *) + ) + in + call_extauth_hook_script_in_host_wrapper ~__context host event_name ~call_plugin_fn + +type hook_script_result = + | Hook_Script_Success of string + | Hook_Script_Failure of string + (* calls extauth_hook_script in all hosts of the pool *) let call_extauth_hook_script_in_pool ~__context event_name = - (* CP-709: call extauth-hook-script *) - (* we call the script for each host in the pool, using a best-effort attempt to call *) - (* all hosts even if one fails *) - - let host = Helpers.get_localhost ~__context in - if can_execute_extauth_hook_script ~__context host event_name - then begin - let hosts = Db.Host.get_all ~__context in - let host_msgs = List.map (fun host -> - try - let (host,result) = call_extauth_hook_script_in_host ~__context 
host event_name in - (host,Hook_Script_Success result) - with e -> - (* we should not re-raise the exception here, since we want to go through as many hosts as possible *) - let msg = (ExnHelper.string_of_exn e) in - (host,Hook_Script_Failure msg) - ) hosts in - host_msgs - end - else begin - debug "Extauth-hook event %s not called in the pool because external authentication is disabled." event_name; - [] - end + (* CP-709: call extauth-hook-script *) + (* we call the script for each host in the pool, using a best-effort attempt to call *) + (* all hosts even if one fails *) + + let host = Helpers.get_localhost ~__context in + if can_execute_extauth_hook_script ~__context host event_name + then begin + let hosts = Db.Host.get_all ~__context in + let host_msgs = List.map (fun host -> + try + let (host,result) = call_extauth_hook_script_in_host ~__context host event_name in + (host,Hook_Script_Success result) + with e -> + (* we should not re-raise the exception here, since we want to go through as many hosts as possible *) + let msg = (ExnHelper.string_of_exn e) in + (host,Hook_Script_Failure msg) + ) hosts in + host_msgs + end + else begin + debug "Extauth-hook event %s not called in the pool because external authentication is disabled." event_name; + [] + end diff --git a/ocaml/auth/extauth_plugin_ADpbis.ml b/ocaml/auth/extauth_plugin_ADpbis.ml index efdedb052b1..6e30ed70ed5 100644 --- a/ocaml/auth/extauth_plugin_ADpbis.ml +++ b/ocaml/auth/extauth_plugin_ADpbis.ml @@ -13,7 +13,7 @@ *) (** * @group Access Control - *) +*) module D = Debug.Make(struct let name="extauth_plugin_ADpbis" end) open D @@ -28,59 +28,59 @@ struct * *) -let user_friendly_error_msg = "The Active Directory Plug-in could not complete the command. Additional information in the logs." + let user_friendly_error_msg = "The Active Directory Plug-in could not complete the command. Additional information in the logs." 
-open Stdext.Xstringext + open Stdext.Xstringext -let splitlines s = String.split_f (fun c -> c = '\n') (String.replace "#012" "\n" s) + let splitlines s = String.split_f (fun c -> c = '\n') (String.replace "#012" "\n" s) -let rec string_trim s = - let l = String.length s in - if l = 0 then - s - else if s.[0] = ' ' || s.[0] = '\t' || s.[0] = '\n' || s.[0] = '\r' then - string_trim (String.sub s 1 (l-1)) - else if s.[l-1] = ' ' || s.[l-1] = '\t' || s.[l-1] = '\n' || s.[l-1] = '\r' then - string_trim (String.sub s 0 (l-1)) - else - s + let rec string_trim s = + let l = String.length s in + if l = 0 then + s + else if s.[0] = ' ' || s.[0] = '\t' || s.[0] = '\n' || s.[0] = '\r' then + string_trim (String.sub s 1 (l-1)) + else if s.[l-1] = ' ' || s.[l-1] = '\t' || s.[l-1] = '\n' || s.[l-1] = '\r' then + string_trim (String.sub s 0 (l-1)) + else + s -let pbis_common_with_password (password:string) (pbis_cmd:string) (pbis_args:string list) = + let pbis_common_with_password (password:string) (pbis_cmd:string) (pbis_args:string list) = let debug_cmd = pbis_cmd ^ " " ^ (List.fold_left (fun p pp -> p^" "^pp) " " pbis_args) in try - debug "execute %s" debug_cmd; - let env = [| "PASSWORD=" ^ password |] in - let _ = Forkhelpers.execute_command_get_output ~env pbis_cmd pbis_args in - [] + debug "execute %s" debug_cmd; + let env = [| "PASSWORD=" ^ password |] in + let _ = Forkhelpers.execute_command_get_output ~env pbis_cmd pbis_args in + [] with | Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n)-> - error "execute %s exited with code %d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; - let lines = List.filter (fun l-> String.length l > 0) (splitlines (stdout ^ stderr)) in - let errmsg = List.hd (List.rev lines) in - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) + error "execute %s exited with code %d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; + let lines = List.filter (fun l-> String.length l > 0) 
(splitlines (stdout ^ stderr)) in + let errmsg = List.hd (List.rev lines) in + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) | e -> - error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) + error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) -let pbis_config (name:string) (value:string) = + let pbis_config (name:string) (value:string) = let pbis_cmd = "/opt/pbis/bin/config" in let pbis_args = [name ; value] in let debug_cmd = pbis_cmd ^ " " ^ name ^ " " ^ value in try - debug "execute %s" debug_cmd; - let _ = Forkhelpers.execute_command_get_output pbis_cmd pbis_args in - () + debug "execute %s" debug_cmd; + let _ = Forkhelpers.execute_command_get_output pbis_cmd pbis_args in + () with | Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n)-> - error "execute %s exited with code %d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; - let lines = List.filter (fun l-> String.length l > 0) (splitlines (stdout ^ stderr)) in - let errmsg = List.hd (List.rev lines) in - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) + error "execute %s exited with code %d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; + let lines = List.filter (fun l-> String.length l > 0) (splitlines (stdout ^ stderr)) in + let errmsg = List.hd (List.rev lines) in + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) | e -> - error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) + error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) -let 
ensure_pbis_configured () = + let ensure_pbis_configured () = pbis_config "SpaceReplacement" "+"; pbis_config "CreateHomeDir" "false"; pbis_config "SyncSystemTime" "false"; @@ -88,7 +88,7 @@ let ensure_pbis_configured () = pbis_config "CacheEntryExpiry" "300"; () -let pbis_common ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:string list) = + let pbis_common ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:string list) = let debug_cmd = pbis_cmd ^ " " ^ (List.fold_left (fun p pp -> p^" "^pp) " " pbis_args) in let debug_cmd = if String.has_substr debug_cmd "--password" then "(omitted for security)" else debug_cmd in @@ -117,65 +117,65 @@ let pbis_common ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:str let exited_code = ref 0 in let output = ref "" in finally_finalize (fun () -> - let _ = try - debug "execute %s" debug_cmd; - (* creates pipes between xapi and pbis process *) - let (in_readme, in_writeme) = Unix.pipe () in - fds_to_close := in_readme :: in_writeme :: !fds_to_close; - let out_tmpfile = Filename.temp_file "pbis" ".out" in - files_to_unlink := out_tmpfile :: !files_to_unlink; - let err_tmpfile = Filename.temp_file "pbis" ".err" in - files_to_unlink := err_tmpfile :: !files_to_unlink; - let out_writeme = Unix.openfile out_tmpfile [ Unix.O_WRONLY] 0o0 in - fds_to_close := out_writeme :: !fds_to_close; - let err_writeme = Unix.openfile err_tmpfile [ Unix.O_WRONLY] 0o0 in - fds_to_close := err_writeme :: !fds_to_close; - - let pid = Forkhelpers.safe_close_and_exec (Some in_readme) (Some out_writeme) (Some err_writeme) [] pbis_cmd pbis_args in - Stdext.Pervasiveext.finally - (fun () -> - debug "Created process pid %s for cmd %s" (Forkhelpers.string_of_pidty pid) debug_cmd; - (* Insert this delay to reproduce the cannot write to stdin bug: - Thread.delay 5.; *) - (* WARNING: we don't close the in_readme because otherwise in the case where the pbis - binary doesn't expect any input there is a race between it finishing 
(and closing the last - reference to the in_readme) and us attempting to write to in_writeme. If pbis wins the - race then our write will fail with EPIPE (Unix.error 31 in ocamlese). If we keep a reference - to in_readme then our write of "\n" will succeed. - - An alternative fix would be to not write anything when stdin_string = "" *) - - (* push stdin_string to recently created process' STDIN *) - begin - (* usually, STDIN contains some sensitive data such as passwords that we do not want showing up in ps *) - (* or in the debug log via debug_cmd *) - try - let stdin_string = stdin_string ^ "\n" in (*HACK:without \n, the pbis scripts don't return!*) - let (_: int) = Unix.write in_writeme stdin_string 0 (String.length stdin_string) in - close_fd in_writeme; (* we need to close stdin, otherwise the unix cmd waits forever *) - with e -> begin - (* in_string is usually the password or other sensitive param, so never write it to debug or exn *) - debug "Error writing to stdin for cmd %s: %s" debug_cmd (ExnHelper.string_of_exn e); - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,ExnHelper.string_of_exn e)) - end - end; - ) - (fun () -> - match Forkhelpers.waitpid pid with - | (_, Unix.WEXITED n) -> - exited_code := n; - output := (Stdext.Unixext.string_of_file out_tmpfile) ^ (Stdext.Unixext.string_of_file err_tmpfile) - | _ -> - error "PBIS %s exit with WSTOPPED or WSIGNALED" debug_cmd; - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) - ) - with e -> - error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) - in + let _ = try + debug "execute %s" debug_cmd; + (* creates pipes between xapi and pbis process *) + let (in_readme, in_writeme) = Unix.pipe () in + fds_to_close := in_readme :: in_writeme :: !fds_to_close; + let out_tmpfile = Filename.temp_file "pbis" ".out" in + files_to_unlink := 
out_tmpfile :: !files_to_unlink; + let err_tmpfile = Filename.temp_file "pbis" ".err" in + files_to_unlink := err_tmpfile :: !files_to_unlink; + let out_writeme = Unix.openfile out_tmpfile [ Unix.O_WRONLY] 0o0 in + fds_to_close := out_writeme :: !fds_to_close; + let err_writeme = Unix.openfile err_tmpfile [ Unix.O_WRONLY] 0o0 in + fds_to_close := err_writeme :: !fds_to_close; + + let pid = Forkhelpers.safe_close_and_exec (Some in_readme) (Some out_writeme) (Some err_writeme) [] pbis_cmd pbis_args in + Stdext.Pervasiveext.finally + (fun () -> + debug "Created process pid %s for cmd %s" (Forkhelpers.string_of_pidty pid) debug_cmd; + (* Insert this delay to reproduce the cannot write to stdin bug: + Thread.delay 5.; *) + (* WARNING: we don't close the in_readme because otherwise in the case where the pbis + binary doesn't expect any input there is a race between it finishing (and closing the last + reference to the in_readme) and us attempting to write to in_writeme. If pbis wins the + race then our write will fail with EPIPE (Unix.error 31 in ocamlese). If we keep a reference + to in_readme then our write of "\n" will succeed. 
+ + An alternative fix would be to not write anything when stdin_string = "" *) + + (* push stdin_string to recently created process' STDIN *) + begin + (* usually, STDIN contains some sensitive data such as passwords that we do not want showing up in ps *) + (* or in the debug log via debug_cmd *) + try + let stdin_string = stdin_string ^ "\n" in (*HACK:without \n, the pbis scripts don't return!*) + let (_: int) = Unix.write in_writeme stdin_string 0 (String.length stdin_string) in + close_fd in_writeme; (* we need to close stdin, otherwise the unix cmd waits forever *) + with e -> begin + (* in_string is usually the password or other sensitive param, so never write it to debug or exn *) + debug "Error writing to stdin for cmd %s: %s" debug_cmd (ExnHelper.string_of_exn e); + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,ExnHelper.string_of_exn e)) + end + end; + ) + (fun () -> + match Forkhelpers.waitpid pid with + | (_, Unix.WEXITED n) -> + exited_code := n; + output := (Stdext.Unixext.string_of_file out_tmpfile) ^ (Stdext.Unixext.string_of_file err_tmpfile) + | _ -> + error "PBIS %s exit with WSTOPPED or WSIGNALED" debug_cmd; + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) + ) + with e -> + error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) + in - if !exited_code <> 0 then - begin + if !exited_code <> 0 then + begin error "execute '%s': exit_code=[%d] output=[%s]" debug_cmd !exited_code (String.replace "\n" ";" !output); let split_to_words = fun s -> String.split_f (fun c -> c = '(' || c = ')' || c = '.' 
|| c = ' ') s in let revlines = List.rev (List.filter (fun l-> String.length l > 0) (splitlines !output)) in @@ -184,135 +184,135 @@ let pbis_common ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:str let errcode = List.hd (List.filter (fun w -> String.startswith "LW_ERROR_" w) (split_to_words errcodeline)) in debug "Pbis raised an error for cmd %s: (%s) %s" debug_cmd errcode errmsg; match errcode with - | "LW_ERROR_INVALID_GROUP_INFO_LEVEL" - -> raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errcode)) (* For pbis_get_all_byid *) - | "LW_ERROR_NO_SUCH_USER" - | "LW_ERROR_NO_SUCH_GROUP" - | "LW_ERROR_NO_SUCH_OBJECT" - -> raise Not_found (* Subject_cannot_be_resolved *) - | "LW_ERROR_KRB5_CALL_FAILED" - | "LW_ERROR_PASSWORD_MISMATCH" - | "LW_ERROR_ACCOUNT_DISABLED" - | "LW_ERROR_NOT_HANDLED" - -> raise (Auth_signature.Auth_failure errmsg) - | "LW_ERROR_INVALID_OU" - -> raise (Auth_signature.Auth_service_error (Auth_signature.E_INVALID_OU, errmsg)) - | "LW_ERROR_INVALID_DOMAIN" - -> raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) - | "LW_ERROR_LSA_SERVER_UNREACHABLE" - | _ -> - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,(Printf.sprintf "(%s) %s" errcode errmsg))) - end - else - debug "execute %s: output length=[%d]" debug_cmd (String.length !output); + | "LW_ERROR_INVALID_GROUP_INFO_LEVEL" + -> raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errcode)) (* For pbis_get_all_byid *) + | "LW_ERROR_NO_SUCH_USER" + | "LW_ERROR_NO_SUCH_GROUP" + | "LW_ERROR_NO_SUCH_OBJECT" + -> raise Not_found (* Subject_cannot_be_resolved *) + | "LW_ERROR_KRB5_CALL_FAILED" + | "LW_ERROR_PASSWORD_MISMATCH" + | "LW_ERROR_ACCOUNT_DISABLED" + | "LW_ERROR_NOT_HANDLED" + -> raise (Auth_signature.Auth_failure errmsg) + | "LW_ERROR_INVALID_OU" + -> raise (Auth_signature.Auth_service_error (Auth_signature.E_INVALID_OU, errmsg)) + | "LW_ERROR_INVALID_DOMAIN" + -> raise 
(Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) + | "LW_ERROR_LSA_SERVER_UNREACHABLE" + | _ -> + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,(Printf.sprintf "(%s) %s" errcode errmsg))) + end + else + debug "execute %s: output length=[%d]" debug_cmd (String.length !output); let lines = List.filter (fun l-> String.length l > 0) (splitlines !output) in let parse_line = (fun (acc, currkey) line -> - let slices = String.split ~limit:2 ':' line in - debug "parse %s: currkey=[%s] line=[%s]" debug_cmd currkey line; - if List.length slices > 1 then - begin - let key = string_trim (List.hd slices) in - let value = string_trim (List.nth slices 1) in - debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; - if String.length value > 0 then - (acc @ [(key, value)], "") - else - (acc, key) - end - else - let key = currkey in - let value = string_trim line in - debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; - (acc @ [(key, value)], currkey) - ) in + let slices = String.split ~limit:2 ':' line in + debug "parse %s: currkey=[%s] line=[%s]" debug_cmd currkey line; + if List.length slices > 1 then + begin + let key = string_trim (List.hd slices) in + let value = string_trim (List.nth slices 1) in + debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; + if String.length value > 0 then + (acc @ [(key, value)], "") + else + (acc, key) + end + else + let key = currkey in + let value = string_trim line in + debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; + (acc @ [(key, value)], currkey) + ) in let attrs, _ = List.fold_left parse_line ([], "") lines in attrs - ) + ) -(* assoc list for caching pbis_common results, - item value is ((stdin_string, pbis_cmd, pbis_args), (unix_time, pbis_common_result)) -*) -let cache_of_pbis_common : ((string * string * (string list)) * (float * ((string * string) list))) list ref = ref [] -let 
cache_of_pbis_common_m = Mutex.create () + (* assoc list for caching pbis_common results, + item value is ((stdin_string, pbis_cmd, pbis_args), (unix_time, pbis_common_result)) + *) + let cache_of_pbis_common : ((string * string * (string list)) * (float * ((string * string) list))) list ref = ref [] + let cache_of_pbis_common_m = Mutex.create () -let pbis_common_with_cache ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:string list) = + let pbis_common_with_cache ?stdin_string:(stdin_string="") (pbis_cmd:string) (pbis_args:string list) = let expired = 120.0 in let now = Unix.time () in let cache_key = (stdin_string, pbis_cmd, pbis_args) in - let f = fun () -> - cache_of_pbis_common := List.filter (fun (_, (ts, _)) -> now -. ts < expired) !cache_of_pbis_common; - try - let _, result = List.assoc cache_key !cache_of_pbis_common in - debug "pbis_common_with_cache hit \"%s\" cache." pbis_cmd; - result - with Not_found -> - let result = pbis_common ~stdin_string:stdin_string pbis_cmd pbis_args in - cache_of_pbis_common := !cache_of_pbis_common @ [(cache_key, (Unix.time (), result))]; - result + let f = fun () -> + cache_of_pbis_common := List.filter (fun (_, (ts, _)) -> now -. ts < expired) !cache_of_pbis_common; + try + let _, result = List.assoc cache_key !cache_of_pbis_common in + debug "pbis_common_with_cache hit \"%s\" cache." 
pbis_cmd; + result + with Not_found -> + let result = pbis_common ~stdin_string:stdin_string pbis_cmd pbis_args in + cache_of_pbis_common := !cache_of_pbis_common @ [(cache_key, (Unix.time (), result))]; + result in Stdext.Threadext.Mutex.execute cache_of_pbis_common_m f -let get_joined_domain_name () = + let get_joined_domain_name () = Server_helpers.exec_with_new_task "obtaining joined-domain name" - (fun __context -> - let host = Helpers.get_localhost ~__context in - (* the service_name always contains the domain name provided during domain-join *) - Db.Host.get_external_auth_service_name ~__context ~self:host; - ) - -(* CP-842: when resolving AD usernames, make joined-domain prefix optional *) -let get_full_subject_name ?(use_nt_format=true) subject_name = (* CA-27744: always use NT-style names by default *) + (fun __context -> + let host = Helpers.get_localhost ~__context in + (* the service_name always contains the domain name provided during domain-join *) + Db.Host.get_external_auth_service_name ~__context ~self:host; + ) + + (* CP-842: when resolving AD usernames, make joined-domain prefix optional *) + let get_full_subject_name ?(use_nt_format=true) subject_name = (* CA-27744: always use NT-style names by default *) try - (* tests if the UPN account name separator @ is present in subject name *) - ignore(String.index subject_name '@'); - (* we only reach this point if the separator @ is present in subject_name *) - (* nothing to do, we assume that subject_name already contains the domain name after @ *) - subject_name + (* tests if the UPN account name separator @ is present in subject name *) + ignore(String.index subject_name '@'); + (* we only reach this point if the separator @ is present in subject_name *) + (* nothing to do, we assume that subject_name already contains the domain name after @ *) + subject_name with Not_found -> begin (* if no UPN username separator @ was found *) try - (* tests if the NT account name separator \ is present in 
subject name *) - ignore(String.index subject_name '\\'); - (* we only reach this point if the separator \ is present in subject_name *) - (* nothing to do, we assume that subject_name already contains the domain name before \ *) - subject_name + (* tests if the NT account name separator \ is present in subject name *) + ignore(String.index subject_name '\\'); + (* we only reach this point if the separator \ is present in subject_name *) + (* nothing to do, we assume that subject_name already contains the domain name before \ *) + subject_name with Not_found -> begin (* if neither the UPN separator @ nor the NT username separator \ was found *) if use_nt_format then begin (* the default: NT names is unique, whereas UPN ones are not (CA-27744) *) - (* we prepend the joined-domain name to the subjectname as an NT name: \ *) - (get_joined_domain_name ()) ^ "\\" ^ subject_name - (* obs: (1) pbis accepts a fully qualified domain name with both formats and *) - (* (2) some pbis commands accept only the NT-format, such as find-group-by-name *) + (* we prepend the joined-domain name to the subjectname as an NT name: \ *) + (get_joined_domain_name ()) ^ "\\" ^ subject_name + (* obs: (1) pbis accepts a fully qualified domain name with both formats and *) + (* (2) some pbis commands accept only the NT-format, such as find-group-by-name *) end else begin (* UPN format not the default format (CA-27744) *) - (* we append the joined-domain name to the subjectname as a UPN name: @ *) - subject_name ^"@"^(get_joined_domain_name ()) + (* we append the joined-domain name to the subjectname as a UPN name: @ *) + subject_name ^"@"^(get_joined_domain_name ()) end - end - end + end + end -(* Converts from UPN format (user@domain.com) to legacy NT format (domain.com\user) *) -(* This function is a workaround to use find-group-by-name, which requires nt-format names) *) -(* For anything else, use the original UPN name *) -let convert_upn_to_nt_username subject_name = + (* Converts from UPN 
format (user@domain.com) to legacy NT format (domain.com\user) *) + (* This function is a workaround to use find-group-by-name, which requires nt-format names) *) + (* For anything else, use the original UPN name *) + let convert_upn_to_nt_username subject_name = try - (* test if the UPN account name separator @ is present in subject name *) - let i = String.index subject_name '@' in - (* we only reach this point if the separator @ is present in subject_name *) - (* when @ is present, we need to convert the UPN name to NT format *) - let user = String.sub subject_name 0 i in - let domain = String.sub subject_name (i+1) ((String.length subject_name) - i - 1) in - domain ^ "\\" ^ user + (* test if the UPN account name separator @ is present in subject name *) + let i = String.index subject_name '@' in + (* we only reach this point if the separator @ is present in subject_name *) + (* when @ is present, we need to convert the UPN name to NT format *) + let user = String.sub subject_name 0 i in + let domain = String.sub subject_name (i+1) ((String.length subject_name) - i - 1) in + domain ^ "\\" ^ user with Not_found -> begin (* if no UPN username separator @ was found *) (* nothing to do in this case *) subject_name - end + end -let pbis_get_all_byid subject_id = + let pbis_get_all_byid subject_id = try - pbis_common_with_cache "/opt/pbis/bin/find-by-sid" ["--level";"2";subject_id] + pbis_common_with_cache "/opt/pbis/bin/find-by-sid" ["--level";"2";subject_id] with Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, "LW_ERROR_INVALID_GROUP_INFO_LEVEL") -> - pbis_common_with_cache "/opt/pbis/bin/find-by-sid" ["--level";"1";subject_id] + pbis_common_with_cache "/opt/pbis/bin/find-by-sid" ["--level";"1";subject_id] -let pbis_get_group_sids_byname _subject_name = + let pbis_get_group_sids_byname _subject_name = let subject_name = get_full_subject_name _subject_name in (* append domain if necessary *) let subject_attrs = pbis_common_with_cache 
"/opt/pbis/bin/list-groups-for-user" ["--show-sid";subject_name] in @@ -324,69 +324,69 @@ let pbis_get_group_sids_byname _subject_name = [("Number of groups found for user 'test@testdomain'", "2"), ("", line1), ("", line2) ... ("", lineN)] *) List.map (fun (n,v)-> - let v = String.replace ")" "|" v in - let v = String.replace "sid =" "|" v in - let vs = String.split_f (fun c -> c = '|') v in - let sid = string_trim (List.nth vs 1) in - debug "pbis_get_group_sids_byname %s get sid=[%s]" _subject_name sid; - sid - ) (List.filter (fun (n,v)->n="") subject_attrs) + let v = String.replace ")" "|" v in + let v = String.replace "sid =" "|" v in + let vs = String.split_f (fun c -> c = '|') v in + let sid = string_trim (List.nth vs 1) in + debug "pbis_get_group_sids_byname %s get sid=[%s]" _subject_name sid; + sid + ) (List.filter (fun (n,v)->n="") subject_attrs) -let pbis_get_sid_bygid gid = + let pbis_get_sid_bygid gid = let subject_attrs = pbis_common "/opt/pbis/bin/find-group-by-id" ["--level";"1";gid] in (* find-group-by-id returns several lines. We only need the SID *) if List.mem_assoc "SID" subject_attrs then List.assoc "SID" subject_attrs (* OK, return SID *) else begin (*no SID value returned*) - (* this should not have happend, pbis didn't return an SID field!! *) - let msg = (Printf.sprintf "Pbis didn't return an SID field for gid %s" gid) in - debug "Error pbis_get_sid_bygid for gid %s: %s" gid msg; - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,msg)) (* general Pbis error *) + (* this should not have happend, pbis didn't return an SID field!! 
*) + let msg = (Printf.sprintf "Pbis didn't return an SID field for gid %s" gid) in + debug "Error pbis_get_sid_bygid for gid %s: %s" gid msg; + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,msg)) (* general Pbis error *) end -let pbis_get_sid_byname _subject_name cmd = + let pbis_get_sid_byname _subject_name cmd = let subject_name = get_full_subject_name _subject_name in (* append domain if necessary *) let subject_attrs = pbis_common cmd ["--level";"1";subject_name] in (* find-user-by-name returns several lines. We ony need the SID *) if List.mem_assoc "SID" subject_attrs then List.assoc "SID" subject_attrs (* OK, return SID *) else begin (*no SID value returned*) - (* this should not have happend, pbis didn't return an SID field!! *) - let msg = (Printf.sprintf "Pbis didn't return an SID field for user %s" subject_name) in - debug "Error pbis_get_sid_byname for subject name %s: %s" subject_name msg; - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,msg)) (* general Pbis error *) + (* this should not have happend, pbis didn't return an SID field!! *) + let msg = (Printf.sprintf "Pbis didn't return an SID field for user %s" subject_name) in + debug "Error pbis_get_sid_byname for subject name %s: %s" subject_name msg; + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,msg)) (* general Pbis error *) end -(* subject_id get_subject_identifier(string subject_name) + (* subject_id get_subject_identifier(string subject_name) - Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- - see Access Control wiki page); and resolves it to a subject_id against the external - auth/directory service. - Raises Not_found (*Subject_cannot_be_resolved*) if authentication is not succesful. 
-*) -let get_subject_identifier _subject_name = + Takes a subject_name (as may be entered into the XenCenter UI when defining subjects -- + see Access Control wiki page); and resolves it to a subject_id against the external + auth/directory service. + Raises Not_found (*Subject_cannot_be_resolved*) if authentication is not succesful. + *) + let get_subject_identifier _subject_name = try - (* looks up list of users*) - let subject_name = get_full_subject_name _subject_name in (* append domain if necessary *) - pbis_get_sid_byname subject_name "/opt/pbis/bin/find-user-by-name" + (* looks up list of users*) + let subject_name = get_full_subject_name _subject_name in (* append domain if necessary *) + pbis_get_sid_byname subject_name "/opt/pbis/bin/find-user-by-name" with _ -> - (* append domain if necessary, find-group-by-name only accepts nt-format names *) - let subject_name = get_full_subject_name ~use_nt_format:true (convert_upn_to_nt_username _subject_name) in - (* looks up list of groups*) - pbis_get_sid_byname subject_name "/opt/pbis/bin/find-group-by-name" - -(* subject_id Authenticate_username_password(string username, string password) - - Takes a username and password, and tries to authenticate against an already configured - auth service (see XenAPI requirements Wiki page for details of how auth service configuration - takes place and the appropriate values are stored within the XenServer Metadata). - If authentication is successful then a subject_id is returned representing the account - corresponding to the supplied credentials (where the subject_id is in a namespace managed by - the auth module/service itself -- e.g. maybe a SID or something in the AD case). 
- Raises auth_failure if authentication is not successful -*) - -let authenticate_username_password username password = + (* append domain if necessary, find-group-by-name only accepts nt-format names *) + let subject_name = get_full_subject_name ~use_nt_format:true (convert_upn_to_nt_username _subject_name) in + (* looks up list of groups*) + pbis_get_sid_byname subject_name "/opt/pbis/bin/find-group-by-name" + + (* subject_id Authenticate_username_password(string username, string password) + + Takes a username and password, and tries to authenticate against an already configured + auth service (see XenAPI requirements Wiki page for details of how auth service configuration + takes place and the appropriate values are stored within the XenServer Metadata). + If authentication is successful then a subject_id is returned representing the account + corresponding to the supplied credentials (where the subject_id is in a namespace managed by + the auth module/service itself -- e.g. maybe a SID or something in the AD case). + Raises auth_failure if authentication is not successful + *) + + let authenticate_username_password username password = (* first, we try to authenticated user against our external user database *) (* pbis_common will raise an Auth_failure if external authentication fails *) let user = List.hd (List.rev (String.split_f (fun c -> c = '\\') username)) in @@ -397,342 +397,342 @@ let authenticate_username_password username password = get_subject_identifier (get_full_subject_name username) -(* subject_id Authenticate_ticket(string ticket) + (* subject_id Authenticate_ticket(string ticket) - As above but uses a ticket as credentials (i.e. for single sign-on) -*) - (* not implemented now, not needed for our tests, only for a *) - (* future single sign-on feature *) -let authenticate_ticket tgt = + As above but uses a ticket as credentials (i.e. 
for single sign-on) + *) + (* not implemented now, not needed for our tests, only for a *) + (* future single sign-on feature *) + let authenticate_ticket tgt = failwith "extauth_plugin authenticate_ticket not implemented" -(* ((string*string) list) query_subject_information(string subject_identifier) + (* ((string*string) list) query_subject_information(string subject_identifier) - Takes a subject_identifier and returns the user record from the directory service as - key/value pairs. In the returned string*string map, there _must_ be a key called - subject_name that refers to the name of the account (e.g. the user or group name as may - be displayed in XenCenter). There is no other requirements to include fields from the user - record -- initially qI'd imagine that we wouldn't bother adding anything else here, but - it's a string*string list anyway for possible future expansion. - Raises Not_found (*Subject_cannot_be_resolved*) if subject_id cannot be resolved by external auth service -*) -let query_subject_information subject_identifier = + Takes a subject_identifier and returns the user record from the directory service as + key/value pairs. In the returned string*string map, there _must_ be a key called + subject_name that refers to the name of the account (e.g. the user or group name as may + be displayed in XenCenter). There is no other requirements to include fields from the user + record -- initially qI'd imagine that we wouldn't bother adding anything else here, but + it's a string*string list anyway for possible future expansion. 
+ Raises Not_found (*Subject_cannot_be_resolved*) if subject_id cannot be resolved by external auth service + *) + let query_subject_information subject_identifier = let unmap_lw_space_chars lwname = - let defensive_copy = String.copy lwname in - (* CA-29006: map chars in names back to original space chars in windows-names *) - (* we use + as the pbis space-replacement because it's an invalid NT-username char in windows *) - (* the space-replacement char used by pbis is defined at /etc/pbis/lsassd.conf *) - let current_lw_space_replacement = '+' in - for i = 0 to String.length defensive_copy - 1 - do - if defensive_copy.[i] = current_lw_space_replacement then defensive_copy.[i] <- ' ' - done; - defensive_copy + let defensive_copy = String.copy lwname in + (* CA-29006: map chars in names back to original space chars in windows-names *) + (* we use + as the pbis space-replacement because it's an invalid NT-username char in windows *) + (* the space-replacement char used by pbis is defined at /etc/pbis/lsassd.conf *) + let current_lw_space_replacement = '+' in + for i = 0 to String.length defensive_copy - 1 + do + if defensive_copy.[i] = current_lw_space_replacement then defensive_copy.[i] <- ' ' + done; + defensive_copy in let get_value name ls = if List.mem_assoc name ls then List.assoc name ls else "" in let infolist = pbis_get_all_byid subject_identifier in let subject_is_group = (get_value "Uid" infolist)="" in if subject_is_group then (* subject is group *) - (* in this case, a few info fields are not available: UPN, Uid, Gecos, Account {disabled,expired,locked}, Password expired *) - [ ("subject-name", unmap_lw_space_chars (get_value "Name" infolist)); - ("subject-gid", get_value "Gid" infolist); - ("subject-sid", get_value "SID" infolist); - ("subject-is-group", "true"); - (*(* comma-separated list of subjects that are contained in this subject *) + (* in this case, a few info fields are not available: UPN, Uid, Gecos, Account {disabled,expired,locked}, 
Password expired *) + [ ("subject-name", unmap_lw_space_chars (get_value "Name" infolist)); + ("subject-gid", get_value "Gid" infolist); + ("subject-sid", get_value "SID" infolist); + ("subject-is-group", "true"); + (*(* comma-separated list of subjects that are contained in this subject *) ("contains-byname", List.fold_left (fun (n,v) m ->m^","^v) "" (List.filter (fun (n,v)->n="Members") infolist));*) - ] + ] else (* subject is user *) - let subject_name = unmap_lw_space_chars (get_value "Name" infolist) in - let subject_gecos = get_value "Gecos" infolist in - [ ("subject-name", subject_name); - ("subject-upn", get_value "UPN" infolist); - ("subject-uid", get_value "Uid" infolist); - ("subject-gid", get_value "Gid" infolist); - ("subject-sid", get_value "SID" infolist); - ("subject-gecos", subject_gecos); - ("subject-displayname", if (subject_gecos="" || subject_gecos="") then subject_name else subject_gecos); - (*("subject-homedir", get_value "Home dir" infolist);*) - (*("subject-shell", get_value "Shell" infolist);*) - ("subject-is-group", "false"); - ("subject-account-disabled", get_value "Account disabled (or locked)" infolist); - ("subject-account-expired", get_value "Account Expired" infolist); - ("subject-account-locked", get_value "Account disabled (or locked)" infolist); - ("subject-password-expired", get_value "Password Expired" infolist); - ] - - -(* (string list) query_group_membership(string subject_identifier) - - Takes a subject_identifier and returns its group membership (i.e. a list of subject - identifiers of the groups that the subject passed in belongs to). 
The set of groups returned - _must_ be transitively closed wrt the is_member_of relation if the external directory service - supports nested groups (as AD does for example) -*) -let query_group_membership subject_identifier = + let subject_name = unmap_lw_space_chars (get_value "Name" infolist) in + let subject_gecos = get_value "Gecos" infolist in + [ ("subject-name", subject_name); + ("subject-upn", get_value "UPN" infolist); + ("subject-uid", get_value "Uid" infolist); + ("subject-gid", get_value "Gid" infolist); + ("subject-sid", get_value "SID" infolist); + ("subject-gecos", subject_gecos); + ("subject-displayname", if (subject_gecos="" || subject_gecos="") then subject_name else subject_gecos); + (*("subject-homedir", get_value "Home dir" infolist);*) + (*("subject-shell", get_value "Shell" infolist);*) + ("subject-is-group", "false"); + ("subject-account-disabled", get_value "Account disabled (or locked)" infolist); + ("subject-account-expired", get_value "Account Expired" infolist); + ("subject-account-locked", get_value "Account disabled (or locked)" infolist); + ("subject-password-expired", get_value "Password Expired" infolist); + ] + + + (* (string list) query_group_membership(string subject_identifier) + + Takes a subject_identifier and returns its group membership (i.e. a list of subject + identifiers of the groups that the subject passed in belongs to). 
The set of groups returned + _must_ be transitively closed wrt the is_member_of relation if the external directory service + supports nested groups (as AD does for example) + *) + let query_group_membership subject_identifier = let subject_info = query_subject_information subject_identifier in if (List.assoc "subject-is-group" subject_info)="true" (* this field is always present *) then (* subject is a group, so get_group_sids_byname will not work because pbis's list-groups *) - (* doesnt work if a group name is given as input *) - (* FIXME: default action for groups until workaround is found: return an empty list of membership groups *) - [] + (* doesnt work if a group name is given as input *) + (* FIXME: default action for groups until workaround is found: return an empty list of membership groups *) + [] else (* subject is a user, list-groups and therefore get_group_sids_byname work fine *) - let subject_name = List.assoc "subject-name" subject_info in (* CA-27744: always use NT-style names *) + let subject_name = List.assoc "subject-name" subject_info in (* CA-27744: always use NT-style names *) - let subject_sid_membership_list = pbis_get_group_sids_byname subject_name in - debug "Resolved %i group sids for subject %s (%s): %s" + let subject_sid_membership_list = pbis_get_group_sids_byname subject_name in + debug "Resolved %i group sids for subject %s (%s): %s" (List.length subject_sid_membership_list) subject_name subject_identifier (List.fold_left (fun p pp->if p="" then pp else p^","^pp) "" subject_sid_membership_list); - subject_sid_membership_list + subject_sid_membership_list (* In addition, there are some event hooks that auth modules implement as follows: *) -let is_pbis_server_available max = + let is_pbis_server_available max = let rec test i = (* let's test this many times *) - if i > max then false (* we give up *) - else begin (* let's test *) - (try - (* (1) we _need_ to use a username contained in our domain, otherwise test (2) doesn't work *) 
- (* Microsoft KB/Q243330 article provides the KRBTGT account as a well-known built-in SID in AD *) - (* Microsoft KB/Q229909 article says that KRBTGT account cannot be renamed or enabled, making *) - (* it the perfect target for such a test using a username (Administrator account can be renamed). *) - let username = "KRBTGT" in (* domain name prefix automatically added by our internal AD plugin functions *) - let sid = get_subject_identifier username in (* use our well-known KRBTGT builtin username in AD *) - (* OK, we found this username! *) - debug "Request %i/%i to external authentication server successful: user %s was found" i max username; - - (* (2) CA-25427: test (1) above may succeed (because of pbis caching stale AD information) *) - (* even though the AD domain is offline (error 32888), usually because /etc/resolv.conf is not *) - (* pointing to the AD server. This test should catch if the domain is offline by calling find-by-sid *) - (* using a domain SID. We must use a _domain_ SID. A universal SID like S-1-1-0 doesn't work for this test. *) - let (_: (string*string) list) = query_subject_information sid in (* use KRBTGT's domain SID *) - debug "Request %i/%i to external authentication server successful: sid %s was found" i max sid; - - true - with - | Not_found -> (* that means that pbis is responding to at least cached subject queries. *) - (* in this case, KRBTGT wasn't found in the AD domain. 
this usually indicates that the *) - (* AD domain is offline/inaccessible to pbis, which will cause problems, specially *) - (* to the ssh python hook-script, so we need to try again until KRBTGT is found, indicating *) - (* that the domain is online and accessible to pbis queries *) - debug "Request %i/%i to external authentication server returned KRBTGT Not_found, waiting 5 secs to try again" i max; - Thread.delay 5.0; (*wait 5 seconds*) - (* try again *) - test (i+1) - | e -> (* ERROR: anything else means that the server is NOT responding adequately *) - debug "Request %i/%i to external authentication server failed, waiting 5 secs to try again: %s" i max (ExnHelper.string_of_exn e); - Thread.delay 5.0; (*wait 5 seconds*) - (* try again *) - test (i+1) - ) - end + if i > max then false (* we give up *) + else begin (* let's test *) + (try + (* (1) we _need_ to use a username contained in our domain, otherwise test (2) doesn't work *) + (* Microsoft KB/Q243330 article provides the KRBTGT account as a well-known built-in SID in AD *) + (* Microsoft KB/Q229909 article says that KRBTGT account cannot be renamed or enabled, making *) + (* it the perfect target for such a test using a username (Administrator account can be renamed). *) + let username = "KRBTGT" in (* domain name prefix automatically added by our internal AD plugin functions *) + let sid = get_subject_identifier username in (* use our well-known KRBTGT builtin username in AD *) + (* OK, we found this username! *) + debug "Request %i/%i to external authentication server successful: user %s was found" i max username; + + (* (2) CA-25427: test (1) above may succeed (because of pbis caching stale AD information) *) + (* even though the AD domain is offline (error 32888), usually because /etc/resolv.conf is not *) + (* pointing to the AD server. This test should catch if the domain is offline by calling find-by-sid *) + (* using a domain SID. We must use a _domain_ SID. 
A universal SID like S-1-1-0 doesn't work for this test. *) + let (_: (string*string) list) = query_subject_information sid in (* use KRBTGT's domain SID *) + debug "Request %i/%i to external authentication server successful: sid %s was found" i max sid; + + true + with + | Not_found -> (* that means that pbis is responding to at least cached subject queries. *) + (* in this case, KRBTGT wasn't found in the AD domain. this usually indicates that the *) + (* AD domain is offline/inaccessible to pbis, which will cause problems, specially *) + (* to the ssh python hook-script, so we need to try again until KRBTGT is found, indicating *) + (* that the domain is online and accessible to pbis queries *) + debug "Request %i/%i to external authentication server returned KRBTGT Not_found, waiting 5 secs to try again" i max; + Thread.delay 5.0; (*wait 5 seconds*) + (* try again *) + test (i+1) + | e -> (* ERROR: anything else means that the server is NOT responding adequately *) + debug "Request %i/%i to external authentication server failed, waiting 5 secs to try again: %s" i max (ExnHelper.string_of_exn e); + Thread.delay 5.0; (*wait 5 seconds*) + (* try again *) + test (i+1) + ) + end in begin - debug "Testing if external authentication server is accepting requests..."; - test 0 + debug "Testing if external authentication server is accepting requests..."; + test 0 end -(* converts from domain.com\user to user@domain.com, in case domain.com is present in the subject_name *) -let convert_nt_to_upn_username subject_name = + (* converts from domain.com\user to user@domain.com, in case domain.com is present in the subject_name *) + let convert_nt_to_upn_username subject_name = try - (* test if the NT account name separator \ is present in subject name *) - let i = String.index subject_name '\\' in - (* we only reach this point if the separator \ is present in subject_name *) - (* when \ is present, we need to convert the NT name to UPN format *) - let domain = String.sub 
subject_name 0 i in - let user = String.sub subject_name (i+1) ((String.length subject_name) - i - 1) in - user ^ "@" ^ domain + (* test if the NT account name separator \ is present in subject name *) + let i = String.index subject_name '\\' in + (* we only reach this point if the separator \ is present in subject_name *) + (* when \ is present, we need to convert the NT name to UPN format *) + let domain = String.sub subject_name 0 i in + let user = String.sub subject_name (i+1) ((String.length subject_name) - i - 1) in + user ^ "@" ^ domain with Not_found -> begin (* if no NT username separator \ was found *) (* nothing to do in this case *) subject_name - end - - -(* unit on_enable(((string*string) list) config_params) - - Called internally by xapi _on each host_ when a client enables an external auth service for the - pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed - by the client as part of the corresponding XenAPI call. - On receiving this hook, the auth module should: - (i) do whatever it needs to do (if anything) to register with the external auth/directory - service [using the config params supplied to get access] - (ii) Write the config_params that it needs to store persistently in the XenServer metadata - into the Pool.external_auth_configuration field. [Note - the rationale for making the plugin - write the config params it needs long-term into the XenServer metadata itself is so it can - explicitly filter any one-time credentials [like AD username/password for example] that it - does not need long-term.] -*) -let on_enable config_params = + end + + + (* unit on_enable(((string*string) list) config_params) + + Called internally by xapi _on each host_ when a client enables an external auth service for the + pool via the XenAPI [see AD integration wiki page]. The config_params here are the ones passed + by the client as part of the corresponding XenAPI call. 
+ On receiving this hook, the auth module should: + (i) do whatever it needs to do (if anything) to register with the external auth/directory + service [using the config params supplied to get access] + (ii) Write the config_params that it needs to store persistently in the XenServer metadata + into the Pool.external_auth_configuration field. [Note - the rationale for making the plugin + write the config params it needs long-term into the XenServer metadata itself is so it can + explicitly filter any one-time credentials [like AD username/password for example] that it + does not need long-term.] + *) + let on_enable config_params = (* but in the ldap plugin, we should 'join the AD/kerberos domain', i.e. we should*) (* basically: (1) create a machine account in the kerberos realm,*) (* (2) store the machine account password somewhere locally (in a keytab) *) if not ( (List.mem_assoc "user" config_params) - && (List.mem_assoc "pass" config_params) - ) + && (List.mem_assoc "pass" config_params) + ) then begin - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,"enable requires two config params: user and pass.")) + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,"enable requires two config params: user and pass.")) end else (* we have all the required parameters *) - let hostname = + let hostname = Server_helpers.exec_with_new_task "retrieving hostname" - (fun __context -> - let host = Helpers.get_localhost ~__context in - Db.Host.get_hostname ~__context ~self:host - ) - in - if (String.fold_left - (fun b ch -> b && (ch>='0')&&(ch<='9')) - true - hostname + (fun __context -> + let host = Helpers.get_localhost ~__context in + Db.Host.get_hostname ~__context ~self:host + ) + in + if (String.fold_left + (fun b ch -> b && (ch>='0')&&(ch<='9')) + true + hostname ) - then + then raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,(Printf.sprintf "hostname '%s' cannot contain only digits." 
hostname))) - else + else - let domain = - let service_name = Server_helpers.exec_with_new_task "retrieving external_auth_service_name" - (fun __context -> - let host = Helpers.get_localhost ~__context in - Db.Host.get_external_auth_service_name ~__context ~self:host; - ) - in - if List.mem_assoc "domain" config_params (* legacy test: do we have domain name in config? *) - then begin (* then config:domain must match service-name *) + let domain = + let service_name = Server_helpers.exec_with_new_task "retrieving external_auth_service_name" + (fun __context -> + let host = Helpers.get_localhost ~__context in + Db.Host.get_external_auth_service_name ~__context ~self:host; + ) + in + if List.mem_assoc "domain" config_params (* legacy test: do we have domain name in config? *) + then begin (* then config:domain must match service-name *) let _domain = List.assoc "domain" config_params in if service_name <> _domain then - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,"if present, config:domain must match service-name.")) + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,"if present, config:domain must match service-name.")) else - service_name - end - else (* if no config:domain provided, we simply use the string in service_name for the domain name *) - service_name - in - let _user = List.assoc "user" config_params in - let pass = List.assoc "pass" config_params in - let (ou_conf,ou_params) = if (List.mem_assoc "ou" config_params) then let ou=(List.assoc "ou" config_params) in ([("ou",ou)],["--ou";ou]) else ([],[]) in - (* Adding the config parameter "config:disable_modules=X,Y,Z" - * will disable the modules X, Y and Z in domainjoin-cli. 
*) - let disabled_modules = - try + service_name + end + else (* if no config:domain provided, we simply use the string in service_name for the domain name *) + service_name + in + let _user = List.assoc "user" config_params in + let pass = List.assoc "pass" config_params in + let (ou_conf,ou_params) = if (List.mem_assoc "ou" config_params) then let ou=(List.assoc "ou" config_params) in ([("ou",ou)],["--ou";ou]) else ([],[]) in + (* Adding the config parameter "config:disable_modules=X,Y,Z" + * will disable the modules X, Y and Z in domainjoin-cli. *) + let disabled_modules = + try match List.assoc "disable_modules" config_params with | "" -> [] | disabled_modules_string -> - String.split_f (fun c -> c = ',') disabled_modules_string - with Not_found -> + String.split_f (fun c -> c = ',') disabled_modules_string + with Not_found -> [] - in - let disabled_module_params = - List.concat + in + let disabled_module_params = + List.concat (List.map - (fun disabled_module -> ["--disable"; disabled_module]) - disabled_modules) - in + (fun disabled_module -> ["--disable"; disabled_module]) + disabled_modules) + in - (* we need to make sure that the user passed to domaijoin-cli command is in the UPN syntax (user@domain.com) *) - let user = convert_nt_to_upn_username _user in + (* we need to make sure that the user passed to domaijoin-cli command is in the UPN syntax (user@domain.com) *) + let user = convert_nt_to_upn_username _user in - (* execute the pbis domain join cmd *) - try - let (_: (string*string) list) = + (* execute the pbis domain join cmd *) + try + let (_: (string*string) list) = pbis_common_with_password - pass - "/opt/pbis/bin/domainjoin-cli" - (["join"] - @ ou_params @ disabled_module_params @ - ["--ignore-pam";"--notimesync";domain;user]) - in - - let max_tries = 60 in (* tests 60 x 5.0 seconds = 300 seconds = 5minutes trying *) - if not (is_pbis_server_available max_tries) then - begin - let errmsg = (Printf.sprintf "External authentication server not 
available after %i query tests" max_tries) in - debug "%s" errmsg; - raise (Auth_signature.Auth_service_error (Auth_signature.E_UNAVAILABLE,errmsg)) - end; - - (* OK SUCCESS, pbis has joined the AD domain successfully *) - (* write persistently the relevant config_params in the host.external_auth_configuration field *) - (* we should not store the user's (admin's) password !! *) - let extauthconf = [ + pass + "/opt/pbis/bin/domainjoin-cli" + (["join"] + @ ou_params @ disabled_module_params @ + ["--ignore-pam";"--notimesync";domain;user]) + in + + let max_tries = 60 in (* tests 60 x 5.0 seconds = 300 seconds = 5minutes trying *) + if not (is_pbis_server_available max_tries) then + begin + let errmsg = (Printf.sprintf "External authentication server not available after %i query tests" max_tries) in + debug "%s" errmsg; + raise (Auth_signature.Auth_service_error (Auth_signature.E_UNAVAILABLE,errmsg)) + end; + + (* OK SUCCESS, pbis has joined the AD domain successfully *) + (* write persistently the relevant config_params in the host.external_auth_configuration field *) + (* we should not store the user's (admin's) password !! 
*) + let extauthconf = [ ("domain", domain); ("user", user) - ] @ ou_conf in - Server_helpers.exec_with_new_task "storing external_auth_configuration" + ] @ ou_conf in + Server_helpers.exec_with_new_task "storing external_auth_configuration" (fun __context -> - let host = Helpers.get_localhost ~__context in - Db.Host.set_external_auth_configuration ~__context ~self:host ~value:extauthconf; - debug "added external_auth_configuration for host %s" (Db.Host.get_name_label ~__context ~self:host) + let host = Helpers.get_localhost ~__context in + Db.Host.set_external_auth_configuration ~__context ~self:host ~value:extauthconf; + debug "added external_auth_configuration for host %s" (Db.Host.get_name_label ~__context ~self:host) ); - Stdext.Threadext.Mutex.execute cache_of_pbis_common_m (fun _ -> cache_of_pbis_common := []); - ensure_pbis_configured () + Stdext.Threadext.Mutex.execute cache_of_pbis_common_m (fun _ -> cache_of_pbis_common := []); + ensure_pbis_configured () - with e -> (*ERROR, we didn't join the AD domain*) - debug "Error enabling external authentication for domain %s and user %s: %s" domain user (ExnHelper.string_of_exn e); - raise e + with e -> (*ERROR, we didn't join the AD domain*) + debug "Error enabling external authentication for domain %s and user %s: %s" domain user (ExnHelper.string_of_exn e); + raise e -(* unit on_disable() + (* unit on_disable() - Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. - The hook will be called _before_ the Pool configuration fields relating to the external-auth - service are cleared (i.e. so you can access the config params you need from the pool metadata - within the body of the on_disable method) -*) -let on_disable config_params = + Called internally by xapi _on each host_ when a client disables an auth service via the XenAPI. + The hook will be called _before_ the Pool configuration fields relating to the external-auth + service are cleared (i.e. 
so you can access the config params you need from the pool metadata + within the body of the on_disable method) + *) + let on_disable config_params = (* but in the ldap plugin, we should 'leave the AD/kerberos domain', i.e. we should *) (* (1) remove the machine account from the kerberos realm, (2) remove the keytab locally *) let pbis_failure = - (try - - if not ( (List.mem_assoc "user" config_params) - && (List.mem_assoc "pass" config_params) - ) - then - begin (* no windows admin+pass have been provided: leave the pbis host in the AD database *) - (* execute the pbis domain-leave cmd *) - (* this function will raise an exception if something goes wrong *) - let (_: (string*string) list) = pbis_common "/opt/pbis/bin/domainjoin-cli" ["leave"] in - () - end - else - begin (* windows admin+pass have been provided: ask pbis to remove host from AD database *) - let _user = List.assoc "user" config_params in - let pass = List.assoc "pass" config_params in - (* we need to make sure that the user passed to domaijoin-cli command is in the UPN syntax (user@domain.com) *) - let user = convert_nt_to_upn_username (get_full_subject_name ~use_nt_format:false _user) in - (* execute the pbis domain-leave cmd *) - (* this function will raise an exception if something goes wrong *) - let (_: (string*string) list) = pbis_common_with_password pass "/opt/pbis/bin/domainjoin-cli" ["leave";user] in - () - end; - None (* no failure observed in pbis *) - - with e -> (* unexpected error disabling pbis *) - ( - debug "Internal Pbis error when disabling external authentication: %s" (ExnHelper.string_of_exn e); - (* CA-27627: we should NOT re-raise the exception e here, because otherwise we might get stuck, *) - (* without being able to disable an external authentication configuration, since the Pbis *) - (* behavior is outside our control. 
For instance, Pbis raises an exception during domain-leave *) - (* when the domain controller is offline, so it would be impossible to leave a domain that *) - (* has already been deleted. *) - (* Not re-raising an exception here is not too bad, because both ssh and xapi access to the AD/Pbis *) - (* commands will be disabled anyway by host.disable_external_auth. So, even though access to the external *) - (* authentication service might still be possible from Dom0 shell, it will not be possible *) - (* to login as an external user via ssh or to call external-authentication services via xapi/xe. *) - Some e (* CA-28942: stores exception returned by pbis for later *) - ) - ) in + (try + + if not ( (List.mem_assoc "user" config_params) + && (List.mem_assoc "pass" config_params) + ) + then + begin (* no windows admin+pass have been provided: leave the pbis host in the AD database *) + (* execute the pbis domain-leave cmd *) + (* this function will raise an exception if something goes wrong *) + let (_: (string*string) list) = pbis_common "/opt/pbis/bin/domainjoin-cli" ["leave"] in + () + end + else + begin (* windows admin+pass have been provided: ask pbis to remove host from AD database *) + let _user = List.assoc "user" config_params in + let pass = List.assoc "pass" config_params in + (* we need to make sure that the user passed to domaijoin-cli command is in the UPN syntax (user@domain.com) *) + let user = convert_nt_to_upn_username (get_full_subject_name ~use_nt_format:false _user) in + (* execute the pbis domain-leave cmd *) + (* this function will raise an exception if something goes wrong *) + let (_: (string*string) list) = pbis_common_with_password pass "/opt/pbis/bin/domainjoin-cli" ["leave";user] in + () + end; + None (* no failure observed in pbis *) + + with e -> (* unexpected error disabling pbis *) + ( + debug "Internal Pbis error when disabling external authentication: %s" (ExnHelper.string_of_exn e); + (* CA-27627: we should NOT re-raise the 
exception e here, because otherwise we might get stuck, *) + (* without being able to disable an external authentication configuration, since the Pbis *) + (* behavior is outside our control. For instance, Pbis raises an exception during domain-leave *) + (* when the domain controller is offline, so it would be impossible to leave a domain that *) + (* has already been deleted. *) + (* Not re-raising an exception here is not too bad, because both ssh and xapi access to the AD/Pbis *) + (* commands will be disabled anyway by host.disable_external_auth. So, even though access to the external *) + (* authentication service might still be possible from Dom0 shell, it will not be possible *) + (* to login as an external user via ssh or to call external-authentication services via xapi/xe. *) + Some e (* CA-28942: stores exception returned by pbis for later *) + ) + ) in (* We always do a manual clean-up of pbis, in order to restore Dom0 to its pre-pbis state *) (* It doesn't matter if pbis succeeded or not *) @@ -742,30 +742,30 @@ let on_disable config_params = (* some of the command-line workarounds that Kyle describes in CA-27627: *) let pbis_force_domain_leave_script = "/opt/xensource/libexec/pbis-force-domain-leave" in (try - let output, stderr = Forkhelpers.execute_command_get_output pbis_force_domain_leave_script [] in - debug "execute %s: stdout=[%s],stderr=[%s]" pbis_force_domain_leave_script (String.replace "\n" ";" output) (String.replace "\n" ";" stderr) - with e-> (debug "exception executing %s: %s" pbis_force_domain_leave_script (ExnHelper.string_of_exn e);) + let output, stderr = Forkhelpers.execute_command_get_output pbis_force_domain_leave_script [] in + debug "execute %s: stdout=[%s],stderr=[%s]" pbis_force_domain_leave_script (String.replace "\n" ";" output) (String.replace "\n" ";" stderr) + with e-> (debug "exception executing %s: %s" pbis_force_domain_leave_script (ExnHelper.string_of_exn e);) ); (* OK SUCCESS, pbis has left the AD domain 
successfully *) (* remove persistently the relevant config_params in the host.external_auth_configuration field *) Server_helpers.exec_with_new_task "removing external_auth_configuration" - (fun __context -> - let host = Helpers.get_localhost ~__context in - Db.Host.set_external_auth_configuration ~__context ~self:host ~value:[]; - debug "removed external_auth_configuration for host %s" (Db.Host.get_name_label ~__context ~self:host) - ); + (fun __context -> + let host = Helpers.get_localhost ~__context in + Db.Host.set_external_auth_configuration ~__context ~self:host ~value:[]; + debug "removed external_auth_configuration for host %s" (Db.Host.get_name_label ~__context ~self:host) + ); match pbis_failure with - | None -> () (* OK, return unit*) - | Some e -> raise e (* bubble up pbis failure *) + | None -> () (* OK, return unit*) + | Some e -> raise e (* bubble up pbis failure *) -(* unit on_xapi_initialize(bool system_boot) + (* unit on_xapi_initialize(bool system_boot) - Called internally by xapi whenever it starts up. The system_boot flag is true iff xapi is - starting for the first time after a host boot -*) -let on_xapi_initialize system_boot = + Called internally by xapi whenever it starts up. 
The system_boot flag is true iff xapi is + starting for the first time after a host boot + *) + let on_xapi_initialize system_boot = (* the AD server is initialized outside xapi, by init.d scripts *) (* this function is called during xapi initialization in xapi.ml *) @@ -773,33 +773,33 @@ let on_xapi_initialize system_boot = (* make sure that the AD/LSASS server is responding before returning *) let max_tries = 12 in (* tests 12 x 5.0 seconds = 60 seconds = up to 1 minute trying *) if not (is_pbis_server_available max_tries) then - begin + begin let errmsg = (Printf.sprintf "External authentication server not available after %i query tests" max_tries) in debug "%s" errmsg; raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC,errmsg)) - end; + end; () -(* unit on_xapi_exit() + (* unit on_xapi_exit() - Called internally when xapi is doing a clean exit. -*) -let on_xapi_exit () = + Called internally when xapi is doing a clean exit. + *) + let on_xapi_exit () = (* nothing to do here in this unix plugin *) (* in the ldap plugin, we should remove the tgt ticket in /tmp/krb5cc_0 *) () -(* Implement the single value required for the module signature *) -let methods = {Auth_signature.authenticate_username_password = authenticate_username_password; - Auth_signature.authenticate_ticket = authenticate_ticket; - Auth_signature.get_subject_identifier = get_subject_identifier; - Auth_signature.query_subject_information = query_subject_information; - Auth_signature.query_group_membership = query_group_membership; - Auth_signature.on_enable = on_enable; - Auth_signature.on_disable = on_disable; - Auth_signature.on_xapi_initialize = on_xapi_initialize; - Auth_signature.on_xapi_exit = on_xapi_exit} + (* Implement the single value required for the module signature *) + let methods = {Auth_signature.authenticate_username_password = authenticate_username_password; + Auth_signature.authenticate_ticket = authenticate_ticket; + Auth_signature.get_subject_identifier = 
get_subject_identifier; + Auth_signature.query_subject_information = query_subject_information; + Auth_signature.query_group_membership = query_group_membership; + Auth_signature.on_enable = on_enable; + Auth_signature.on_disable = on_disable; + Auth_signature.on_xapi_initialize = on_xapi_initialize; + Auth_signature.on_xapi_exit = on_xapi_exit} end diff --git a/ocaml/auth/testauth.ml b/ocaml/auth/testauth.ml index 7fe1e3e160a..a1e951ffb04 100644 --- a/ocaml/auth/testauth.ml +++ b/ocaml/auth/testauth.ml @@ -12,7 +12,7 @@ * GNU Lesser General Public License for more details. *) -let usage() = +let usage() = print_endline "Usage:"; Printf.printf "%s auth \n" Sys.argv.(0); Printf.printf "%s chpasswd \n" Sys.argv.(0); @@ -24,7 +24,7 @@ let _ = and password = Sys.argv.(3) in match Sys.argv.(1) with | "auth" -> - Pam.authenticate username password + Pam.authenticate username password | "chpasswd" -> - Pam.change_password username password + Pam.change_password username password | _ -> usage() diff --git a/ocaml/auth/testauthx.ml b/ocaml/auth/testauthx.ml index 7018aef0428..c343611bfce 100644 --- a/ocaml/auth/testauthx.ml +++ b/ocaml/auth/testauthx.ml @@ -14,106 +14,106 @@ open Auth_signature open Authx -let usage() = +let usage() = print_endline "Usage:"; Printf.printf "%s \n" Sys.argv.(0); exit 1 -let _ = - - if Array.length Sys.argv <> 3 then usage (); - let username = Sys.argv.(1) - and password = Sys.argv.(2) in - - let hr x = print_endline ("-----------------------------\n"^x) in - - (* should return 2037 *) - hr ("TEST 1a. Authx.get_subject_identifier "^username); - let userid = AuthX.methods.get_subject_identifier username in - print_endline ("userid="^userid); - - - hr ("TEST 1b. 
AuthX.methods.get_subject_identifier "^username^"_werq (unknown subject)"); - try - print_endline (AuthX.methods.get_subject_identifier (username^"_werq")); - with Not_found -> print_endline "subject Not_found, as expected"; - - - (* should return a list of groups that subjectid 1000 (a user) belongs to *) - hr ("TEST 2a. AuthX.methods.query_group_membership "^userid^" (a user subject)"); - let conc x y = (x^","^y) in - let groupid_list = AuthX.methods.query_group_membership userid in - print_endline (List.fold_left (conc) "" groupid_list); - - (* should return a list of groups that subjectid 10024 (a group) belongs to *) - let agroup = List.hd groupid_list in - hr ("TEST 2b. AuthX.methods.query_group_membership "^agroup^" (a group subject)"); - print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership agroup)); - - - hr "TEST 2c. AuthX.methods.query_group_membership u999 (unknown subject)"; - try - print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "u999")); - with Not_found -> print_endline "subject Not_found, as expected."; - - hr "TEST 2d. AuthX.methods.query_group_membership a999 (unknown subject)"; - try - print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "a999")); - with Not_found -> print_endline "subject Not_found, as expected."; - - hr "TEST 2e. AuthX.methods.query_group_membership 999 (unknown subject)"; - try - print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "999")); - with Not_found -> print_endline "subject Not_found, as expected."; - - (* should return a list with information about subject_id 1000 (a user)*) - hr ("TEST 3a. 
AuthX.methods.query_subject_information "^userid^" (a user)"); - let infolist1 = AuthX.methods.query_subject_information userid in - for i=0 to (List.length infolist1)-1 do - let print_elems (e1,e2) = print_endline (e1^": "^e2) in - print_elems (List.nth infolist1 i) - done; - - (* should return a list with information about subject_id 10024 (a group)*) - hr ("TEST 3b. AuthX.methods.query_subject_information "^agroup^" (a group)"); - let infolist1 = AuthX.methods.query_subject_information agroup in - for i=0 to (List.length infolist1)-1 do - let print_elems (e1,e2) = print_endline (e1^": "^e2) in - print_elems (List.nth infolist1 i) - done; - - (* should return an error not_found *) - hr "TEST 3c. AuthX.methods.query_subject_information u999 (unknown subject)"; - try - let infolist1 = AuthX.methods.query_subject_information "u999" in - for i=0 to (List.length infolist1)-1 do - let print_elems (e1,e2) = print_endline (e1^": "^e2) in - print_elems (List.nth infolist1 i) - done; - with Not_found -> print_endline "subject Not_found, as expected."; - - (* should return an error not_found *) - hr "TEST 3d. AuthX.methods.query_subject_information a999 (unknown subject)"; - try - let infolist1 = AuthX.methods.query_subject_information "a999" in - for i=0 to (List.length infolist1)-1 do - let print_elems (e1,e2) = print_endline (e1^": "^e2) in - print_elems (List.nth infolist1 i) - done; - with Not_found -> print_endline "subject Not_found, as expected."; - - (* should return an error not_found *) - hr "TEST 3e. AuthX.methods.query_subject_information 999 (unknown subject)"; - try - let infolist1 = AuthX.methods.query_subject_information "999" in - for i=0 to (List.length infolist1)-1 do - let print_elems (e1,e2) = print_endline (e1^": "^e2) in - print_elems (List.nth infolist1 i) - done; - with Not_found -> print_endline "subject Not_found, as expected."; - - hr ("TEST 4. 
AuthX.methods.authenticate_username_password "^username); - print_endline (AuthX.methods.authenticate_username_password username password); - - - +let _ = + + if Array.length Sys.argv <> 3 then usage (); + let username = Sys.argv.(1) + and password = Sys.argv.(2) in + + let hr x = print_endline ("-----------------------------\n"^x) in + + (* should return 2037 *) + hr ("TEST 1a. Authx.get_subject_identifier "^username); + let userid = AuthX.methods.get_subject_identifier username in + print_endline ("userid="^userid); + + + hr ("TEST 1b. AuthX.methods.get_subject_identifier "^username^"_werq (unknown subject)"); + try + print_endline (AuthX.methods.get_subject_identifier (username^"_werq")); + with Not_found -> print_endline "subject Not_found, as expected"; + + + (* should return a list of groups that subjectid 1000 (a user) belongs to *) + hr ("TEST 2a. AuthX.methods.query_group_membership "^userid^" (a user subject)"); + let conc x y = (x^","^y) in + let groupid_list = AuthX.methods.query_group_membership userid in + print_endline (List.fold_left (conc) "" groupid_list); + + (* should return a list of groups that subjectid 10024 (a group) belongs to *) + let agroup = List.hd groupid_list in + hr ("TEST 2b. AuthX.methods.query_group_membership "^agroup^" (a group subject)"); + print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership agroup)); + + + hr "TEST 2c. AuthX.methods.query_group_membership u999 (unknown subject)"; + try + print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "u999")); + with Not_found -> print_endline "subject Not_found, as expected."; + + hr "TEST 2d. AuthX.methods.query_group_membership a999 (unknown subject)"; + try + print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "a999")); + with Not_found -> print_endline "subject Not_found, as expected."; + + hr "TEST 2e. 
AuthX.methods.query_group_membership 999 (unknown subject)"; + try + print_endline (List.fold_left (conc) "" (AuthX.methods.query_group_membership "999")); + with Not_found -> print_endline "subject Not_found, as expected."; + + (* should return a list with information about subject_id 1000 (a user)*) + hr ("TEST 3a. AuthX.methods.query_subject_information "^userid^" (a user)"); + let infolist1 = AuthX.methods.query_subject_information userid in + for i=0 to (List.length infolist1)-1 do + let print_elems (e1,e2) = print_endline (e1^": "^e2) in + print_elems (List.nth infolist1 i) + done; + + (* should return a list with information about subject_id 10024 (a group)*) + hr ("TEST 3b. AuthX.methods.query_subject_information "^agroup^" (a group)"); + let infolist1 = AuthX.methods.query_subject_information agroup in + for i=0 to (List.length infolist1)-1 do + let print_elems (e1,e2) = print_endline (e1^": "^e2) in + print_elems (List.nth infolist1 i) + done; + + (* should return an error not_found *) + hr "TEST 3c. AuthX.methods.query_subject_information u999 (unknown subject)"; + try + let infolist1 = AuthX.methods.query_subject_information "u999" in + for i=0 to (List.length infolist1)-1 do + let print_elems (e1,e2) = print_endline (e1^": "^e2) in + print_elems (List.nth infolist1 i) + done; + with Not_found -> print_endline "subject Not_found, as expected."; + + (* should return an error not_found *) + hr "TEST 3d. AuthX.methods.query_subject_information a999 (unknown subject)"; + try + let infolist1 = AuthX.methods.query_subject_information "a999" in + for i=0 to (List.length infolist1)-1 do + let print_elems (e1,e2) = print_endline (e1^": "^e2) in + print_elems (List.nth infolist1 i) + done; + with Not_found -> print_endline "subject Not_found, as expected."; + + (* should return an error not_found *) + hr "TEST 3e. 
AuthX.methods.query_subject_information 999 (unknown subject)"; + try + let infolist1 = AuthX.methods.query_subject_information "999" in + for i=0 to (List.length infolist1)-1 do + let print_elems (e1,e2) = print_endline (e1^": "^e2) in + print_elems (List.nth infolist1 i) + done; + with Not_found -> print_endline "subject Not_found, as expected."; + + hr ("TEST 4. AuthX.methods.authenticate_username_password "^username); + print_endline (AuthX.methods.authenticate_username_password username password); + + + diff --git a/ocaml/cdrommon/cdrommon.ml b/ocaml/cdrommon/cdrommon.ml index caf933242ba..aa1e9f0ca99 100644 --- a/ocaml/cdrommon/cdrommon.ml +++ b/ocaml/cdrommon/cdrommon.ml @@ -14,53 +14,53 @@ let oldnotify = ref false let disc_inserted name = - let args = [| !Xapi_globs.xe_path; "host-notify"; "type=cdrom"; "params=inserted:" ^ name |] in - let ret = Stdext.Unixext.spawnvp args.(0) args in - (* check if we got an error, and record the fact *) - begin match ret with - | Unix.WEXITED 0 -> oldnotify := false - | Unix.WEXITED n -> oldnotify := true - | _ -> oldnotify := true - end + let args = [| !Xapi_globs.xe_path; "host-notify"; "type=cdrom"; "params=inserted:" ^ name |] in + let ret = Stdext.Unixext.spawnvp args.(0) args in + (* check if we got an error, and record the fact *) + begin match ret with + | Unix.WEXITED 0 -> oldnotify := false + | Unix.WEXITED n -> oldnotify := true + | _ -> oldnotify := true + end let disc_removed name = - (* we don't need to do anything *) - oldnotify := false + (* we don't need to do anything *) + oldnotify := false let check interval name = - let has_disc = ref false in + let has_disc = ref false in - let check_has_disc status = - if !has_disc then ( - begin match status with - | Cdrom.NO_INFO | Cdrom.NO_DISC | Cdrom.TRAY_OPEN -> - has_disc := false; disc_removed name - | _ -> () - end; - if !oldnotify then - disc_inserted name - ) else ( - match status with - | Cdrom.DISC_OK -> has_disc := true; disc_inserted name - | _ -> () 
- ) - in + let check_has_disc status = + if !has_disc then ( + begin match status with + | Cdrom.NO_INFO | Cdrom.NO_DISC | Cdrom.TRAY_OPEN -> + has_disc := false; disc_removed name + | _ -> () + end; + if !oldnotify then + disc_inserted name + ) else ( + match status with + | Cdrom.DISC_OK -> has_disc := true; disc_inserted name + | _ -> () + ) + in - let status = Cdrom.query_cdrom_drive_status name in - has_disc := status = Cdrom.DISC_OK; + let status = Cdrom.query_cdrom_drive_status name in + has_disc := status = Cdrom.DISC_OK; - while Sys.file_exists name - do - let drive_status = Cdrom.query_cdrom_drive_status name in - check_has_disc drive_status; - Unix.sleep interval - done + while Sys.file_exists name + do + let drive_status = Cdrom.query_cdrom_drive_status name in + check_has_disc drive_status; + Unix.sleep interval + done let () = - if Array.length Sys.argv <> 2 then ( - Printf.eprintf "usage: %s \n" Sys.argv.(0); - exit 1 - ); - Stdext.Unixext.daemonize (); - (* check every 2 seconds *) - check 2 Sys.argv.(1) + if Array.length Sys.argv <> 2 then ( + Printf.eprintf "usage: %s \n" Sys.argv.(0); + exit 1 + ); + Stdext.Unixext.daemonize (); + (* check every 2 seconds *) + check 2 Sys.argv.(1) diff --git a/ocaml/client_records/record_util.ml b/ocaml/client_records/record_util.ml index b649d11a862..28940ab2097 100644 --- a/ocaml/client_records/record_util.ml +++ b/ocaml/client_records/record_util.ml @@ -26,7 +26,7 @@ let power_state_to_string state = | `ShuttingDown -> "Shutting down" | `Migrating -> "Migrating" -let vm_operation_table = +let vm_operation_table = [ `assert_operation_valid, "assertoperationvalid"; `changing_dynamic_range, "changing_dynamic_range"; @@ -44,7 +44,7 @@ let vm_operation_table = `provision, "provision"; `destroy, "destroy"; `export, "export"; - `metadata_export, "metadata_export"; + `metadata_export, "metadata_export"; `import, "import"; `get_boot_record, "get_boot_record"; `data_source_op, "data_sources_op"; @@ -72,12 +72,12 @@ let 
vm_operation_table = `call_plugin, "call_plugin"; ] -let vm_operation_to_string x = - if not(List.mem_assoc x vm_operation_table) +let vm_operation_to_string x = + if not(List.mem_assoc x vm_operation_table) then "(unknown operation)" else List.assoc x vm_operation_table -let string_to_vm_operation x = +let string_to_vm_operation x = let table = List.map (fun (a, b) -> b, a) vm_operation_table in if not(List.mem_assoc x table) then (raise (Api_errors.Server_error(Api_errors.invalid_value, [ "blocked_operation"; x ]))) @@ -146,26 +146,26 @@ let vif_operation_to_string = function | `unplug_force -> "unplug_force" let vif_locking_mode_to_string = function - | `network_default -> "network_default" - | `locked -> "locked" - | `unlocked -> "unlocked" - | `disabled -> "disabled" + | `network_default -> "network_default" + | `locked -> "locked" + | `unlocked -> "unlocked" + | `disabled -> "disabled" let string_to_vif_locking_mode = function - | "network_default" -> `network_default - | "locked" -> `locked - | "unlocked" -> `unlocked - | "disabled" -> `disabled - | s -> raise (Record_failure ("Expected 'network_default', 'locked', 'unlocked', 'disabled', got "^s)) + | "network_default" -> `network_default + | "locked" -> `locked + | "unlocked" -> `unlocked + | "disabled" -> `disabled + | s -> raise (Record_failure ("Expected 'network_default', 'locked', 'unlocked', 'disabled', got "^s)) let network_default_locking_mode_to_string = function - | `unlocked -> "unlocked" - | `disabled -> "disabled" + | `unlocked -> "unlocked" + | `disabled -> "disabled" let string_to_network_default_locking_mode = function - | "unlocked" -> `unlocked - | "disabled" -> `disabled - | s -> raise (Record_failure ("Expected 'unlocked' or 'disabled', got "^s)) + | "unlocked" -> `unlocked + | "disabled" -> `disabled + | s -> raise (Record_failure ("Expected 'unlocked' or 'disabled', got "^s)) let vm_appliance_operation_to_string = function | `start -> "start" @@ -242,11 +242,11 @@ let 
cpu_feature_to_string f = let task_status_type_to_string s = match s with - | `pending -> "pending" - | `success -> "success" - | `failure -> "failure" - | `cancelling -> "cancelling" - | `cancelled -> "cancelled" + | `pending -> "pending" + | `success -> "success" + | `failure -> "failure" + | `cancelling -> "cancelling" + | `cancelled -> "cancelled" let protocol_to_string = function | `vt100 -> "VT100" @@ -279,34 +279,34 @@ let string_to_on_normal_exit s = | _ -> raise (Record_failure ("Expected 'destroy' or 'restart', got "^s)) let on_crash_behaviour_to_string x= - match x with - `destroy -> "Destroy" - | `coredump_and_destroy -> "Core dump and destroy" - | `restart -> "Restart" - | `coredump_and_restart -> "Core dump and restart" - | `preserve -> "Preserve" - | `rename_restart -> "Rename restart" + match x with + `destroy -> "Destroy" + | `coredump_and_destroy -> "Core dump and destroy" + | `restart -> "Restart" + | `coredump_and_restart -> "Core dump and restart" + | `preserve -> "Preserve" + | `rename_restart -> "Rename restart" let string_to_on_crash_behaviour s= - match String.lowercase s with - | "destroy" -> `destroy - | "coredump_and_destroy" -> `coredump_and_destroy - | "restart" -> `restart - | "coredump_and_restart" -> `coredump_and_restart - | "preserve" -> `preserve - | "rename_restart" -> `rename_restart - | _ -> raise (Record_failure ("Expected 'destroy', 'coredump_and_destroy'," ^ - "'restart', 'coredump_and_restart', 'preserve' or 'rename_restart', got "^s)) + match String.lowercase s with + | "destroy" -> `destroy + | "coredump_and_destroy" -> `coredump_and_destroy + | "restart" -> `restart + | "coredump_and_restart" -> `coredump_and_restart + | "preserve" -> `preserve + | "rename_restart" -> `rename_restart + | _ -> raise (Record_failure ("Expected 'destroy', 'coredump_and_destroy'," ^ + "'restart', 'coredump_and_restart', 'preserve' or 'rename_restart', got "^s)) let host_display_to_string h = - match h with - | `enabled -> "enabled" - | 
`enable_on_reboot -> "enable_on_reboot" - | `disabled -> "disabled" - | `disable_on_reboot -> "disable_on_reboot" + match h with + | `enabled -> "enabled" + | `enable_on_reboot -> "enable_on_reboot" + | `disabled -> "disabled" + | `disable_on_reboot -> "disable_on_reboot" let pgpu_dom0_access_to_string x = - host_display_to_string x + host_display_to_string x let boot_type_to_string x = match x with @@ -322,36 +322,36 @@ let string_to_boot_type s = | _ -> raise (Record_failure ("Expected 'bios', 'grub' or 'kernelexternal', got "^s)) let string_to_vdi_onboot s = - match String.lowercase s with - | "persist" -> `persist - | "reset" -> `reset - | _ -> raise (Record_failure ("Expected 'persist' or 'reset', got "^s)) + match String.lowercase s with + | "persist" -> `persist + | "reset" -> `reset + | _ -> raise (Record_failure ("Expected 'persist' or 'reset', got "^s)) let string_to_vbd_mode s = - match String.lowercase s with - | "ro" -> `RO - | "rw" -> `RW - | _ -> raise (Record_failure ("Expected 'RO' or 'RW', got "^s)) + match String.lowercase s with + | "ro" -> `RO + | "rw" -> `RW + | _ -> raise (Record_failure ("Expected 'RO' or 'RW', got "^s)) let vbd_mode_to_string = function - | `RO -> "ro" - | `RW -> "rw" + | `RO -> "ro" + | `RW -> "rw" let string_to_vbd_type s = - match String.lowercase s with - | "cd" -> `CD - | "disk" -> `Disk - | "floppy" -> `Floppy - | _ -> raise (Record_failure ("Expected 'CD' or 'Disk', got "^s)) + match String.lowercase s with + | "cd" -> `CD + | "disk" -> `Disk + | "floppy" -> `Floppy + | _ -> raise (Record_failure ("Expected 'CD' or 'Disk', got "^s)) let power_to_string h = match h with - `Halted -> "halted" - | `Paused -> "paused" - | `Running -> "running" - | `Suspended -> "suspended" - | `ShuttingDown -> "shutting down" - | `Migrating -> "migrating" + `Halted -> "halted" + | `Paused -> "paused" + | `Running -> "running" + | `Suspended -> "suspended" + | `ShuttingDown -> "shutting down" + | `Migrating -> "migrating" let 
string_to_vdi_type x = match (String.lowercase x) with | "system" -> Some `system @@ -434,32 +434,32 @@ let primary_address_type_of_string m = | s -> raise (Record_failure ("Expected 'ipv4' or 'ipv6', got "^s)) let bond_mode_to_string = function - | `balanceslb -> "balance-slb" - | `activebackup -> "active-backup" - | `lacp -> "lacp" + | `balanceslb -> "balance-slb" + | `activebackup -> "active-backup" + | `lacp -> "lacp" let bond_mode_of_string m = - match String.lowercase m with - | "balance-slb" | "" -> `balanceslb - | "active-backup" -> `activebackup - | "lacp" -> `lacp - | s -> raise (Record_failure ("Invalid bond mode. Got " ^ s)) + match String.lowercase m with + | "balance-slb" | "" -> `balanceslb + | "active-backup" -> `activebackup + | "lacp" -> `lacp + | s -> raise (Record_failure ("Invalid bond mode. Got " ^ s)) let allocation_algorithm_to_string = function - | `depth_first -> "depth-first" - | `breadth_first -> "breadth-first" + | `depth_first -> "depth-first" + | `breadth_first -> "breadth-first" let allocation_algorithm_of_string a = - match String.lowercase a with - | "depth-first" -> `depth_first - | "breadth-first" -> `breadth_first - | s -> raise (Record_failure ("Invalid allocation algorithm. Got " ^ s)) + match String.lowercase a with + | "depth-first" -> `depth_first + | "breadth-first" -> `breadth_first + | s -> raise (Record_failure ("Invalid allocation algorithm. 
Got " ^ s)) let bool_of_string s = - match String.lowercase s with - |"true"|"yes"->true - |"false"|"no"->false - |_-> raise (Record_failure ("Expected 'true','yes','false','no', got "^s)) + match String.lowercase s with + |"true"|"yes"->true + |"false"|"no"->false + |_-> raise (Record_failure ("Expected 'true','yes','false','no', got "^s)) (* string_to_string_map_to_string *) let s2sm_to_string sep x = @@ -478,15 +478,15 @@ let i642sm_to_string sep x = String.concat sep (List.map (fun (a,b) -> Printf.sprintf "%Ld %s" a b) x) let on_boot_to_string onboot = - match onboot with - | `reset -> "reset" - | `persist -> "persist" + match onboot with + | `reset -> "reset" + | `persist -> "persist" let tristate_to_string tristate = - match tristate with - | `yes -> "true" - | `no -> "false" - | `unspecified -> "unspecified" + match tristate with + | `yes -> "true" + | `no -> "false" + | `unspecified -> "unspecified" let wrap f err x = try f x with _ -> err x let generic_error x = raise (Record_failure ("Unknown value: "^x)) @@ -510,24 +510,24 @@ let bytes_of_string field x = raise (Record_failure (Printf.sprintf "Failed to parse field '%s': number too big (maximum = %Ld TiB)" field max_size_TiB)) else raise (Record_failure (Printf.sprintf "Failed to parse field '%s': expecting an integer (possibly with suffix)" field)); - in + in match (String.split_f (fun c -> String.isspace c || (isdigit c)) x) with | [] -> - (* no suffix on the end *) - int64_of_string x + (* no suffix on the end *) + int64_of_string x | [ suffix ] -> begin - let number = match (String.split_f (fun x -> not (isdigit x)) x) with - | [ number ] -> int64_of_string number - | _ -> raise (Record_failure (Printf.sprintf "Failed to parse field '%s': expecting an integer (possibly with suffix)" field)) in - let multiplier = match suffix with - | "bytes" -> 1L - | "KiB" -> 1024L - | "MiB" -> 1024L ** 1024L - | "GiB" -> 1024L ** 1024L ** 1024L - | "TiB" -> 1024L ** 1024L ** 1024L ** 1024L - | x -> raise 
(Record_failure (Printf.sprintf "Failed to parse field '%s': Unknown suffix: '%s' (try KiB, MiB, GiB or TiB)" field x)) in - (* FIXME: detect overflow *) - number ** multiplier + let number = match (String.split_f (fun x -> not (isdigit x)) x) with + | [ number ] -> int64_of_string number + | _ -> raise (Record_failure (Printf.sprintf "Failed to parse field '%s': expecting an integer (possibly with suffix)" field)) in + let multiplier = match suffix with + | "bytes" -> 1L + | "KiB" -> 1024L + | "MiB" -> 1024L ** 1024L + | "GiB" -> 1024L ** 1024L ** 1024L + | "TiB" -> 1024L ** 1024L ** 1024L ** 1024L + | x -> raise (Record_failure (Printf.sprintf "Failed to parse field '%s': Unknown suffix: '%s' (try KiB, MiB, GiB or TiB)" field x)) in + (* FIXME: detect overflow *) + number ** multiplier end | _ -> raise (Record_failure (Printf.sprintf "Failed to parse field '%s': expecting an integer (possibly with suffix)" field)) @@ -535,8 +535,8 @@ let bytes_of_string field x = (* generate a random mac with XenSource OUI "00:16:3e" *) let random_mac () = - let macs = [0x00; 0x16; 0x3e] @ (List.map Random.int [0x80; 0x100; 0x100]) in - String.concat ":" (List.map (Printf.sprintf "%02x") macs) + let macs = [0x00; 0x16; 0x3e] @ (List.map Random.int [0x80; 0x100; 0x100]) in + String.concat ":" (List.map (Printf.sprintf "%02x") macs) let mac_from_int_array macs = (* make sure bit 1 (local) is set and bit 0 (unicast) is clear *) diff --git a/ocaml/client_records/records.ml b/ocaml/client_records/records.ml index f6f582161b4..c9e2b90f7f9 100644 --- a/ocaml/client_records/records.ml +++ b/ocaml/client_records/records.ml @@ -24,7 +24,7 @@ let nullref = Ref.string_of (Ref.null) let nid = "" let unknown_time = "" -let checknull f r = +let checknull f r = if (Ref.string_of r)=nullref then nid else try f r with _ -> nid @@ -37,42 +37,42 @@ let string_of_float f = Printf.sprintf "%.3f" f (* Splitting an empty string gives a list containing the empty string, which * is usually not what we 
want. *) let get_words separator = function - | "" -> [] - | str -> String.split separator str - -type field = { name: string; - get: (unit -> string); - set: (string -> unit) option; - get_set : (unit -> string list) option; (* gets the string list that is a representation of a set *) - add_to_set: (string -> unit) option; - remove_from_set: (string -> unit) option; - get_map : (unit -> (string * string) list) option; - add_to_map: (string -> string -> unit) option; - remove_from_map: (string -> unit) option; - set_in_map: (string -> string -> unit) option; (* Change the value of an existing map field, without using add/remove *) - expensive: bool; (* Simply means an extra API call is required to get it *) - hidden: bool; (* Meaning we don't show it unless it's *explicitly* asked for (i.e. hidden from *-list and *-param-list *) - deprecated: bool; - case_insensitive: bool; (* Use case-insensitive matching when selecting *) - } + | "" -> [] + | str -> String.split separator str + +type field = { name: string; + get: (unit -> string); + set: (string -> unit) option; + get_set : (unit -> string list) option; (* gets the string list that is a representation of a set *) + add_to_set: (string -> unit) option; + remove_from_set: (string -> unit) option; + get_map : (unit -> (string * string) list) option; + add_to_map: (string -> string -> unit) option; + remove_from_map: (string -> unit) option; + set_in_map: (string -> string -> unit) option; (* Change the value of an existing map field, without using add/remove *) + expensive: bool; (* Simply means an extra API call is required to get it *) + hidden: bool; (* Meaning we don't show it unless it's *explicitly* asked for (i.e. 
hidden from *-list and *-param-list *) + deprecated: bool; + case_insensitive: bool; (* Use case-insensitive matching when selecting *) + } type ('a,'b) record = { getref : unit -> 'a Ref.t; - record : unit -> 'b; - setref : 'a Ref.t -> unit; - setrefrec : 'a Ref.t * 'b -> unit; - fields : field list; } - -let make_field ?add_to_set ?remove_from_set ?add_to_map ?remove_from_map ?set_in_map ?set ?get_set ?get_map ?(expensive=false) ?(hidden=false) ?(deprecated=false) ?(case_insensitive=false) ~name ~get () = - { name = name; get = get; set = set; + record : unit -> 'b; + setref : 'a Ref.t -> unit; + setrefrec : 'a Ref.t * 'b -> unit; + fields : field list; } + +let make_field ?add_to_set ?remove_from_set ?add_to_map ?remove_from_map ?set_in_map ?set ?get_set ?get_map ?(expensive=false) ?(hidden=false) ?(deprecated=false) ?(case_insensitive=false) ~name ~get () = + { name = name; get = get; set = set; add_to_set = add_to_set; remove_from_set = remove_from_set; add_to_map = add_to_map; remove_from_map = remove_from_map; set_in_map = set_in_map; get_set = get_set; get_map = get_map; expensive = expensive; - hidden = hidden; case_insensitive = case_insensitive; + hidden = hidden; case_insensitive = case_insensitive; deprecated = deprecated } -let makeexpensivefield field = +let makeexpensivefield field = { field with get=(fun () -> "") } let safe_i64_of_string field str = @@ -86,8 +86,8 @@ type 'a lzy = Got of 'a | ToGet of (unit -> 'a) let lzy_get a = match !a with - | Got x -> x - | ToGet f -> let x = f () in a := Got x; x + | Got x -> x + | ToGet f -> let x = f () in a := Got x; x (* End of cache code *) @@ -99,37 +99,37 @@ let field_lookup recs name = match List.filter (fun x -> x.name = name) recs wit let safe_get_field x = try x.get () with - | Api_errors.Server_error(s,_) as e-> if s=Api_errors.handle_invalid then "" else raise e - | e -> raise e + | Api_errors.Server_error(s,_) as e-> if s=Api_errors.handle_invalid then "" else raise e + | e -> raise e let 
get_uuid_from_ref r = - try - match Ref_index.lookup (Ref.string_of r) with - | None -> raise (CLI_failed_to_find_param "uuid") - | Some x -> x.Ref_index.uuid - with _ -> nid + try + match Ref_index.lookup (Ref.string_of r) with + | None -> raise (CLI_failed_to_find_param "uuid") + | Some x -> x.Ref_index.uuid + with _ -> nid let get_name_from_ref r = - try - match Ref_index.lookup (Ref.string_of r) with - | None -> raise (CLI_failed_to_find_param "name") - | Some x -> - begin - match x.Ref_index.name_label with - | None -> raise (CLI_failed_to_find_param "name") - | Some y -> y - end - with _ -> nid + try + match Ref_index.lookup (Ref.string_of r) with + | None -> raise (CLI_failed_to_find_param "name") + | Some x -> + begin + match x.Ref_index.name_label with + | None -> raise (CLI_failed_to_find_param "name") + | Some y -> y + end + with _ -> nid (** If the given list is of length 1, get a ref for the PBD's host, otherwise return Ref.null *) let get_pbds_host rpc session_id pbds = match pbds with - [pbd] -> - Client.PBD.get_host rpc session_id pbd - | _ -> - Ref.null + [pbd] -> + Client.PBD.get_host rpc session_id pbd + | _ -> + Ref.null (** Get a ref for the single host to which the given SR is attached, or Ref.null if it's attached to 0 or >1 hosts. 
*) @@ -137,7 +137,7 @@ let get_sr_host rpc session_id record = get_pbds_host rpc session_id (record.API.sR_PBDs) -let bond_record rpc session_id bond = +let bond_record rpc session_id bond = let _ref = ref bond in let empty_record = ToGet (fun () -> Client.Bond.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -157,11 +157,11 @@ let bond_record rpc session_id bond = ~get_map:(fun () -> (x ()).API.bond_properties) ~set_in_map:(fun k v -> Client.Bond.set_property rpc session_id bond k v) (); make_field ~name:"primary-slave" ~get:(fun () -> get_uuid_from_ref (x ()).API.bond_primary_slave) (); - make_field ~name:"links-up" ~get:(fun () -> Int64.to_string (x ()).API.bond_links_up) (); + make_field ~name:"links-up" ~get:(fun () -> Int64.to_string (x ()).API.bond_links_up) (); ] } -let vlan_record rpc session_id vlan = +let vlan_record rpc session_id vlan = let _ref = ref vlan in let empty_record = ToGet (fun () -> Client.VLAN.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -172,14 +172,14 @@ let vlan_record rpc session_id vlan = getref=(fun () -> !_ref); fields= [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vLAN_uuid) (); - make_field ~name:"tagged-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.vLAN_tagged_PIF) (); - make_field ~name:"untagged-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.vLAN_untagged_PIF) (); - make_field ~name:"tag" ~get:(fun () -> Int64.to_string (x ()).API.vLAN_tag) (); + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vLAN_uuid) (); + make_field ~name:"tagged-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.vLAN_tagged_PIF) (); + make_field ~name:"untagged-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.vLAN_untagged_PIF) (); + make_field ~name:"tag" ~get:(fun () -> Int64.to_string (x ()).API.vLAN_tag) (); ] } - -let tunnel_record rpc session_id tunnel = + +let tunnel_record rpc session_id tunnel = let _ref = ref tunnel in let empty_record = ToGet (fun () -> 
Client.Tunnel.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -190,19 +190,19 @@ let tunnel_record rpc session_id tunnel = getref=(fun () -> !_ref); fields= [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.tunnel_uuid) (); - make_field ~name:"access-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.tunnel_access_PIF) (); - make_field ~name:"transport-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.tunnel_transport_PIF) (); - make_field ~name:"status" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.tunnel_status) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.tunnel_other_config) - ~add_to_map:(fun k v -> Client.Tunnel.add_to_other_config rpc session_id tunnel k v) - ~remove_from_map:(fun k -> Client.Tunnel.remove_from_other_config rpc session_id tunnel k) - ~get_map:(fun () -> (x ()).API.tunnel_other_config) (); + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.tunnel_uuid) (); + make_field ~name:"access-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.tunnel_access_PIF) (); + make_field ~name:"transport-PIF" ~get:(fun () -> get_uuid_from_ref (x ()).API.tunnel_transport_PIF) (); + make_field ~name:"status" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.tunnel_status) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.tunnel_other_config) + ~add_to_map:(fun k v -> Client.Tunnel.add_to_other_config rpc session_id tunnel k v) + ~remove_from_map:(fun k -> Client.Tunnel.remove_from_other_config rpc session_id tunnel k) + ~get_map:(fun () -> (x ()).API.tunnel_other_config) (); ] } -let message_record rpc session_id message = +let message_record rpc session_id message = let _ref = ref message in let empty_record = ToGet (fun () -> Client.Message.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -213,13 +213,13 @@ let message_record rpc session_id message = getref=(fun () -> !_ref); fields= [ - 
make_field ~name:"uuid" ~get:(fun () -> (x ()).API.message_uuid) (); - make_field ~name:"name" ~get:(fun () -> (x ()).API.message_name) (); - make_field ~name:"priority" ~get:(fun () -> Int64.to_string (x ()).API.message_priority) (); - make_field ~name:"class" ~get:(fun () -> match (x ()).API.message_cls with `VM -> "VM" | `Host -> "Host" | `SR -> "SR" | `Pool -> "Pool" | `VMPP -> "VMPP") (); - make_field ~name:"obj-uuid" ~get:(fun () -> (x ()).API.message_obj_uuid) (); - make_field ~name:"timestamp" ~get:(fun () -> Date.to_string (x ()).API.message_timestamp) (); - make_field ~name:"body" ~get:(fun () -> (x ()).API.message_body) (); + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.message_uuid) (); + make_field ~name:"name" ~get:(fun () -> (x ()).API.message_name) (); + make_field ~name:"priority" ~get:(fun () -> Int64.to_string (x ()).API.message_priority) (); + make_field ~name:"class" ~get:(fun () -> match (x ()).API.message_cls with `VM -> "VM" | `Host -> "Host" | `SR -> "SR" | `Pool -> "Pool" | `VMPP -> "VMPP") (); + make_field ~name:"obj-uuid" ~get:(fun () -> (x ()).API.message_obj_uuid) (); + make_field ~name:"timestamp" ~get:(fun () -> Date.to_string (x ()).API.message_timestamp) (); + make_field ~name:"body" ~get:(fun () -> (x ()).API.message_body) (); ] } @@ -237,74 +237,74 @@ let pif_record rpc session_id pif = getref=(fun () -> !_ref); fields= [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pIF_uuid) (); - make_field ~name:"device" ~get:(fun () -> (x ()).API.pIF_device) (); - make_field ~name:"MAC" ~get:(fun () -> (x ()).API.pIF_MAC) - ~case_insensitive:true (); - make_field ~name:"physical" ~get:(fun () -> string_of_bool ((x ()).API.pIF_physical)) (); - make_field ~name:"managed" ~get:(fun () -> string_of_bool ((x ()).API.pIF_managed)) (); - make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool ((x ()).API.pIF_currently_attached)) (); - make_field ~name:"MTU" ~get:(fun () -> (Int64.to_string (x ()).API.pIF_MTU)) (); - 
make_field ~name:"VLAN" ~get:(fun () -> (Int64.to_string (x ()).API.pIF_VLAN)) (); - make_field ~name:"bond-master-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_bond_master_of)) (); - make_field ~name:"bond-slave-of" ~get:(fun () -> get_uuid_from_ref (x ()).API.pIF_bond_slave_of) (); - make_field ~name:"tunnel-access-PIF-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_tunnel_access_PIF_of)) (); - make_field ~name:"tunnel-transport-PIF-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_tunnel_transport_PIF_of)) (); - make_field ~name:"management" ~get:(fun () -> string_of_bool ((x ()).API.pIF_management)) (); - make_field ~name:"network-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pIF_network with _ -> nid) (); - make_field ~name:"network-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pIF_network with _ -> nid) (); - make_field ~name:"host-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pIF_host with _ -> nid) (); - make_field ~name:"host-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pIF_host with _ -> nid) (); - make_field ~name:"IP-configuration-mode" ~get:(fun () -> Record_util.ip_configuration_mode_to_string (x ()).API.pIF_ip_configuration_mode) (); - make_field ~name:"IP" ~get:(fun () -> (x ()).API.pIF_IP) (); - make_field ~name:"netmask" ~get:(fun () -> (x ()).API.pIF_netmask) (); - make_field ~name:"gateway" ~get:(fun () -> (x ()).API.pIF_gateway) (); - make_field ~name:"IPv6-configuration-mode" ~get:(fun () -> Record_util.ipv6_configuration_mode_to_string (x ()).API.pIF_ipv6_configuration_mode) (); - make_field ~name:"IPv6" ~get:(fun () -> String.concat "; " (x ()).API.pIF_IPv6) (); - make_field ~name:"IPv6-gateway" ~get:(fun () -> (x ()).API.pIF_ipv6_gateway) (); - make_field ~name:"primary-address-type" ~get:(fun () -> Record_util.primary_address_type_to_string 
(x ()).API.pIF_primary_address_type) (); - make_field ~name:"DNS" ~get:(fun () -> (x ()).API.pIF_DNS) (); - make_field ~name:"properties" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pIF_properties) - ~get_map:(fun () -> (x ()).API.pIF_properties) - ~set_in_map:(fun k v -> Client.PIF.set_property rpc session_id pif k v) (); - make_field ~name:"capabilities" ~get:(fun () -> String.concat "; " (x ()).API.pIF_capabilities) - ~get_set:(fun () -> (x ()).API.pIF_capabilities) (); - make_field ~name:"io_read_kbs" ~get:(fun () -> - try - let host = (x ()).API.pIF_host in - let name = Printf.sprintf "pif_%s_rx" (x ()).API.pIF_device in - let value = Client.Host.query_data_source rpc session_id host name in - string_of_float (value /. 1024.0) - with _ -> "") ~expensive:true (); - make_field ~name:"io_write_kbs" ~get:(fun () -> - try - let host = (x ()).API.pIF_host in - let name = Printf.sprintf "pif_%s_tx" (x ()).API.pIF_device in - let value = Client.Host.query_data_source rpc session_id host name in - string_of_float (value /. 
1024.0) - with _ -> "") ~expensive:true (); - make_field ~name:"carrier" ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.pIF_metrics_carrier) (xm ()))) (); - make_field ~name:"vendor-id" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_vendor_id) (xm ()))) (); - make_field ~name:"vendor-name" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_vendor_name) (xm ()))) (); - make_field ~name:"device-id" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_device_id) (xm ()))) (); - make_field ~name:"device-name" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_device_name) (xm ()))) (); - make_field ~name:"speed" ~get:(fun () -> default nid (may (fun m -> (Int64.to_string m.API.pIF_metrics_speed) ^ " Mbit/s") (xm ()))) (); - make_field ~name:"duplex" ~get:(fun () -> default nid (may (fun m -> - if m.API.pIF_metrics_duplex then - "full" - else if m.API.pIF_metrics_carrier then - "half" - else - "unknown" - ) (xm ()))) (); - make_field ~name:"disallow-unplug" ~get:(fun () -> string_of_bool ((x ()).API.pIF_disallow_unplug)) - ~set:(fun disallow_unplug -> Client.PIF.set_disallow_unplug rpc session_id pif (safe_bool_of_string "disallow-unplug" disallow_unplug)) (); - make_field ~name:"pci-bus-path" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_pci_bus_path) (xm ()))) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pIF_other_config) - ~add_to_map:(fun k v -> Client.PIF.add_to_other_config rpc session_id pif k v) - ~remove_from_map:(fun k -> Client.PIF.remove_from_other_config rpc session_id pif k) - ~get_map:(fun () -> (x ()).API.pIF_other_config) (); + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pIF_uuid) (); + make_field ~name:"device" ~get:(fun () -> (x ()).API.pIF_device) (); + make_field ~name:"MAC" ~get:(fun () -> (x ()).API.pIF_MAC) + ~case_insensitive:true (); + make_field ~name:"physical" ~get:(fun () -> string_of_bool ((x 
()).API.pIF_physical)) (); + make_field ~name:"managed" ~get:(fun () -> string_of_bool ((x ()).API.pIF_managed)) (); + make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool ((x ()).API.pIF_currently_attached)) (); + make_field ~name:"MTU" ~get:(fun () -> (Int64.to_string (x ()).API.pIF_MTU)) (); + make_field ~name:"VLAN" ~get:(fun () -> (Int64.to_string (x ()).API.pIF_VLAN)) (); + make_field ~name:"bond-master-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_bond_master_of)) (); + make_field ~name:"bond-slave-of" ~get:(fun () -> get_uuid_from_ref (x ()).API.pIF_bond_slave_of) (); + make_field ~name:"tunnel-access-PIF-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_tunnel_access_PIF_of)) (); + make_field ~name:"tunnel-transport-PIF-of" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.pIF_tunnel_transport_PIF_of)) (); + make_field ~name:"management" ~get:(fun () -> string_of_bool ((x ()).API.pIF_management)) (); + make_field ~name:"network-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pIF_network with _ -> nid) (); + make_field ~name:"network-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pIF_network with _ -> nid) (); + make_field ~name:"host-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pIF_host with _ -> nid) (); + make_field ~name:"host-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pIF_host with _ -> nid) (); + make_field ~name:"IP-configuration-mode" ~get:(fun () -> Record_util.ip_configuration_mode_to_string (x ()).API.pIF_ip_configuration_mode) (); + make_field ~name:"IP" ~get:(fun () -> (x ()).API.pIF_IP) (); + make_field ~name:"netmask" ~get:(fun () -> (x ()).API.pIF_netmask) (); + make_field ~name:"gateway" ~get:(fun () -> (x ()).API.pIF_gateway) (); + make_field ~name:"IPv6-configuration-mode" ~get:(fun () -> Record_util.ipv6_configuration_mode_to_string 
(x ()).API.pIF_ipv6_configuration_mode) (); + make_field ~name:"IPv6" ~get:(fun () -> String.concat "; " (x ()).API.pIF_IPv6) (); + make_field ~name:"IPv6-gateway" ~get:(fun () -> (x ()).API.pIF_ipv6_gateway) (); + make_field ~name:"primary-address-type" ~get:(fun () -> Record_util.primary_address_type_to_string (x ()).API.pIF_primary_address_type) (); + make_field ~name:"DNS" ~get:(fun () -> (x ()).API.pIF_DNS) (); + make_field ~name:"properties" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pIF_properties) + ~get_map:(fun () -> (x ()).API.pIF_properties) + ~set_in_map:(fun k v -> Client.PIF.set_property rpc session_id pif k v) (); + make_field ~name:"capabilities" ~get:(fun () -> String.concat "; " (x ()).API.pIF_capabilities) + ~get_set:(fun () -> (x ()).API.pIF_capabilities) (); + make_field ~name:"io_read_kbs" ~get:(fun () -> + try + let host = (x ()).API.pIF_host in + let name = Printf.sprintf "pif_%s_rx" (x ()).API.pIF_device in + let value = Client.Host.query_data_source rpc session_id host name in + string_of_float (value /. 1024.0) + with _ -> "") ~expensive:true (); + make_field ~name:"io_write_kbs" ~get:(fun () -> + try + let host = (x ()).API.pIF_host in + let name = Printf.sprintf "pif_%s_tx" (x ()).API.pIF_device in + let value = Client.Host.query_data_source rpc session_id host name in + string_of_float (value /. 
1024.0) + with _ -> "") ~expensive:true (); + make_field ~name:"carrier" ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.pIF_metrics_carrier) (xm ()))) (); + make_field ~name:"vendor-id" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_vendor_id) (xm ()))) (); + make_field ~name:"vendor-name" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_vendor_name) (xm ()))) (); + make_field ~name:"device-id" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_device_id) (xm ()))) (); + make_field ~name:"device-name" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_device_name) (xm ()))) (); + make_field ~name:"speed" ~get:(fun () -> default nid (may (fun m -> (Int64.to_string m.API.pIF_metrics_speed) ^ " Mbit/s") (xm ()))) (); + make_field ~name:"duplex" ~get:(fun () -> default nid (may (fun m -> + if m.API.pIF_metrics_duplex then + "full" + else if m.API.pIF_metrics_carrier then + "half" + else + "unknown" + ) (xm ()))) (); + make_field ~name:"disallow-unplug" ~get:(fun () -> string_of_bool ((x ()).API.pIF_disallow_unplug)) + ~set:(fun disallow_unplug -> Client.PIF.set_disallow_unplug rpc session_id pif (safe_bool_of_string "disallow-unplug" disallow_unplug)) (); + make_field ~name:"pci-bus-path" ~get:(fun () -> default nid (may (fun m -> m.API.pIF_metrics_pci_bus_path) (xm ()))) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pIF_other_config) + ~add_to_map:(fun k v -> Client.PIF.add_to_other_config rpc session_id pif k v) + ~remove_from_map:(fun k -> Client.PIF.remove_from_other_config rpc session_id pif k) + ~get_map:(fun () -> (x ()).API.pIF_other_config) (); ] } let task_record rpc session_id task = @@ -331,14 +331,14 @@ let task_record rpc session_id task = make_field ~name:"finished" ~get:(fun () -> Date.to_string (x ()).API.task_finished) (); make_field ~name:"error_info" ~get:(fun () -> String.concat "; " (x ()).API.task_error_info) (); make_field 
~name:"allowed_operations" ~get:(fun () -> String.concat "; " (List.map Record_util.task_allowed_operations_to_string (x ()).API.task_allowed_operations)) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.task_other_config) - ~add_to_map:(fun k v -> Client.Task.add_to_other_config rpc session_id task k v) - ~remove_from_map:(fun k -> Client.Task.remove_from_other_config rpc session_id task k) - ~get_map:(fun () -> (x ()).API.task_other_config) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.task_other_config) + ~add_to_map:(fun k v -> Client.Task.add_to_other_config rpc session_id task k v) + ~remove_from_map:(fun k -> Client.Task.remove_from_other_config rpc session_id task k) + ~get_map:(fun () -> (x ()).API.task_other_config) (); ]} -let vif_record rpc session_id vif = +let vif_record rpc session_id vif = let _ref = ref vif in let empty_record = ToGet (fun () -> Client.VIF.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -349,197 +349,197 @@ let vif_record rpc session_id vif = getref=(fun () -> !_ref); fields= [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vIF_uuid) (); - make_field ~name:"vm-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vIF_VM) (); - make_field ~name:"vm-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vIF_VM) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.vif_operation_to_string (x ()).API.vIF_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.vif_operation_to_string (x ()).API.vIF_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vif_operation_to_string b) (x ()).API.vIF_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vif_operation_to_string b) (x ()).API.vIF_current_operations) (); - make_field ~name:"device" ~get:(fun () 
-> (x ()).API.vIF_device) (); - make_field ~name:"MAC" ~get:(fun () -> (x ()).API.vIF_MAC) - ~case_insensitive:true (); - make_field ~name:"MAC-autogenerated" ~get:(fun () -> string_of_bool (x ()).API.vIF_MAC_autogenerated) (); - make_field ~name:"MTU" ~get:(fun () -> Int64.to_string (x ()).API.vIF_MTU) (); - make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vIF_currently_attached) (); - make_field ~name:"qos_algorithm_type" ~get:(fun () -> (x ()).API.vIF_qos_algorithm_type) - ~set:(fun qat -> Client.VIF.set_qos_algorithm_type rpc session_id vif qat) (); - make_field ~name:"qos_algorithm_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vIF_qos_algorithm_params) - ~add_to_map:(fun k v -> Client.VIF.add_to_qos_algorithm_params rpc session_id vif k v) - ~remove_from_map:(fun k -> Client.VIF.remove_from_qos_algorithm_params rpc session_id vif k) - ~get_map:(fun () -> (x ()).API.vIF_qos_algorithm_params) (); - make_field ~name:"qos_supported_algorithms" ~get:(fun () -> String.concat "; " (x ()).API.vIF_qos_supported_algorithms) - ~get_set:(fun () -> (x ()).API.vIF_qos_supported_algorithms) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vIF_other_config) - ~add_to_map:(fun k v -> Client.VIF.add_to_other_config rpc session_id vif k v) - ~remove_from_map:(fun k -> Client.VIF.remove_from_other_config rpc session_id vif k) - ~get_map:(fun () -> (x ()).API.vIF_other_config) (); - make_field ~name:"network-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vIF_network) (); - make_field ~name:"network-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vIF_network) (); - make_field ~name:"io_read_kbs" ~get:(fun () -> - try - let name = Printf.sprintf "vif_%s_rx" (x ()).API.vIF_device in - string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vIF_VM name) /. 
1024.0) - with _ -> "") ~expensive:true (); - make_field ~name:"io_write_kbs" ~get:(fun () -> - try - let name = Printf.sprintf "vif_%s_tx" (x ()).API.vIF_device in - string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vIF_VM name) /. 1024.0) - with _ -> "") ~expensive:true (); - make_field ~name:"locking-mode" - ~get:(fun () -> Record_util.vif_locking_mode_to_string (x ()).API.vIF_locking_mode) - ~set:(fun value -> Client.VIF.set_locking_mode rpc session_id vif (Record_util.string_to_vif_locking_mode value)) (); - make_field ~name:"ipv4-allowed" - ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv4_allowed) - ~get_set:(fun () -> (x ()).API.vIF_ipv4_allowed) - ~add_to_set:(fun value -> Client.VIF.add_ipv4_allowed rpc session_id vif value) - ~remove_from_set:(fun value -> Client.VIF.remove_ipv4_allowed rpc session_id vif value) - ~set:(fun value -> Client.VIF.set_ipv4_allowed rpc session_id vif (String.split ',' value)) (); - make_field ~name:"ipv6-allowed" - ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv6_allowed) - ~get_set:(fun () -> (x ()).API.vIF_ipv6_allowed) - ~add_to_set:(fun value -> Client.VIF.add_ipv6_allowed rpc session_id vif value) - ~remove_from_set:(fun value -> Client.VIF.remove_ipv6_allowed rpc session_id vif value) - ~set:(fun value -> Client.VIF.set_ipv6_allowed rpc session_id vif (String.split ',' value)) (); - make_field ~name:"ipv4-configuration-mode" ~get:(fun () -> Record_util.vif_ipv4_configuration_mode_to_string (x ()).API.vIF_ipv4_configuration_mode) (); - make_field ~name:"ipv4-addresses" ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv4_addresses) (); - make_field ~name:"ipv4-gateway" ~get:(fun () -> (x ()).API.vIF_ipv4_gateway) (); - make_field ~name:"ipv6-configuration-mode" ~get:(fun () -> Record_util.vif_ipv6_configuration_mode_to_string (x ()).API.vIF_ipv6_configuration_mode) (); - make_field ~name:"ipv6-addresses" ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv6_addresses) (); - make_field 
~name:"ipv6-gateway" ~get:(fun () -> (x ()).API.vIF_ipv6_gateway) (); + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vIF_uuid) (); + make_field ~name:"vm-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vIF_VM) (); + make_field ~name:"vm-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vIF_VM) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.vif_operation_to_string (x ()).API.vIF_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.vif_operation_to_string (x ()).API.vIF_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vif_operation_to_string b) (x ()).API.vIF_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vif_operation_to_string b) (x ()).API.vIF_current_operations) (); + make_field ~name:"device" ~get:(fun () -> (x ()).API.vIF_device) (); + make_field ~name:"MAC" ~get:(fun () -> (x ()).API.vIF_MAC) + ~case_insensitive:true (); + make_field ~name:"MAC-autogenerated" ~get:(fun () -> string_of_bool (x ()).API.vIF_MAC_autogenerated) (); + make_field ~name:"MTU" ~get:(fun () -> Int64.to_string (x ()).API.vIF_MTU) (); + make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vIF_currently_attached) (); + make_field ~name:"qos_algorithm_type" ~get:(fun () -> (x ()).API.vIF_qos_algorithm_type) + ~set:(fun qat -> Client.VIF.set_qos_algorithm_type rpc session_id vif qat) (); + make_field ~name:"qos_algorithm_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vIF_qos_algorithm_params) + ~add_to_map:(fun k v -> Client.VIF.add_to_qos_algorithm_params rpc session_id vif k v) + ~remove_from_map:(fun k -> Client.VIF.remove_from_qos_algorithm_params rpc session_id vif k) + ~get_map:(fun () -> (x ()).API.vIF_qos_algorithm_params) (); + make_field ~name:"qos_supported_algorithms" ~get:(fun () -> String.concat "; " (x 
()).API.vIF_qos_supported_algorithms) + ~get_set:(fun () -> (x ()).API.vIF_qos_supported_algorithms) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vIF_other_config) + ~add_to_map:(fun k v -> Client.VIF.add_to_other_config rpc session_id vif k v) + ~remove_from_map:(fun k -> Client.VIF.remove_from_other_config rpc session_id vif k) + ~get_map:(fun () -> (x ()).API.vIF_other_config) (); + make_field ~name:"network-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vIF_network) (); + make_field ~name:"network-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vIF_network) (); + make_field ~name:"io_read_kbs" ~get:(fun () -> + try + let name = Printf.sprintf "vif_%s_rx" (x ()).API.vIF_device in + string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vIF_VM name) /. 1024.0) + with _ -> "") ~expensive:true (); + make_field ~name:"io_write_kbs" ~get:(fun () -> + try + let name = Printf.sprintf "vif_%s_tx" (x ()).API.vIF_device in + string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vIF_VM name) /. 
1024.0) + with _ -> "") ~expensive:true (); + make_field ~name:"locking-mode" + ~get:(fun () -> Record_util.vif_locking_mode_to_string (x ()).API.vIF_locking_mode) + ~set:(fun value -> Client.VIF.set_locking_mode rpc session_id vif (Record_util.string_to_vif_locking_mode value)) (); + make_field ~name:"ipv4-allowed" + ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv4_allowed) + ~get_set:(fun () -> (x ()).API.vIF_ipv4_allowed) + ~add_to_set:(fun value -> Client.VIF.add_ipv4_allowed rpc session_id vif value) + ~remove_from_set:(fun value -> Client.VIF.remove_ipv4_allowed rpc session_id vif value) + ~set:(fun value -> Client.VIF.set_ipv4_allowed rpc session_id vif (String.split ',' value)) (); + make_field ~name:"ipv6-allowed" + ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv6_allowed) + ~get_set:(fun () -> (x ()).API.vIF_ipv6_allowed) + ~add_to_set:(fun value -> Client.VIF.add_ipv6_allowed rpc session_id vif value) + ~remove_from_set:(fun value -> Client.VIF.remove_ipv6_allowed rpc session_id vif value) + ~set:(fun value -> Client.VIF.set_ipv6_allowed rpc session_id vif (String.split ',' value)) (); + make_field ~name:"ipv4-configuration-mode" ~get:(fun () -> Record_util.vif_ipv4_configuration_mode_to_string (x ()).API.vIF_ipv4_configuration_mode) (); + make_field ~name:"ipv4-addresses" ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv4_addresses) (); + make_field ~name:"ipv4-gateway" ~get:(fun () -> (x ()).API.vIF_ipv4_gateway) (); + make_field ~name:"ipv6-configuration-mode" ~get:(fun () -> Record_util.vif_ipv6_configuration_mode_to_string (x ()).API.vIF_ipv6_configuration_mode) (); + make_field ~name:"ipv6-addresses" ~get:(fun () -> String.concat "; " (x ()).API.vIF_ipv6_addresses) (); + make_field ~name:"ipv6-gateway" ~get:(fun () -> (x ()).API.vIF_ipv6_gateway) (); ]} let net_record rpc session_id net = - let _ref = ref net in - let empty_record = ToGet (fun () -> Client.Network.get_record rpc session_id !_ref) in - let record = ref empty_record 
in - let x () = lzy_get record in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.network_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.network_name_label) - ~set:(fun x -> Client.Network.set_name_label rpc session_id net x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.network_name_description) - ~set:(fun x -> Client.Network.set_name_description rpc session_id net x) (); - make_field ~name:"VIF-uuids" ~get:(fun () -> String.concat "; " (List.map (fun vif -> get_uuid_from_ref vif) (x ()).API.network_VIFs)) - ~get_set:(fun () -> (List.map (fun vif -> get_uuid_from_ref vif) (x ()).API.network_VIFs)) (); - make_field ~name:"PIF-uuids" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.network_PIFs)) - ~get_set:(fun () -> (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.network_PIFs)) (); - make_field ~name:"MTU" ~get:(fun () -> (Int64.to_string (x ()).API.network_MTU)) ~set:(fun x -> Client.Network.set_MTU rpc session_id net (Int64.of_string x)) (); - make_field ~name:"bridge" ~get:(fun () -> (x ()).API.network_bridge) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.network_other_config) - ~add_to_map:(fun k v -> Client.Network.add_to_other_config rpc session_id net k v) - ~remove_from_map:(fun k -> Client.Network.remove_from_other_config rpc session_id net k) - ~get_map:(fun () -> (x ()).API.network_other_config) (); - make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.network_blobs) (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.network_tags) - ~get_set:(fun () -> (x ()).API.network_tags) - ~add_to_set:(fun tag -> Client.Network.add_tags rpc session_id net tag) - 
~remove_from_set:(fun tag -> Client.Network.remove_tags rpc session_id net tag) (); - make_field ~name:"default-locking-mode" - ~get:(fun () -> Record_util.network_default_locking_mode_to_string (x ()).API.network_default_locking_mode) - ~set:(fun value -> Client.Network.set_default_locking_mode rpc session_id net - (Record_util.string_to_network_default_locking_mode value)) (); - ]} + let _ref = ref net in + let empty_record = ToGet (fun () -> Client.Network.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.network_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.network_name_label) + ~set:(fun x -> Client.Network.set_name_label rpc session_id net x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.network_name_description) + ~set:(fun x -> Client.Network.set_name_description rpc session_id net x) (); + make_field ~name:"VIF-uuids" ~get:(fun () -> String.concat "; " (List.map (fun vif -> get_uuid_from_ref vif) (x ()).API.network_VIFs)) + ~get_set:(fun () -> (List.map (fun vif -> get_uuid_from_ref vif) (x ()).API.network_VIFs)) (); + make_field ~name:"PIF-uuids" ~get:(fun () -> String.concat "; " (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.network_PIFs)) + ~get_set:(fun () -> (List.map (fun pif -> get_uuid_from_ref pif) (x ()).API.network_PIFs)) (); + make_field ~name:"MTU" ~get:(fun () -> (Int64.to_string (x ()).API.network_MTU)) ~set:(fun x -> Client.Network.set_MTU rpc session_id net (Int64.of_string x)) (); + make_field ~name:"bridge" ~get:(fun () -> (x ()).API.network_bridge) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.network_other_config) + ~add_to_map:(fun k v -> 
Client.Network.add_to_other_config rpc session_id net k v) + ~remove_from_map:(fun k -> Client.Network.remove_from_other_config rpc session_id net k) + ~get_map:(fun () -> (x ()).API.network_other_config) (); + make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.network_blobs) (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.network_tags) + ~get_set:(fun () -> (x ()).API.network_tags) + ~add_to_set:(fun tag -> Client.Network.add_tags rpc session_id net tag) + ~remove_from_set:(fun tag -> Client.Network.remove_tags rpc session_id net tag) (); + make_field ~name:"default-locking-mode" + ~get:(fun () -> Record_util.network_default_locking_mode_to_string (x ()).API.network_default_locking_mode) + ~set:(fun value -> Client.Network.set_default_locking_mode rpc session_id net + (Record_util.string_to_network_default_locking_mode value)) (); + ]} let pool_record rpc session_id pool = - let _ref = ref pool in - let empty_record = ToGet (fun () -> Client.Pool.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pool_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.pool_name_label) - ~set:(fun x -> Client.Pool.set_name_label rpc session_id pool x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.pool_name_description) - ~set:(fun x -> Client.Pool.set_name_description rpc session_id pool x) (); - make_field ~name:"master" - ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_master) (); - make_field ~name:"default-SR" - ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_default_SR) - ~set:(fun x -> - let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in - 
Client.Pool.set_default_SR rpc session_id pool sr_ref) (); - make_field ~name:"crash-dump-SR" - ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_crash_dump_SR) - ~set:(fun x -> - let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in - Client.Pool.set_crash_dump_SR rpc session_id pool sr_ref) (); - make_field ~name:"suspend-image-SR" - ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_suspend_image_SR) - ~set:(fun x -> - let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in - Client.Pool.set_suspend_image_SR rpc session_id pool sr_ref) (); - make_field ~name:"supported-sr-types" ~get:(fun () -> String.concat "; " (Client.SR.get_supported_types rpc session_id)) ~expensive:true (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_other_config) - ~add_to_map:(fun k v -> Client.Pool.add_to_other_config rpc session_id pool k v) - ~remove_from_map:(fun k -> Client.Pool.remove_from_other_config rpc session_id pool k) - ~get_map:(fun () -> (x ()).API.pool_other_config) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.pool_operation_to_string (x ()).API.pool_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.pool_operation_to_string (x ()).API.pool_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.pool_operation_to_string b) (x ()).API.pool_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.pool_operation_to_string b) (x ()).API.pool_current_operations) (); - make_field ~name:"ha-enabled" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_enabled) (); - make_field ~name:"ha-configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_ha_configuration) (); - make_field ~name:"ha-statefiles" ~get:(fun () -> String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string 
x)) (x ()).API.pool_ha_statefiles)) (); - make_field ~name:"ha-host-failures-to-tolerate" ~get:(fun () -> Int64.to_string (x ()).API.pool_ha_host_failures_to_tolerate) ~set:(fun x -> Client.Pool.set_ha_host_failures_to_tolerate rpc session_id pool (Int64.of_string x)) (); - make_field ~name:"ha-plan-exists-for" ~get:(fun () -> Int64.to_string (x ()).API.pool_ha_plan_exists_for) (); - make_field ~name:"ha-allow-overcommit" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_allow_overcommit) ~set:(fun x -> Client.Pool.set_ha_allow_overcommit rpc session_id pool (bool_of_string x)) (); - make_field ~name:"ha-overcommitted" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_overcommitted) (); - make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.pool_blobs) (); + let _ref = ref pool in + let empty_record = ToGet (fun () -> Client.Pool.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pool_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.pool_name_label) + ~set:(fun x -> Client.Pool.set_name_label rpc session_id pool x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.pool_name_description) + ~set:(fun x -> Client.Pool.set_name_description rpc session_id pool x) (); + make_field ~name:"master" + ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_master) (); + make_field ~name:"default-SR" + ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_default_SR) + ~set:(fun x -> + let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in + Client.Pool.set_default_SR rpc session_id pool sr_ref) (); + make_field ~name:"crash-dump-SR" + ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_crash_dump_SR) + 
~set:(fun x -> + let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in + Client.Pool.set_crash_dump_SR rpc session_id pool sr_ref) (); + make_field ~name:"suspend-image-SR" + ~get:(fun () -> get_uuid_from_ref (x ()).API.pool_suspend_image_SR) + ~set:(fun x -> + let sr_ref = if x="" then Ref.null else Client.SR.get_by_uuid rpc session_id x in + Client.Pool.set_suspend_image_SR rpc session_id pool sr_ref) (); + make_field ~name:"supported-sr-types" ~get:(fun () -> String.concat "; " (Client.SR.get_supported_types rpc session_id)) ~expensive:true (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_other_config) + ~add_to_map:(fun k v -> Client.Pool.add_to_other_config rpc session_id pool k v) + ~remove_from_map:(fun k -> Client.Pool.remove_from_other_config rpc session_id pool k) + ~get_map:(fun () -> (x ()).API.pool_other_config) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.pool_operation_to_string (x ()).API.pool_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.pool_operation_to_string (x ()).API.pool_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.pool_operation_to_string b) (x ()).API.pool_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.pool_operation_to_string b) (x ()).API.pool_current_operations) (); + make_field ~name:"ha-enabled" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_enabled) (); + make_field ~name:"ha-configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_ha_configuration) (); + make_field ~name:"ha-statefiles" ~get:(fun () -> String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string x)) (x ()).API.pool_ha_statefiles)) (); + make_field ~name:"ha-host-failures-to-tolerate" ~get:(fun () -> Int64.to_string (x 
()).API.pool_ha_host_failures_to_tolerate) ~set:(fun x -> Client.Pool.set_ha_host_failures_to_tolerate rpc session_id pool (Int64.of_string x)) (); + make_field ~name:"ha-plan-exists-for" ~get:(fun () -> Int64.to_string (x ()).API.pool_ha_plan_exists_for) (); + make_field ~name:"ha-allow-overcommit" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_allow_overcommit) ~set:(fun x -> Client.Pool.set_ha_allow_overcommit rpc session_id pool (bool_of_string x)) (); + make_field ~name:"ha-overcommitted" ~get:(fun () -> string_of_bool (x ()).API.pool_ha_overcommitted) (); + make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.pool_blobs) (); make_field ~name:"wlb-url" ~get:(fun () -> (x ()).API.pool_wlb_url) (); make_field ~name:"wlb-username" ~get:(fun () -> (x ()).API.pool_wlb_username) (); make_field ~name:"wlb-enabled" ~get:(fun () -> string_of_bool (x ()).API.pool_wlb_enabled) ~set:(fun x -> Client.Pool.set_wlb_enabled rpc session_id pool (bool_of_string x)) (); make_field ~name:"wlb-verify-cert" ~get:(fun () -> string_of_bool (x ()).API.pool_wlb_verify_cert) ~set:(fun x -> Client.Pool.set_wlb_verify_cert rpc session_id pool (bool_of_string x)) (); - make_field ~name:"gui-config" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_gui_config) - ~add_to_map:(fun k v -> Client.Pool.add_to_gui_config rpc session_id pool k v) - ~remove_from_map:(fun k -> Client.Pool.remove_from_gui_config rpc session_id pool k) - ~get_map:(fun () -> (x ()).API.pool_gui_config) (); - make_field ~name:"health-check-config" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_health_check_config) - ~add_to_map:(fun k v -> Client.Pool.add_to_health_check_config rpc session_id pool k v) - ~remove_from_map:(fun k -> Client.Pool.remove_from_health_check_config rpc session_id pool k) - ~get_map:(fun () -> (x ()).API.pool_health_check_config) (); - make_field ~name:"vswitch-controller" ~hidden:true ~get:(fun () -> let r = 
(x ()).API.pool_vswitch_controller in if r = "" then "" else r) (); - make_field ~name:"restrictions" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_restrictions) (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.pool_tags) - ~get_set:(fun () -> (x ()).API.pool_tags) - ~add_to_set:(fun tag -> Client.Pool.add_tags rpc session_id pool tag) - ~remove_from_set:(fun tag -> Client.Pool.remove_tags rpc session_id pool tag) (); - make_field ~name:"license-state" - ~get:(fun () -> Record_util.s2sm_to_string "; " (Client.Pool.get_license_state rpc session_id pool)) (); - make_field ~name:"ha-cluster-stack" ~get:(fun () -> (x ()).API.pool_ha_cluster_stack) (); - make_field ~name:"guest-agent-config" - ~get:(fun () -> - Record_util.s2sm_to_string "; " (x ()).API.pool_guest_agent_config) - ~add_to_map:(fun k v -> - Client.Pool.add_to_guest_agent_config rpc session_id pool k v) - ~remove_from_map:(fun k -> - Client.Pool.remove_from_guest_agent_config rpc session_id pool k) - ~get_map:(fun () -> (x ()).API.pool_guest_agent_config) - (); - make_field ~name:"cpu_info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_cpu_info) ~get_map:(fun () -> (x ()).API.pool_cpu_info) (); - make_field ~name:"policy-no-vendor-device" ~get:(fun () -> string_of_bool (x ()).API.pool_policy_no_vendor_device) ~set:(fun s -> Client.Pool.set_policy_no_vendor_device rpc session_id pool (safe_bool_of_string "policy-no-vendor-device" s)) (); - make_field ~name:"live-patching-disabled" ~get:(fun () -> string_of_bool (x ()).API.pool_live_patching_disabled) ~set:(fun s -> Client.Pool.set_live_patching_disabled rpc session_id pool (safe_bool_of_string "live-patching-disabled" s)) (); - ]} - -let subject_record rpc session_id subject = + make_field ~name:"gui-config" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_gui_config) + ~add_to_map:(fun k v -> Client.Pool.add_to_gui_config rpc session_id pool k v) + ~remove_from_map:(fun k -> 
Client.Pool.remove_from_gui_config rpc session_id pool k) + ~get_map:(fun () -> (x ()).API.pool_gui_config) (); + make_field ~name:"health-check-config" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_health_check_config) + ~add_to_map:(fun k v -> Client.Pool.add_to_health_check_config rpc session_id pool k v) + ~remove_from_map:(fun k -> Client.Pool.remove_from_health_check_config rpc session_id pool k) + ~get_map:(fun () -> (x ()).API.pool_health_check_config) (); + make_field ~name:"vswitch-controller" ~hidden:true ~get:(fun () -> let r = (x ()).API.pool_vswitch_controller in if r = "" then "" else r) (); + make_field ~name:"restrictions" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_restrictions) (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.pool_tags) + ~get_set:(fun () -> (x ()).API.pool_tags) + ~add_to_set:(fun tag -> Client.Pool.add_tags rpc session_id pool tag) + ~remove_from_set:(fun tag -> Client.Pool.remove_tags rpc session_id pool tag) (); + make_field ~name:"license-state" + ~get:(fun () -> Record_util.s2sm_to_string "; " (Client.Pool.get_license_state rpc session_id pool)) (); + make_field ~name:"ha-cluster-stack" ~get:(fun () -> (x ()).API.pool_ha_cluster_stack) (); + make_field ~name:"guest-agent-config" + ~get:(fun () -> + Record_util.s2sm_to_string "; " (x ()).API.pool_guest_agent_config) + ~add_to_map:(fun k v -> + Client.Pool.add_to_guest_agent_config rpc session_id pool k v) + ~remove_from_map:(fun k -> + Client.Pool.remove_from_guest_agent_config rpc session_id pool k) + ~get_map:(fun () -> (x ()).API.pool_guest_agent_config) + (); + make_field ~name:"cpu_info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pool_cpu_info) ~get_map:(fun () -> (x ()).API.pool_cpu_info) (); + make_field ~name:"policy-no-vendor-device" ~get:(fun () -> string_of_bool (x ()).API.pool_policy_no_vendor_device) ~set:(fun s -> Client.Pool.set_policy_no_vendor_device rpc session_id pool 
(safe_bool_of_string "policy-no-vendor-device" s)) (); + make_field ~name:"live-patching-disabled" ~get:(fun () -> string_of_bool (x ()).API.pool_live_patching_disabled) ~set:(fun s -> Client.Pool.set_live_patching_disabled rpc session_id pool (safe_bool_of_string "live-patching-disabled" s)) (); + ]} + +let subject_record rpc session_id subject = let _ref = ref subject in let empty_record = ToGet (fun () -> Client.Subject.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -549,28 +549,28 @@ let subject_record rpc session_id subject = record=x; getref=(fun () -> !_ref); fields = -[ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.subject_uuid) (); - make_field ~name:"subject-identifier" ~get:(fun () -> (x ()).API.subject_subject_identifier) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.subject_other_config) - ~get_map:(fun () -> (x ()).API.subject_other_config) (); - make_field ~name:"roles" - ~get:(fun () -> String.concat "; " - (try - List.map - (fun self -> try Client.Role.get_name_label rpc session_id self with _ -> nid) - (Client.Subject.get_roles rpc session_id subject) - with _ -> [] - ) - ) - ~expensive:false - ~get_set:(fun () -> try List.map - (fun self -> try Client.Role.get_name_label rpc session_id self with _ -> nid) - (Client.Subject.get_roles rpc session_id subject) with _ -> []) - (); -]} + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.subject_uuid) (); + make_field ~name:"subject-identifier" ~get:(fun () -> (x ()).API.subject_subject_identifier) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.subject_other_config) + ~get_map:(fun () -> (x ()).API.subject_other_config) (); + make_field ~name:"roles" + ~get:(fun () -> String.concat "; " + (try + List.map + (fun self -> try Client.Role.get_name_label rpc session_id self with _ -> nid) + (Client.Subject.get_roles rpc session_id subject) + with _ -> [] + ) + ) + 
~expensive:false + ~get_set:(fun () -> try List.map + (fun self -> try Client.Role.get_name_label rpc session_id self with _ -> nid) + (Client.Subject.get_roles rpc session_id subject) with _ -> []) + (); + ]} -let role_record rpc session_id role = +let role_record rpc session_id role = let _ref = ref role in let empty_record = ToGet (fun () -> Client.Role.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -580,29 +580,29 @@ let role_record rpc session_id role = record=x; getref=(fun () -> !_ref); fields = -[ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.role_uuid) (); - make_field ~name:"name" ~get:(fun () -> (x ()).API.role_name_label) (); - make_field ~name:"description" ~get:(fun () -> (x ()).API.role_name_description) (); - (*make_field ~name:"subroles" - ~get:(fun () -> String.concat "; " - (try (Client.Role.get_permissions_name_label ~rpc ~session_id ~self:(!_ref)) with _ -> []) - ) - ~expensive:true - ~get_set:(fun () -> try (Client.Role.get_permissions_name_label ~rpc ~session_id ~self:(!_ref)) - with _ -> []) - ();*) - (*make_field ~name:"is_complete" ~get:(fun () -> string_of_bool (x ()).API.role_is_complete) ();*) - (*make_field ~name:"is_basic" ~get:(fun () -> string_of_bool (x ()).API.role_is_basic) ();*) -]} + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.role_uuid) (); + make_field ~name:"name" ~get:(fun () -> (x ()).API.role_name_label) (); + make_field ~name:"description" ~get:(fun () -> (x ()).API.role_name_description) (); + (*make_field ~name:"subroles" + ~get:(fun () -> String.concat "; " + (try (Client.Role.get_permissions_name_label ~rpc ~session_id ~self:(!_ref)) with _ -> []) + ) + ~expensive:true + ~get_set:(fun () -> try (Client.Role.get_permissions_name_label ~rpc ~session_id ~self:(!_ref)) + with _ -> []) + ();*) + (*make_field ~name:"is_complete" ~get:(fun () -> string_of_bool (x ()).API.role_is_complete) ();*) + (*make_field ~name:"is_basic" ~get:(fun () -> string_of_bool (x ()).API.role_is_basic) 
();*) + ]} (* -let alert_record rpc session_id pool = +let alert_record rpc session_id pool = let _ref = ref pool in let record = ref None in - let x () = match !record with - | Some x -> x - | None -> + let x () = match !record with + | Some x -> x + | None -> let x = Client.Alert.get_record rpc session_id !_ref in record := Some x; x @@ -623,7 +623,7 @@ let alert_record rpc session_id pool = *) -let console_record rpc session_id console = +let console_record rpc session_id console = let _ref = ref console in let empty_record = ToGet (fun () -> Client.Console.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -632,311 +632,311 @@ let console_record rpc session_id console = setrefrec=(fun (a,b) -> _ref := a; record := Got b); record=x; getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.console_uuid) (); - make_field ~name:"vm-uuid" - ~get:(fun () -> (get_uuid_from_ref (x ()).API.console_VM)) (); - make_field ~name:"vm-name-label" - ~get:(fun () -> (get_name_from_ref (x ()).API.console_VM)) (); - make_field ~name:"protocol" ~get:(fun () -> Record_util.protocol_to_string (x ()).API.console_protocol) (); - make_field ~name:"location" ~get:(fun () -> (x ()).API.console_location) (); - make_field ~name:"other-config" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.console_other_config) - ~add_to_map:(fun k v -> Client.Console.add_to_other_config rpc session_id console k v) - ~remove_from_map:(fun k -> Client.Console.remove_from_other_config rpc session_id console k) - ~get_map:(fun () -> (x ()).API.console_other_config) (); - ]} + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.console_uuid) (); + make_field ~name:"vm-uuid" + ~get:(fun () -> (get_uuid_from_ref (x ()).API.console_VM)) (); + make_field ~name:"vm-name-label" + ~get:(fun () -> (get_name_from_ref (x ()).API.console_VM)) (); + make_field ~name:"protocol" ~get:(fun () -> Record_util.protocol_to_string (x 
()).API.console_protocol) (); + make_field ~name:"location" ~get:(fun () -> (x ()).API.console_location) (); + make_field ~name:"other-config" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.console_other_config) + ~add_to_map:(fun k v -> Client.Console.add_to_other_config rpc session_id console k v) + ~remove_from_map:(fun k -> Client.Console.remove_from_other_config rpc session_id console k) + ~get_map:(fun () -> (x ()).API.console_other_config) (); + ]} let vm_record rpc session_id vm = - let _ref = ref vm in - let empty_record = ToGet (fun () -> Client.VM.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - let empty_metrics = ToGet (fun () -> try Some (Client.VM_metrics.get_record rpc session_id (x ()).API.vM_metrics) with _ -> None) in - let metrics = ref empty_metrics in - let xm () = lzy_get metrics in - let empty_guest_metrics = ToGet (fun () -> try Some (Client.VM_guest_metrics.get_record rpc session_id (x ()).API.vM_guest_metrics) with _ -> None) in - let guest_metrics = ref empty_guest_metrics in - let get_vcpus_utilisation () = - let nvcpus = default 0 (may (fun m -> Int64.to_int m.API.vM_metrics_VCPUs_number) (xm ())) in - let rec inner n = - if n=nvcpus then [] else - (string_of_int n,string_of_float (Client.VM.query_data_source rpc session_id !_ref (Printf.sprintf "cpu%d" n)))::(inner (n+1)) - in - inner 0 - in - let get_memory_target () = - try - Int64.to_string ( - try - Int64.of_float ( - Client.VM.query_data_source - rpc session_id !_ref "memory_target" - ) - with Api_errors.Server_error (code, _) - when code = Api_errors.vm_bad_power_state -> 0L - ) - with _ -> "" - in - let xgm () = lzy_get guest_metrics in - { - setref = (fun r -> _ref := r; record := empty_record ); - setrefrec = (fun (a,b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = [ - make_field ~name:"uuid" - ~get:(fun () -> (x ()).API.vM_uuid) (); - make_field ~name:"name-label" - 
~get:(fun () -> (x ()).API.vM_name_label) - ~set:(fun x -> Client.VM.set_name_label rpc session_id vm x) (); - make_field ~name:"name-description" - ~get:(fun () -> (x ()).API.vM_name_description) - ~set:(fun x -> Client.VM.set_name_description rpc session_id vm x) (); - make_field ~name:"user-version" - ~get:(fun () -> Int64.to_string (x ()).API.vM_user_version) - ~set:(fun x -> Client.VM.set_user_version rpc session_id vm (safe_i64_of_string "user-version" x)) (); - make_field ~name:"is-a-template" - ~get:(fun () -> string_of_bool (x ()).API.vM_is_a_template) - ~set:(fun x -> Client.VM.set_is_a_template rpc session_id vm (safe_bool_of_string "is-a-template" x)) (); - make_field ~name:"is-a-snapshot" - ~get:(fun () -> string_of_bool (x ()).API.vM_is_a_snapshot) (); - make_field ~name:"snapshot-of" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_snapshot_of) (); - make_field ~name:"snapshots" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_snapshots)) (); - make_field ~name:"snapshot-time" - ~get:(fun () -> Date.to_string (x ()).API.vM_snapshot_time) (); - make_field ~name:"transportable-snapshot-id" ~hidden:true - ~get:(fun () -> (x()).API.vM_transportable_snapshot_id) (); - make_field ~name:"snapshot-info" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_snapshot_info) (); - make_field ~name:"parent" - ~get:(fun () -> get_uuid_from_ref (x()).API.vM_parent) (); - make_field ~name:"children" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_children)) (); - make_field ~name:"is-control-domain" - ~get:(fun () -> string_of_bool (x ()).API.vM_is_control_domain) (); - make_field ~name:"power-state" - ~get:(fun () -> Record_util.power_to_string (x ()).API.vM_power_state) (); - make_field ~name:"memory-actual" - ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.vM_metrics_memory_actual) (xm ()) )) (); - make_field ~name:"memory-target" ~expensive:true - ~get:(fun () -> 
get_memory_target ()) (); - make_field ~name:"memory-overhead" - ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_overhead) (); - make_field ~name:"memory-static-max" - ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_static_max) - ~set:(fun x -> Client.VM.set_memory_static_max rpc session_id vm (Record_util.bytes_of_string "memory-static-max" x)) (); - make_field ~name:"memory-dynamic-max" - ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_dynamic_max) - ~set:(fun x -> Client.VM.set_memory_dynamic_max rpc session_id vm (Record_util.bytes_of_string "memory-dynamic-max" x)) (); - make_field ~name:"memory-dynamic-min" - ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_dynamic_min) - ~set:(fun x -> Client.VM.set_memory_dynamic_min rpc session_id vm (Record_util.bytes_of_string "memory-dynamic-min" x)) (); - make_field ~name:"memory-static-min" - ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_static_min) - ~set:(fun x -> Client.VM.set_memory_static_min rpc session_id vm (Record_util.bytes_of_string "memory-static-min" x)) (); - make_field ~name:"suspend-VDI-uuid" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_suspend_VDI) - ~set:(fun x -> Client.VM.set_suspend_VDI rpc session_id vm (Client.VDI.get_by_uuid rpc session_id x)) (); - make_field ~name:"suspend-SR-uuid" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_suspend_SR) - ~set:(fun x -> Client.VM.set_suspend_SR rpc session_id vm (Client.SR.get_by_uuid rpc session_id x)) (); - make_field ~name:"VCPUs-params" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_VCPUs_params) - ~add_to_map:(fun k v -> match k with - | "weight" | "cap" | "mask" -> Client.VM.add_to_VCPUs_params rpc session_id vm k v - | _ -> raise (Record_util.Record_failure ("Failed to add parameter '"^k^"': expecting 'weight','cap' or 'mask'"))) - ~remove_from_map:(fun k -> Client.VM.remove_from_VCPUs_params rpc session_id vm k) - ~get_map:(fun () -> (x ()).API.vM_VCPUs_params) (); - make_field ~name:"VCPUs-max" - 
~get:(fun () -> Int64.to_string (x ()).API.vM_VCPUs_max) - ~set:(fun x -> Client.VM.set_VCPUs_max rpc session_id vm (safe_i64_of_string "VCPUs-max" x)) (); - make_field ~name:"VCPUs-at-startup" - ~get:(fun () -> Int64.to_string (x ()).API.vM_VCPUs_at_startup) - ~set:(fun x -> Client.VM.set_VCPUs_at_startup rpc session_id vm (safe_i64_of_string "VCPUs-at-startup" x)) (); - make_field ~name:"actions-after-shutdown" - ~get:(fun () -> Record_util.on_normal_exit_to_string (x ()).API.vM_actions_after_shutdown) - ~set:(fun x -> Client.VM.set_actions_after_shutdown rpc session_id vm (Record_util.string_to_on_normal_exit x)) (); - make_field ~name:"actions-after-reboot" - ~get:(fun () -> Record_util.on_normal_exit_to_string (x ()).API.vM_actions_after_reboot) - ~set:(fun x -> Client.VM.set_actions_after_reboot rpc session_id vm (Record_util.string_to_on_normal_exit x)) (); - make_field ~name:"actions-after-crash" - ~get:(fun () -> Record_util.on_crash_behaviour_to_string (x ()).API.vM_actions_after_crash) - ~set:(fun x -> Client.VM.set_actions_after_crash rpc session_id vm (Record_util.string_to_on_crash_behaviour x)) (); - make_field ~name:"console-uuids" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_consoles)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vM_consoles) (); - make_field ~name:"hvm" - ~get:(fun () -> default "false" (may (fun m -> - string_of_bool m.API.vM_metrics_hvm) (xm ()) )) (); - make_field ~name:"nomigrate" - ~get:(fun () -> default "false" (may (fun m -> - string_of_bool m.API.vM_metrics_nomigrate) (xm ()) )) (); - make_field ~name:"nested-virt" - ~get:(fun () -> default "false" (may (fun m -> - string_of_bool m.API.vM_metrics_nested_virt) (xm ()) )) (); - make_field ~name:"platform" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_platform) - ~add_to_map:(fun k v -> Client.VM.add_to_platform rpc session_id vm k v) - ~remove_from_map:(fun k -> Client.VM.remove_from_platform rpc session_id 
vm k) - ~get_map:(fun () -> (x ()).API.vM_platform) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.vm_operation_to_string (x ()).API.vM_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.vm_operation_to_string (x ()).API.vM_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vm_operation_to_string b) (x ()).API.vM_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vm_operation_to_string b) (x ()).API.vM_current_operations) (); - make_field ~name:"blocked-operations" - ~get:(fun () -> Record_util.s2sm_to_string "; " (List.map (fun (k, v) -> Record_util.vm_operation_to_string k, v) ((x ()).API.vM_blocked_operations))) - ~add_to_map:(fun k v -> Client.VM.add_to_blocked_operations rpc session_id vm (Record_util.string_to_vm_operation k) v) - ~remove_from_map:(fun k -> Client.VM.remove_from_blocked_operations rpc session_id vm (Record_util.string_to_vm_operation k)) - ~get_map:(fun () -> List.map (fun (k, v) -> Record_util.vm_operation_to_string k, v) ((x ()).API.vM_blocked_operations)) (); - (* These two don't work on Dom-0 at the moment, so catch the exception *) - make_field ~name:"allowed-VBD-devices" - ~get:(fun () -> String.concat "; " (try Client.VM.get_allowed_VBD_devices rpc session_id vm with _ -> [])) ~expensive:true - ~get_set:(fun () -> try Client.VM.get_allowed_VBD_devices rpc session_id vm with _ -> []) (); - make_field ~name:"allowed-VIF-devices" - ~get:(fun () -> String.concat "; " (try Client.VM.get_allowed_VIF_devices rpc session_id vm with _ -> [])) ~expensive:true - ~get_set:(fun () -> try Client.VM.get_allowed_VIF_devices rpc session_id vm with _ -> []) (); - make_field ~name:"possible-hosts" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (Client.VM.get_possible_hosts rpc session_id vm))) ~expensive:true (); - make_field ~name:"HVM-boot-policy" - 
~get:(fun () -> (x ()).API.vM_HVM_boot_policy) - ~set:(fun x -> Client.VM.set_HVM_boot_policy rpc session_id vm x) (); - make_field ~name:"HVM-boot-params" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_HVM_boot_params) - ~add_to_map:(fun k v -> Client.VM.add_to_HVM_boot_params rpc session_id vm k v) - ~remove_from_map:(fun k -> Client.VM.remove_from_HVM_boot_params rpc session_id vm k) - ~get_map:(fun () -> (x ()).API.vM_HVM_boot_params) (); - make_field ~name:"HVM-shadow-multiplier" - ~get:(fun () -> string_of_float (x ()).API.vM_HVM_shadow_multiplier) - ~set:(fun x -> Client.VM.set_HVM_shadow_multiplier rpc session_id vm (float_of_string x)) (); - make_field ~name:"PV-kernel" - ~get:(fun () -> (x ()).API.vM_PV_kernel) - ~set:(fun x -> Client.VM.set_PV_kernel rpc session_id vm x) (); - make_field ~name:"PV-ramdisk" - ~get:(fun () -> (x ()).API.vM_PV_ramdisk) - ~set:(fun x -> Client.VM.set_PV_ramdisk rpc session_id vm x) (); - make_field ~name:"PV-args" - ~get:(fun () -> (x ()).API.vM_PV_args) - ~set:(fun x -> Client.VM.set_PV_args rpc session_id vm x) (); - make_field ~name:"PV-legacy-args" - ~get:(fun () -> (x ()).API.vM_PV_legacy_args) - ~set:(fun x -> Client.VM.set_PV_legacy_args rpc session_id vm x) (); - make_field ~name:"PV-bootloader" - ~get:(fun () -> (x ()).API.vM_PV_bootloader) - ~set:(fun x -> Client.VM.set_PV_bootloader rpc session_id vm x) (); - make_field ~name:"PV-bootloader-args" - ~get:(fun () -> (x ()).API.vM_PV_bootloader_args) - ~set:(fun x -> Client.VM.set_PV_bootloader_args rpc session_id vm x) (); - make_field ~name:"last-boot-CPU-flags" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_last_boot_CPU_flags) (); - make_field ~name:"last-boot-record" ~expensive:true - ~get:(fun () -> "'" ^ ((x ()).API.vM_last_booted_record) ^ "'") (); - make_field ~name:"resident-on" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_resident_on) (); - make_field ~name:"affinity" - ~get:(fun () -> get_uuid_from_ref (x 
()).API.vM_affinity) - ~set:(fun x -> if x="" then Client.VM.set_affinity rpc session_id vm Ref.null else Client.VM.set_affinity rpc session_id vm (Client.Host.get_by_uuid rpc session_id x)) (); - make_field ~name:"other-config" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_other_config) - ~add_to_map:(fun k v -> Client.VM.add_to_other_config rpc session_id vm k v) - ~remove_from_map:(fun k -> Client.VM.remove_from_other_config rpc session_id vm k) - ~get_map:(fun () -> (x ()).API.vM_other_config) (); - make_field ~name:"dom-id" - ~get:(fun () -> Int64.to_string (x ()).API.vM_domid) (); - make_field ~name:"recommendations" - ~get:(fun () -> (x ()).API.vM_recommendations) (); - make_field ~name:"xenstore-data" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_xenstore_data) - ~add_to_map:(fun k v -> Client.VM.add_to_xenstore_data rpc session_id vm k v) - ~remove_from_map:(fun k -> Client.VM.remove_from_xenstore_data rpc session_id vm k) - ~get_map:(fun () -> (x ()).API.vM_xenstore_data) (); - make_field ~name:"ha-always-run" ~deprecated:true - ~get:(fun () -> string_of_bool ((x ()).API.vM_ha_always_run)) - ~set:(fun x -> Client.VM.set_ha_always_run rpc session_id vm (bool_of_string x)) (); - make_field ~name:"ha-restart-priority" - ~get:(fun () -> (x ()).API.vM_ha_restart_priority) - ~set:(fun x -> Client.VM.set_ha_restart_priority rpc session_id vm x) (); - make_field ~name:"blobs" - ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.vM_blobs) (); - make_field ~name:"start-time" - ~get:(fun () -> default unknown_time (may (fun m -> Date.to_string m.API.vM_metrics_start_time) (xm ()) )) (); - make_field ~name:"install-time" - ~get:(fun () -> default unknown_time (may (fun m -> Date.to_string m.API.vM_metrics_install_time) (xm ()) )) (); - make_field ~name:"VCPUs-number" - ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.vM_metrics_VCPUs_number) (xm ()) )) (); - make_field 
~name:"VCPUs-utilisation" - ~get:(fun () -> try let info = get_vcpus_utilisation () in String.concat "; " (List.map (fun (a,b) -> Printf.sprintf "%s: %s" a b) info) with _ -> "") - ~get_map:(fun () -> try get_vcpus_utilisation () with _ -> []) ~expensive:true (); - make_field ~name:"os-version" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_os_version) (xgm ()))) - ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_os_version) (xgm ()))) (); - make_field ~name:"PV-drivers-version" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_PV_drivers_version) (xgm ()) )) - ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_PV_drivers_version) (xgm ()))) (); - make_field ~name:"PV-drivers-up-to-date" - ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_PV_drivers_up_to_date) (xgm ()) )) (); - make_field ~name:"memory" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_memory) (xgm ()))) - ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_memory) (xgm ()))) (); - make_field ~name:"disks" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_disks) (xgm ()) )) - ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_disks) (xgm ()))) (); - make_field ~name:"networks" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_networks) (xgm ()) )) - ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_networks) (xgm ()))) (); - make_field ~name:"PV-drivers-detected" - ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_PV_drivers_detected) (xgm ()) )) (); - make_field ~name:"other" - ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_other) (xgm ()) )) - 
~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_other) (xgm()))) (); - make_field ~name:"live" - ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_live) (xgm ()) )) (); - make_field ~name:"guest-metrics-last-updated" - ~get:(fun () -> default nid (may (fun m -> Date.to_string m.API.vM_guest_metrics_last_updated) (xgm ()) )) (); - make_field ~name:"can-use-hotplug-vbd" - ~get:(fun () -> default nid (may (fun m -> Record_util.tristate_to_string m.API.vM_guest_metrics_can_use_hotplug_vbd) (xgm ()) )) (); - make_field ~name:"can-use-hotplug-vif" - ~get:(fun () -> default nid (may (fun m -> Record_util.tristate_to_string m.API.vM_guest_metrics_can_use_hotplug_vif) (xgm ()) )) (); - make_field ~name:"cooperative" - (* NB this can receive VM_IS_SNAPSHOT *) - ~get:(fun () -> string_of_bool (try Client.VM.get_cooperative rpc session_id vm with _ -> true)) - ~expensive:true ~deprecated:true (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.vM_tags) - ~get_set:(fun () -> (x ()).API.vM_tags) - ~add_to_set:(fun tag -> Client.VM.add_tags rpc session_id vm tag) - ~remove_from_set:(fun tag -> Client.VM.remove_tags rpc session_id vm tag) (); - make_field ~name:"appliance" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_appliance) - ~set:(fun x -> if x="" then Client.VM.set_appliance rpc session_id vm Ref.null else Client.VM.set_appliance rpc session_id vm (Client.VM_appliance.get_by_uuid rpc session_id x)) (); - make_field ~name:"start-delay" - ~get:(fun () -> Int64.to_string (x ()).API.vM_start_delay) - ~set:(fun x -> Client.VM.set_start_delay rpc session_id vm (safe_i64_of_string "start-delay" x)) (); - make_field ~name:"shutdown-delay" - ~get:(fun () -> Int64.to_string (x ()).API.vM_shutdown_delay) - ~set:(fun x -> Client.VM.set_shutdown_delay rpc session_id vm (safe_i64_of_string "shutdown-delay" x)) (); - make_field ~name:"order" - ~get:(fun () -> Int64.to_string (x ()).API.vM_order) - ~set:(fun 
x -> Client.VM.set_order rpc session_id vm (safe_i64_of_string "order" x)) (); - make_field ~name:"version" - ~get:(fun () -> Int64.to_string (x ()).API.vM_version) (); - make_field ~name:"generation-id" - ~get:(fun () -> (x ()).API.vM_generation_id) (); - make_field ~name:"hardware-platform-version" - ~get:(fun () -> Int64.to_string (x ()).API.vM_hardware_platform_version) (); - make_field ~name:"has-vendor-device" - ~get:(fun () -> string_of_bool (x ()).API.vM_has_vendor_device) - ~set:(fun x -> Client.VM.set_has_vendor_device rpc session_id vm (safe_bool_of_string "has-vendor-device" x)) (); - make_field ~name:"requires-reboot" - ~get:(fun () -> string_of_bool (x ()).API.vM_requires_reboot) (); - ]} - -let host_crashdump_record rpc session_id host = + let _ref = ref vm in + let empty_record = ToGet (fun () -> Client.VM.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + let empty_metrics = ToGet (fun () -> try Some (Client.VM_metrics.get_record rpc session_id (x ()).API.vM_metrics) with _ -> None) in + let metrics = ref empty_metrics in + let xm () = lzy_get metrics in + let empty_guest_metrics = ToGet (fun () -> try Some (Client.VM_guest_metrics.get_record rpc session_id (x ()).API.vM_guest_metrics) with _ -> None) in + let guest_metrics = ref empty_guest_metrics in + let get_vcpus_utilisation () = + let nvcpus = default 0 (may (fun m -> Int64.to_int m.API.vM_metrics_VCPUs_number) (xm ())) in + let rec inner n = + if n=nvcpus then [] else + (string_of_int n,string_of_float (Client.VM.query_data_source rpc session_id !_ref (Printf.sprintf "cpu%d" n)))::(inner (n+1)) + in + inner 0 + in + let get_memory_target () = + try + Int64.to_string ( + try + Int64.of_float ( + Client.VM.query_data_source + rpc session_id !_ref "memory_target" + ) + with Api_errors.Server_error (code, _) + when code = Api_errors.vm_bad_power_state -> 0L + ) + with _ -> "" + in + let xgm () = lzy_get guest_metrics in + { + setref = (fun r 
-> _ref := r; record := empty_record ); + setrefrec = (fun (a,b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = [ + make_field ~name:"uuid" + ~get:(fun () -> (x ()).API.vM_uuid) (); + make_field ~name:"name-label" + ~get:(fun () -> (x ()).API.vM_name_label) + ~set:(fun x -> Client.VM.set_name_label rpc session_id vm x) (); + make_field ~name:"name-description" + ~get:(fun () -> (x ()).API.vM_name_description) + ~set:(fun x -> Client.VM.set_name_description rpc session_id vm x) (); + make_field ~name:"user-version" + ~get:(fun () -> Int64.to_string (x ()).API.vM_user_version) + ~set:(fun x -> Client.VM.set_user_version rpc session_id vm (safe_i64_of_string "user-version" x)) (); + make_field ~name:"is-a-template" + ~get:(fun () -> string_of_bool (x ()).API.vM_is_a_template) + ~set:(fun x -> Client.VM.set_is_a_template rpc session_id vm (safe_bool_of_string "is-a-template" x)) (); + make_field ~name:"is-a-snapshot" + ~get:(fun () -> string_of_bool (x ()).API.vM_is_a_snapshot) (); + make_field ~name:"snapshot-of" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_snapshot_of) (); + make_field ~name:"snapshots" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_snapshots)) (); + make_field ~name:"snapshot-time" + ~get:(fun () -> Date.to_string (x ()).API.vM_snapshot_time) (); + make_field ~name:"transportable-snapshot-id" ~hidden:true + ~get:(fun () -> (x()).API.vM_transportable_snapshot_id) (); + make_field ~name:"snapshot-info" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_snapshot_info) (); + make_field ~name:"parent" + ~get:(fun () -> get_uuid_from_ref (x()).API.vM_parent) (); + make_field ~name:"children" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_children)) (); + make_field ~name:"is-control-domain" + ~get:(fun () -> string_of_bool (x ()).API.vM_is_control_domain) (); + make_field ~name:"power-state" + ~get:(fun () -> 
Record_util.power_to_string (x ()).API.vM_power_state) (); + make_field ~name:"memory-actual" + ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.vM_metrics_memory_actual) (xm ()) )) (); + make_field ~name:"memory-target" ~expensive:true + ~get:(fun () -> get_memory_target ()) (); + make_field ~name:"memory-overhead" + ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_overhead) (); + make_field ~name:"memory-static-max" + ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_static_max) + ~set:(fun x -> Client.VM.set_memory_static_max rpc session_id vm (Record_util.bytes_of_string "memory-static-max" x)) (); + make_field ~name:"memory-dynamic-max" + ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_dynamic_max) + ~set:(fun x -> Client.VM.set_memory_dynamic_max rpc session_id vm (Record_util.bytes_of_string "memory-dynamic-max" x)) (); + make_field ~name:"memory-dynamic-min" + ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_dynamic_min) + ~set:(fun x -> Client.VM.set_memory_dynamic_min rpc session_id vm (Record_util.bytes_of_string "memory-dynamic-min" x)) (); + make_field ~name:"memory-static-min" + ~get:(fun () -> Int64.to_string (x ()).API.vM_memory_static_min) + ~set:(fun x -> Client.VM.set_memory_static_min rpc session_id vm (Record_util.bytes_of_string "memory-static-min" x)) (); + make_field ~name:"suspend-VDI-uuid" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_suspend_VDI) + ~set:(fun x -> Client.VM.set_suspend_VDI rpc session_id vm (Client.VDI.get_by_uuid rpc session_id x)) (); + make_field ~name:"suspend-SR-uuid" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_suspend_SR) + ~set:(fun x -> Client.VM.set_suspend_SR rpc session_id vm (Client.SR.get_by_uuid rpc session_id x)) (); + make_field ~name:"VCPUs-params" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_VCPUs_params) + ~add_to_map:(fun k v -> match k with + | "weight" | "cap" | "mask" -> Client.VM.add_to_VCPUs_params rpc session_id vm k v + | _ -> raise 
(Record_util.Record_failure ("Failed to add parameter '"^k^"': expecting 'weight','cap' or 'mask'"))) + ~remove_from_map:(fun k -> Client.VM.remove_from_VCPUs_params rpc session_id vm k) + ~get_map:(fun () -> (x ()).API.vM_VCPUs_params) (); + make_field ~name:"VCPUs-max" + ~get:(fun () -> Int64.to_string (x ()).API.vM_VCPUs_max) + ~set:(fun x -> Client.VM.set_VCPUs_max rpc session_id vm (safe_i64_of_string "VCPUs-max" x)) (); + make_field ~name:"VCPUs-at-startup" + ~get:(fun () -> Int64.to_string (x ()).API.vM_VCPUs_at_startup) + ~set:(fun x -> Client.VM.set_VCPUs_at_startup rpc session_id vm (safe_i64_of_string "VCPUs-at-startup" x)) (); + make_field ~name:"actions-after-shutdown" + ~get:(fun () -> Record_util.on_normal_exit_to_string (x ()).API.vM_actions_after_shutdown) + ~set:(fun x -> Client.VM.set_actions_after_shutdown rpc session_id vm (Record_util.string_to_on_normal_exit x)) (); + make_field ~name:"actions-after-reboot" + ~get:(fun () -> Record_util.on_normal_exit_to_string (x ()).API.vM_actions_after_reboot) + ~set:(fun x -> Client.VM.set_actions_after_reboot rpc session_id vm (Record_util.string_to_on_normal_exit x)) (); + make_field ~name:"actions-after-crash" + ~get:(fun () -> Record_util.on_crash_behaviour_to_string (x ()).API.vM_actions_after_crash) + ~set:(fun x -> Client.VM.set_actions_after_crash rpc session_id vm (Record_util.string_to_on_crash_behaviour x)) (); + make_field ~name:"console-uuids" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_consoles)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vM_consoles) (); + make_field ~name:"hvm" + ~get:(fun () -> default "false" (may (fun m -> + string_of_bool m.API.vM_metrics_hvm) (xm ()) )) (); + make_field ~name:"nomigrate" + ~get:(fun () -> default "false" (may (fun m -> + string_of_bool m.API.vM_metrics_nomigrate) (xm ()) )) (); + make_field ~name:"nested-virt" + ~get:(fun () -> default "false" (may (fun m -> + string_of_bool 
m.API.vM_metrics_nested_virt) (xm ()) )) (); + make_field ~name:"platform" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_platform) + ~add_to_map:(fun k v -> Client.VM.add_to_platform rpc session_id vm k v) + ~remove_from_map:(fun k -> Client.VM.remove_from_platform rpc session_id vm k) + ~get_map:(fun () -> (x ()).API.vM_platform) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.vm_operation_to_string (x ()).API.vM_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.vm_operation_to_string (x ()).API.vM_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vm_operation_to_string b) (x ()).API.vM_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vm_operation_to_string b) (x ()).API.vM_current_operations) (); + make_field ~name:"blocked-operations" + ~get:(fun () -> Record_util.s2sm_to_string "; " (List.map (fun (k, v) -> Record_util.vm_operation_to_string k, v) ((x ()).API.vM_blocked_operations))) + ~add_to_map:(fun k v -> Client.VM.add_to_blocked_operations rpc session_id vm (Record_util.string_to_vm_operation k) v) + ~remove_from_map:(fun k -> Client.VM.remove_from_blocked_operations rpc session_id vm (Record_util.string_to_vm_operation k)) + ~get_map:(fun () -> List.map (fun (k, v) -> Record_util.vm_operation_to_string k, v) ((x ()).API.vM_blocked_operations)) (); + (* These two don't work on Dom-0 at the moment, so catch the exception *) + make_field ~name:"allowed-VBD-devices" + ~get:(fun () -> String.concat "; " (try Client.VM.get_allowed_VBD_devices rpc session_id vm with _ -> [])) ~expensive:true + ~get_set:(fun () -> try Client.VM.get_allowed_VBD_devices rpc session_id vm with _ -> []) (); + make_field ~name:"allowed-VIF-devices" + ~get:(fun () -> String.concat "; " (try Client.VM.get_allowed_VIF_devices rpc session_id vm with _ -> [])) ~expensive:true + 
~get_set:(fun () -> try Client.VM.get_allowed_VIF_devices rpc session_id vm with _ -> []) (); + make_field ~name:"possible-hosts" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (Client.VM.get_possible_hosts rpc session_id vm))) ~expensive:true (); + make_field ~name:"HVM-boot-policy" + ~get:(fun () -> (x ()).API.vM_HVM_boot_policy) + ~set:(fun x -> Client.VM.set_HVM_boot_policy rpc session_id vm x) (); + make_field ~name:"HVM-boot-params" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_HVM_boot_params) + ~add_to_map:(fun k v -> Client.VM.add_to_HVM_boot_params rpc session_id vm k v) + ~remove_from_map:(fun k -> Client.VM.remove_from_HVM_boot_params rpc session_id vm k) + ~get_map:(fun () -> (x ()).API.vM_HVM_boot_params) (); + make_field ~name:"HVM-shadow-multiplier" + ~get:(fun () -> string_of_float (x ()).API.vM_HVM_shadow_multiplier) + ~set:(fun x -> Client.VM.set_HVM_shadow_multiplier rpc session_id vm (float_of_string x)) (); + make_field ~name:"PV-kernel" + ~get:(fun () -> (x ()).API.vM_PV_kernel) + ~set:(fun x -> Client.VM.set_PV_kernel rpc session_id vm x) (); + make_field ~name:"PV-ramdisk" + ~get:(fun () -> (x ()).API.vM_PV_ramdisk) + ~set:(fun x -> Client.VM.set_PV_ramdisk rpc session_id vm x) (); + make_field ~name:"PV-args" + ~get:(fun () -> (x ()).API.vM_PV_args) + ~set:(fun x -> Client.VM.set_PV_args rpc session_id vm x) (); + make_field ~name:"PV-legacy-args" + ~get:(fun () -> (x ()).API.vM_PV_legacy_args) + ~set:(fun x -> Client.VM.set_PV_legacy_args rpc session_id vm x) (); + make_field ~name:"PV-bootloader" + ~get:(fun () -> (x ()).API.vM_PV_bootloader) + ~set:(fun x -> Client.VM.set_PV_bootloader rpc session_id vm x) (); + make_field ~name:"PV-bootloader-args" + ~get:(fun () -> (x ()).API.vM_PV_bootloader_args) + ~set:(fun x -> Client.VM.set_PV_bootloader_args rpc session_id vm x) (); + make_field ~name:"last-boot-CPU-flags" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_last_boot_CPU_flags) (); 
+ make_field ~name:"last-boot-record" ~expensive:true + ~get:(fun () -> "'" ^ ((x ()).API.vM_last_booted_record) ^ "'") (); + make_field ~name:"resident-on" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_resident_on) (); + make_field ~name:"affinity" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_affinity) + ~set:(fun x -> if x="" then Client.VM.set_affinity rpc session_id vm Ref.null else Client.VM.set_affinity rpc session_id vm (Client.Host.get_by_uuid rpc session_id x)) (); + make_field ~name:"other-config" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_other_config) + ~add_to_map:(fun k v -> Client.VM.add_to_other_config rpc session_id vm k v) + ~remove_from_map:(fun k -> Client.VM.remove_from_other_config rpc session_id vm k) + ~get_map:(fun () -> (x ()).API.vM_other_config) (); + make_field ~name:"dom-id" + ~get:(fun () -> Int64.to_string (x ()).API.vM_domid) (); + make_field ~name:"recommendations" + ~get:(fun () -> (x ()).API.vM_recommendations) (); + make_field ~name:"xenstore-data" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vM_xenstore_data) + ~add_to_map:(fun k v -> Client.VM.add_to_xenstore_data rpc session_id vm k v) + ~remove_from_map:(fun k -> Client.VM.remove_from_xenstore_data rpc session_id vm k) + ~get_map:(fun () -> (x ()).API.vM_xenstore_data) (); + make_field ~name:"ha-always-run" ~deprecated:true + ~get:(fun () -> string_of_bool ((x ()).API.vM_ha_always_run)) + ~set:(fun x -> Client.VM.set_ha_always_run rpc session_id vm (bool_of_string x)) (); + make_field ~name:"ha-restart-priority" + ~get:(fun () -> (x ()).API.vM_ha_restart_priority) + ~set:(fun x -> Client.VM.set_ha_restart_priority rpc session_id vm x) (); + make_field ~name:"blobs" + ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.vM_blobs) (); + make_field ~name:"start-time" + ~get:(fun () -> default unknown_time (may (fun m -> Date.to_string m.API.vM_metrics_start_time) (xm ()) )) (); + make_field 
~name:"install-time" + ~get:(fun () -> default unknown_time (may (fun m -> Date.to_string m.API.vM_metrics_install_time) (xm ()) )) (); + make_field ~name:"VCPUs-number" + ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.vM_metrics_VCPUs_number) (xm ()) )) (); + make_field ~name:"VCPUs-utilisation" + ~get:(fun () -> try let info = get_vcpus_utilisation () in String.concat "; " (List.map (fun (a,b) -> Printf.sprintf "%s: %s" a b) info) with _ -> "") + ~get_map:(fun () -> try get_vcpus_utilisation () with _ -> []) ~expensive:true (); + make_field ~name:"os-version" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_os_version) (xgm ()))) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_os_version) (xgm ()))) (); + make_field ~name:"PV-drivers-version" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_PV_drivers_version) (xgm ()) )) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_PV_drivers_version) (xgm ()))) (); + make_field ~name:"PV-drivers-up-to-date" + ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_PV_drivers_up_to_date) (xgm ()) )) (); + make_field ~name:"memory" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_memory) (xgm ()))) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_memory) (xgm ()))) (); + make_field ~name:"disks" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_disks) (xgm ()) )) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_disks) (xgm ()))) (); + make_field ~name:"networks" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_networks) (xgm ()) )) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_networks) (xgm ()))) (); + make_field 
~name:"PV-drivers-detected" + ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_PV_drivers_detected) (xgm ()) )) (); + make_field ~name:"other" + ~get:(fun () -> default nid (may (fun m -> Record_util.s2sm_to_string "; " m.API.vM_guest_metrics_other) (xgm ()) )) + ~get_map:(fun () -> default [] (may (fun m -> m.API.vM_guest_metrics_other) (xgm()))) (); + make_field ~name:"live" + ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.vM_guest_metrics_live) (xgm ()) )) (); + make_field ~name:"guest-metrics-last-updated" + ~get:(fun () -> default nid (may (fun m -> Date.to_string m.API.vM_guest_metrics_last_updated) (xgm ()) )) (); + make_field ~name:"can-use-hotplug-vbd" + ~get:(fun () -> default nid (may (fun m -> Record_util.tristate_to_string m.API.vM_guest_metrics_can_use_hotplug_vbd) (xgm ()) )) (); + make_field ~name:"can-use-hotplug-vif" + ~get:(fun () -> default nid (may (fun m -> Record_util.tristate_to_string m.API.vM_guest_metrics_can_use_hotplug_vif) (xgm ()) )) (); + make_field ~name:"cooperative" + (* NB this can receive VM_IS_SNAPSHOT *) + ~get:(fun () -> string_of_bool (try Client.VM.get_cooperative rpc session_id vm with _ -> true)) + ~expensive:true ~deprecated:true (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.vM_tags) + ~get_set:(fun () -> (x ()).API.vM_tags) + ~add_to_set:(fun tag -> Client.VM.add_tags rpc session_id vm tag) + ~remove_from_set:(fun tag -> Client.VM.remove_tags rpc session_id vm tag) (); + make_field ~name:"appliance" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vM_appliance) + ~set:(fun x -> if x="" then Client.VM.set_appliance rpc session_id vm Ref.null else Client.VM.set_appliance rpc session_id vm (Client.VM_appliance.get_by_uuid rpc session_id x)) (); + make_field ~name:"start-delay" + ~get:(fun () -> Int64.to_string (x ()).API.vM_start_delay) + ~set:(fun x -> Client.VM.set_start_delay rpc session_id vm (safe_i64_of_string "start-delay" x)) (); + 
make_field ~name:"shutdown-delay" + ~get:(fun () -> Int64.to_string (x ()).API.vM_shutdown_delay) + ~set:(fun x -> Client.VM.set_shutdown_delay rpc session_id vm (safe_i64_of_string "shutdown-delay" x)) (); + make_field ~name:"order" + ~get:(fun () -> Int64.to_string (x ()).API.vM_order) + ~set:(fun x -> Client.VM.set_order rpc session_id vm (safe_i64_of_string "order" x)) (); + make_field ~name:"version" + ~get:(fun () -> Int64.to_string (x ()).API.vM_version) (); + make_field ~name:"generation-id" + ~get:(fun () -> (x ()).API.vM_generation_id) (); + make_field ~name:"hardware-platform-version" + ~get:(fun () -> Int64.to_string (x ()).API.vM_hardware_platform_version) (); + make_field ~name:"has-vendor-device" + ~get:(fun () -> string_of_bool (x ()).API.vM_has_vendor_device) + ~set:(fun x -> Client.VM.set_has_vendor_device rpc session_id vm (safe_bool_of_string "has-vendor-device" x)) (); + make_field ~name:"requires-reboot" + ~get:(fun () -> string_of_bool (x ()).API.vM_requires_reboot) (); + ]} + +let host_crashdump_record rpc session_id host = let _ref = ref host in let empty_record = ToGet (fun () -> Client.Host_crashdump.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -945,24 +945,24 @@ let host_crashdump_record rpc session_id host = setrefrec=(fun (a,b) -> _ref := a; record := Got b); record=x; getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_crashdump_uuid) (); - make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_crashdump_host) (); - make_field ~name:"timestamp" ~get:(fun () -> Date.to_string (x ()).API.host_crashdump_timestamp) (); - make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.host_crashdump_size) (); - ]} - -let pool_patch_record rpc session_id patch = + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_crashdump_uuid) (); + make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x 
()).API.host_crashdump_host) (); + make_field ~name:"timestamp" ~get:(fun () -> Date.to_string (x ()).API.host_crashdump_timestamp) (); + make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.host_crashdump_size) (); + ]} + +let pool_patch_record rpc session_id patch = let _ref = ref patch in let empty_record = ToGet (fun () -> Client.Pool_patch.get_record rpc session_id !_ref) in let record = ref empty_record in let x () = lzy_get record in - let get_hosts () = + let get_hosts () = let host_patch_refs = (x ()).API.pool_patch_host_patches in let host_refs = List.map (fun x -> Client.Host_patch.get_host ~rpc ~session_id ~self:x) host_patch_refs in let host_uuids = List.map (fun x -> Client.Host.get_uuid ~rpc ~session_id ~self:x) host_refs in - host_uuids + host_uuids in let after_apply_guidance_to_string = function | `restartHVM -> "restartHVM" @@ -970,28 +970,28 @@ let pool_patch_record rpc session_id patch = | `restartHost -> "restartHost" | `restartXAPI -> "restartXAPI" in - let after_apply_guidance_to_string_set = - List.map after_apply_guidance_to_string + let after_apply_guidance_to_string_set = + List.map after_apply_guidance_to_string in let after_apply_guidance () = after_apply_guidance_to_string_set (x ()).API.pool_patch_after_apply_guidance in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pool_patch_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.pool_patch_name_label) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.pool_patch_name_description) (); - make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.pool_patch_size) (); - make_field ~name:"hosts" ~get:(fun () -> String.concat ", " (get_hosts ())) ~get_set:get_hosts (); - make_field ~name:"after-apply-guidance" ~get:(fun () -> String.concat ", " 
(after_apply_guidance ())) ~get_set:after_apply_guidance (); - ]} - - -let host_cpu_record rpc session_id host_cpu = + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pool_patch_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.pool_patch_name_label) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.pool_patch_name_description) (); + make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.pool_patch_size) (); + make_field ~name:"hosts" ~get:(fun () -> String.concat ", " (get_hosts ())) ~get_set:get_hosts (); + make_field ~name:"after-apply-guidance" ~get:(fun () -> String.concat ", " (after_apply_guidance ())) ~get_set:after_apply_guidance (); + ]} + + +let host_cpu_record rpc session_id host_cpu = let _ref = ref host_cpu in let empty_record = ToGet (fun () -> Client.Host_cpu.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -1000,200 +1000,200 @@ let host_cpu_record rpc session_id host_cpu = setrefrec=(fun (a,b) -> _ref := a; record := Got b); record=x; getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_cpu_uuid) (); - make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_cpu_host) (); - make_field ~name:"number" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_number)) (); - make_field ~name:"vendor" ~get:(fun () -> (x ()).API.host_cpu_vendor) (); - make_field ~name:"speed" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_speed)) (); - make_field ~name:"model" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_model)) (); - make_field ~name:"family" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_family)) (); - make_field ~name:"modelname" ~get:(fun () -> (x ()).API.host_cpu_modelname) (); - make_field ~name:"stepping" ~get:(fun () -> (x 
()).API.host_cpu_stepping) (); - make_field ~name:"flags" ~get:(fun () -> (x ()).API.host_cpu_flags) (); - make_field ~name:"utilisation" ~get:(fun () -> - try - string_of_float (Client.Host.query_data_source rpc session_id (x ()).API.host_cpu_host (Printf.sprintf "cpu%Ld" (x ()).API.host_cpu_number)) - with _ -> "") ~expensive:true (); - ]} + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_cpu_uuid) (); + make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_cpu_host) (); + make_field ~name:"number" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_number)) (); + make_field ~name:"vendor" ~get:(fun () -> (x ()).API.host_cpu_vendor) (); + make_field ~name:"speed" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_speed)) (); + make_field ~name:"model" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_model)) (); + make_field ~name:"family" ~get:(fun () -> Int64.to_string ((x ()).API.host_cpu_family)) (); + make_field ~name:"modelname" ~get:(fun () -> (x ()).API.host_cpu_modelname) (); + make_field ~name:"stepping" ~get:(fun () -> (x ()).API.host_cpu_stepping) (); + make_field ~name:"flags" ~get:(fun () -> (x ()).API.host_cpu_flags) (); + make_field ~name:"utilisation" ~get:(fun () -> + try + string_of_float (Client.Host.query_data_source rpc session_id (x ()).API.host_cpu_host (Printf.sprintf "cpu%Ld" (x ()).API.host_cpu_number)) + with _ -> "") ~expensive:true (); + ]} let host_record rpc session_id host = - let _ref = ref host in - let empty_record = ToGet (fun () -> Client.Host.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - let metrics = ref (ToGet (fun () -> try Some (Client.Host_metrics.get_record rpc session_id (x ()).API.host_metrics) with _ -> None)) in - let xm () = lzy_get metrics in - let get_patches () = - let host_patch_refs = (x ()).API.host_patches in - let patch_refs = List.map (fun x -> Client.Host_patch.get_pool_patch ~rpc ~session_id 
~self:x) host_patch_refs in - let patch_uuids = List.map (fun x -> Client.Pool_patch.get_uuid ~rpc ~session_id ~self:x) patch_refs in - patch_uuids - in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.host_name_label) ~set:(fun s -> Client.Host.set_name_label rpc session_id host s) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.host_name_description) ~set:(fun s -> Client.Host.set_name_description rpc session_id host s) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.host_operation_to_string (x ()).API.host_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.host_operation_to_string (x ()).API.host_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.host_operation_to_string b) (x ()).API.host_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.host_operation_to_string b) (x ()).API.host_current_operations) (); - make_field ~name:"enabled" ~get:(fun () -> string_of_bool (x ()).API.host_enabled) (); - make_field ~name:"display" ~get:(fun () -> Record_util.host_display_to_string (x ()).API.host_display) (); - make_field ~name:"API-version-major" ~get:(fun () -> Int64.to_string (x ()).API.host_API_version_major) (); - make_field ~name:"API-version-minor" ~get:(fun () -> Int64.to_string (x ()).API.host_API_version_minor) (); - make_field ~name:"API-version-vendor" ~get:(fun () -> (x ()).API.host_API_version_vendor) (); - make_field ~name:"API-version-vendor-implementation" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_API_version_vendor_implementation) - ~get_map:(fun () -> (x 
()).API.host_API_version_vendor_implementation) (); - make_field ~name:"logging" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_logging) - ~add_to_map:(fun k v -> Client.Host.add_to_logging rpc session_id host k v) - ~remove_from_map:(fun k -> Client.Host.remove_from_logging rpc session_id host k) - ~get_map:(fun () -> (x ()).API.host_logging) (); - make_field ~name:"suspend-image-sr-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_suspend_image_sr) - ~set:(fun s -> Client.Host.set_suspend_image_sr rpc session_id host (if s="" then Ref.null else Client.SR.get_by_uuid rpc session_id s)) (); - make_field ~name:"crash-dump-sr-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_crash_dump_sr) - ~set:(fun s -> Client.Host.set_crash_dump_sr rpc session_id host (if s="" then Ref.null else Client.SR.get_by_uuid rpc session_id s)) (); - make_field ~name:"software-version" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_software_version) - ~get_map:(fun () -> (x ()).API.host_software_version) (); - make_field ~name:"capabilities" ~get:(fun () -> String.concat "; " (x ()).API.host_capabilities) - ~get_set:(fun () -> (x ()).API.host_capabilities) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_other_config) - ~add_to_map:(fun k v -> Client.Host.add_to_other_config rpc session_id host k v) - ~remove_from_map:(fun k -> Client.Host.remove_from_other_config rpc session_id host k) - ~get_map:(fun () -> (x ()).API.host_other_config) (); - make_field ~name:"cpu_info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_cpu_info) ~get_map:(fun () -> (x ()).API.host_cpu_info) (); - make_field ~name:"chipset-info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_chipset_info) ~get_map:(fun () -> (x ()).API.host_chipset_info) (); - make_field ~name:"hostname" ~get:(fun () -> (x ()).API.host_hostname) (); - make_field ~name:"address" ~get:(fun () -> (x 
()).API.host_address) (); - make_field ~name:"supported-bootloaders" ~get:(fun () -> String.concat "; " (x ()).API.host_supported_bootloaders) - ~get_set:(fun () -> (x ()).API.host_supported_bootloaders) (); - make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.host_blobs) (); - make_field ~name:"memory-overhead" ~get:(fun () -> Int64.to_string (x ()).API.host_memory_overhead) (); - make_field ~name:"memory-total" ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.host_metrics_memory_total) (xm ()) )) (); - make_field ~name:"memory-free" ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.host_metrics_memory_free) (xm ()) )) (); - make_field ~name:"memory-free-computed" ~expensive:true ~get:(fun () -> Int64.to_string (Client.Host.compute_free_memory rpc session_id host)) (); - make_field ~name:"host-metrics-live" ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.host_metrics_live) (xm ()) )) (); - make_field ~name:"patches" ~get:(fun () -> String.concat ", " (get_patches ())) ~get_set:get_patches (); - make_field ~name:"ha-statefiles" ~get:(fun () -> String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string x)) (x ()).API.host_ha_statefiles)) (); - make_field ~name:"ha-network-peers" ~get:(fun () -> String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string x)) (x ()).API.host_ha_network_peers)) (); - make_field ~name:"external-auth-type" ~get:(fun () -> (x ()).API.host_external_auth_type) (); - make_field ~name:"external-auth-service-name" ~get:(fun () -> (x ()).API.host_external_auth_service_name) (); - make_field ~name:"external-auth-configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_external_auth_configuration) - ~get_map:(fun () -> (x ()).API.host_external_auth_configuration) (); - make_field ~name:"edition" ~get:(fun () -> (x ()).API.host_edition) (); - make_field ~name:"license-server" ~get:(fun () -> 
Record_util.s2sm_to_string "; " (x ()).API.host_license_server) ~get_map:(fun () -> (x ()).API.host_license_server) (); - make_field ~name:"power-on-mode" ~get:(fun () -> (x ()).API.host_power_on_mode) (); - make_field ~name:"power-on-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_power_on_config) - ~get_map:(fun () -> (x ()).API.host_power_on_config) (); - make_field ~name:"local-cache-sr" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_local_cache_sr) (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.host_tags) - ~get_set:(fun () -> (x ()).API.host_tags) - ~add_to_set:(fun tag -> Client.Host.add_tags rpc session_id host tag) - ~remove_from_set:(fun tag -> Client.Host.remove_tags rpc session_id host tag) (); - make_field ~name:"ssl-legacy" - ~get:(fun () -> string_of_bool (x ()).API.host_ssl_legacy) - ~set:(fun s -> Client.Host.set_ssl_legacy rpc session_id host (safe_bool_of_string "ssl-legacy" s)) (); - make_field ~name:"guest_VCPUs_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_guest_VCPUs_params) - ~get_map:(fun () -> (x ()).API.host_guest_VCPUs_params) - ~add_to_map:(fun k v -> Client.Host.add_to_guest_VCPUs_params rpc session_id host k v) - ~remove_from_map:(fun k -> Client.Host.remove_from_guest_VCPUs_params rpc session_id host k) (); - make_field ~name:"virtual-hardware-platform-versions" - ~get:(fun () -> String.concat "; " (List.map Int64.to_string (x ()).API.host_virtual_hardware_platform_versions)) - ~get_set:(fun () -> List.map Int64.to_string (x ()).API.host_virtual_hardware_platform_versions) (); - make_field ~name:"control-domain-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_control_domain) (); - make_field ~name:"resident-vms" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.host_resident_VMs)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.host_resident_VMs) (); - make_field ~name:"patches-requiring-reboot" - ~get:(fun 
() -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.host_patches_requiring_reboot)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.host_patches_requiring_reboot) (); - ]} + let _ref = ref host in + let empty_record = ToGet (fun () -> Client.Host.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + let metrics = ref (ToGet (fun () -> try Some (Client.Host_metrics.get_record rpc session_id (x ()).API.host_metrics) with _ -> None)) in + let xm () = lzy_get metrics in + let get_patches () = + let host_patch_refs = (x ()).API.host_patches in + let patch_refs = List.map (fun x -> Client.Host_patch.get_pool_patch ~rpc ~session_id ~self:x) host_patch_refs in + let patch_uuids = List.map (fun x -> Client.Pool_patch.get_uuid ~rpc ~session_id ~self:x) patch_refs in + patch_uuids + in + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.host_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.host_name_label) ~set:(fun s -> Client.Host.set_name_label rpc session_id host s) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.host_name_description) ~set:(fun s -> Client.Host.set_name_description rpc session_id host s) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.host_operation_to_string (x ()).API.host_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.host_operation_to_string (x ()).API.host_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.host_operation_to_string b) (x ()).API.host_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.host_operation_to_string b) (x ()).API.host_current_operations) (); + make_field 
~name:"enabled" ~get:(fun () -> string_of_bool (x ()).API.host_enabled) (); + make_field ~name:"display" ~get:(fun () -> Record_util.host_display_to_string (x ()).API.host_display) (); + make_field ~name:"API-version-major" ~get:(fun () -> Int64.to_string (x ()).API.host_API_version_major) (); + make_field ~name:"API-version-minor" ~get:(fun () -> Int64.to_string (x ()).API.host_API_version_minor) (); + make_field ~name:"API-version-vendor" ~get:(fun () -> (x ()).API.host_API_version_vendor) (); + make_field ~name:"API-version-vendor-implementation" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_API_version_vendor_implementation) + ~get_map:(fun () -> (x ()).API.host_API_version_vendor_implementation) (); + make_field ~name:"logging" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_logging) + ~add_to_map:(fun k v -> Client.Host.add_to_logging rpc session_id host k v) + ~remove_from_map:(fun k -> Client.Host.remove_from_logging rpc session_id host k) + ~get_map:(fun () -> (x ()).API.host_logging) (); + make_field ~name:"suspend-image-sr-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_suspend_image_sr) + ~set:(fun s -> Client.Host.set_suspend_image_sr rpc session_id host (if s="" then Ref.null else Client.SR.get_by_uuid rpc session_id s)) (); + make_field ~name:"crash-dump-sr-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_crash_dump_sr) + ~set:(fun s -> Client.Host.set_crash_dump_sr rpc session_id host (if s="" then Ref.null else Client.SR.get_by_uuid rpc session_id s)) (); + make_field ~name:"software-version" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_software_version) + ~get_map:(fun () -> (x ()).API.host_software_version) (); + make_field ~name:"capabilities" ~get:(fun () -> String.concat "; " (x ()).API.host_capabilities) + ~get_set:(fun () -> (x ()).API.host_capabilities) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_other_config) + 
~add_to_map:(fun k v -> Client.Host.add_to_other_config rpc session_id host k v) + ~remove_from_map:(fun k -> Client.Host.remove_from_other_config rpc session_id host k) + ~get_map:(fun () -> (x ()).API.host_other_config) (); + make_field ~name:"cpu_info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_cpu_info) ~get_map:(fun () -> (x ()).API.host_cpu_info) (); + make_field ~name:"chipset-info" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_chipset_info) ~get_map:(fun () -> (x ()).API.host_chipset_info) (); + make_field ~name:"hostname" ~get:(fun () -> (x ()).API.host_hostname) (); + make_field ~name:"address" ~get:(fun () -> (x ()).API.host_address) (); + make_field ~name:"supported-bootloaders" ~get:(fun () -> String.concat "; " (x ()).API.host_supported_bootloaders) + ~get_set:(fun () -> (x ()).API.host_supported_bootloaders) (); + make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.host_blobs) (); + make_field ~name:"memory-overhead" ~get:(fun () -> Int64.to_string (x ()).API.host_memory_overhead) (); + make_field ~name:"memory-total" ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.host_metrics_memory_total) (xm ()) )) (); + make_field ~name:"memory-free" ~get:(fun () -> default nid (may (fun m -> Int64.to_string m.API.host_metrics_memory_free) (xm ()) )) (); + make_field ~name:"memory-free-computed" ~expensive:true ~get:(fun () -> Int64.to_string (Client.Host.compute_free_memory rpc session_id host)) (); + make_field ~name:"host-metrics-live" ~get:(fun () -> default nid (may (fun m -> string_of_bool m.API.host_metrics_live) (xm ()) )) (); + make_field ~name:"patches" ~get:(fun () -> String.concat ", " (get_patches ())) ~get_set:get_patches (); + make_field ~name:"ha-statefiles" ~get:(fun () -> String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string x)) (x ()).API.host_ha_statefiles)) (); + make_field ~name:"ha-network-peers" ~get:(fun () -> 
String.concat "; " (List.map (fun x -> get_uuid_from_ref (Ref.of_string x)) (x ()).API.host_ha_network_peers)) (); + make_field ~name:"external-auth-type" ~get:(fun () -> (x ()).API.host_external_auth_type) (); + make_field ~name:"external-auth-service-name" ~get:(fun () -> (x ()).API.host_external_auth_service_name) (); + make_field ~name:"external-auth-configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_external_auth_configuration) + ~get_map:(fun () -> (x ()).API.host_external_auth_configuration) (); + make_field ~name:"edition" ~get:(fun () -> (x ()).API.host_edition) (); + make_field ~name:"license-server" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_license_server) ~get_map:(fun () -> (x ()).API.host_license_server) (); + make_field ~name:"power-on-mode" ~get:(fun () -> (x ()).API.host_power_on_mode) (); + make_field ~name:"power-on-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_power_on_config) + ~get_map:(fun () -> (x ()).API.host_power_on_config) (); + make_field ~name:"local-cache-sr" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_local_cache_sr) (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.host_tags) + ~get_set:(fun () -> (x ()).API.host_tags) + ~add_to_set:(fun tag -> Client.Host.add_tags rpc session_id host tag) + ~remove_from_set:(fun tag -> Client.Host.remove_tags rpc session_id host tag) (); + make_field ~name:"ssl-legacy" + ~get:(fun () -> string_of_bool (x ()).API.host_ssl_legacy) + ~set:(fun s -> Client.Host.set_ssl_legacy rpc session_id host (safe_bool_of_string "ssl-legacy" s)) (); + make_field ~name:"guest_VCPUs_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.host_guest_VCPUs_params) + ~get_map:(fun () -> (x ()).API.host_guest_VCPUs_params) + ~add_to_map:(fun k v -> Client.Host.add_to_guest_VCPUs_params rpc session_id host k v) + ~remove_from_map:(fun k -> Client.Host.remove_from_guest_VCPUs_params rpc session_id host 
k) (); + make_field ~name:"virtual-hardware-platform-versions" + ~get:(fun () -> String.concat "; " (List.map Int64.to_string (x ()).API.host_virtual_hardware_platform_versions)) + ~get_set:(fun () -> List.map Int64.to_string (x ()).API.host_virtual_hardware_platform_versions) (); + make_field ~name:"control-domain-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.host_control_domain) (); + make_field ~name:"resident-vms" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.host_resident_VMs)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.host_resident_VMs) (); + make_field ~name:"patches-requiring-reboot" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.host_patches_requiring_reboot)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.host_patches_requiring_reboot) (); + ]} let vdi_record rpc session_id vdi = - let _ref = ref vdi in - let empty_record = ToGet (fun () -> Client.VDI.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vDI_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.vDI_name_label) - ~set:(fun label -> Client.VDI.set_name_label rpc session_id vdi label) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.vDI_name_description) - ~set:(fun desc -> Client.VDI.set_name_description rpc session_id vdi desc) (); - make_field ~name:"is-a-snapshot" ~get:(fun () -> string_of_bool (x ()).API.vDI_is_a_snapshot) (); - make_field ~name:"snapshot-of" ~get:(fun () -> get_uuid_from_ref (x ()).API.vDI_snapshot_of) (); - make_field ~name:"snapshots" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_snapshots)) (); - make_field ~name:"snapshot-time" ~get:(fun () -> 
Date.to_string (x ()).API.vDI_snapshot_time) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.vdi_operation_to_string (x ()).API.vDI_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.vdi_operation_to_string (x ()).API.vDI_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vdi_operation_to_string b) (x ()).API.vDI_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vdi_operation_to_string b) (x ()).API.vDI_current_operations) (); - make_field ~name:"sr-uuid" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vDI_SR) (); - make_field ~name:"sr-name-label" - ~get:(fun () -> get_name_from_ref (x ()).API.vDI_SR) (); - make_field ~name:"vbd-uuids" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_VBDs)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vDI_VBDs) (); - make_field ~name:"crashdump-uuids" - ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_crash_dumps)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vDI_crash_dumps) (); - make_field ~name:"virtual-size" ~get:(fun () -> Int64.to_string (x ()).API.vDI_virtual_size) (); - make_field ~name:"physical-utilisation" ~get:(fun () -> Int64.to_string (x ()).API.vDI_physical_utilisation) (); - make_field ~name:"location" ~get:(fun () -> (x ()).API.vDI_location) (); - make_field ~name:"type" ~get:(fun () -> Record_util.vdi_type_to_string (x ()).API.vDI_type) (); - make_field ~name:"sharable" ~get:(fun () -> string_of_bool (x ()).API.vDI_sharable) (); - make_field ~name:"read-only" ~get:(fun () -> string_of_bool (x ()).API.vDI_read_only) (); - make_field ~name:"storage-lock" ~get:(fun () -> string_of_bool (x ()).API.vDI_storage_lock) (); - make_field ~name:"managed" ~get:(fun () -> string_of_bool (x ()).API.vDI_managed) (); - make_field ~name:"parent" ~get:(fun () -> 
get_uuid_from_ref (x ()).API.vDI_parent) (); - make_field ~name:"missing" ~get:(fun () -> string_of_bool (x ()).API.vDI_missing) (); - make_field ~name:"is-tools-iso" ~get:(fun () -> string_of_bool (x ()).API.vDI_is_tools_iso) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_other_config) - ~add_to_map:(fun k v -> Client.VDI.add_to_other_config rpc session_id vdi k v) - ~remove_from_map:(fun k -> Client.VDI.remove_from_other_config rpc session_id vdi k) - ~get_map:(fun () -> (x ()).API.vDI_other_config) (); - make_field ~name:"xenstore-data" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_xenstore_data) - ~get_map:(fun () -> (x ()).API.vDI_xenstore_data) (); - make_field ~name:"sm-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_sm_config) - ~get_map:(fun () -> (x ()).API.vDI_sm_config) (); - make_field ~name:"on-boot" ~get:(fun () -> Record_util.on_boot_to_string (x ()).API.vDI_on_boot) - ~set:(fun onboot -> Client.VDI.set_on_boot rpc session_id vdi (Record_util.string_to_vdi_onboot onboot)) (); - make_field ~name:"allow-caching" ~get:(fun () -> string_of_bool (x ()).API.vDI_allow_caching) - ~set:(fun b -> Client.VDI.set_allow_caching rpc session_id vdi (bool_of_string b)) (); - make_field ~name:"metadata-latest" ~get:(fun () -> string_of_bool (x ()).API.vDI_metadata_latest) (); - make_field ~name:"metadata-of-pool" - ~get:(fun () -> - let local_pool = List.hd (Client.Pool.get_all ~rpc ~session_id) in - let vdi_pool = (x ()).API.vDI_metadata_of_pool in - if local_pool = vdi_pool then - get_uuid_from_ref local_pool - else begin - match Client.VDI.read_database_pool_uuid ~rpc ~session_id ~self:vdi with - | "" -> nid - | pool_uuid -> pool_uuid - end) (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.vDI_tags) - ~get_set:(fun () -> (x ()).API.vDI_tags) - ~add_to_set:(fun tag -> Client.VDI.add_tags rpc session_id vdi tag) - ~remove_from_set:(fun tag -> 
Client.VDI.remove_tags rpc session_id vdi tag) (); - ]} + let _ref = ref vdi in + let empty_record = ToGet (fun () -> Client.VDI.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vDI_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.vDI_name_label) + ~set:(fun label -> Client.VDI.set_name_label rpc session_id vdi label) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.vDI_name_description) + ~set:(fun desc -> Client.VDI.set_name_description rpc session_id vdi desc) (); + make_field ~name:"is-a-snapshot" ~get:(fun () -> string_of_bool (x ()).API.vDI_is_a_snapshot) (); + make_field ~name:"snapshot-of" ~get:(fun () -> get_uuid_from_ref (x ()).API.vDI_snapshot_of) (); + make_field ~name:"snapshots" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_snapshots)) (); + make_field ~name:"snapshot-time" ~get:(fun () -> Date.to_string (x ()).API.vDI_snapshot_time) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.vdi_operation_to_string (x ()).API.vDI_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.vdi_operation_to_string (x ()).API.vDI_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vdi_operation_to_string b) (x ()).API.vDI_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vdi_operation_to_string b) (x ()).API.vDI_current_operations) (); + make_field ~name:"sr-uuid" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vDI_SR) (); + make_field ~name:"sr-name-label" + ~get:(fun () -> get_name_from_ref (x ()).API.vDI_SR) (); + make_field ~name:"vbd-uuids" + ~get:(fun () 
-> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_VBDs)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vDI_VBDs) (); + make_field ~name:"crashdump-uuids" + ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vDI_crash_dumps)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vDI_crash_dumps) (); + make_field ~name:"virtual-size" ~get:(fun () -> Int64.to_string (x ()).API.vDI_virtual_size) (); + make_field ~name:"physical-utilisation" ~get:(fun () -> Int64.to_string (x ()).API.vDI_physical_utilisation) (); + make_field ~name:"location" ~get:(fun () -> (x ()).API.vDI_location) (); + make_field ~name:"type" ~get:(fun () -> Record_util.vdi_type_to_string (x ()).API.vDI_type) (); + make_field ~name:"sharable" ~get:(fun () -> string_of_bool (x ()).API.vDI_sharable) (); + make_field ~name:"read-only" ~get:(fun () -> string_of_bool (x ()).API.vDI_read_only) (); + make_field ~name:"storage-lock" ~get:(fun () -> string_of_bool (x ()).API.vDI_storage_lock) (); + make_field ~name:"managed" ~get:(fun () -> string_of_bool (x ()).API.vDI_managed) (); + make_field ~name:"parent" ~get:(fun () -> get_uuid_from_ref (x ()).API.vDI_parent) (); + make_field ~name:"missing" ~get:(fun () -> string_of_bool (x ()).API.vDI_missing) (); + make_field ~name:"is-tools-iso" ~get:(fun () -> string_of_bool (x ()).API.vDI_is_tools_iso) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_other_config) + ~add_to_map:(fun k v -> Client.VDI.add_to_other_config rpc session_id vdi k v) + ~remove_from_map:(fun k -> Client.VDI.remove_from_other_config rpc session_id vdi k) + ~get_map:(fun () -> (x ()).API.vDI_other_config) (); + make_field ~name:"xenstore-data" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_xenstore_data) + ~get_map:(fun () -> (x ()).API.vDI_xenstore_data) (); + make_field ~name:"sm-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vDI_sm_config) + 
~get_map:(fun () -> (x ()).API.vDI_sm_config) (); + make_field ~name:"on-boot" ~get:(fun () -> Record_util.on_boot_to_string (x ()).API.vDI_on_boot) + ~set:(fun onboot -> Client.VDI.set_on_boot rpc session_id vdi (Record_util.string_to_vdi_onboot onboot)) (); + make_field ~name:"allow-caching" ~get:(fun () -> string_of_bool (x ()).API.vDI_allow_caching) + ~set:(fun b -> Client.VDI.set_allow_caching rpc session_id vdi (bool_of_string b)) (); + make_field ~name:"metadata-latest" ~get:(fun () -> string_of_bool (x ()).API.vDI_metadata_latest) (); + make_field ~name:"metadata-of-pool" + ~get:(fun () -> + let local_pool = List.hd (Client.Pool.get_all ~rpc ~session_id) in + let vdi_pool = (x ()).API.vDI_metadata_of_pool in + if local_pool = vdi_pool then + get_uuid_from_ref local_pool + else begin + match Client.VDI.read_database_pool_uuid ~rpc ~session_id ~self:vdi with + | "" -> nid + | pool_uuid -> pool_uuid + end) (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.vDI_tags) + ~get_set:(fun () -> (x ()).API.vDI_tags) + ~add_to_set:(fun tag -> Client.VDI.add_tags rpc session_id vdi tag) + ~remove_from_set:(fun tag -> Client.VDI.remove_tags rpc session_id vdi tag) (); + ]} let vbd_record rpc session_id vbd = let _ref = ref vbd in @@ -1205,60 +1205,60 @@ let vbd_record rpc session_id vbd = record=x; getref=(fun () -> !_ref); fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vBD_uuid) (); - make_field ~name:"vm-uuid" - ~get:(fun () -> get_uuid_from_ref (x ()).API.vBD_VM) (); - make_field ~name:"vm-name-label" - ~get:(fun () -> get_name_from_ref (x ()).API.vBD_VM) (); - make_field ~name:"vdi-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vBD_VDI) (); - make_field ~name:"vdi-name-label" ~get:(fun () -> if (x ()).API.vBD_empty then "" else get_name_from_ref (x ()).API.vBD_VDI) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.vbd_operation_to_string (x 
()).API.vBD_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.vbd_operation_to_string (x ()).API.vBD_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vbd_operation_to_string b) (x ()).API.vBD_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vbd_operation_to_string b) (x ()).API.vBD_current_operations) (); - make_field ~name:"empty" ~get:(fun () -> string_of_bool (x ()).API.vBD_empty) (); - make_field ~name:"device" ~get:(fun () -> (x ()).API.vBD_device) (); - make_field ~name:"userdevice" ~get:(fun () -> (x ()).API.vBD_userdevice) - ~set:(fun dev -> Client.VBD.set_userdevice rpc session_id vbd dev) (); - make_field ~name:"bootable" ~get:(fun () -> string_of_bool (x ()).API.vBD_bootable) - ~set:(fun boot -> Client.VBD.set_bootable rpc session_id vbd (safe_bool_of_string "bootable" boot)) (); - make_field ~name:"mode" ~get:(fun () -> match (x ()).API.vBD_mode with `RO -> "RO" | `RW -> "RW") - ~set:(fun mode -> Client.VBD.set_mode rpc session_id vbd (Record_util.string_to_vbd_mode mode)) (); - make_field ~name:"type" ~get:(fun () -> match (x ()).API.vBD_type with `CD -> "CD" | `Disk -> "Disk" | `Floppy -> "Floppy") - ~set:(fun ty -> Client.VBD.set_type rpc session_id vbd (Record_util.string_to_vbd_type ty)) (); - make_field ~name:"unpluggable" ~get:(fun () -> string_of_bool (x ()).API.vBD_unpluggable) - ~set:(fun unpluggable -> Client.VBD.set_unpluggable rpc session_id vbd (safe_bool_of_string "unpluggable" unpluggable)) (); - make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vBD_currently_attached) (); - make_field ~name:"attachable" ~get:(fun () -> try Client.VBD.assert_attachable rpc session_id vbd; "true" with e -> Printf.sprintf "false (error: %s)" (Printexc.to_string e)) ~expensive:true (); - make_field ~name:"storage-lock" ~get:(fun () -> string_of_bool (x ()).API.vBD_storage_lock) (); - make_field 
~name:"status-code" ~get:(fun () -> Int64.to_string (x ()).API.vBD_status_code) (); - make_field ~name:"status-detail" ~get:(fun () -> (x ()).API.vBD_status_detail) (); - make_field ~name:"qos_algorithm_type" ~get:(fun () -> (x ()).API.vBD_qos_algorithm_type) - ~set:(fun qat -> Client.VBD.set_qos_algorithm_type rpc session_id vbd qat) (); - make_field ~name:"qos_algorithm_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vBD_qos_algorithm_params) - ~get_map:(fun () -> (x ()).API.vBD_qos_algorithm_params) - ~add_to_map:(fun k v -> Client.VBD.add_to_qos_algorithm_params rpc session_id vbd k v) - ~remove_from_map:(fun k -> Client.VBD.remove_from_qos_algorithm_params rpc session_id vbd k) (); - make_field ~name:"qos_supported_algorithms" ~get:(fun () -> String.concat "; " (x ()).API.vBD_qos_supported_algorithms) - ~get_set:(fun () -> (x ()).API.vBD_qos_supported_algorithms) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vBD_other_config) - ~add_to_map:(fun k v -> Client.VBD.add_to_other_config rpc session_id vbd k v) - ~remove_from_map:(fun k -> Client.VBD.remove_from_other_config rpc session_id vbd k) - ~get_map:(fun () -> (x ()).API.vBD_other_config) (); - make_field ~name:"io_read_kbs" ~get:(fun () -> - try - let name = Printf.sprintf "vbd_%s_read" (x ()).API.vBD_device in - string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vBD_VM name) /. 1024.0) - with _ -> "") ~expensive:true (); - make_field ~name:"io_write_kbs" ~get:(fun () -> - try - let name = Printf.sprintf "vbd_%s_write" (x ()).API.vBD_device in - string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vBD_VM name) /. 
1024.0) - with _ -> "") ~expensive:true (); - ]} + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vBD_uuid) (); + make_field ~name:"vm-uuid" + ~get:(fun () -> get_uuid_from_ref (x ()).API.vBD_VM) (); + make_field ~name:"vm-name-label" + ~get:(fun () -> get_name_from_ref (x ()).API.vBD_VM) (); + make_field ~name:"vdi-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vBD_VDI) (); + make_field ~name:"vdi-name-label" ~get:(fun () -> if (x ()).API.vBD_empty then "" else get_name_from_ref (x ()).API.vBD_VDI) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.vbd_operation_to_string (x ()).API.vBD_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.vbd_operation_to_string (x ()).API.vBD_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vbd_operation_to_string b) (x ()).API.vBD_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vbd_operation_to_string b) (x ()).API.vBD_current_operations) (); + make_field ~name:"empty" ~get:(fun () -> string_of_bool (x ()).API.vBD_empty) (); + make_field ~name:"device" ~get:(fun () -> (x ()).API.vBD_device) (); + make_field ~name:"userdevice" ~get:(fun () -> (x ()).API.vBD_userdevice) + ~set:(fun dev -> Client.VBD.set_userdevice rpc session_id vbd dev) (); + make_field ~name:"bootable" ~get:(fun () -> string_of_bool (x ()).API.vBD_bootable) + ~set:(fun boot -> Client.VBD.set_bootable rpc session_id vbd (safe_bool_of_string "bootable" boot)) (); + make_field ~name:"mode" ~get:(fun () -> match (x ()).API.vBD_mode with `RO -> "RO" | `RW -> "RW") + ~set:(fun mode -> Client.VBD.set_mode rpc session_id vbd (Record_util.string_to_vbd_mode mode)) (); + make_field ~name:"type" ~get:(fun () -> match (x ()).API.vBD_type with `CD -> "CD" | `Disk -> "Disk" | `Floppy -> "Floppy") + ~set:(fun ty -> Client.VBD.set_type rpc session_id vbd (Record_util.string_to_vbd_type 
ty)) (); + make_field ~name:"unpluggable" ~get:(fun () -> string_of_bool (x ()).API.vBD_unpluggable) + ~set:(fun unpluggable -> Client.VBD.set_unpluggable rpc session_id vbd (safe_bool_of_string "unpluggable" unpluggable)) (); + make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vBD_currently_attached) (); + make_field ~name:"attachable" ~get:(fun () -> try Client.VBD.assert_attachable rpc session_id vbd; "true" with e -> Printf.sprintf "false (error: %s)" (Printexc.to_string e)) ~expensive:true (); + make_field ~name:"storage-lock" ~get:(fun () -> string_of_bool (x ()).API.vBD_storage_lock) (); + make_field ~name:"status-code" ~get:(fun () -> Int64.to_string (x ()).API.vBD_status_code) (); + make_field ~name:"status-detail" ~get:(fun () -> (x ()).API.vBD_status_detail) (); + make_field ~name:"qos_algorithm_type" ~get:(fun () -> (x ()).API.vBD_qos_algorithm_type) + ~set:(fun qat -> Client.VBD.set_qos_algorithm_type rpc session_id vbd qat) (); + make_field ~name:"qos_algorithm_params" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vBD_qos_algorithm_params) + ~get_map:(fun () -> (x ()).API.vBD_qos_algorithm_params) + ~add_to_map:(fun k v -> Client.VBD.add_to_qos_algorithm_params rpc session_id vbd k v) + ~remove_from_map:(fun k -> Client.VBD.remove_from_qos_algorithm_params rpc session_id vbd k) (); + make_field ~name:"qos_supported_algorithms" ~get:(fun () -> String.concat "; " (x ()).API.vBD_qos_supported_algorithms) + ~get_set:(fun () -> (x ()).API.vBD_qos_supported_algorithms) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vBD_other_config) + ~add_to_map:(fun k v -> Client.VBD.add_to_other_config rpc session_id vbd k v) + ~remove_from_map:(fun k -> Client.VBD.remove_from_other_config rpc session_id vbd k) + ~get_map:(fun () -> (x ()).API.vBD_other_config) (); + make_field ~name:"io_read_kbs" ~get:(fun () -> + try + let name = Printf.sprintf "vbd_%s_read" (x 
()).API.vBD_device in + string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vBD_VM name) /. 1024.0) + with _ -> "") ~expensive:true (); + make_field ~name:"io_write_kbs" ~get:(fun () -> + try + let name = Printf.sprintf "vbd_%s_write" (x ()).API.vBD_device in + string_of_float ((Client.VM.query_data_source rpc session_id (x ()).API.vBD_VM name) /. 1024.0) + with _ -> "") ~expensive:true (); + ]} let crashdump_record rpc session_id crashdump = let _ref = ref crashdump in @@ -1270,16 +1270,16 @@ let crashdump_record rpc session_id crashdump = record=x; getref=(fun () -> !_ref); fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.crashdump_uuid) (); - make_field ~name:"vm-uuid" - ~get:(fun () -> get_uuid_from_ref (x ()).API.crashdump_VM) (); - make_field ~name:"vm-name-label" - ~get:(fun () -> get_name_from_ref (x ()).API.crashdump_VM) (); - make_field ~name:"vdi-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.crashdump_VDI) (); - ]} - -let sm_record rpc session_id sm = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.crashdump_uuid) (); + make_field ~name:"vm-uuid" + ~get:(fun () -> get_uuid_from_ref (x ()).API.crashdump_VM) (); + make_field ~name:"vm-name-label" + ~get:(fun () -> get_name_from_ref (x ()).API.crashdump_VM) (); + make_field ~name:"vdi-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.crashdump_VDI) (); + ]} + +let sm_record rpc session_id sm = let _ref = ref sm in let empty_record = ToGet (fun () -> Client.SM.get_record rpc session_id !_ref) in let record = ref empty_record in @@ -1289,111 +1289,111 @@ let sm_record rpc session_id sm = setrefrec=(fun (a,b) -> _ref := a; record := Got b); record=x; getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.sM_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.sM_name_label) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.sM_name_description) (); - make_field ~name:"type" ~get:(fun () 
-> (x ()).API.sM_type) (); - make_field ~name:"vendor" ~get:(fun () -> (x ()).API.sM_vendor) (); - make_field ~name:"copyright" ~get:(fun () -> (x ()).API.sM_copyright) (); - make_field ~name:"required-api-version" ~get:(fun () -> (x ()).API.sM_required_api_version) (); - make_field ~name:"capabilities" ~deprecated:true ~get:(fun () -> String.concat "; " (x ()).API.sM_capabilities) (); - make_field ~name:"features" - ~get:(fun () -> Record_util.s2sm_to_string "; " (s2i64_to_string (x ()).API.sM_features)) - ~get_map:(fun () -> s2i64_to_string (x ()).API.sM_features) (); - make_field ~name:"configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sM_configuration) (); - make_field ~name:"driver-filename" ~get:(fun () -> (x ()).API.sM_driver_filename) (); - make_field ~name:"required-cluster-stack" - ~get:(fun () -> String.concat ", " (x ()).API.sM_required_cluster_stack) (); - ]} + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.sM_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.sM_name_label) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.sM_name_description) (); + make_field ~name:"type" ~get:(fun () -> (x ()).API.sM_type) (); + make_field ~name:"vendor" ~get:(fun () -> (x ()).API.sM_vendor) (); + make_field ~name:"copyright" ~get:(fun () -> (x ()).API.sM_copyright) (); + make_field ~name:"required-api-version" ~get:(fun () -> (x ()).API.sM_required_api_version) (); + make_field ~name:"capabilities" ~deprecated:true ~get:(fun () -> String.concat "; " (x ()).API.sM_capabilities) (); + make_field ~name:"features" + ~get:(fun () -> Record_util.s2sm_to_string "; " (s2i64_to_string (x ()).API.sM_features)) + ~get_map:(fun () -> s2i64_to_string (x ()).API.sM_features) (); + make_field ~name:"configuration" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sM_configuration) (); + make_field ~name:"driver-filename" ~get:(fun () -> (x ()).API.sM_driver_filename) (); + make_field 
~name:"required-cluster-stack" + ~get:(fun () -> String.concat ", " (x ()).API.sM_required_cluster_stack) (); + ]} let sr_record rpc session_id sr = - let _ref = ref sr in - let empty_record = ToGet (fun () -> Client.SR.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { setref=(fun r -> _ref := r; record := empty_record ); - setrefrec=(fun (a,b) -> _ref := a; record := Got b); - record=x; - getref=(fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.sR_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.sR_name_label) - ~set:(fun x -> Client.SR.set_name_label rpc session_id sr x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.sR_name_description) - ~set:(fun x -> Client.SR.set_name_description rpc session_id sr x) (); - make_field ~name:"host" - ~get:(fun () -> - let sr_rec = x() in - let pbds = sr_rec.API.sR_PBDs in - if List.length pbds>1 then "" - else get_name_from_ref (get_sr_host rpc session_id sr_rec)) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.sr_operation_to_string (x ()).API.sR_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.sr_operation_to_string (x ()).API.sR_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.sr_operation_to_string b) (x ()).API.sR_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.sr_operation_to_string b) (x ()).API.sR_current_operations) (); - make_field ~name:"VDIs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.sR_VDIs)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.sR_VDIs) (); - make_field ~name:"PBDs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.sR_PBDs)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.sR_PBDs) (); - make_field 
~name:"virtual-allocation" ~get:(fun () -> Int64.to_string (x ()).API.sR_virtual_allocation) (); - make_field ~name:"physical-utilisation" ~get:(fun () -> Int64.to_string (x ()).API.sR_physical_utilisation) (); - make_field ~name:"physical-size" ~get:(fun () -> Int64.to_string (x ()).API.sR_physical_size) (); - make_field ~name:"type" ~get:(fun () -> (x ()).API.sR_type) (); - make_field ~name:"content-type" ~get:(fun () -> (x ()).API.sR_content_type) (); - make_field ~name:"shared" - ~get:(fun () -> string_of_bool ((x ()).API.sR_shared)) - ~set:(fun x -> Client.SR.set_shared rpc session_id sr (safe_bool_of_string "shared" x)) (); - make_field ~name:"introduced-by" - ~get:(fun () -> (get_uuid_from_ref (x ()).API.sR_introduced_by)) (); - make_field ~name:"is-tools-sr" ~get:(fun () -> string_of_bool (x ()).API.sR_is_tools_sr) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sR_other_config) - ~add_to_map:(fun k v -> Client.SR.add_to_other_config rpc session_id sr k v) - ~remove_from_map:(fun k -> Client.SR.remove_from_other_config rpc session_id sr k) - ~get_map:(fun () -> (x ()).API.sR_other_config) (); - make_field ~name:"sm-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sR_sm_config) - ~get_map:(fun () -> (x ()).API.sR_sm_config) (); - make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.sR_blobs) (); - make_field ~name:"local-cache-enabled" ~get:(fun () -> string_of_bool (x ()).API.sR_local_cache_enabled) (); - make_field ~name:"tags" - ~get:(fun () -> String.concat ", " (x ()).API.sR_tags) - ~get_set:(fun () -> (x ()).API.sR_tags) - ~add_to_set:(fun tag -> Client.SR.add_tags rpc session_id sr tag) - ~remove_from_set:(fun tag -> Client.SR.remove_tags rpc session_id sr tag) (); - make_field ~name:"clustered" - ~get:(fun () -> string_of_bool ((x ()).API.sR_clustered)) (); - ]} + let _ref = ref sr in + let empty_record = ToGet (fun () -> 
Client.SR.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref=(fun r -> _ref := r; record := empty_record ); + setrefrec=(fun (a,b) -> _ref := a; record := Got b); + record=x; + getref=(fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.sR_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.sR_name_label) + ~set:(fun x -> Client.SR.set_name_label rpc session_id sr x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.sR_name_description) + ~set:(fun x -> Client.SR.set_name_description rpc session_id sr x) (); + make_field ~name:"host" + ~get:(fun () -> + let sr_rec = x() in + let pbds = sr_rec.API.sR_PBDs in + if List.length pbds>1 then "" + else get_name_from_ref (get_sr_host rpc session_id sr_rec)) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.sr_operation_to_string (x ()).API.sR_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.sr_operation_to_string (x ()).API.sR_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.sr_operation_to_string b) (x ()).API.sR_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.sr_operation_to_string b) (x ()).API.sR_current_operations) (); + make_field ~name:"VDIs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.sR_VDIs)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.sR_VDIs) (); + make_field ~name:"PBDs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.sR_PBDs)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.sR_PBDs) (); + make_field ~name:"virtual-allocation" ~get:(fun () -> Int64.to_string (x ()).API.sR_virtual_allocation) (); + make_field ~name:"physical-utilisation" ~get:(fun () -> Int64.to_string (x ()).API.sR_physical_utilisation) (); + 
make_field ~name:"physical-size" ~get:(fun () -> Int64.to_string (x ()).API.sR_physical_size) (); + make_field ~name:"type" ~get:(fun () -> (x ()).API.sR_type) (); + make_field ~name:"content-type" ~get:(fun () -> (x ()).API.sR_content_type) (); + make_field ~name:"shared" + ~get:(fun () -> string_of_bool ((x ()).API.sR_shared)) + ~set:(fun x -> Client.SR.set_shared rpc session_id sr (safe_bool_of_string "shared" x)) (); + make_field ~name:"introduced-by" + ~get:(fun () -> (get_uuid_from_ref (x ()).API.sR_introduced_by)) (); + make_field ~name:"is-tools-sr" ~get:(fun () -> string_of_bool (x ()).API.sR_is_tools_sr) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sR_other_config) + ~add_to_map:(fun k v -> Client.SR.add_to_other_config rpc session_id sr k v) + ~remove_from_map:(fun k -> Client.SR.remove_from_other_config rpc session_id sr k) + ~get_map:(fun () -> (x ()).API.sR_other_config) (); + make_field ~name:"sm-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.sR_sm_config) + ~get_map:(fun () -> (x ()).API.sR_sm_config) (); + make_field ~name:"blobs" ~get:(fun () -> Record_util.s2brm_to_string get_uuid_from_ref "; " (x ()).API.sR_blobs) (); + make_field ~name:"local-cache-enabled" ~get:(fun () -> string_of_bool (x ()).API.sR_local_cache_enabled) (); + make_field ~name:"tags" + ~get:(fun () -> String.concat ", " (x ()).API.sR_tags) + ~get_set:(fun () -> (x ()).API.sR_tags) + ~add_to_set:(fun tag -> Client.SR.add_tags rpc session_id sr tag) + ~remove_from_set:(fun tag -> Client.SR.remove_tags rpc session_id sr tag) (); + make_field ~name:"clustered" + ~get:(fun () -> string_of_bool ((x ()).API.sR_clustered)) (); + ]} let pbd_record rpc session_id pbd = - let _ref = ref pbd in - let empty_record = ToGet (fun () -> Client.PBD.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { setref=(fun r -> _ref := r; record := empty_record ) - ; setrefrec=(fun 
(a,b) -> _ref := a; record := Got b) - ; record=x - ; getref=(fun () -> !_ref) - ; fields = - [ make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pBD_uuid) () - ; make_field ~name:"host" ~get:(fun () -> get_uuid_from_ref (x ()).API.pBD_host) ~deprecated:true () - ; make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.pBD_host) () - ; make_field ~name:"host-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.pBD_host) () - ; make_field ~name:"sr-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.pBD_SR) () - ; make_field ~name:"sr-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.pBD_SR) () - ; make_field ~name:"device-config" - ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pBD_device_config) - ~get_map:(fun () -> (x ()).API.pBD_device_config) () - ; make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.pBD_currently_attached) () - ; make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pBD_other_config) - ~add_to_map:(fun k v -> Client.PBD.add_to_other_config rpc session_id pbd k v) - ~remove_from_map:(fun k -> Client.PBD.remove_from_other_config rpc session_id pbd k) - ~get_map:(fun () -> (x ()).API.pBD_other_config) () - ] - } + let _ref = ref pbd in + let empty_record = ToGet (fun () -> Client.PBD.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref=(fun r -> _ref := r; record := empty_record ) + ; setrefrec=(fun (a,b) -> _ref := a; record := Got b) + ; record=x + ; getref=(fun () -> !_ref) + ; fields = + [ make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pBD_uuid) () + ; make_field ~name:"host" ~get:(fun () -> get_uuid_from_ref (x ()).API.pBD_host) ~deprecated:true () + ; make_field ~name:"host-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.pBD_host) () + ; make_field ~name:"host-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.pBD_host) () + ; make_field ~name:"sr-uuid" ~get:(fun 
() -> get_uuid_from_ref (x ()).API.pBD_SR) () + ; make_field ~name:"sr-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.pBD_SR) () + ; make_field ~name:"device-config" + ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pBD_device_config) + ~get_map:(fun () -> (x ()).API.pBD_device_config) () + ; make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.pBD_currently_attached) () + ; make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pBD_other_config) + ~add_to_map:(fun k v -> Client.PBD.add_to_other_config rpc session_id pbd k v) + ~remove_from_map:(fun k -> Client.PBD.remove_from_other_config rpc session_id pbd k) + ~get_map:(fun () -> (x ()).API.pBD_other_config) () + ] + } let session_record rpc session_id session = let _ref = ref session in @@ -1404,10 +1404,10 @@ let session_record rpc session_id session = setrefrec=(fun (a,b) -> _ref := a; record := Got b); record=x; getref=(fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.session_uuid) (); - ]} + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.session_uuid) (); + ]} let blob_record rpc session_id blob = let _ref = ref blob in @@ -1419,94 +1419,94 @@ let blob_record rpc session_id blob = record=x; getref=(fun () -> !_ref); fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.blob_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.blob_name_label) - ~set:(fun x -> Client.Blob.set_name_label rpc session_id !_ref x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.blob_name_description) - ~set:(fun x -> Client.Blob.set_name_description rpc session_id !_ref x) (); - make_field ~name:"last_updated" ~get:(fun () -> Date.to_string (x ()).API.blob_last_updated) (); - make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.blob_size) (); - make_field ~name:"mime-type" ~get:(fun () -> (x ()).API.blob_mime_type) (); - ]} + [ + 
make_field ~name:"uuid" ~get:(fun () -> (x ()).API.blob_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.blob_name_label) + ~set:(fun x -> Client.Blob.set_name_label rpc session_id !_ref x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.blob_name_description) + ~set:(fun x -> Client.Blob.set_name_description rpc session_id !_ref x) (); + make_field ~name:"last_updated" ~get:(fun () -> Date.to_string (x ()).API.blob_last_updated) (); + make_field ~name:"size" ~get:(fun () -> Int64.to_string (x ()).API.blob_size) (); + make_field ~name:"mime-type" ~get:(fun () -> (x ()).API.blob_mime_type) (); + ]} let secret_record rpc session_id secret = - let _ref = ref secret in - let empty_record = ToGet (fun () -> - Client.Secret.get_record ~rpc ~session_id ~self:!_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { setref = (fun r -> _ref := r; record := empty_record ) - ; setrefrec = (fun (a, b) -> _ref := a; record := Got b ) - ; record = x - ; getref = (fun () -> !_ref ) - ; fields = - [ make_field ~name:"uuid" ~get:(fun () -> (x ()).API.secret_uuid) () - ; make_field ~name:"value" ~get:(fun () -> (x ()).API.secret_value) - ~set:(fun x -> - Client.Secret.set_value ~rpc ~session_id ~self:!_ref ~value:x) - () - ] - } + let _ref = ref secret in + let empty_record = ToGet (fun () -> + Client.Secret.get_record ~rpc ~session_id ~self:!_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { setref = (fun r -> _ref := r; record := empty_record ) + ; setrefrec = (fun (a, b) -> _ref := a; record := Got b ) + ; record = x + ; getref = (fun () -> !_ref ) + ; fields = + [ make_field ~name:"uuid" ~get:(fun () -> (x ()).API.secret_uuid) () + ; make_field ~name:"value" ~get:(fun () -> (x ()).API.secret_value) + ~set:(fun x -> + Client.Secret.set_value ~rpc ~session_id ~self:!_ref ~value:x) + () + ] + } let vm_appliance_record rpc session_id vm_appliance = - let _ref = ref vm_appliance in - let 
empty_record = ToGet (fun () -> - Client.VM_appliance.get_record ~rpc ~session_id ~self:!_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { - setref = (fun r -> _ref := r; record := empty_record); - setrefrec = (fun (a, b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vM_appliance_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.vM_appliance_name_label) - ~set:(fun x -> Client.VM_appliance.set_name_label rpc session_id !_ref x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.vM_appliance_name_description) - ~set:(fun x -> Client.VM_appliance.set_name_description rpc session_id !_ref x) (); - make_field ~name:"VMs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_appliance_VMs)) - ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vM_appliance_VMs) (); - make_field ~name:"allowed-operations" - ~get:(fun () -> String.concat "; " (List.map Record_util.vm_appliance_operation_to_string (x ()).API.vM_appliance_allowed_operations)) - ~get_set:(fun () -> List.map Record_util.vm_appliance_operation_to_string (x ()).API.vM_appliance_allowed_operations) (); - make_field ~name:"current-operations" - ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vm_appliance_operation_to_string b) (x ()).API.vM_appliance_current_operations)) - ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vm_appliance_operation_to_string b) (x ()).API.vM_appliance_current_operations) (); - ] - } + let _ref = ref vm_appliance in + let empty_record = ToGet (fun () -> + Client.VM_appliance.get_record ~rpc ~session_id ~self:!_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { + setref = (fun r -> _ref := r; record := empty_record); + setrefrec = (fun (a, b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = + [ + 
make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vM_appliance_uuid) (); + make_field ~name:"name-label" ~get:(fun () -> (x ()).API.vM_appliance_name_label) + ~set:(fun x -> Client.VM_appliance.set_name_label rpc session_id !_ref x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.vM_appliance_name_description) + ~set:(fun x -> Client.VM_appliance.set_name_description rpc session_id !_ref x) (); + make_field ~name:"VMs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.vM_appliance_VMs)) + ~get_set:(fun () -> List.map get_uuid_from_ref (x ()).API.vM_appliance_VMs) (); + make_field ~name:"allowed-operations" + ~get:(fun () -> String.concat "; " (List.map Record_util.vm_appliance_operation_to_string (x ()).API.vM_appliance_allowed_operations)) + ~get_set:(fun () -> List.map Record_util.vm_appliance_operation_to_string (x ()).API.vM_appliance_allowed_operations) (); + make_field ~name:"current-operations" + ~get:(fun () -> String.concat "; " (List.map (fun (a,b) -> Record_util.vm_appliance_operation_to_string b) (x ()).API.vM_appliance_current_operations)) + ~get_set:(fun () -> List.map (fun (a,b) -> Record_util.vm_appliance_operation_to_string b) (x ()).API.vM_appliance_current_operations) (); + ] + } let dr_task_record rpc session_id dr_task = - let _ref = ref dr_task in - let empty_record = ToGet (fun () -> - Client.DR_task.get_record ~rpc ~session_id ~self:!_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { - setref = (fun r -> _ref := r; record := empty_record); - setrefrec = (fun (a, b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = - [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.dR_task_uuid) (); - make_field ~name:"introduced-SRs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.dR_task_introduced_SRs)) (); - ] - } + let _ref = ref dr_task in + let empty_record = ToGet (fun () -> + Client.DR_task.get_record ~rpc 
~session_id ~self:!_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { + setref = (fun r -> _ref := r; record := empty_record); + setrefrec = (fun (a, b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = + [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.dR_task_uuid) (); + make_field ~name:"introduced-SRs" ~get:(fun () -> String.concat "; " (List.map get_uuid_from_ref (x ()).API.dR_task_introduced_SRs)) (); + ] + } (*let record_from_ref rpc session_id ref = let all = [ - "PBD",(fun ref -> snd (pbd_record rpc session_id (Ref.of_string ref))); - "SR",(fun ref -> snd (sr_record rpc session_id (Ref.of_string ref))); - "VBD",(fun ref -> snd (vbd_record rpc session_id (Ref.of_string ref))); - "VIF",(fun ref -> snd (vif_record rpc session_id (Ref.of_string ref))); - "PIF",(fun ref -> snd (pif_record rpc session_id (Ref.of_string ref))); - "VDI",(fun ref -> snd (vdi_record rpc session_id (Ref.of_string ref))); - "Host",(fun ref -> snd (host_record rpc session_id (Ref.of_string ref))); - "Network",(fun ref -> snd (net_record rpc session_id (Ref.of_string ref))); + "PBD",(fun ref -> snd (pbd_record rpc session_id (Ref.of_string ref))); + "SR",(fun ref -> snd (sr_record rpc session_id (Ref.of_string ref))); + "VBD",(fun ref -> snd (vbd_record rpc session_id (Ref.of_string ref))); + "VIF",(fun ref -> snd (vif_record rpc session_id (Ref.of_string ref))); + "PIF",(fun ref -> snd (pif_record rpc session_id (Ref.of_string ref))); + "VDI",(fun ref -> snd (vdi_record rpc session_id (Ref.of_string ref))); + "Host",(fun ref -> snd (host_record rpc session_id (Ref.of_string ref))); + "Network",(fun ref -> snd (net_record rpc session_id (Ref.of_string ref))); "VM",(fun ref -> snd (vm_record rpc session_id (Ref.of_string ref))); "Session",(fun ref -> snd (session_record rpc session_id (Ref.of_string ref)));] in let findfn (name,record) = @@ -1516,188 +1516,188 @@ let dr_task_record rpc session_id dr_task = u.get (); 
true with - _ -> false + _ -> false in try let (n,r) = List.find findfn all in (n,r ref) with _ -> ("Unknown",[]) - *) + *) let pgpu_record rpc session_id pgpu = - let _ref = ref pgpu in - let empty_record = ToGet (fun () -> Client.PGPU.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - let pci_record p = ref (ToGet (fun () -> Client.PCI.get_record rpc session_id p)) in - let xp0 p = lzy_get (pci_record p) in - let xp () = xp0 (x ()).API.pGPU_PCI in - { - setref = (fun r -> _ref := r; record := empty_record ); - setrefrec = (fun (a,b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pGPU_uuid) (); - make_field ~name:"vendor-name" ~get:(fun () -> try (xp ()).API.pCI_vendor_name with _ -> nid) (); - make_field ~name:"device-name" ~get:(fun () -> try (xp ()).API.pCI_device_name with _ -> nid) (); - make_field ~name:"dom0-access" ~get:(fun () -> Record_util.pgpu_dom0_access_to_string (x ()).API.pGPU_dom0_access ) (); - make_field ~name:"is-system-display-device" ~get:(fun () -> string_of_bool (x ()).API.pGPU_is_system_display_device ) (); - make_field ~name:"gpu-group-uuid" - ~get:(fun () -> try get_uuid_from_ref (x ()).API.pGPU_GPU_group with _ -> nid) - ~set:(fun gpu_group_uuid -> - let gpu_group = Client.GPU_group.get_by_uuid rpc session_id gpu_group_uuid in - Client.PGPU.set_GPU_group rpc session_id pgpu gpu_group) (); - make_field ~name:"gpu-group-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pGPU_GPU_group with _ -> nid) (); - make_field ~name:"host-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pGPU_host with _ -> nid) (); - make_field ~name:"host-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pGPU_host with _ -> nid) (); - make_field ~name:"pci-id" ~get:(fun () -> try (xp ()).API.pCI_pci_id with _ -> nid) (); - make_field ~name:"dependencies" ~get:(fun () -> String.concat "; " 
(List.map (fun pci -> (xp0 pci).API.pCI_pci_id) (xp ()).API.pCI_dependencies)) - ~get_set:(fun () -> (List.map (fun pci -> (xp0 pci).API.pCI_pci_id) (xp ()).API.pCI_dependencies)) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pGPU_other_config) - ~add_to_map:(fun k v -> Client.PGPU.add_to_other_config rpc session_id pgpu k v) - ~remove_from_map:(fun k -> Client.PGPU.remove_from_other_config rpc session_id pgpu k) - ~get_map:(fun () -> (x ()).API.pGPU_other_config) (); - make_field ~name:"supported-VGPU-types" - ~get:(fun () -> - String.concat "; " - (List.map - get_uuid_from_ref - (x ()).API.pGPU_supported_VGPU_types)) (); - make_field ~name:"enabled-VGPU-types" - ~get:(fun () -> - String.concat "; " - (List.map - get_uuid_from_ref - (x ()).API.pGPU_enabled_VGPU_types)) - ~get_set:(fun () -> - (List.map - (fun vgpu_type -> get_uuid_from_ref vgpu_type) - (x ()).API.pGPU_enabled_VGPU_types)) - ~add_to_set:(fun vgpu_type_uuid -> - Client.PGPU.add_enabled_VGPU_types rpc session_id pgpu - (Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid)) - ~remove_from_set:(fun vgpu_type_uuid -> - Client.PGPU.remove_enabled_VGPU_types rpc session_id pgpu - (Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid)) - ~set:(fun vgpu_type_uuids -> - Client.PGPU.set_enabled_VGPU_types rpc session_id pgpu - (List.map - (fun vgpu_type_uuid -> - Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid) - (get_words ',' vgpu_type_uuids))) - (); - make_field ~name:"resident-VGPUs" - ~get:(fun () -> - String.concat "; " - (List.map get_uuid_from_ref - (x ()).API.pGPU_resident_VGPUs)) (); - ] - } + let _ref = ref pgpu in + let empty_record = ToGet (fun () -> Client.PGPU.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + let pci_record p = ref (ToGet (fun () -> Client.PCI.get_record rpc session_id p)) in + let xp0 p = lzy_get (pci_record p) in + let xp () = xp0 (x 
()).API.pGPU_PCI in + { + setref = (fun r -> _ref := r; record := empty_record ); + setrefrec = (fun (a,b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.pGPU_uuid) (); + make_field ~name:"vendor-name" ~get:(fun () -> try (xp ()).API.pCI_vendor_name with _ -> nid) (); + make_field ~name:"device-name" ~get:(fun () -> try (xp ()).API.pCI_device_name with _ -> nid) (); + make_field ~name:"dom0-access" ~get:(fun () -> Record_util.pgpu_dom0_access_to_string (x ()).API.pGPU_dom0_access ) (); + make_field ~name:"is-system-display-device" ~get:(fun () -> string_of_bool (x ()).API.pGPU_is_system_display_device ) (); + make_field ~name:"gpu-group-uuid" + ~get:(fun () -> try get_uuid_from_ref (x ()).API.pGPU_GPU_group with _ -> nid) + ~set:(fun gpu_group_uuid -> + let gpu_group = Client.GPU_group.get_by_uuid rpc session_id gpu_group_uuid in + Client.PGPU.set_GPU_group rpc session_id pgpu gpu_group) (); + make_field ~name:"gpu-group-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pGPU_GPU_group with _ -> nid) (); + make_field ~name:"host-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.pGPU_host with _ -> nid) (); + make_field ~name:"host-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.pGPU_host with _ -> nid) (); + make_field ~name:"pci-id" ~get:(fun () -> try (xp ()).API.pCI_pci_id with _ -> nid) (); + make_field ~name:"dependencies" ~get:(fun () -> String.concat "; " (List.map (fun pci -> (xp0 pci).API.pCI_pci_id) (xp ()).API.pCI_dependencies)) + ~get_set:(fun () -> (List.map (fun pci -> (xp0 pci).API.pCI_pci_id) (xp ()).API.pCI_dependencies)) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.pGPU_other_config) + ~add_to_map:(fun k v -> Client.PGPU.add_to_other_config rpc session_id pgpu k v) + ~remove_from_map:(fun k -> Client.PGPU.remove_from_other_config rpc session_id pgpu k) + ~get_map:(fun () -> 
(x ()).API.pGPU_other_config) (); + make_field ~name:"supported-VGPU-types" + ~get:(fun () -> + String.concat "; " + (List.map + get_uuid_from_ref + (x ()).API.pGPU_supported_VGPU_types)) (); + make_field ~name:"enabled-VGPU-types" + ~get:(fun () -> + String.concat "; " + (List.map + get_uuid_from_ref + (x ()).API.pGPU_enabled_VGPU_types)) + ~get_set:(fun () -> + (List.map + (fun vgpu_type -> get_uuid_from_ref vgpu_type) + (x ()).API.pGPU_enabled_VGPU_types)) + ~add_to_set:(fun vgpu_type_uuid -> + Client.PGPU.add_enabled_VGPU_types rpc session_id pgpu + (Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid)) + ~remove_from_set:(fun vgpu_type_uuid -> + Client.PGPU.remove_enabled_VGPU_types rpc session_id pgpu + (Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid)) + ~set:(fun vgpu_type_uuids -> + Client.PGPU.set_enabled_VGPU_types rpc session_id pgpu + (List.map + (fun vgpu_type_uuid -> + Client.VGPU_type.get_by_uuid rpc session_id vgpu_type_uuid) + (get_words ',' vgpu_type_uuids))) + (); + make_field ~name:"resident-VGPUs" + ~get:(fun () -> + String.concat "; " + (List.map get_uuid_from_ref + (x ()).API.pGPU_resident_VGPUs)) (); + ] + } let gpu_group_record rpc session_id gpu_group = - let _ref = ref gpu_group in - let empty_record = ToGet (fun () -> Client.GPU_group.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { - setref = (fun r -> _ref := r; record := empty_record ); - setrefrec = (fun (a,b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.gPU_group_uuid) (); - make_field ~name:"name-label" ~get:(fun () -> (x ()).API.gPU_group_name_label) - ~set:(fun x -> Client.GPU_group.set_name_label rpc session_id gpu_group x) (); - make_field ~name:"name-description" ~get:(fun () -> (x ()).API.gPU_group_name_description) - ~set:(fun x -> Client.GPU_group.set_name_description rpc session_id gpu_group x) (); - 
make_field ~name:"VGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun vgpu -> get_uuid_from_ref vgpu) (x ()).API.gPU_group_VGPUs)) - ~get_set:(fun () -> (List.map (fun vgpu -> get_uuid_from_ref vgpu) (x ()).API.gPU_group_VGPUs)) (); - make_field ~name:"PGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun pgpu -> get_uuid_from_ref pgpu) (x ()).API.gPU_group_PGPUs)) - ~get_set:(fun () -> (List.map (fun pgpu -> get_uuid_from_ref pgpu) (x ()).API.gPU_group_PGPUs)) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.gPU_group_other_config) - ~add_to_map:(fun k v -> Client.GPU_group.add_to_other_config rpc session_id gpu_group k v) - ~remove_from_map:(fun k -> Client.GPU_group.remove_from_other_config rpc session_id gpu_group k) - ~get_map:(fun () -> (x ()).API.gPU_group_other_config) (); - make_field ~name:"enabled-VGPU-types" - ~get:(fun () -> - String.concat "; " - (List.map - get_uuid_from_ref - (Client.GPU_group.get_enabled_VGPU_types rpc session_id gpu_group))) - (); - make_field ~name:"supported-VGPU-types" - ~get:(fun () -> - String.concat "; " - (List.map - get_uuid_from_ref - (Client.GPU_group.get_supported_VGPU_types rpc session_id gpu_group))) - (); - make_field ~name:"allocation-algorithm" - ~get:(fun () -> - Record_util.allocation_algorithm_to_string - (x ()).API.gPU_group_allocation_algorithm) - ~set:(fun ty -> - Client.GPU_group.set_allocation_algorithm rpc session_id - gpu_group (Record_util.allocation_algorithm_of_string ty)) (); - ] - } + let _ref = ref gpu_group in + let empty_record = ToGet (fun () -> Client.GPU_group.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { + setref = (fun r -> _ref := r; record := empty_record ); + setrefrec = (fun (a,b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.gPU_group_uuid) (); + make_field 
~name:"name-label" ~get:(fun () -> (x ()).API.gPU_group_name_label) + ~set:(fun x -> Client.GPU_group.set_name_label rpc session_id gpu_group x) (); + make_field ~name:"name-description" ~get:(fun () -> (x ()).API.gPU_group_name_description) + ~set:(fun x -> Client.GPU_group.set_name_description rpc session_id gpu_group x) (); + make_field ~name:"VGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun vgpu -> get_uuid_from_ref vgpu) (x ()).API.gPU_group_VGPUs)) + ~get_set:(fun () -> (List.map (fun vgpu -> get_uuid_from_ref vgpu) (x ()).API.gPU_group_VGPUs)) (); + make_field ~name:"PGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun pgpu -> get_uuid_from_ref pgpu) (x ()).API.gPU_group_PGPUs)) + ~get_set:(fun () -> (List.map (fun pgpu -> get_uuid_from_ref pgpu) (x ()).API.gPU_group_PGPUs)) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.gPU_group_other_config) + ~add_to_map:(fun k v -> Client.GPU_group.add_to_other_config rpc session_id gpu_group k v) + ~remove_from_map:(fun k -> Client.GPU_group.remove_from_other_config rpc session_id gpu_group k) + ~get_map:(fun () -> (x ()).API.gPU_group_other_config) (); + make_field ~name:"enabled-VGPU-types" + ~get:(fun () -> + String.concat "; " + (List.map + get_uuid_from_ref + (Client.GPU_group.get_enabled_VGPU_types rpc session_id gpu_group))) + (); + make_field ~name:"supported-VGPU-types" + ~get:(fun () -> + String.concat "; " + (List.map + get_uuid_from_ref + (Client.GPU_group.get_supported_VGPU_types rpc session_id gpu_group))) + (); + make_field ~name:"allocation-algorithm" + ~get:(fun () -> + Record_util.allocation_algorithm_to_string + (x ()).API.gPU_group_allocation_algorithm) + ~set:(fun ty -> + Client.GPU_group.set_allocation_algorithm rpc session_id + gpu_group (Record_util.allocation_algorithm_of_string ty)) (); + ] + } let vgpu_record rpc session_id vgpu = - let _ref = ref vgpu in - let empty_record = ToGet (fun () -> Client.VGPU.get_record rpc 
session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { - setref = (fun r -> _ref := r; record := empty_record ); - setrefrec = (fun (a,b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vGPU_uuid) (); - make_field ~name:"vm-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vGPU_VM) (); - make_field ~name:"vm-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vGPU_VM) (); - make_field ~name:"gpu-group-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.vGPU_GPU_group with _ -> nid) (); - make_field ~name:"gpu-group-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.vGPU_GPU_group with _ -> nid) (); - make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vGPU_currently_attached) (); - make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vGPU_other_config) - ~add_to_map:(fun k v -> Client.VGPU.add_to_other_config rpc session_id vgpu k v) - ~remove_from_map:(fun k -> Client.VGPU.remove_from_other_config rpc session_id vgpu k) - ~get_map:(fun () -> (x ()).API.vGPU_other_config) (); - make_field ~name:"type-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vGPU_type) (); - make_field ~name:"type-model-name" ~get:(fun () -> try Client.VGPU_type.get_model_name rpc session_id ((x ()).API.vGPU_type) with _ -> nid) (); - make_field ~name:"resident-on" ~get:(fun () -> try get_uuid_from_ref (x ()).API.vGPU_resident_on with _ -> nid) (); - ] - } + let _ref = ref vgpu in + let empty_record = ToGet (fun () -> Client.VGPU.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { + setref = (fun r -> _ref := r; record := empty_record ); + setrefrec = (fun (a,b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vGPU_uuid) (); + 
make_field ~name:"vm-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vGPU_VM) (); + make_field ~name:"vm-name-label" ~get:(fun () -> get_name_from_ref (x ()).API.vGPU_VM) (); + make_field ~name:"gpu-group-uuid" ~get:(fun () -> try get_uuid_from_ref (x ()).API.vGPU_GPU_group with _ -> nid) (); + make_field ~name:"gpu-group-name-label" ~get:(fun () -> try get_name_from_ref (x ()).API.vGPU_GPU_group with _ -> nid) (); + make_field ~name:"currently-attached" ~get:(fun () -> string_of_bool (x ()).API.vGPU_currently_attached) (); + make_field ~name:"other-config" ~get:(fun () -> Record_util.s2sm_to_string "; " (x ()).API.vGPU_other_config) + ~add_to_map:(fun k v -> Client.VGPU.add_to_other_config rpc session_id vgpu k v) + ~remove_from_map:(fun k -> Client.VGPU.remove_from_other_config rpc session_id vgpu k) + ~get_map:(fun () -> (x ()).API.vGPU_other_config) (); + make_field ~name:"type-uuid" ~get:(fun () -> get_uuid_from_ref (x ()).API.vGPU_type) (); + make_field ~name:"type-model-name" ~get:(fun () -> try Client.VGPU_type.get_model_name rpc session_id ((x ()).API.vGPU_type) with _ -> nid) (); + make_field ~name:"resident-on" ~get:(fun () -> try get_uuid_from_ref (x ()).API.vGPU_resident_on with _ -> nid) (); + ] + } let vgpu_type_record rpc session_id vgpu_type = - let _ref = ref vgpu_type in - let empty_record = ToGet (fun () -> Client.VGPU_type.get_record rpc session_id !_ref) in - let record = ref empty_record in - let x () = lzy_get record in - { - setref = (fun r -> _ref := r; record := empty_record); - setrefrec = (fun (a,b) -> _ref := a; record := Got b); - record = x; - getref = (fun () -> !_ref); - fields = [ - make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vGPU_type_uuid) (); - make_field ~name:"vendor-name" ~get:(fun () -> (x ()).API.vGPU_type_vendor_name) (); - make_field ~name:"model-name" - ~get:(fun () -> (x ()).API.vGPU_type_model_name) (); - make_field ~name:"framebuffer-size" - ~get:(fun () -> Int64.to_string (x 
()).API.vGPU_type_framebuffer_size) (); - make_field ~name:"max-heads" - ~get:(fun () -> Int64.to_string (x ()).API.vGPU_type_max_heads) (); - make_field ~name:"max-resolution" - ~get:(fun () -> String.concat "x" (List.map Int64.to_string [(x ()).API.vGPU_type_max_resolution_x; (x ()).API.vGPU_type_max_resolution_y])) (); - make_field ~name:"supported-on-PGPUs" - ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_supported_on_PGPUs)) (); - make_field ~name:"enabled-on-PGPUs" - ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_enabled_on_PGPUs)) (); - make_field ~name:"supported-on-GPU-groups" - ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_supported_on_GPU_groups)) (); - make_field ~name:"enabled-on-GPU-groups" - ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_enabled_on_GPU_groups)) (); - make_field ~name:"VGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun v -> get_uuid_from_ref v) (x ()).API.vGPU_type_VGPUs)) (); - make_field ~name:"experimental" - ~get:(fun () -> string_of_bool (x ()).API.vGPU_type_experimental) (); - ] - } + let _ref = ref vgpu_type in + let empty_record = ToGet (fun () -> Client.VGPU_type.get_record rpc session_id !_ref) in + let record = ref empty_record in + let x () = lzy_get record in + { + setref = (fun r -> _ref := r; record := empty_record); + setrefrec = (fun (a,b) -> _ref := a; record := Got b); + record = x; + getref = (fun () -> !_ref); + fields = [ + make_field ~name:"uuid" ~get:(fun () -> (x ()).API.vGPU_type_uuid) (); + make_field ~name:"vendor-name" ~get:(fun () -> (x ()).API.vGPU_type_vendor_name) (); + make_field ~name:"model-name" + ~get:(fun () -> (x ()).API.vGPU_type_model_name) (); + make_field ~name:"framebuffer-size" + ~get:(fun () -> Int64.to_string (x ()).API.vGPU_type_framebuffer_size) (); + make_field 
~name:"max-heads" + ~get:(fun () -> Int64.to_string (x ()).API.vGPU_type_max_heads) (); + make_field ~name:"max-resolution" + ~get:(fun () -> String.concat "x" (List.map Int64.to_string [(x ()).API.vGPU_type_max_resolution_x; (x ()).API.vGPU_type_max_resolution_y])) (); + make_field ~name:"supported-on-PGPUs" + ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_supported_on_PGPUs)) (); + make_field ~name:"enabled-on-PGPUs" + ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_enabled_on_PGPUs)) (); + make_field ~name:"supported-on-GPU-groups" + ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_supported_on_GPU_groups)) (); + make_field ~name:"enabled-on-GPU-groups" + ~get:(fun () -> String.concat "; " (List.map (fun p -> get_uuid_from_ref p) (x ()).API.vGPU_type_enabled_on_GPU_groups)) (); + make_field ~name:"VGPU-uuids" ~get:(fun () -> String.concat "; " (List.map (fun v -> get_uuid_from_ref v) (x ()).API.vGPU_type_VGPUs)) (); + make_field ~name:"experimental" + ~get:(fun () -> string_of_bool (x ()).API.vGPU_type_experimental) (); + ] + } diff --git a/ocaml/console/client.ml b/ocaml/console/client.ml index a15597be233..de3e503eff7 100644 --- a/ocaml/console/client.ml +++ b/ocaml/console/client.ml @@ -12,61 +12,61 @@ * GNU Lesser General Public License for more details. 
*) let init_term () = - let attr = Unix.tcgetattr Unix.stdin in + let attr = Unix.tcgetattr Unix.stdin in - let nattr = { attr with - Unix.c_ignbrk = false; Unix.c_brkint = false; - Unix.c_parmrk = false; Unix.c_inlcr = false; - Unix.c_igncr = false; Unix.c_icrnl = false; - Unix.c_ixon = false; + let nattr = { attr with + Unix.c_ignbrk = false; Unix.c_brkint = false; + Unix.c_parmrk = false; Unix.c_inlcr = false; + Unix.c_igncr = false; Unix.c_icrnl = false; + Unix.c_ixon = false; - Unix.c_opost = false; + Unix.c_opost = false; - Unix.c_echo = false; Unix.c_echonl = false; - Unix.c_icanon = false; Unix.c_isig = false; - (* Unix.c_iexten = false; *) - Unix.c_csize = 8; Unix.c_parenb = true; (* Unix.c_cs = false *) } in - Unix.tcsetattr Unix.stdin Unix.TCSAFLUSH nattr; - attr + Unix.c_echo = false; Unix.c_echonl = false; + Unix.c_icanon = false; Unix.c_isig = false; + (* Unix.c_iexten = false; *) + Unix.c_csize = 8; Unix.c_parenb = true; (* Unix.c_cs = false *) } in + Unix.tcsetattr Unix.stdin Unix.TCSAFLUSH nattr; + attr let console_loop fd = - let buf = String.make 512 '\000' in - let quit = ref false in - while not !quit - do - let r,_,_ = Unix.select [ Unix.stdin; fd ] [] [] (-1.) in - if List.mem fd r then ( - let rd = Unix.read fd buf 0 512 in - ignore (Unix.write Unix.stdin buf 0 rd) - ) else if List.mem Unix.stdin r then ( - let rd = Unix.read Unix.stdin buf 0 60 in - if rd = 1 && buf.[0] = (Char.chr 0x1d) then - quit := true - else - ignore (Unix.write fd buf 0 rd) - ) - done + let buf = String.make 512 '\000' in + let quit = ref false in + while not !quit + do + let r,_,_ = Unix.select [ Unix.stdin; fd ] [] [] (-1.) 
in + if List.mem fd r then ( + let rd = Unix.read fd buf 0 512 in + ignore (Unix.write Unix.stdin buf 0 rd) + ) else if List.mem Unix.stdin r then ( + let rd = Unix.read Unix.stdin buf 0 60 in + if rd = 1 && buf.[0] = (Char.chr 0x1d) then + quit := true + else + ignore (Unix.write fd buf 0 rd) + ) + done let restore_term attr = - Unix.tcsetattr Unix.stdin Unix.TCSAFLUSH attr + Unix.tcsetattr Unix.stdin Unix.TCSAFLUSH attr let _ = - if Array.length Sys.argv < 3 then ( - Printf.eprintf "usage: console "; - exit 1 - ); - let host = Sys.argv.(1) and port = int_of_string (Sys.argv.(2)) in + if Array.length Sys.argv < 3 then ( + Printf.eprintf "usage: console "; + exit 1 + ); + let host = Sys.argv.(1) and port = int_of_string (Sys.argv.(2)) in - let fd = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 in - let he = Unix.gethostbyname host in + let fd = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 in + let he = Unix.gethostbyname host in - if Array.length he.Unix.h_addr_list = 0 then - failwith (Printf.sprintf "Couldn't resolve hostname: %s" host); - let ip = he.Unix.h_addr_list.(0) in - let addr = Unix.ADDR_INET(ip, port) in + if Array.length he.Unix.h_addr_list = 0 then + failwith (Printf.sprintf "Couldn't resolve hostname: %s" host); + let ip = he.Unix.h_addr_list.(0) in + let addr = Unix.ADDR_INET(ip, port) in - Unix.connect fd addr; + Unix.connect fd addr; - let oldattr = init_term () in - console_loop fd; - restore_term oldattr; + let oldattr = init_term () in + console_loop fd; + restore_term oldattr; diff --git a/ocaml/database/backend_xml.ml b/ocaml/database/backend_xml.ml index e9244697d11..ee9f432bf8c 100644 --- a/ocaml/database/backend_xml.ml +++ b/ocaml/database/backend_xml.ml @@ -18,24 +18,24 @@ open D open Db_cache_types open Db_backend -let unmarshall schema dbconn = +let unmarshall schema dbconn = let filename = dbconn.Parse_db_conf.path in if not dbconn.Parse_db_conf.compress then Db_xml.From.file schema filename - else + else let compressed = Unix.openfile 
filename [ Unix.O_RDONLY ] 0o0 in Stdext.Pervasiveext.finally - (fun () -> - let result = ref None in - Gzip.decompress_passive compressed - (fun uncompressed -> - result := Some (Db_xml.From.channel schema (Unix.in_channel_of_descr uncompressed)) - ); - match !result with - | None -> failwith "unmarshal failure" - | Some x -> x + (fun () -> + let result = ref None in + Gzip.decompress_passive compressed + (fun uncompressed -> + result := Some (Db_xml.From.channel schema (Unix.in_channel_of_descr uncompressed)) + ); + match !result with + | None -> failwith "unmarshal failure" + | Some x -> x ) - (fun () -> Unix.close compressed) + (fun () -> Unix.close compressed) (* Given table name, read all rows from db and store in cache *) let populate schema dbconn = @@ -43,48 +43,48 @@ let populate schema dbconn = let db = unmarshall schema dbconn in let major, minor = Manifest.schema (Database.manifest db) in debug "database unmarshalled, schema version = %d.%d" major minor; - (* version_check manifest; *) + (* version_check manifest; *) db (* atomically flush entire db cache to disk. 
If we are given a cache then flush that, otherwise flush the current state of the global in-memory cache *) let flush dbconn db = - let time = Unix.gettimeofday() in + let time = Unix.gettimeofday() in - let do_flush_xml db filename = - Redo_log.flush_db_to_all_active_redo_logs db; - Stdext.Unixext.atomic_write_to_file filename 0o0644 - (fun fd -> - if not dbconn.Parse_db_conf.compress - then Db_xml.To.fd fd db - else - Gzip.compress fd - (fun uncompressed -> Db_xml.To.fd uncompressed db) - ) in + let do_flush_xml db filename = + Redo_log.flush_db_to_all_active_redo_logs db; + Stdext.Unixext.atomic_write_to_file filename 0o0644 + (fun fd -> + if not dbconn.Parse_db_conf.compress + then Db_xml.To.fd fd db + else + Gzip.compress fd + (fun uncompressed -> Db_xml.To.fd uncompressed db) + ) in - let do_flush_gen db filename = - let generation = Manifest.generation (Database.manifest db) in - Stdext.Unixext.write_string_to_file filename (Generation.to_string generation) in + let do_flush_gen db filename = + let generation = Manifest.generation (Database.manifest db) in + Stdext.Unixext.write_string_to_file filename (Generation.to_string generation) in - let filename = dbconn.Parse_db_conf.path in - do_flush_xml db filename; - let generation_filename = Parse_db_conf.generation_filename dbconn in - do_flush_gen db generation_filename; + let filename = dbconn.Parse_db_conf.path in + do_flush_xml db filename; + let generation_filename = Parse_db_conf.generation_filename dbconn in + do_flush_gen db generation_filename; - debug "XML backend [%s] -- Write buffer flushed. Time: %f" filename (Unix.gettimeofday() -. time) + debug "XML backend [%s] -- Write buffer flushed. Time: %f" filename (Unix.gettimeofday() -. 
time) (* NB We don't do incremental flushing *) let flush_dirty dbconn = - let db = Db_ref.get_database (Db_backend.make ()) in - let g = Manifest.generation (Database.manifest db) in - if g > dbconn.Parse_db_conf.last_generation_count then begin - flush dbconn db; - dbconn.Parse_db_conf.last_generation_count <- g; - true - end else false + let db = Db_ref.get_database (Db_backend.make ()) in + let g = Manifest.generation (Database.manifest db) in + if g > dbconn.Parse_db_conf.last_generation_count then begin + flush dbconn db; + dbconn.Parse_db_conf.last_generation_count <- g; + true + end else false + - diff --git a/ocaml/database/block_device_io.ml b/ocaml/database/block_device_io.ml index 81d8ef2edea..c4987f13373 100644 --- a/ocaml/database/block_device_io.ml +++ b/ocaml/database/block_device_io.ml @@ -13,7 +13,7 @@ *) (* * Code to store a database and deltas in a block device and retrieve it again later. - * + * * This module can be compiled and executed in a stand-alone fashion from the command-line. * The filename of the block device and the filename of a file to use as a Unix domain socket are provided as command-line parameters. * The process must have read- and write-permission to the block device. @@ -35,7 +35,7 @@ (* * On-disk structure * ----------------- - * + * * The database is double-buffered, so that in case of corruption or write errors, there will always be an intact version of the data preserved. * There is a "validity byte" indicating which buffer is currently being written to. * Each buffer starts with a database record followed by zero or more deltas. @@ -65,7 +65,7 @@ (* * Communications protocol * ----------------------- - * + * * The control socket is used for another process to send commands and receive responses. * The response time for command is guaranteed to be no greater than a particular maximum delay (not accounting for network delay between the processes). 
* If the command could not complete in the available time, the response indicates that it failed. @@ -121,8 +121,8 @@ exception NotEnoughSpace (* Make informational output go to the syslog *) let initialise_logging () = - Debug.set_facility Syslog.Local5; - Debug.disable ~level:Syslog.Debug name + Debug.set_facility Syslog.Local5; + Debug.disable ~level:Syslog.Debug name (* --------------------------------------------- *) (* Functions to deal with layout of block device *) @@ -135,9 +135,9 @@ let start_of_half half = let half_to_pointer half = let ptr = match half with - | Neither -> raise InvalidBlockDevice (* half_to_pointer should never be called on "Neither" *) - | First -> pointer_first_half - | Second -> pointer_second_half in + | Neither -> raise InvalidBlockDevice (* half_to_pointer should never be called on "Neither" *) + | First -> pointer_first_half + | Second -> pointer_second_half in ptr let half_to_string half = @@ -186,7 +186,7 @@ let open_block_device block_dev target_response_time = R.warn "Magic string not matched. Initialising redo log..."; initialise_redo_log block_dev_fd target_response_time end; - block_dev_fd + block_dev_fd (* Within the given block device, seek to the position of the validity byte. *) let seek_to_validity_byte block_dev_fd = @@ -284,8 +284,8 @@ let listen_on sock = let s = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in (* Remove any existing socket file *) begin try - Unix.unlink sock - with Unix.Unix_error _ -> () + Unix.unlink sock + with Unix.Unix_error _ -> () end; Unix.bind s (Unix.ADDR_UNIX sock); Unix.listen s 1; (* 1 = maximum number of pending requests *) @@ -307,43 +307,43 @@ let accept_conn s latest_response_time = (* Listen on a given socket. Accept a single connection and transfer all the data from it to dest_fd, or raise Timeout if target_response_time happens first. *) (* Raises NotEnoughSpace if the next write would exceed the available_space. 
*) let transfer_data_from_sock_to_fd sock dest_fd available_space target_response_time = - (* Open the data channel *) - let s = listen_on sock in - try - (* May raise a Timeout exception: CA-106403 *) - let data_client = accept_conn s target_response_time in - R.info "Accepted connection on data socket"; - ignore_exn (fun () -> Unix.close s); - - (* Read all the data from the data channel, writing it straight into the block device, keeping track of accumulated length *) - let total_length = ref 0 in - R.debug "Reading from data socket, writing to the block device..."; - let bytes_read = finally - (fun () -> - (* Read data from the client until EOF. Returns the length read. *) - Unixext.read_data_in_chunks (fun chunk len -> - (* Check that there's enough space *) - if available_space - !total_length < len then raise NotEnoughSpace; - (* Otherwise write it *) - Unixext.time_limited_write dest_fd len chunk target_response_time; - total_length := !total_length + len - ) ~block_size:16384 data_client - ) - (fun () -> - (* Close the connection *) - (* CA-42914: If there was an exception, note that we are forcibly closing the connection when possibly the client (xapi) is still trying to write data. This will cause it to see a 'connection reset by peer' error. 
*) - R.info "Closing connection on data socket"; - try - Unix.shutdown data_client Unix.SHUTDOWN_ALL; - Unix.close data_client - with e -> - R.warn "Exception %s while closing socket" (Printexc.to_string e); - ) in - R.debug "Finished reading from data socket"; - bytes_read - with Unixext.Timeout -> (* Raised by accept_conn *) - ignore_exn (fun () -> Unix.close s); - raise Unixext.Timeout + (* Open the data channel *) + let s = listen_on sock in + try + (* May raise a Timeout exception: CA-106403 *) + let data_client = accept_conn s target_response_time in + R.info "Accepted connection on data socket"; + ignore_exn (fun () -> Unix.close s); + + (* Read all the data from the data channel, writing it straight into the block device, keeping track of accumulated length *) + let total_length = ref 0 in + R.debug "Reading from data socket, writing to the block device..."; + let bytes_read = finally + (fun () -> + (* Read data from the client until EOF. Returns the length read. *) + Unixext.read_data_in_chunks (fun chunk len -> + (* Check that there's enough space *) + if available_space - !total_length < len then raise NotEnoughSpace; + (* Otherwise write it *) + Unixext.time_limited_write dest_fd len chunk target_response_time; + total_length := !total_length + len + ) ~block_size:16384 data_client + ) + (fun () -> + (* Close the connection *) + (* CA-42914: If there was an exception, note that we are forcibly closing the connection when possibly the client (xapi) is still trying to write data. This will cause it to see a 'connection reset by peer' error. 
*) + R.info "Closing connection on data socket"; + try + Unix.shutdown data_client Unix.SHUTDOWN_ALL; + Unix.close data_client + with e -> + R.warn "Exception %s while closing socket" (Printexc.to_string e); + ) in + R.debug "Finished reading from data socket"; + bytes_read + with Unixext.Timeout -> (* Raised by accept_conn *) + ignore_exn (fun () -> Unix.close s); + raise Unixext.Timeout let transfer_database_to_sock sock db_fn target_response_time = (* Open the data channel *) @@ -354,12 +354,12 @@ let transfer_database_to_sock sock db_fn target_response_time = finally (fun () -> - (* Read the data and send it down the socket *) - db_fn (fun chunk len -> Unixext.time_limited_write data_client len chunk target_response_time) + (* Read the data and send it down the socket *) + db_fn (fun chunk len -> Unixext.time_limited_write data_client len chunk target_response_time) ) - (fun () -> - (* Close the socket *) - Unix.close data_client + (fun () -> + (* Close the socket *) + Unix.close data_client ) (* --------------------------------------------------- *) @@ -375,7 +375,7 @@ let send_response client str = else R.debug "Sent response [%s] to client" str (* Write a string containing a text string message. *) -let send_failure client prefix error = +let send_failure client prefix error = let len = String.length error in let str = Printf.sprintf "%s|%016d|%s" prefix len error in Unixext.really_write_string client str; @@ -428,9 +428,9 @@ let action_writedb block_dev_fd client datasock target_response_time = (* Decide which half of the double-buffered file to use. If the first half is currently valid, use the second half; and vice versa. 
*) let half_to_use = match validity with - | "1" -> Second - | "2" -> First - | _ -> First (* if neither half is valid, use the first half *) in + | "1" -> Second + | "2" -> First + | _ -> First (* if neither half is valid, use the first half *) in (* Seek to the start of the chosen half *) ignore_int (Unixext.seek_to block_dev_fd (start_of_half half_to_use)); @@ -527,9 +527,9 @@ let action_writedelta block_dev_fd client datasock target_response_time = (* Decide which half of the double-buffered file to use *) let half_to_use = match validity with - | "1" -> First - | "2" -> Second - | _ -> raise InvalidBlockDevice (* the log cannot accept deltas *) in + | "1" -> First + | "2" -> Second + | _ -> raise InvalidBlockDevice (* the log cannot accept deltas *) in (* Seek to the position to which to write *) let ptr = get_pointer half_to_use in @@ -614,9 +614,9 @@ let action_read block_dev_fd client datasock target_response_time = (* Decide which half of the double-buffered file to use *) let half_to_use = match validity with - | "1" -> First - | "2" -> Second - | _ -> raise InvalidBlockDevice in (* the log is empty *) + | "1" -> First + | "2" -> Second + | _ -> raise InvalidBlockDevice in (* the log is empty *) (* Seek to the start of the chosen half *) ignore_int (Unixext.seek_to block_dev_fd (start_of_half half_to_use)); @@ -662,7 +662,7 @@ let datasock = ref "" let dump = ref false let empty = ref false -let _ = +let _ = (* Initialise debug logging *) initialise_logging(); @@ -701,39 +701,39 @@ let _ = (* Read the validity byte *) let validity = read_validity_byte block_dev_fd target_response_time in Printf.printf "*** Validity byte: [%s]\n" validity; - + let halves = [First; Second] in List.iter (fun half -> - Printf.printf "*** [Half %s] Entering half.\n" (half_to_string half); - - (* Seek to the start of the chosen half *) - ignore_int (Unixext.seek_to block_dev_fd (start_of_half half)); - - begin - try - (* Attempt to read a database record *) - let length, db_fn, 
generation_count, marker = read_database block_dev_fd target_response_time in - Printf.printf "*** [Half %s] Database with generation count [%Ld] and length %d:\n" (half_to_string half) generation_count length; - db_fn (fun chunk len -> print_string chunk); - Printf.printf "\n"; - Printf.printf "*** [Half %s] Marker [%s]\n" (half_to_string half) marker; - - (* Attempt to read the deltas *) - while true do - let length, delta, generation_count, marker' = read_delta block_dev_fd target_response_time in - if marker <> marker' then raise (NonMatchingMarkers(marker, marker')) - else - (* Send the delta to the client *) - Printf.printf "*** [Half %s] Delta with generation count [%Ld] and length %d:\n" (half_to_string half) generation_count length; + Printf.printf "*** [Half %s] Entering half.\n" (half_to_string half); + + (* Seek to the start of the chosen half *) + ignore_int (Unixext.seek_to block_dev_fd (start_of_half half)); + + begin + try + (* Attempt to read a database record *) + let length, db_fn, generation_count, marker = read_database block_dev_fd target_response_time in + Printf.printf "*** [Half %s] Database with generation count [%Ld] and length %d:\n" (half_to_string half) generation_count length; + db_fn (fun chunk len -> print_string chunk); + Printf.printf "\n"; + Printf.printf "*** [Half %s] Marker [%s]\n" (half_to_string half) marker; + + (* Attempt to read the deltas *) + while true do + let length, delta, generation_count, marker' = read_delta block_dev_fd target_response_time in + if marker <> marker' then raise (NonMatchingMarkers(marker, marker')) + else + (* Send the delta to the client *) + Printf.printf "*** [Half %s] Delta with generation count [%Ld] and length %d:\n" (half_to_string half) generation_count length; Printf.printf "%s\n" delta; Printf.printf "*** [Half %s] Marker [%s]\n" (half_to_string half) marker' - done - with - | EndOfDeltas -> Printf.printf "*** [Half %s] No more deltas.\n" (half_to_string half) - | InvalidBlockDevice -> 
Printf.printf "*** [Half %s] Error: no database found\n%!" (half_to_string half) - | NonMatchingMarkers(a,b) -> Printf.printf "*** [Half %s] Error: non-matching marker found: expected [%s], got [%s]\n%!" (half_to_string half) a b - end - ) halves; + done + with + | EndOfDeltas -> Printf.printf "*** [Half %s] No more deltas.\n" (half_to_string half) + | InvalidBlockDevice -> Printf.printf "*** [Half %s] Error: no database found\n%!" (half_to_string half) + | NonMatchingMarkers(a,b) -> Printf.printf "*** [Half %s] Error: non-matching marker found: expected [%s], got [%s]\n%!" (half_to_string half) a b + end + ) halves; Printf.printf "*** End.\n" with | InvalidBlockDevice -> @@ -757,7 +757,7 @@ let _ = if !ctrlsock <> "" && !datasock <> "" then begin let connect_success_mesg = "connect|ack_" in let connect_failure_mesg = "connect|nack" in - + let s = listen_on !ctrlsock in (* Main loop: accept a new client, communicate with it until it stops sending commands, repeat. *) @@ -768,7 +768,7 @@ let _ = R.debug "Awaiting incoming connections on %s..." !ctrlsock; let client = accept_conn s target_startup_response_time in R.debug "Accepted a connection"; - + try (* Open the block device *) let block_dev_fd = open_block_device !block_dev target_startup_response_time in @@ -776,42 +776,42 @@ let _ = finally (fun () -> - (* If no exception was thrown, respond to the client saying that all was okay *) - send_response client connect_success_mesg; - - (* Now read and act upon a sequence of commands, until we receive EOF *) - let stop = ref false in - while not !stop do - R.debug "Reading from client..."; - try - let str = String.make command_size '\000' in - Unixext.really_read client str 0 command_size; - - (* Note: none of the action functions throw any exceptions; they report errors directly to the client. 
*) - let (action_fn, block_time) = match str with - | "writedelta" -> action_writedelta, !Xapi_globs.redo_log_max_block_time_writedelta - | "writedb___" -> action_writedb, !Xapi_globs.redo_log_max_block_time_writedb - | "read______" -> action_read, !Xapi_globs.redo_log_max_block_time_read - | "empty_____" -> action_empty, !Xapi_globs.redo_log_max_block_time_empty - | _ -> (fun _ _ _ _ -> send_failure client (str^"|nack") ("Unknown command "^str)), 0. - in - (* "Start the clock!" -- set the latest time by which we need to have responded to the client. *) - let target_response_time = Unix.gettimeofday() +. block_time in - action_fn block_dev_fd client !datasock target_response_time - with (* this must be an exception in Unixext.really_read because action_fn doesn't throw exceptions *) - | End_of_file -> - R.info "The client sent EOF"; - stop := true - | e -> - R.info "Unexpected error when trying to read from client: %s. Closing connection." (Printexc.to_string e); - stop := true - done; - R.debug "Stopping."; - ignore_exn (fun () -> Unix.close client) + (* If no exception was thrown, respond to the client saying that all was okay *) + send_response client connect_success_mesg; + + (* Now read and act upon a sequence of commands, until we receive EOF *) + let stop = ref false in + while not !stop do + R.debug "Reading from client..."; + try + let str = String.make command_size '\000' in + Unixext.really_read client str 0 command_size; + + (* Note: none of the action functions throw any exceptions; they report errors directly to the client. 
*) + let (action_fn, block_time) = match str with + | "writedelta" -> action_writedelta, !Xapi_globs.redo_log_max_block_time_writedelta + | "writedb___" -> action_writedb, !Xapi_globs.redo_log_max_block_time_writedb + | "read______" -> action_read, !Xapi_globs.redo_log_max_block_time_read + | "empty_____" -> action_empty, !Xapi_globs.redo_log_max_block_time_empty + | _ -> (fun _ _ _ _ -> send_failure client (str^"|nack") ("Unknown command "^str)), 0. + in + (* "Start the clock!" -- set the latest time by which we need to have responded to the client. *) + let target_response_time = Unix.gettimeofday() +. block_time in + action_fn block_dev_fd client !datasock target_response_time + with (* this must be an exception in Unixext.really_read because action_fn doesn't throw exceptions *) + | End_of_file -> + R.info "The client sent EOF"; + stop := true + | e -> + R.info "Unexpected error when trying to read from client: %s. Closing connection." (Printexc.to_string e); + stop := true + done; + R.debug "Stopping."; + ignore_exn (fun () -> Unix.close client) ) (fun () -> - (* Ensure that the block device FD is always closed *) - ignore_exn (fun () -> Unix.close block_dev_fd) + (* Ensure that the block device FD is always closed *) + ignore_exn (fun () -> Unix.close block_dev_fd) ) with (* problems opening block device *) | Unix.Unix_error(a,b,c) -> diff --git a/ocaml/database/database_server_main.ml b/ocaml/database/database_server_main.ml index 62ce7ec7abd..899ecada91f 100644 --- a/ocaml/database/database_server_main.ml +++ b/ocaml/database/database_server_main.ml @@ -1,8 +1,8 @@ open Threadext -type mode = - | Slave of string (* master IP *) - | Master of string (* database filename *) +type mode = + | Slave of string (* master IP *) + | Master of string (* database filename *) let mode = ref None @@ -11,75 +11,75 @@ let m = Mutex.create () let c = Condition.create () (** Handler for the remote database access URL *) -let remote_database_access_handler_v1 req bio = - try 
- Db_remote_cache_access_v1.handler req bio - with e -> - Printf.printf "Caught: %s\n" (Printexc.to_string e); - Printexc.print_backtrace stdout; - flush stdout; - raise e +let remote_database_access_handler_v1 req bio = + try + Db_remote_cache_access_v1.handler req bio + with e -> + Printf.printf "Caught: %s\n" (Printexc.to_string e); + Printexc.print_backtrace stdout; + flush stdout; + raise e (** Handler for the remote database access URL *) -let remote_database_access_handler_v2 req bio = - try - Db_remote_cache_access_v2.handler req bio - with e -> - Printf.printf "Caught: %s\n" (Printexc.to_string e); - Printexc.print_backtrace stdout; - flush stdout; - raise e +let remote_database_access_handler_v2 req bio = + try + Db_remote_cache_access_v2.handler req bio + with e -> + Printf.printf "Caught: %s\n" (Printexc.to_string e); + Printexc.print_backtrace stdout; + flush stdout; + raise e module Local_tests = Database_test.Tests(Db_cache_impl) let schema = Test_schemas.schema -let _ = - let listen_path = ref "./database" in - let self_test = ref false in - Printexc.record_backtrace true; +let _ = + let listen_path = ref "./database" in + let self_test = ref false in + Printexc.record_backtrace true; - Arg.parse [ - "--slave-of", Arg.String (fun master -> mode := Some(Slave master)), "run as a slave of a remote db"; - "--master", Arg.String (fun db -> mode := Some(Master db)), "run as a master from the given db filename"; - "--listen-on", Arg.Set_string listen_path, Printf.sprintf "listen for requests on path (default %s)" !listen_path; - "--test", Arg.Set self_test, "Run unit tests in-process"; - ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" 
x) - "run a stand-alone database server"; + Arg.parse [ + "--slave-of", Arg.String (fun master -> mode := Some(Slave master)), "run as a slave of a remote db"; + "--master", Arg.String (fun db -> mode := Some(Master db)), "run as a master from the given db filename"; + "--listen-on", Arg.Set_string listen_path, Printf.sprintf "listen for requests on path (default %s)" !listen_path; + "--test", Arg.Set self_test, "Run unit tests in-process"; + ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" x) + "run a stand-alone database server"; - match !mode with - | None -> failwith "Requires either --slave-of or --master arguments" - | Some mode -> - begin match mode with - | Slave _ -> failwith "unimplemented" - | Master db_filename -> - Printf.printf "Database path: %s\n%!" db_filename; - let db = Parse_db_conf.make db_filename in - Db_conn_store.initialise_db_connections [ db ]; - let t = Db_backend.make () in - Db_cache_impl.make t [ db ] schema; - Db_cache_impl.sync [ db ] (Db_ref.get_database t); + match !mode with + | None -> failwith "Requires either --slave-of or --master arguments" + | Some mode -> + begin match mode with + | Slave _ -> failwith "unimplemented" + | Master db_filename -> + Printf.printf "Database path: %s\n%!" 
db_filename; + let db = Parse_db_conf.make db_filename in + Db_conn_store.initialise_db_connections [ db ]; + let t = Db_backend.make () in + Db_cache_impl.make t [ db ] schema; + Db_cache_impl.sync [ db ] (Db_ref.get_database t); + + Unixext.unlink_safe !listen_path; + let sockaddr = Unix.ADDR_UNIX !listen_path in + let socket = Http_svr.bind sockaddr "unix_rpc" in + let server = Http_svr.Server.empty in + Http_svr.add_handler server Http.Post "/post_remote_db_access" (Http_svr.BufIO remote_database_access_handler_v1); + Http_svr.add_handler server Http.Post "/post_remote_db_access_v2" (Http_svr.BufIO remote_database_access_handler_v2); + Http_svr.start server socket; + Printf.printf "server listening\n%!"; + if !self_test then begin + Printf.printf "Running unit-tests\n%!"; + Local_tests.main true; + Printf.printf "All tests passed\n%!"; + end; + (* Wait for either completion *) + Mutex.execute m + (fun () -> + while not (!finished) do + Condition.wait c m + done + ); + Http_svr.stop socket + end - Unixext.unlink_safe !listen_path; - let sockaddr = Unix.ADDR_UNIX !listen_path in - let socket = Http_svr.bind sockaddr "unix_rpc" in - let server = Http_svr.Server.empty in - Http_svr.add_handler server Http.Post "/post_remote_db_access" (Http_svr.BufIO remote_database_access_handler_v1); - Http_svr.add_handler server Http.Post "/post_remote_db_access_v2" (Http_svr.BufIO remote_database_access_handler_v2); - Http_svr.start server socket; - Printf.printf "server listening\n%!"; - if !self_test then begin - Printf.printf "Running unit-tests\n%!"; - Local_tests.main true; - Printf.printf "All tests passed\n%!"; - end; - (* Wait for either completion *) - Mutex.execute m - (fun () -> - while not (!finished) do - Condition.wait c m - done - ); - Http_svr.stop socket - end - diff --git a/ocaml/database/database_test.ml b/ocaml/database/database_test.ml index 9e9e58e67bf..c42ee943bfa 100644 --- a/ocaml/database/database_test.ml +++ b/ocaml/database/database_test.ml @@ 
-15,681 +15,681 @@ module Tests = functor(Client: Db_interface.DB_ACCESS) -> struct - let name = "thevmname" - let invalid_name = "notavmname" - - let make_vm r uuid = - [ - "uuid", uuid; - "name__description", ""; - "other_config", "()"; - "tags", "()"; - "name__label", name; - ] - - let make_vbd vm r uuid = [ -(* "ref", r; *) - "qos__supported_algorithms", "()"; - "other_config", "(('owner' ''))"; - "uuid", uuid; - "allowed_operations", "('attach')"; - "qos__algorithm_params", "()"; - "type", "Disk"; - "VM", vm; - "VDI", "OpaqueRef:NULL"; - "qos__algorithm_type", ""; - "metrics", "OpaqueRef:NULL"; - "device", ""; - "empty", "false"; - "bootable", "false"; - "current_operations", "()"; - "unpluggable", "true"; - "status_detail", ""; - "runtime_properties", "()"; - "userdevice", "0"; - "mode", "RW"; - "storage_lock", "false"; - "status_code", "0"; - "currently_attached", "false"; - ] - - let expect_missing_row tbl r f = - try - f () - with Db_exn.DBCache_NotFound("missing row", tbl', r') when tbl' = tbl && r = r' -> () - - let expect_missing_tbl tbl f = - try - f () - with Db_exn.DBCache_NotFound("missing table", tbl', "") when tbl' = tbl -> () - - let expect_uniqueness_violation tbl fld v f = - try - f () - with Db_exn.Uniqueness_constraint_violation(tbl', fld', v') when tbl' = tbl && fld' = fld && v' = v -> () - - let expect_missing_uuid tbl uuid f = - try - f () - with Db_exn.Read_missing_uuid(tbl', "", uuid') when tbl' = tbl && uuid' = uuid -> () - - let expect_missing_field name f = - try - f () - with Db_exn.DBCache_NotFound("missing field", name', "") when name' = name -> () - - let test_invalid_where_record fn_name fn = - Printf.printf "%s ...\n" fn_name; - expect_missing_tbl "Vm" - (fun () -> - let (_: string list) = fn { Db_cache_types.table = "Vm"; return = ""; where_field = ""; where_value = "" } in - failwith (Printf.sprintf "%s " fn_name) - ); - Printf.printf "%s \n" fn_name; - expect_missing_field "wibble" - (fun () -> - let (_: string list) = fn { 
Db_cache_types.table = "VM"; return = "wibble"; where_field = Escaping.escape_id [ "name"; "label" ]; where_value = name } in - failwith (Printf.sprintf "%s " fn_name) - ); - Printf.printf "%s \n" fn_name; - expect_missing_field "wibble" - (fun () -> - let (_: string list) = fn { Db_cache_types.table = "VM"; return = Escaping.escape_id [ "name"; "label" ]; where_field = "wibble"; where_value = "" } in - failwith (Printf.sprintf "%s " fn_name) - ) - - (* Verify the ref_index contents are correct for a given [tblname] and [key] (uuid/ref) *) - let check_ref_index t tblname key = match Ref_index.lookup key with - | None -> - (* We should fail to find the row *) - expect_missing_row tblname key - (fun () -> let (_: string) = Client.read_field t tblname "uuid" key in ()); - expect_missing_uuid tblname key - (fun () -> let (_: string) = Client.db_get_by_uuid t tblname key in ()) - | Some { Ref_index.name_label = name_label; uuid = uuid; _ref = _ref } -> - (* key should be either uuid or _ref *) - if key <> uuid && (key <> _ref) - then failwith (Printf.sprintf "check_ref_index %s key %s: got ref %s uuid %s" tblname key _ref uuid); - let real_ref = if Client.is_valid_ref t key then key else Client.db_get_by_uuid t tblname key in - let real_name_label = - try Some (Client.read_field t tblname "name__label" real_ref) - with _ -> None in - if name_label <> real_name_label - then failwith (Printf.sprintf "check_ref_index %s key %s: ref_index name_label = %s; db has %s" tblname key (Opt.default "None" name_label) (Opt.default "None" real_name_label)) - - - open Pervasiveext - open Db_cache_types - - let create_test_db () = - let schema = Test_schemas.many_to_many in - let db = - ((fun x -> x) - ++ (Db_backend.blow_away_non_persistent_fields schema) - ++ (Db_upgrade.generic_database_upgrade)) - (Database.make schema) in - - db - - let check_many_to_many () = - let db = create_test_db () in - (* make a foo with bars = [] *) - (* make a bar with foos = [] *) - (* add 'bar' to 
foo.bars *) - let db = - ((fun x -> x) - ++ (set_field "foo" "foo:1" "bars" (add_to_set "bar:1" "()")) - ++ (add_row "foo" "foo:1" (Row.add 0L Db_names.ref "foo:1" (Row.add 0L "bars" "()" Row.empty))) - ++ (add_row "bar" "bar:1" (Row.add 0L Db_names.ref "bar:1" (Row.add 0L "foos" "()" Row.empty)))) db in - (* check that 'bar.foos' includes 'foo' *) - let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in - let bar_foos = Row.find "foos" bar_1 in - if bar_foos <> "('foo:1')" - then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected ('foo:1') got %s" bar_foos); - - (* set foo.bars to [] *) -(* let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in*) - let db = set_field "foo" "foo:1" "bars" "()" db in - (* check that 'bar.foos' is empty *) - let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in - let bar_foos = Row.find "foos" bar_1 in - if bar_foos <> "()" - then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected () got %s" bar_foos); - (* add 'bar' to foo.bars *) - let db = set_field "foo" "foo:1" "bars" "('bar:1')" db in - (* check that 'bar.foos' includes 'foo' *) - let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in - let bar_foos = Row.find "foos" bar_1 in - if bar_foos <> "('foo:1')" - then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected ('foo:1') got %s - 2" bar_foos); - (* delete 'bar' *) - let db = remove_row "bar" "bar:1" db in - (* check that 'foo.bars' is empty *) - let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in - let foo_bars = Row.find "bars" foo_1 in - if foo_bars <> "()" - then failwith (Printf.sprintf "check_many_to_many: foo(foo:1).foos expected () got %s" foo_bars); - () - - let check_events t = - let dump db g = - let tables = Db_cache_types.Database.tableset db in - Db_cache_types.TableSet.fold_over_recent g - (fun name _ table acc -> - 
Db_cache_types.Table.fold_over_recent g - (fun r { Db_cache_types.Stat.created; modified; deleted } _ acc -> - let s = - try - let row = Db_cache_types.Table.find r table in - let s = Db_cache_types.Row.fold_over_recent g - (fun k _ v acc -> - Printf.sprintf "%s %s=%s" acc k v) row "" in - s - with _ -> "(deleted)" - in - Printf.printf "%s(%s): (%Ld %Ld %Ld) %s\n" name r created modified deleted s; - () - ) table ()) tables () - in - - let get_created db g = - let tables = Db_cache_types.Database.tableset db in - Db_cache_types.TableSet.fold_over_recent g - (fun name _ table acc -> - Db_cache_types.Table.fold_over_recent g - (fun r { Db_cache_types.Stat.created } _ acc -> - if created>=g then (name,r)::acc else acc) table acc - ) tables [] - in - - let get_updated db g = - let tables = Db_cache_types.Database.tableset db in - Db_cache_types.TableSet.fold_over_recent g - (fun name _ table acc -> - Db_cache_types.Table.fold_over_recent g - (fun r _ _ acc -> - let row = Db_cache_types.Table.find r table in - Db_cache_types.Row.fold_over_recent g - (fun k _ v acc -> - (r,(k,v))::acc) row acc) - table acc) tables [] - in - - let get_deleted db g = - let tables = Db_cache_types.Database.tableset db in - Db_cache_types.TableSet.fold_over_recent g - (fun name _ table acc -> - Db_cache_types.Table.fold_over_deleted g - (fun r { Db_cache_types.Stat.deleted } acc -> - if deleted > g then r::acc else acc) - table acc) tables [] - in - - let get_max db = - let tables = Db_cache_types.Database.tableset db in - Db_cache_types.TableSet.fold_over_recent (-1L) - (fun _ { Db_cache_types.Stat.created; modified; deleted } _ largest -> - max created (max modified (max deleted largest))) tables (-1L) - in - - let db = Db_ref.get_database t in - let g = get_max db in - Printf.printf "check_events: current generation is: %Ld\n" g; - - let vm = "vmref" in - let vm_uuid = "vmuuid" in - let vbd = "vbdref" in - let vbd_uuid = "vbduuid" in - let vbd2 = "vbdref2" in - let vbd_uuid2 = "vbduuid2" 
in - - Client.create_row t "VM" (make_vm vm vm_uuid) vm; - let db = Db_ref.get_database t in - let g2 = get_max db in - Printf.printf "generation after create_row is: %Ld\n" g2; - dump db g; - let created = get_created db g in - Printf.printf "===TEST=== Checking that the VM creation event is reported: "; - if (List.exists (fun (table,r) -> table="VM" && r=vm) created) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - let (_: unit) = Client.write_field t "VM" vm "name__label" "moo" in - let db = Db_ref.get_database t in - let g3 = get_max db in - Printf.printf "generation after write_field is: %Ld\n" g3; - dump db g2; - let updated = get_updated db g2 in - let vm_updated = List.filter (fun (r,_) -> r=vm) updated in - let vm_updated = List.map snd vm_updated in - Printf.printf "===TEST=== Checking that the VM field update is reported: "; - if (List.mem_assoc "name__label" vm_updated) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; - let db = Db_ref.get_database t in - let g4 = get_max db in - Printf.printf "generation after create VBD is: %Ld\n" g4; - dump db g3; - let updated = get_updated db g3 in - Printf.printf "===TEST=== Checking one-to-many after one-create: "; - let vm_updated = List.filter (fun (r,_) -> r=vm) updated in - let vm_updated = List.map snd vm_updated in - if (List.mem_assoc "VBDs" vm_updated) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - let (_: unit) = Client.write_field t "VBD" vbd "VM" "moo" in - let db = Db_ref.get_database t in - let g5 = get_max db in - Printf.printf "generation after write_field is: %Ld\n" g5; - dump db g4; - let updated = get_updated db g4 in - Printf.printf "===TEST=== Checking one-to-many after one-update: "; - let vm_updated = List.filter (fun (r,_) -> r=vm) updated in - let vm_updated = List.map snd vm_updated in - if 
(List.mem_assoc "VBDs" vm_updated) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - let (_: unit) = Client.write_field t "VBD" vbd "type" "Banana" in - let db = Db_ref.get_database t in - let g6 = get_max db in - Printf.printf "generation after write_field is: %Ld\n" g6; - dump db g5; - let updated = get_updated db g5 in - Printf.printf "===TEST=== Checking one-to-many after one-update of non-reference field: "; - let vm_updated = List.filter (fun (r,_) -> r=vm) updated in - let vm_updated = List.map snd vm_updated in - if not (List.mem_assoc "VBDs" vm_updated) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - let (_ : unit) = Client.delete_row t "VBD" vbd in - let db = Db_ref.get_database t in - let g7 = get_max db in - Printf.printf "generation after delete VBD is: %Ld\n" g7; - Printf.printf "===TEST=== Checking deleted event: "; - let deleted = get_deleted db g6 in - if (List.mem vbd deleted) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - - Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; - let (_ : unit) = Client.delete_row t "VBD" vbd in - let db = Db_ref.get_database t in - let g8 = get_max db in - Printf.printf "generation after create/delete VBD is: %Ld\n" g8; - Printf.printf "===TEST=== Checking the VBD doesn't appear in the deleted list: "; - let deleted = get_deleted db g7 in - if not (List.mem vbd deleted) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - dump db g7; - - Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; - let db = Db_ref.get_database t in - let g9 = get_max db in - let (_ : unit) = Client.delete_row t "VBD" vbd in - Client.create_row t "VBD" (make_vbd vm vbd2 vbd_uuid2) vbd2; - let (_ : unit) = Client.delete_row t "VBD" vbd2 in - let db = Db_ref.get_database t in - let g10 = get_max db in - - Printf.printf "===TEST=== Checking for masking of 
delete events: "; - - - let deleted = get_deleted db g9 in - if (List.mem vbd deleted) - then (Printf.printf "Pass\n") - else (Printf.printf "Fail\n"; failwith "Event problem"); - dump db g9; - ignore(g10); - - - - () - - let main in_process = - (* reference which we create *) - let valid_ref = "ref1" in - let valid_uuid = "uuid1" in - let invalid_ref = "foo" in - let invalid_uuid = "bar" in - - let t = if in_process then Db_backend.make () else Db_ref.Remote in - - let vbd_ref = "waz" in - let vbd_uuid = "whatever" in - - check_many_to_many (); - - (* Before we begin, clear out any old state: *) - expect_missing_row "VM" valid_ref - (fun () -> - Client.delete_row t "VM" valid_ref; - ); - if in_process then check_ref_index t "VM" valid_ref; - - expect_missing_row "VBD" vbd_ref - (fun () -> - Client.delete_row t "VBD" vbd_ref; - ); - if in_process then check_ref_index t "VBD" vbd_ref; - - Printf.printf "Deleted stale state from previous test\n"; - - Printf.printf "get_table_from_ref \n"; - begin - match Client.get_table_from_ref t invalid_ref with - | None -> Printf.printf "Reference '%s' has no associated table\n" invalid_ref - | Some t -> failwith (Printf.sprintf "Reference '%s' exists in table '%s'" invalid_ref t) - end; - Printf.printf "is_valid_ref \n"; - if Client.is_valid_ref t invalid_ref then failwith "is_valid_ref = true"; - - Printf.printf "read_refs \n"; - let existing_refs = Client.read_refs t "VM" in - Printf.printf "VM refs: [ %s ]\n" (String.concat "; " existing_refs); - Printf.printf "read_refs \n"; - expect_missing_tbl "Vm" - (fun () -> - let (_: string list) = Client.read_refs t "Vm" in - () - ); - Printf.printf "delete_row \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - Client.delete_row t "VM" invalid_ref; - failwith "delete_row of a non-existent row silently succeeded" - ); - Printf.printf "create_row \n"; - expect_missing_field "name__label" - (fun () -> - let broken_vm = List.filter (fun (k, _) -> k <> "name__label") (make_vm 
valid_ref valid_uuid) in - Client.create_row t "VM" broken_vm valid_ref; - failwith "create_row " - ); - Printf.printf "create_row \n"; - Client.create_row t "VM" (make_vm valid_ref valid_uuid) valid_ref; - if in_process then check_ref_index t "VM" valid_ref; - Printf.printf "is_valid_ref \n"; - if not (Client.is_valid_ref t valid_ref) - then failwith "is_valid_ref = false, after create_row"; - Printf.printf "get_table_from_ref \n"; - begin match Client.get_table_from_ref t valid_ref with - | Some "VM" -> () - | Some t -> failwith "get_table_from_ref : invalid table" - | None -> failwith "get_table_from_ref : None" - end; - Printf.printf "read_refs includes \n"; - if not (List.mem valid_ref (Client.read_refs t "VM")) - then failwith "read_refs did not include "; - - Printf.printf "create_row \n"; - expect_uniqueness_violation "VM" "_ref" valid_ref - (fun () -> - Client.create_row t "VM" (make_vm valid_ref (valid_uuid ^ "unique")) valid_ref; - failwith "create_row " - ); - Printf.printf "create_row \n"; - expect_uniqueness_violation "VM" "uuid" valid_uuid - (fun () -> - Client.create_row t "VM" (make_vm (valid_ref ^ "unique") valid_uuid) (valid_ref ^ "unique"); - failwith "create_row " - ); - Printf.printf "db_get_by_uuid \n"; - let r = Client.db_get_by_uuid t "VM" valid_uuid in - if r <> valid_ref - then failwith (Printf.sprintf "db_get_by_uuid : got %s; expected %s" r valid_ref); - Printf.printf "db_get_by_uuid \n"; - expect_missing_uuid "VM" invalid_uuid - (fun () -> - let (_: string) = Client.db_get_by_uuid t "VM" invalid_uuid in - failwith "db_get_by_uuid " - ); - Printf.printf "get_by_name_label \n"; - if Client.db_get_by_name_label t "VM" invalid_name <> [] - then failwith "db_get_by_name_label "; - - Printf.printf "get_by_name_label \n"; - if Client.db_get_by_name_label t "VM" name <> [ valid_ref ] - then failwith "db_get_by_name_label "; - - Printf.printf "read_field \n"; - if Client.read_field t "VM" "name__label" valid_ref <> name - then failwith 
"read_field : invalid name"; - - Printf.printf "read_field \n"; - if Client.read_field t "VM" "protection_policy" valid_ref <> "OpaqueRef:NULL" - then failwith "read_field : invalid protection_policy"; - - Printf.printf "read_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: string) = Client.read_field t "VM" "name__label" invalid_ref in - failwith "read_field " - ); - Printf.printf "read_field \n"; - expect_missing_field "name_label" - (fun () -> - let (_: string) = Client.read_field t "VM" "name_label" valid_ref in - failwith "read_field " - ); - Printf.printf "read_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: string) = Client.read_field t "VM" "name_label" invalid_ref in - failwith "read_field " - ); - Printf.printf "read_field_where \n"; - let where_name_label = - { Db_cache_types.table = "VM"; return = Escaping.escape_id(["name"; "label"]); where_field="uuid"; where_value = valid_uuid } in - let xs = Client.read_field_where t where_name_label in - if not (List.mem name xs) - then failwith "read_field_where "; - test_invalid_where_record "read_field_where" (Client.read_field_where t); - - let xs = Client.read_set_ref t where_name_label in - if not (List.mem name xs) - then failwith "read_set_ref "; - test_invalid_where_record "read_set_ref" (Client.read_set_ref t); - - Printf.printf "write_field \n"; - expect_missing_tbl "Vm" - (fun () -> - let (_: unit) = Client.write_field t "Vm" "" "" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let (_: unit) = Client.write_field t "VM" invalid_ref "" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - expect_missing_field "wibble" - (fun () -> - let (_: unit) = Client.write_field t "VM" valid_ref "wibble" "" in - failwith "write_field " - ); - Printf.printf "write_field \n"; - let (_: unit) = Client.write_field t "VM" valid_ref (Escaping.escape_id ["name"; "description"]) 
"description" in - if in_process then check_ref_index t "VM" valid_ref; - Printf.printf "write_field - invalidating ref_index\n"; - let (_: unit) = Client.write_field t "VM" valid_ref (Escaping.escape_id ["name"; "label"]) "newlabel" in - if in_process then check_ref_index t "VM" valid_ref; - - Printf.printf "read_record \n"; - expect_missing_tbl "Vm" - (fun () -> - let _ = Client.read_record t "Vm" invalid_ref in - failwith "read_record " - ); - Printf.printf "read_record \n"; - expect_missing_row "VM" invalid_ref - (fun () -> - let _ = Client.read_record t "VM" invalid_ref in - failwith "read_record " - ); - Printf.printf "read_record \n"; - let fv_list, fvs_list = Client.read_record t "VM" valid_ref in - if not(List.mem_assoc (Escaping.escape_id [ "name"; "label" ]) fv_list) - then failwith "read_record 1"; - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 2"; - Printf.printf "read_record foreign key\n"; - Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; - let fv_list, fvs_list = Client.read_record t "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [ vbd_ref ] then begin - Printf.printf "fv_list = [ %s ] fvs_list = [ %s ]\n%!" 
(String.concat "; " (List.map (fun (k, v) -> k ^":" ^ v) fv_list)) (String.concat "; " (List.map (fun (k, v) -> k ^ ":" ^ (String.concat ", " v)) fvs_list)); - failwith "read_record 3" - end; - Printf.printf "read_record deleted foreign key\n"; - Client.delete_row t "VBD" vbd_ref; - let fv_list, fvs_list = Client.read_record t "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 4"; - Printf.printf "read_record overwritten foreign key\n"; - Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; - let fv_list, fvs_list = Client.read_record t "VM" valid_ref in - if List.assoc "VBDs" fvs_list = [] - then failwith "read_record 5"; - Client.write_field t "VBD" vbd_ref (Escaping.escape_id [ "VM" ]) "overwritten"; - let fv_list, fvs_list = Client.read_record t "VM" valid_ref in - if List.assoc "VBDs" fvs_list <> [] - then failwith "read_record 6"; - - expect_missing_tbl "Vm" - (fun () -> - let _ = Client.read_records_where t "Vm" Db_filter_types.True in - () - ); - let xs = Client.read_records_where t "VM" Db_filter_types.True in - if List.length xs <> 1 - then failwith "read_records_where 2"; - let xs = Client.read_records_where t "VM" Db_filter_types.False in - if xs <> [] - then failwith "read_records_where 3"; - - expect_missing_tbl "Vm" - (fun () -> - let _ = Client.find_refs_with_filter t "Vm" Db_filter_types.True in - failwith "find_refs_with_filter "; - ); - let xs = Client.find_refs_with_filter t "VM" Db_filter_types.True in - if List.length xs <> 1 - then failwith "find_refs_with_filter 1"; - let xs = Client.find_refs_with_filter t "VM" Db_filter_types.False in - if xs <> [] - then failwith "find_refs_with_filter 2"; - - expect_missing_tbl "Vm" - (fun () -> - Client.process_structured_field t ("","") "Vm" "wibble" invalid_ref Db_cache_types.AddSet; - failwith "process_structure_field " - ); - expect_missing_field "wibble" - (fun () -> - Client.process_structured_field t ("","") "VM" "wibble" valid_ref 
Db_cache_types.AddSet; - failwith "process_structure_field " - ); - expect_missing_row "VM" invalid_ref - (fun () -> - Client.process_structured_field t ("","") "VM" (Escaping.escape_id ["name"; "label"]) invalid_ref Db_cache_types.AddSet; - failwith "process_structure_field " - ); - Client.process_structured_field t ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; - if Client.read_field t "VM" "tags" valid_ref <> "('foo')" - then failwith "process_structure_field expected ('foo')"; - Client.process_structured_field t ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; - if Client.read_field t "VM" "tags" valid_ref <> "('foo')" - then failwith "process_structure_field expected ('foo') 2"; - Client.process_structured_field t ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; - - if Client.read_field t "VM" "other_config" valid_ref <> "(('foo' 'bar'))" - then failwith "process_structure_field expected (('foo' 'bar')) 3"; - - begin - try - Client.process_structured_field t ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; - with Db_exn.Duplicate_key("VM", "other_config", r', "foo") when r' = valid_ref -> () - end; - if Client.read_field t "VM" "other_config" valid_ref <> "(('foo' 'bar'))" - then failwith "process_structure_field expected (('foo' 'bar')) 4"; - - (* Check that non-persistent fields are filled with an empty value *) - - (* Event tests *) - - check_events t; - - (* Performance test *) - if in_process then begin - let time n f = - let start = Unix.gettimeofday () in - for i = 0 to n do - f i - done; - let total = Unix.gettimeofday () -. start in - float_of_int n /. 
total in - - let n = 5000 in - - let rpc_time = time n (fun _ -> - let (_: bool) = Client.is_valid_ref t valid_ref in ()) in - - Printf.printf "%.2f primitive RPC calls/sec\n" rpc_time; - - (* Delete stuff left-over from the previous run *) - let delete_time = time n - (fun i -> - let rf = Printf.sprintf "%s:%d" vbd_ref i in - try - Client.delete_row t "VBD" rf - with _ -> () - ) in - Printf.printf "Deleted %d VBD records, %.2f calls/sec\n%!" n delete_time; - - expect_missing_row "VBD" vbd_ref - (fun () -> - Client.delete_row t "VBD" vbd_ref; - ); - - (* Create lots of VBDs referening no VM *) - let create_time = time n - (fun i -> - let rf = Printf.sprintf "%s:%d" vbd_ref i in - let uuid = Printf.sprintf "%s:%d" vbd_uuid i in - Client.create_row t "VBD" (make_vbd invalid_ref rf uuid) rf; - ) in - Printf.printf "Created %d VBD records, %.2f calls/sec\n%!" n create_time; - - let m = 300000 in (* multiple of 3 *) - - (* Time a benign VM create_row, delete_row, read_record sequence *) - let benign_time = time m - (fun i -> - if i < (m / 3 * 2) then begin - if i mod 2 = 0 - then Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref - else Client.delete_row t "VBD" vbd_ref - end else - let _ = Client.read_record t "VM" valid_ref in - () - ) in - Printf.printf "good sequence: %.2f calls/sec\n%!" benign_time; - - let malign_time = time m - (fun i -> - match i mod 3 with - | 0 -> Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref - | 1 -> Client.delete_row t "VBD" vbd_ref - | 2 -> let _ = Client.read_record t "VM" valid_ref in () - | _ -> () - ) in - Printf.printf "bad sequence: %.2f calls/sec\n%!" 
malign_time; - end + let name = "thevmname" + let invalid_name = "notavmname" + + let make_vm r uuid = + [ + "uuid", uuid; + "name__description", ""; + "other_config", "()"; + "tags", "()"; + "name__label", name; + ] + + let make_vbd vm r uuid = [ + (* "ref", r; *) + "qos__supported_algorithms", "()"; + "other_config", "(('owner' ''))"; + "uuid", uuid; + "allowed_operations", "('attach')"; + "qos__algorithm_params", "()"; + "type", "Disk"; + "VM", vm; + "VDI", "OpaqueRef:NULL"; + "qos__algorithm_type", ""; + "metrics", "OpaqueRef:NULL"; + "device", ""; + "empty", "false"; + "bootable", "false"; + "current_operations", "()"; + "unpluggable", "true"; + "status_detail", ""; + "runtime_properties", "()"; + "userdevice", "0"; + "mode", "RW"; + "storage_lock", "false"; + "status_code", "0"; + "currently_attached", "false"; + ] + + let expect_missing_row tbl r f = + try + f () + with Db_exn.DBCache_NotFound("missing row", tbl', r') when tbl' = tbl && r = r' -> () + + let expect_missing_tbl tbl f = + try + f () + with Db_exn.DBCache_NotFound("missing table", tbl', "") when tbl' = tbl -> () + + let expect_uniqueness_violation tbl fld v f = + try + f () + with Db_exn.Uniqueness_constraint_violation(tbl', fld', v') when tbl' = tbl && fld' = fld && v' = v -> () + + let expect_missing_uuid tbl uuid f = + try + f () + with Db_exn.Read_missing_uuid(tbl', "", uuid') when tbl' = tbl && uuid' = uuid -> () + + let expect_missing_field name f = + try + f () + with Db_exn.DBCache_NotFound("missing field", name', "") when name' = name -> () + + let test_invalid_where_record fn_name fn = + Printf.printf "%s ...\n" fn_name; + expect_missing_tbl "Vm" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "Vm"; return = ""; where_field = ""; where_value = "" } in + failwith (Printf.sprintf "%s " fn_name) + ); + Printf.printf "%s \n" fn_name; + expect_missing_field "wibble" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "VM"; return = "wibble"; where_field = 
Escaping.escape_id [ "name"; "label" ]; where_value = name } in + failwith (Printf.sprintf "%s " fn_name) + ); + Printf.printf "%s \n" fn_name; + expect_missing_field "wibble" + (fun () -> + let (_: string list) = fn { Db_cache_types.table = "VM"; return = Escaping.escape_id [ "name"; "label" ]; where_field = "wibble"; where_value = "" } in + failwith (Printf.sprintf "%s " fn_name) + ) + + (* Verify the ref_index contents are correct for a given [tblname] and [key] (uuid/ref) *) + let check_ref_index t tblname key = match Ref_index.lookup key with + | None -> + (* We should fail to find the row *) + expect_missing_row tblname key + (fun () -> let (_: string) = Client.read_field t tblname "uuid" key in ()); + expect_missing_uuid tblname key + (fun () -> let (_: string) = Client.db_get_by_uuid t tblname key in ()) + | Some { Ref_index.name_label = name_label; uuid = uuid; _ref = _ref } -> + (* key should be either uuid or _ref *) + if key <> uuid && (key <> _ref) + then failwith (Printf.sprintf "check_ref_index %s key %s: got ref %s uuid %s" tblname key _ref uuid); + let real_ref = if Client.is_valid_ref t key then key else Client.db_get_by_uuid t tblname key in + let real_name_label = + try Some (Client.read_field t tblname "name__label" real_ref) + with _ -> None in + if name_label <> real_name_label + then failwith (Printf.sprintf "check_ref_index %s key %s: ref_index name_label = %s; db has %s" tblname key (Opt.default "None" name_label) (Opt.default "None" real_name_label)) + + + open Pervasiveext + open Db_cache_types + + let create_test_db () = + let schema = Test_schemas.many_to_many in + let db = + ((fun x -> x) + ++ (Db_backend.blow_away_non_persistent_fields schema) + ++ (Db_upgrade.generic_database_upgrade)) + (Database.make schema) in + + db + + let check_many_to_many () = + let db = create_test_db () in + (* make a foo with bars = [] *) + (* make a bar with foos = [] *) + (* add 'bar' to foo.bars *) + let db = + ((fun x -> x) + ++ (set_field "foo" 
"foo:1" "bars" (add_to_set "bar:1" "()")) + ++ (add_row "foo" "foo:1" (Row.add 0L Db_names.ref "foo:1" (Row.add 0L "bars" "()" Row.empty))) + ++ (add_row "bar" "bar:1" (Row.add 0L Db_names.ref "bar:1" (Row.add 0L "foos" "()" Row.empty)))) db in + (* check that 'bar.foos' includes 'foo' *) + let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in + let bar_foos = Row.find "foos" bar_1 in + if bar_foos <> "('foo:1')" + then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected ('foo:1') got %s" bar_foos); + + (* set foo.bars to [] *) + (* let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in*) + let db = set_field "foo" "foo:1" "bars" "()" db in + (* check that 'bar.foos' is empty *) + let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in + let bar_foos = Row.find "foos" bar_1 in + if bar_foos <> "()" + then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected () got %s" bar_foos); + (* add 'bar' to foo.bars *) + let db = set_field "foo" "foo:1" "bars" "('bar:1')" db in + (* check that 'bar.foos' includes 'foo' *) + let bar_1 = Table.find "bar:1" (TableSet.find "bar" (Database.tableset db)) in + let bar_foos = Row.find "foos" bar_1 in + if bar_foos <> "('foo:1')" + then failwith (Printf.sprintf "check_many_to_many: bar(bar:1).foos expected ('foo:1') got %s - 2" bar_foos); + (* delete 'bar' *) + let db = remove_row "bar" "bar:1" db in + (* check that 'foo.bars' is empty *) + let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in + let foo_bars = Row.find "bars" foo_1 in + if foo_bars <> "()" + then failwith (Printf.sprintf "check_many_to_many: foo(foo:1).foos expected () got %s" foo_bars); + () + + let check_events t = + let dump db g = + let tables = Db_cache_types.Database.tableset db in + Db_cache_types.TableSet.fold_over_recent g + (fun name _ table acc -> + Db_cache_types.Table.fold_over_recent g + (fun r { 
Db_cache_types.Stat.created; modified; deleted } _ acc -> + let s = + try + let row = Db_cache_types.Table.find r table in + let s = Db_cache_types.Row.fold_over_recent g + (fun k _ v acc -> + Printf.sprintf "%s %s=%s" acc k v) row "" in + s + with _ -> "(deleted)" + in + Printf.printf "%s(%s): (%Ld %Ld %Ld) %s\n" name r created modified deleted s; + () + ) table ()) tables () + in + + let get_created db g = + let tables = Db_cache_types.Database.tableset db in + Db_cache_types.TableSet.fold_over_recent g + (fun name _ table acc -> + Db_cache_types.Table.fold_over_recent g + (fun r { Db_cache_types.Stat.created } _ acc -> + if created>=g then (name,r)::acc else acc) table acc + ) tables [] + in + + let get_updated db g = + let tables = Db_cache_types.Database.tableset db in + Db_cache_types.TableSet.fold_over_recent g + (fun name _ table acc -> + Db_cache_types.Table.fold_over_recent g + (fun r _ _ acc -> + let row = Db_cache_types.Table.find r table in + Db_cache_types.Row.fold_over_recent g + (fun k _ v acc -> + (r,(k,v))::acc) row acc) + table acc) tables [] + in + + let get_deleted db g = + let tables = Db_cache_types.Database.tableset db in + Db_cache_types.TableSet.fold_over_recent g + (fun name _ table acc -> + Db_cache_types.Table.fold_over_deleted g + (fun r { Db_cache_types.Stat.deleted } acc -> + if deleted > g then r::acc else acc) + table acc) tables [] + in + + let get_max db = + let tables = Db_cache_types.Database.tableset db in + Db_cache_types.TableSet.fold_over_recent (-1L) + (fun _ { Db_cache_types.Stat.created; modified; deleted } _ largest -> + max created (max modified (max deleted largest))) tables (-1L) + in + + let db = Db_ref.get_database t in + let g = get_max db in + Printf.printf "check_events: current generation is: %Ld\n" g; + + let vm = "vmref" in + let vm_uuid = "vmuuid" in + let vbd = "vbdref" in + let vbd_uuid = "vbduuid" in + let vbd2 = "vbdref2" in + let vbd_uuid2 = "vbduuid2" in + + Client.create_row t "VM" (make_vm vm 
vm_uuid) vm; + let db = Db_ref.get_database t in + let g2 = get_max db in + Printf.printf "generation after create_row is: %Ld\n" g2; + dump db g; + let created = get_created db g in + Printf.printf "===TEST=== Checking that the VM creation event is reported: "; + if (List.exists (fun (table,r) -> table="VM" && r=vm) created) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + let (_: unit) = Client.write_field t "VM" vm "name__label" "moo" in + let db = Db_ref.get_database t in + let g3 = get_max db in + Printf.printf "generation after write_field is: %Ld\n" g3; + dump db g2; + let updated = get_updated db g2 in + let vm_updated = List.filter (fun (r,_) -> r=vm) updated in + let vm_updated = List.map snd vm_updated in + Printf.printf "===TEST=== Checking that the VM field update is reported: "; + if (List.mem_assoc "name__label" vm_updated) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; + let db = Db_ref.get_database t in + let g4 = get_max db in + Printf.printf "generation after create VBD is: %Ld\n" g4; + dump db g3; + let updated = get_updated db g3 in + Printf.printf "===TEST=== Checking one-to-many after one-create: "; + let vm_updated = List.filter (fun (r,_) -> r=vm) updated in + let vm_updated = List.map snd vm_updated in + if (List.mem_assoc "VBDs" vm_updated) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + let (_: unit) = Client.write_field t "VBD" vbd "VM" "moo" in + let db = Db_ref.get_database t in + let g5 = get_max db in + Printf.printf "generation after write_field is: %Ld\n" g5; + dump db g4; + let updated = get_updated db g4 in + Printf.printf "===TEST=== Checking one-to-many after one-update: "; + let vm_updated = List.filter (fun (r,_) -> r=vm) updated in + let vm_updated = List.map snd vm_updated in + if (List.mem_assoc "VBDs" vm_updated) + then 
(Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + let (_: unit) = Client.write_field t "VBD" vbd "type" "Banana" in + let db = Db_ref.get_database t in + let g6 = get_max db in + Printf.printf "generation after write_field is: %Ld\n" g6; + dump db g5; + let updated = get_updated db g5 in + Printf.printf "===TEST=== Checking one-to-many after one-update of non-reference field: "; + let vm_updated = List.filter (fun (r,_) -> r=vm) updated in + let vm_updated = List.map snd vm_updated in + if not (List.mem_assoc "VBDs" vm_updated) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + let (_ : unit) = Client.delete_row t "VBD" vbd in + let db = Db_ref.get_database t in + let g7 = get_max db in + Printf.printf "generation after delete VBD is: %Ld\n" g7; + Printf.printf "===TEST=== Checking deleted event: "; + let deleted = get_deleted db g6 in + if (List.mem vbd deleted) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + + Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; + let (_ : unit) = Client.delete_row t "VBD" vbd in + let db = Db_ref.get_database t in + let g8 = get_max db in + Printf.printf "generation after create/delete VBD is: %Ld\n" g8; + Printf.printf "===TEST=== Checking the VBD doesn't appear in the deleted list: "; + let deleted = get_deleted db g7 in + if not (List.mem vbd deleted) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + dump db g7; + + Client.create_row t "VBD" (make_vbd vm vbd vbd_uuid) vbd; + let db = Db_ref.get_database t in + let g9 = get_max db in + let (_ : unit) = Client.delete_row t "VBD" vbd in + Client.create_row t "VBD" (make_vbd vm vbd2 vbd_uuid2) vbd2; + let (_ : unit) = Client.delete_row t "VBD" vbd2 in + let db = Db_ref.get_database t in + let g10 = get_max db in + + Printf.printf "===TEST=== Checking for masking of delete events: "; + + + let deleted = 
get_deleted db g9 in + if (List.mem vbd deleted) + then (Printf.printf "Pass\n") + else (Printf.printf "Fail\n"; failwith "Event problem"); + dump db g9; + ignore(g10); + + + + () + + let main in_process = + (* reference which we create *) + let valid_ref = "ref1" in + let valid_uuid = "uuid1" in + let invalid_ref = "foo" in + let invalid_uuid = "bar" in + + let t = if in_process then Db_backend.make () else Db_ref.Remote in + + let vbd_ref = "waz" in + let vbd_uuid = "whatever" in + + check_many_to_many (); + + (* Before we begin, clear out any old state: *) + expect_missing_row "VM" valid_ref + (fun () -> + Client.delete_row t "VM" valid_ref; + ); + if in_process then check_ref_index t "VM" valid_ref; + + expect_missing_row "VBD" vbd_ref + (fun () -> + Client.delete_row t "VBD" vbd_ref; + ); + if in_process then check_ref_index t "VBD" vbd_ref; + + Printf.printf "Deleted stale state from previous test\n"; + + Printf.printf "get_table_from_ref \n"; + begin + match Client.get_table_from_ref t invalid_ref with + | None -> Printf.printf "Reference '%s' has no associated table\n" invalid_ref + | Some t -> failwith (Printf.sprintf "Reference '%s' exists in table '%s'" invalid_ref t) + end; + Printf.printf "is_valid_ref \n"; + if Client.is_valid_ref t invalid_ref then failwith "is_valid_ref = true"; + + Printf.printf "read_refs \n"; + let existing_refs = Client.read_refs t "VM" in + Printf.printf "VM refs: [ %s ]\n" (String.concat "; " existing_refs); + Printf.printf "read_refs \n"; + expect_missing_tbl "Vm" + (fun () -> + let (_: string list) = Client.read_refs t "Vm" in + () + ); + Printf.printf "delete_row \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + Client.delete_row t "VM" invalid_ref; + failwith "delete_row of a non-existent row silently succeeded" + ); + Printf.printf "create_row \n"; + expect_missing_field "name__label" + (fun () -> + let broken_vm = List.filter (fun (k, _) -> k <> "name__label") (make_vm valid_ref valid_uuid) in + Client.create_row 
t "VM" broken_vm valid_ref; + failwith "create_row " + ); + Printf.printf "create_row \n"; + Client.create_row t "VM" (make_vm valid_ref valid_uuid) valid_ref; + if in_process then check_ref_index t "VM" valid_ref; + Printf.printf "is_valid_ref \n"; + if not (Client.is_valid_ref t valid_ref) + then failwith "is_valid_ref = false, after create_row"; + Printf.printf "get_table_from_ref \n"; + begin match Client.get_table_from_ref t valid_ref with + | Some "VM" -> () + | Some t -> failwith "get_table_from_ref : invalid table" + | None -> failwith "get_table_from_ref : None" + end; + Printf.printf "read_refs includes \n"; + if not (List.mem valid_ref (Client.read_refs t "VM")) + then failwith "read_refs did not include "; + + Printf.printf "create_row \n"; + expect_uniqueness_violation "VM" "_ref" valid_ref + (fun () -> + Client.create_row t "VM" (make_vm valid_ref (valid_uuid ^ "unique")) valid_ref; + failwith "create_row " + ); + Printf.printf "create_row \n"; + expect_uniqueness_violation "VM" "uuid" valid_uuid + (fun () -> + Client.create_row t "VM" (make_vm (valid_ref ^ "unique") valid_uuid) (valid_ref ^ "unique"); + failwith "create_row " + ); + Printf.printf "db_get_by_uuid \n"; + let r = Client.db_get_by_uuid t "VM" valid_uuid in + if r <> valid_ref + then failwith (Printf.sprintf "db_get_by_uuid : got %s; expected %s" r valid_ref); + Printf.printf "db_get_by_uuid \n"; + expect_missing_uuid "VM" invalid_uuid + (fun () -> + let (_: string) = Client.db_get_by_uuid t "VM" invalid_uuid in + failwith "db_get_by_uuid " + ); + Printf.printf "get_by_name_label \n"; + if Client.db_get_by_name_label t "VM" invalid_name <> [] + then failwith "db_get_by_name_label "; + + Printf.printf "get_by_name_label \n"; + if Client.db_get_by_name_label t "VM" name <> [ valid_ref ] + then failwith "db_get_by_name_label "; + + Printf.printf "read_field \n"; + if Client.read_field t "VM" "name__label" valid_ref <> name + then failwith "read_field : invalid name"; + + Printf.printf 
"read_field \n"; + if Client.read_field t "VM" "protection_policy" valid_ref <> "OpaqueRef:NULL" + then failwith "read_field : invalid protection_policy"; + + Printf.printf "read_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: string) = Client.read_field t "VM" "name__label" invalid_ref in + failwith "read_field " + ); + Printf.printf "read_field \n"; + expect_missing_field "name_label" + (fun () -> + let (_: string) = Client.read_field t "VM" "name_label" valid_ref in + failwith "read_field " + ); + Printf.printf "read_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: string) = Client.read_field t "VM" "name_label" invalid_ref in + failwith "read_field " + ); + Printf.printf "read_field_where \n"; + let where_name_label = + { Db_cache_types.table = "VM"; return = Escaping.escape_id(["name"; "label"]); where_field="uuid"; where_value = valid_uuid } in + let xs = Client.read_field_where t where_name_label in + if not (List.mem name xs) + then failwith "read_field_where "; + test_invalid_where_record "read_field_where" (Client.read_field_where t); + + let xs = Client.read_set_ref t where_name_label in + if not (List.mem name xs) + then failwith "read_set_ref "; + test_invalid_where_record "read_set_ref" (Client.read_set_ref t); + + Printf.printf "write_field \n"; + expect_missing_tbl "Vm" + (fun () -> + let (_: unit) = Client.write_field t "Vm" "" "" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let (_: unit) = Client.write_field t "VM" invalid_ref "" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + expect_missing_field "wibble" + (fun () -> + let (_: unit) = Client.write_field t "VM" valid_ref "wibble" "" in + failwith "write_field " + ); + Printf.printf "write_field \n"; + let (_: unit) = Client.write_field t "VM" valid_ref (Escaping.escape_id ["name"; "description"]) "description" in + if in_process then 
check_ref_index t "VM" valid_ref; + Printf.printf "write_field - invalidating ref_index\n"; + let (_: unit) = Client.write_field t "VM" valid_ref (Escaping.escape_id ["name"; "label"]) "newlabel" in + if in_process then check_ref_index t "VM" valid_ref; + + Printf.printf "read_record \n"; + expect_missing_tbl "Vm" + (fun () -> + let _ = Client.read_record t "Vm" invalid_ref in + failwith "read_record " + ); + Printf.printf "read_record \n"; + expect_missing_row "VM" invalid_ref + (fun () -> + let _ = Client.read_record t "VM" invalid_ref in + failwith "read_record " + ); + Printf.printf "read_record \n"; + let fv_list, fvs_list = Client.read_record t "VM" valid_ref in + if not(List.mem_assoc (Escaping.escape_id [ "name"; "label" ]) fv_list) + then failwith "read_record 1"; + if List.assoc "VBDs" fvs_list <> [] + then failwith "read_record 2"; + Printf.printf "read_record foreign key\n"; + Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; + let fv_list, fvs_list = Client.read_record t "VM" valid_ref in + if List.assoc "VBDs" fvs_list <> [ vbd_ref ] then begin + Printf.printf "fv_list = [ %s ] fvs_list = [ %s ]\n%!" 
(String.concat "; " (List.map (fun (k, v) -> k ^":" ^ v) fv_list)) (String.concat "; " (List.map (fun (k, v) -> k ^ ":" ^ (String.concat ", " v)) fvs_list)); + failwith "read_record 3" + end; + Printf.printf "read_record deleted foreign key\n"; + Client.delete_row t "VBD" vbd_ref; + let fv_list, fvs_list = Client.read_record t "VM" valid_ref in + if List.assoc "VBDs" fvs_list <> [] + then failwith "read_record 4"; + Printf.printf "read_record overwritten foreign key\n"; + Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref; + let fv_list, fvs_list = Client.read_record t "VM" valid_ref in + if List.assoc "VBDs" fvs_list = [] + then failwith "read_record 5"; + Client.write_field t "VBD" vbd_ref (Escaping.escape_id [ "VM" ]) "overwritten"; + let fv_list, fvs_list = Client.read_record t "VM" valid_ref in + if List.assoc "VBDs" fvs_list <> [] + then failwith "read_record 6"; + + expect_missing_tbl "Vm" + (fun () -> + let _ = Client.read_records_where t "Vm" Db_filter_types.True in + () + ); + let xs = Client.read_records_where t "VM" Db_filter_types.True in + if List.length xs <> 1 + then failwith "read_records_where 2"; + let xs = Client.read_records_where t "VM" Db_filter_types.False in + if xs <> [] + then failwith "read_records_where 3"; + + expect_missing_tbl "Vm" + (fun () -> + let _ = Client.find_refs_with_filter t "Vm" Db_filter_types.True in + failwith "find_refs_with_filter "; + ); + let xs = Client.find_refs_with_filter t "VM" Db_filter_types.True in + if List.length xs <> 1 + then failwith "find_refs_with_filter 1"; + let xs = Client.find_refs_with_filter t "VM" Db_filter_types.False in + if xs <> [] + then failwith "find_refs_with_filter 2"; + + expect_missing_tbl "Vm" + (fun () -> + Client.process_structured_field t ("","") "Vm" "wibble" invalid_ref Db_cache_types.AddSet; + failwith "process_structure_field " + ); + expect_missing_field "wibble" + (fun () -> + Client.process_structured_field t ("","") "VM" "wibble" valid_ref 
Db_cache_types.AddSet; + failwith "process_structure_field " + ); + expect_missing_row "VM" invalid_ref + (fun () -> + Client.process_structured_field t ("","") "VM" (Escaping.escape_id ["name"; "label"]) invalid_ref Db_cache_types.AddSet; + failwith "process_structure_field " + ); + Client.process_structured_field t ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; + if Client.read_field t "VM" "tags" valid_ref <> "('foo')" + then failwith "process_structure_field expected ('foo')"; + Client.process_structured_field t ("foo", "") "VM" "tags" valid_ref Db_cache_types.AddSet; + if Client.read_field t "VM" "tags" valid_ref <> "('foo')" + then failwith "process_structure_field expected ('foo') 2"; + Client.process_structured_field t ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; + + if Client.read_field t "VM" "other_config" valid_ref <> "(('foo' 'bar'))" + then failwith "process_structure_field expected (('foo' 'bar')) 3"; + + begin + try + Client.process_structured_field t ("foo", "bar") "VM" "other_config" valid_ref Db_cache_types.AddMap; + with Db_exn.Duplicate_key("VM", "other_config", r', "foo") when r' = valid_ref -> () + end; + if Client.read_field t "VM" "other_config" valid_ref <> "(('foo' 'bar'))" + then failwith "process_structure_field expected (('foo' 'bar')) 4"; + + (* Check that non-persistent fields are filled with an empty value *) + + (* Event tests *) + + check_events t; + + (* Performance test *) + if in_process then begin + let time n f = + let start = Unix.gettimeofday () in + for i = 0 to n do + f i + done; + let total = Unix.gettimeofday () -. start in + float_of_int n /. 
total in + + let n = 5000 in + + let rpc_time = time n (fun _ -> + let (_: bool) = Client.is_valid_ref t valid_ref in ()) in + + Printf.printf "%.2f primitive RPC calls/sec\n" rpc_time; + + (* Delete stuff left-over from the previous run *) + let delete_time = time n + (fun i -> + let rf = Printf.sprintf "%s:%d" vbd_ref i in + try + Client.delete_row t "VBD" rf + with _ -> () + ) in + Printf.printf "Deleted %d VBD records, %.2f calls/sec\n%!" n delete_time; + + expect_missing_row "VBD" vbd_ref + (fun () -> + Client.delete_row t "VBD" vbd_ref; + ); + + (* Create lots of VBDs referening no VM *) + let create_time = time n + (fun i -> + let rf = Printf.sprintf "%s:%d" vbd_ref i in + let uuid = Printf.sprintf "%s:%d" vbd_uuid i in + Client.create_row t "VBD" (make_vbd invalid_ref rf uuid) rf; + ) in + Printf.printf "Created %d VBD records, %.2f calls/sec\n%!" n create_time; + + let m = 300000 in (* multiple of 3 *) + + (* Time a benign VM create_row, delete_row, read_record sequence *) + let benign_time = time m + (fun i -> + if i < (m / 3 * 2) then begin + if i mod 2 = 0 + then Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref + else Client.delete_row t "VBD" vbd_ref + end else + let _ = Client.read_record t "VM" valid_ref in + () + ) in + Printf.printf "good sequence: %.2f calls/sec\n%!" benign_time; + + let malign_time = time m + (fun i -> + match i mod 3 with + | 0 -> Client.create_row t "VBD" (make_vbd valid_ref vbd_ref vbd_uuid) vbd_ref + | 1 -> Client.delete_row t "VBD" vbd_ref + | 2 -> let _ = Client.read_record t "VM" valid_ref in () + | _ -> () + ) in + Printf.printf "bad sequence: %.2f calls/sec\n%!" 
malign_time; + end end diff --git a/ocaml/database/database_test.mli b/ocaml/database/database_test.mli index d81d546174a..13a8bae5c11 100644 --- a/ocaml/database/database_test.mli +++ b/ocaml/database/database_test.mli @@ -13,5 +13,5 @@ *) module Tests : functor (Client: Db_interface.DB_ACCESS) -> sig - val main: bool -> unit + val main: bool -> unit end diff --git a/ocaml/database/database_test_main.ml b/ocaml/database/database_test_main.ml index 13fe16920a0..2da22b70f5d 100644 --- a/ocaml/database/database_test_main.ml +++ b/ocaml/database/database_test_main.ml @@ -16,41 +16,41 @@ open Database_test let path = ref "./database" -let rpc_common url content_type request = - let version = "1.1" in - let content_length = String.length request in - let request = Http.Request.make ~version ~content_type:"text/json" - ~user_agent:"database_test" - ~length:(Int64.of_int content_length) Http.Post url in - let open Xmlrpc_client in - with_transport (Unix !path) - (with_http request - (fun (response, fd) -> - match response.Http.Response.content_length with - | None -> failwith "Need a content-length" - | Some l -> Db_interface.String - (Unixext.really_read_string fd (Int64.to_int l)) - ) - ) +let rpc_common url content_type request = + let version = "1.1" in + let content_length = String.length request in + let request = Http.Request.make ~version ~content_type:"text/json" + ~user_agent:"database_test" + ~length:(Int64.of_int content_length) Http.Post url in + let open Xmlrpc_client in + with_transport (Unix !path) + (with_http request + (fun (response, fd) -> + match response.Http.Response.content_length with + | None -> failwith "Need a content-length" + | Some l -> Db_interface.String + (Unixext.really_read_string fd (Int64.to_int l)) + ) + ) module Client_v1 = Db_rpc_client_v1.Make(struct - let initialise () = () - let rpc request = rpc_common "/post_remote_db_access" "text/xml" request -end) + let initialise () = () + let rpc request = rpc_common 
"/post_remote_db_access" "text/xml" request + end) module Client_v2 = Db_rpc_client_v2.Make(struct - let initialise () = () - let rpc request = rpc_common "/post_remote_db_access_v2" "text/json" request -end) + let initialise () = () + let rpc request = rpc_common "/post_remote_db_access_v2" "text/json" request + end) module T = Tests(Client_v2) -let _ = - Printexc.record_backtrace true; - Arg.parse [ - "--connect-to", Arg.Set_string path, Printf.sprintf "connect to server on path (default %s)" !path; - ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" x) - "query a database server"; - ignore(T.main true) +let _ = + Printexc.record_backtrace true; + Arg.parse [ + "--connect-to", Arg.Set_string path, Printf.sprintf "connect to server on path (default %s)" !path; + ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n%!" x) + "query a database server"; + ignore(T.main true) diff --git a/ocaml/database/db_action_helper.ml b/ocaml/database/db_action_helper.ml index 96d911b3eb2..4d4673dca39 100644 --- a/ocaml/database/db_action_helper.ml +++ b/ocaml/database/db_action_helper.ml @@ -17,14 +17,14 @@ let __callback : ((?snapshot: Rpc.t -> string -> string -> string -> unit) option ref) = ref None let events_register f = __callback := Some f let events_unregister () = __callback := None - + let events_notify ?(snapshot) ty op ref = match !__callback with - | None -> () - | Some f -> f ?snapshot ty op ref - (* + | None -> () + | Some f -> f ?snapshot ty op ref + (* exception Db_set_or_map_parse_fail of string - + let parse_sexpr s : SExpr.t list = match SExpr_TS.of_string s with | SExpr.Node xs -> xs diff --git a/ocaml/database/db_backend.ml b/ocaml/database/db_backend.ml index f65c8378249..b9faa5990bd 100644 --- a/ocaml/database/db_backend.ml +++ b/ocaml/database/db_backend.ml @@ -38,57 +38,57 @@ let make () = Db_ref.in_memory (ref master_database) (* non-persistent fields will have been flushed to disk anyway [since non-persistent just 
means dont trigger a flush if I change]. Hence we blank non-persistent fields with a suitable empty value, depending on their type *) let blow_away_non_persistent_fields (schema: Schema.t) db = - let g = Manifest.generation (Database.manifest db) in - (* Generate a new row given a table schema *) - let row schema row : Row.t * int64 = - Row.fold - (fun name { Stat.created; modified } v (acc,max_upd) -> - try - let col = Schema.Table.find name schema in - let empty = col.Schema.Column.empty in - let v',modified' = if col.Schema.Column.persistent then v,modified else empty,g in - (Row.update modified' name empty (fun _ -> v') (Row.add created name v' acc),max max_upd modified') - with Not_found -> - Printf.printf "Skipping unknown column: %s\n%!" name; - (acc,max max_upd modified)) row (Row.empty,0L) in - (* Generate a new table *) - let table tblname tbl : Table.t = - let schema = Schema.Database.find tblname schema.Schema.database in - Table.fold - (fun objref { Stat.created; modified } r acc -> - let (r,updated) = row schema r in - Table.update modified objref Row.empty (fun _ -> r) (Table.add created objref r acc)) tbl Table.empty in - Database.update - (fun ts -> - TableSet.fold - (fun tblname { Stat.created; modified } tbl acc -> - let tbl' = table tblname tbl in - TableSet.add modified tblname tbl' acc) ts TableSet.empty) - db + let g = Manifest.generation (Database.manifest db) in + (* Generate a new row given a table schema *) + let row schema row : Row.t * int64 = + Row.fold + (fun name { Stat.created; modified } v (acc,max_upd) -> + try + let col = Schema.Table.find name schema in + let empty = col.Schema.Column.empty in + let v',modified' = if col.Schema.Column.persistent then v,modified else empty,g in + (Row.update modified' name empty (fun _ -> v') (Row.add created name v' acc),max max_upd modified') + with Not_found -> + Printf.printf "Skipping unknown column: %s\n%!" 
name; + (acc,max max_upd modified)) row (Row.empty,0L) in + (* Generate a new table *) + let table tblname tbl : Table.t = + let schema = Schema.Database.find tblname schema.Schema.database in + Table.fold + (fun objref { Stat.created; modified } r acc -> + let (r,updated) = row schema r in + Table.update modified objref Row.empty (fun _ -> r) (Table.add created objref r acc)) tbl Table.empty in + Database.update + (fun ts -> + TableSet.fold + (fun tblname { Stat.created; modified } tbl acc -> + let tbl' = table tblname tbl in + TableSet.add modified tblname tbl' acc) ts TableSet.empty) + db let db_registration_mutex = Mutex.create () let foreign_databases: ((API.ref_session, Db_ref.t) Hashtbl.t) = Hashtbl.create 5 open Stdext.Threadext let create_registered_session create_session db_ref = - Mutex.execute db_registration_mutex - (fun () -> - let session = create_session () in - Hashtbl.replace foreign_databases session db_ref; - session) + Mutex.execute db_registration_mutex + (fun () -> + let session = create_session () in + Hashtbl.replace foreign_databases session db_ref; + session) let unregister_session session = - Mutex.execute db_registration_mutex - (fun () -> Hashtbl.remove foreign_databases session) + Mutex.execute db_registration_mutex + (fun () -> Hashtbl.remove foreign_databases session) let is_session_registered session = - Mutex.execute db_registration_mutex - (fun () -> Hashtbl.mem foreign_databases session) + Mutex.execute db_registration_mutex + (fun () -> Hashtbl.mem foreign_databases session) let get_registered_database session = - Mutex.execute db_registration_mutex - (fun () -> - if Hashtbl.mem foreign_databases session then - Some (Hashtbl.find foreign_databases session) - else - None) + Mutex.execute db_registration_mutex + (fun () -> + if Hashtbl.mem foreign_databases session then + Some (Hashtbl.find foreign_databases session) + else + None) diff --git a/ocaml/database/db_backend.mli b/ocaml/database/db_backend.mli index 
3045c55f71d..06654428dea 100644 --- a/ocaml/database/db_backend.mli +++ b/ocaml/database/db_backend.mli @@ -20,10 +20,10 @@ val __test_set_master_database : Db_cache_types.Database.t -> unit val make : unit -> Db_ref.t val blow_away_non_persistent_fields : - Schema.t -> Db_cache_types.Database.t -> Db_cache_types.Database.t + Schema.t -> Db_cache_types.Database.t -> Db_cache_types.Database.t val create_registered_session : - (unit -> API.ref_session) -> Db_ref.t -> API.ref_session + (unit -> API.ref_session) -> Db_ref.t -> API.ref_session val unregister_session : API.ref_session -> unit diff --git a/ocaml/database/db_cache.ml b/ocaml/database/db_cache.ml index f7fc9851dab..1ebaf4af4ff 100644 --- a/ocaml/database/db_cache.ml +++ b/ocaml/database/db_cache.ml @@ -24,25 +24,25 @@ module Local_db : DB_ACCESS = Db_cache_impl (** Slaves will use this to call the master by XMLRPC *) module Remote_db : DB_ACCESS = Db_rpc_client_v1.Make(struct - let initialise () = - ignore (Master_connection.start_master_connection_watchdog()); - ignore (Master_connection.open_secure_connection()) - let rpc request = Master_connection.execute_remote_fn request Constants.remote_db_access_uri -end) + let initialise () = + ignore (Master_connection.start_master_connection_watchdog()); + ignore (Master_connection.open_secure_connection()) + let rpc request = Master_connection.execute_remote_fn request Constants.remote_db_access_uri + end) let get = function - | Db_ref.In_memory _ -> (module Local_db : DB_ACCESS) - | Db_ref.Remote -> (module Remote_db : DB_ACCESS) + | Db_ref.In_memory _ -> (module Local_db : DB_ACCESS) + | Db_ref.Remote -> (module Remote_db : DB_ACCESS) let apply_delta_to_cache entry db_ref = - let module DB = (Local_db : DB_ACCESS) in - match entry with - | Redo_log.CreateRow(tblname, objref, kvs) -> - debug "Redoing create_row %s (%s)" tblname objref; - DB.create_row db_ref tblname kvs objref - | Redo_log.DeleteRow(tblname, objref) -> - debug "Redoing delete_row %s (%s)" 
tblname objref; - DB.delete_row db_ref tblname objref - | Redo_log.WriteField(tblname, objref, fldname, newval) -> - debug "Redoing write_field %s (%s) [%s -> %s]" tblname objref fldname newval; - DB.write_field db_ref tblname objref fldname newval + let module DB = (Local_db : DB_ACCESS) in + match entry with + | Redo_log.CreateRow(tblname, objref, kvs) -> + debug "Redoing create_row %s (%s)" tblname objref; + DB.create_row db_ref tblname kvs objref + | Redo_log.DeleteRow(tblname, objref) -> + debug "Redoing delete_row %s (%s)" tblname objref; + DB.delete_row db_ref tblname objref + | Redo_log.WriteField(tblname, objref, fldname, newval) -> + debug "Redoing write_field %s (%s) [%s -> %s]" tblname objref fldname newval; + DB.write_field db_ref tblname objref fldname newval diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index 4ae5edf4796..446629899c5 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -26,7 +26,7 @@ open Db_lock module D = Debug.Make(struct let name = "sql" end) open D module W = Debug.Make(struct let name = "db_write" end) - + open Db_cache_types open Db_ref @@ -37,88 +37,88 @@ let initialise () = () (* This fn is part of external interface, so need to take lock *) let get_table_from_ref t objref = - try - Some (Database.table_of_ref objref (get_database t)) - with Not_found -> - None - + try + Some (Database.table_of_ref objref (get_database t)) + with Not_found -> + None + let is_valid_ref t objref = - match (get_table_from_ref t objref) with - | Some _ -> true - | None -> false - -let read_field_internal t tblname fldname objref db = - try - Row.find fldname (Table.find objref (TableSet.find tblname (Database.tableset db))) - with Not_found -> - raise (DBCache_NotFound ("missing row", tblname, objref)) + match (get_table_from_ref t objref) with + | Some _ -> true + | None -> false + +let read_field_internal t tblname fldname objref db = + try + Row.find fldname (Table.find objref 
(TableSet.find tblname (Database.tableset db))) + with Not_found -> + raise (DBCache_NotFound ("missing row", tblname, objref)) (* Read field from cache *) let read_field t tblname fldname objref = - Schema.Value.marshal (read_field_internal t tblname fldname objref (get_database t)) + Schema.Value.marshal (read_field_internal t tblname fldname objref (get_database t)) (** Finds the longest XML-compatible UTF-8 prefix of the given *) (** string, by truncating the string at the first incompatible *) (** character. Writes a warning to the debug log if truncation *) (** occurs. *) let ensure_utf8_xml string = - let length = String.length string in - let prefix = Stdext.Encodings.UTF8_XML.longest_valid_prefix string in - if length > String.length prefix then - warn "string truncated to: '%s'." prefix; - prefix + let length = String.length string in + let prefix = Stdext.Encodings.UTF8_XML.longest_valid_prefix string in + if length > String.length prefix then + warn "string truncated to: '%s'." 
prefix; + prefix + - (* Write field in cache *) let write_field_locked t tblname objref fldname newval = - let current_val = get_field tblname objref fldname (get_database t) in - update_database t (set_field tblname objref fldname newval); - Database.notify (WriteField(tblname, objref, fldname, current_val, newval)) (get_database t) - + let current_val = get_field tblname objref fldname (get_database t) in + update_database t (set_field tblname objref fldname newval); + Database.notify (WriteField(tblname, objref, fldname, current_val, newval)) (get_database t) + let write_field t tblname objref fldname newval = - let db = get_database t in - let schema = Schema.table tblname (Database.schema db) in - let column = Schema.Table.find fldname schema in - let newval = Schema.Value.unmarshal column.Schema.Column.ty newval in - with_lock (fun () -> - write_field_locked t tblname objref fldname newval) + let db = get_database t in + let schema = Schema.table tblname (Database.schema db) in + let column = Schema.Table.find fldname schema in + let newval = Schema.Value.unmarshal column.Schema.Column.ty newval in + with_lock (fun () -> + write_field_locked t tblname objref fldname newval) let touch_row t tblname objref = - update_database t (touch tblname objref); - Database.notify (RefreshRow(tblname, objref)) (get_database t) + update_database t (touch tblname objref); + Database.notify (RefreshRow(tblname, objref)) (get_database t) (* This function *should* only be used by db_actions code looking up Set(Ref _) fields: if we detect another (illegal) use we log the problem and fall back to a slow scan *) let read_set_ref t rcd = - let db = get_database t in - (* The where_record should correspond to the 'one' end of a 'one to many' *) - let one_tbl = rcd.table in - let one_fld = rcd.where_field in - let rels = - try - Schema.one_to_many one_tbl (Database.schema db) - with Not_found -> - raise (Db_exn.DBCache_NotFound("missing table", one_tbl, "")) - in - (* This is an 
'illegal' use if: *) - let illegal = rcd.return <> Db_names.ref || (List.filter (fun (a, _, _) -> a = one_fld) rels = []) in - if not illegal then begin - let _, many_tbl, many_fld = List.find (fun (a, _, _) -> a = one_fld) rels in - let objref = rcd.where_value in - - Schema.Value.Unsafe_cast.set (read_field_internal t many_tbl many_fld objref db) - end else begin - error "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; falling back to linear scan" rcd.table rcd.where_field rcd.where_value rcd.return; - Printf.printf "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; falling back to linear scan\n%!" rcd.table rcd.where_field rcd.where_value rcd.return; - let tbl = TableSet.find rcd.table (Database.tableset db) in - Table.fold - (fun rf _ row acc -> - let v = Schema.Value.Unsafe_cast.string (Row.find rcd.where_field row) in - if v = rcd.where_value - then v :: acc else acc) - tbl [] - end - + let db = get_database t in + (* The where_record should correspond to the 'one' end of a 'one to many' *) + let one_tbl = rcd.table in + let one_fld = rcd.where_field in + let rels = + try + Schema.one_to_many one_tbl (Database.schema db) + with Not_found -> + raise (Db_exn.DBCache_NotFound("missing table", one_tbl, "")) + in + (* This is an 'illegal' use if: *) + let illegal = rcd.return <> Db_names.ref || (List.filter (fun (a, _, _) -> a = one_fld) rels = []) in + if not illegal then begin + let _, many_tbl, many_fld = List.find (fun (a, _, _) -> a = one_fld) rels in + let objref = rcd.where_value in + + Schema.Value.Unsafe_cast.set (read_field_internal t many_tbl many_fld objref db) + end else begin + error "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; falling back to linear scan" rcd.table rcd.where_field rcd.where_value rcd.return; + Printf.printf "Illegal read_set_ref query { table = %s; where_field = %s; where_value = %s; return = %s }; 
falling back to linear scan\n%!" rcd.table rcd.where_field rcd.where_value rcd.return; + let tbl = TableSet.find rcd.table (Database.tableset db) in + Table.fold + (fun rf _ row acc -> + let v = Schema.Value.Unsafe_cast.string (Row.find rcd.where_field row) in + if v = rcd.where_value + then v :: acc else acc) + tbl [] + end + (* setrefs contain the relationships from tbl to other tables in the form: local-classname, local-fieldname, remote-classname, remote-fieldname. @@ -127,275 +127,275 @@ let read_set_ref t rcd = name of the Set Ref field in tbl; and ref list is the list of foreign keys from related table with remote-fieldname=objref] *) let read_record_internal db tblname objref = - try - let tbl = TableSet.find tblname (Database.tableset db) in - let row = Table.find objref tbl in - let fvlist = Row.fold (fun k _ d env -> (k,d)::env) row [] in - (* Unfortunately the interface distinguishes between Set(Ref _) types and - ordinary fields *) - let schema = Schema.table tblname (Database.schema db) in - let set_ref = List.filter (fun (k, _) -> - let column = Schema.Table.find k schema in - column.Schema.Column.issetref - ) fvlist in - let fvlist = List.map (fun (k, v) -> - k, Schema.Value.marshal v - ) fvlist in - (* the set_ref fields must be converted back into lists *) - let set_ref = List.map (fun (k, v) -> - k, Schema.Value.Unsafe_cast.set v) set_ref in - (fvlist, set_ref) - with Not_found -> - raise (DBCache_NotFound ("missing row", tblname, objref)) + try + let tbl = TableSet.find tblname (Database.tableset db) in + let row = Table.find objref tbl in + let fvlist = Row.fold (fun k _ d env -> (k,d)::env) row [] in + (* Unfortunately the interface distinguishes between Set(Ref _) types and + ordinary fields *) + let schema = Schema.table tblname (Database.schema db) in + let set_ref = List.filter (fun (k, _) -> + let column = Schema.Table.find k schema in + column.Schema.Column.issetref + ) fvlist in + let fvlist = List.map (fun (k, v) -> + k, 
Schema.Value.marshal v + ) fvlist in + (* the set_ref fields must be converted back into lists *) + let set_ref = List.map (fun (k, v) -> + k, Schema.Value.Unsafe_cast.set v) set_ref in + (fvlist, set_ref) + with Not_found -> + raise (DBCache_NotFound ("missing row", tblname, objref)) let read_record t = read_record_internal (get_database t) (* Delete row from tbl *) let delete_row_locked t tblname objref = - try - W.debug "delete_row %s (%s)" tblname objref; - - let tbl = TableSet.find tblname (Database.tableset (get_database t)) in - let row = Table.find objref tbl in - - let db = get_database t in - Database.notify (PreDelete(tblname, objref)) db; - update_database t (remove_row tblname objref); - Database.notify (Delete(tblname, objref, Row.fold (fun k _ v acc -> (k, v) :: acc) row [])) (get_database t) - with Not_found -> - raise (DBCache_NotFound ("missing row", tblname, objref)) - -let delete_row t tblname objref = - with_lock (fun () -> delete_row_locked t tblname objref) + try + W.debug "delete_row %s (%s)" tblname objref; + + let tbl = TableSet.find tblname (Database.tableset (get_database t)) in + let row = Table.find objref tbl in + + let db = get_database t in + Database.notify (PreDelete(tblname, objref)) db; + update_database t (remove_row tblname objref); + Database.notify (Delete(tblname, objref, Row.fold (fun k _ v acc -> (k, v) :: acc) row [])) (get_database t) + with Not_found -> + raise (DBCache_NotFound ("missing row", tblname, objref)) + +let delete_row t tblname objref = + with_lock (fun () -> delete_row_locked t tblname objref) (* Create new row in tbl containing specified k-v pairs *) let create_row_locked t tblname kvs' new_objref = - let db = get_database t in - let schema = Schema.table tblname (Database.schema db) in - - let kvs' = List.map (fun (key, value) -> - let value = ensure_utf8_xml value in - let column = Schema.Table.find key schema in - key, Schema.Value.unmarshal column.Schema.Column.ty value - ) kvs' in - - (* we add the 
reference to the row itself so callers can use read_field_where to - return the reference: awkward if it is just the key *) - let kvs' = (Db_names.ref, Schema.Value.String new_objref) :: kvs' in - let g = Manifest.generation (Database.manifest (get_database t)) in - let row = List.fold_left (fun row (k, v) -> Row.add g k v row) Row.empty kvs' in - let schema = Schema.table tblname (Database.schema (get_database t)) in - (* fill in default values if kv pairs for these are not supplied already *) - let row = Row.add_defaults g schema row in - W.debug "create_row %s (%s) [%s]" tblname new_objref (String.concat "," (List.map (fun (k,v)->"("^k^","^"v"^")") kvs')); - update_database t (add_row tblname new_objref row); - Database.notify (Create(tblname, new_objref, Row.fold (fun k _ v acc -> (k, v) :: acc) row [])) (get_database t) - + let db = get_database t in + let schema = Schema.table tblname (Database.schema db) in + + let kvs' = List.map (fun (key, value) -> + let value = ensure_utf8_xml value in + let column = Schema.Table.find key schema in + key, Schema.Value.unmarshal column.Schema.Column.ty value + ) kvs' in + + (* we add the reference to the row itself so callers can use read_field_where to + return the reference: awkward if it is just the key *) + let kvs' = (Db_names.ref, Schema.Value.String new_objref) :: kvs' in + let g = Manifest.generation (Database.manifest (get_database t)) in + let row = List.fold_left (fun row (k, v) -> Row.add g k v row) Row.empty kvs' in + let schema = Schema.table tblname (Database.schema (get_database t)) in + (* fill in default values if kv pairs for these are not supplied already *) + let row = Row.add_defaults g schema row in + W.debug "create_row %s (%s) [%s]" tblname new_objref (String.concat "," (List.map (fun (k,v)->"("^k^","^"v"^")") kvs')); + update_database t (add_row tblname new_objref row); + Database.notify (Create(tblname, new_objref, Row.fold (fun k _ v acc -> (k, v) :: acc) row [])) (get_database t) + let 
create_row t tblname kvs' new_objref = - with_lock (fun () -> create_row_locked t tblname kvs' new_objref) + with_lock (fun () -> create_row_locked t tblname kvs' new_objref) (* Do linear scan to find field values which match where clause *) let read_field_where t rcd = - let db = get_database t in - let tbl = TableSet.find rcd.table (Database.tableset db) in - Table.fold - (fun r _ row acc -> - let field = Schema.Value.marshal (Row.find rcd.where_field row) in - if field = rcd.where_value then Schema.Value.marshal (Row.find rcd.return row) :: acc else acc - ) tbl [] - + let db = get_database t in + let tbl = TableSet.find rcd.table (Database.tableset db) in + Table.fold + (fun r _ row acc -> + let field = Schema.Value.marshal (Row.find rcd.where_field row) in + if field = rcd.where_value then Schema.Value.marshal (Row.find rcd.return row) :: acc else acc + ) tbl [] + let db_get_by_uuid t tbl uuid_val = - match (read_field_where t - {table=tbl; return=Db_names.ref; - where_field=Db_names.uuid; where_value=uuid_val}) with - | [] -> raise (Read_missing_uuid (tbl, "", uuid_val)) - | [r] -> r - | _ -> raise (Too_many_values (tbl, "", uuid_val)) - + match (read_field_where t + {table=tbl; return=Db_names.ref; + where_field=Db_names.uuid; where_value=uuid_val}) with + | [] -> raise (Read_missing_uuid (tbl, "", uuid_val)) + | [r] -> r + | _ -> raise (Too_many_values (tbl, "", uuid_val)) + (** Return reference fields from tbl that matches specified name_label field *) let db_get_by_name_label t tbl label = - read_field_where t - {table=tbl; return=Db_names.ref; - where_field=(Escaping.escape_id ["name"; "label"]); - where_value=label} - + read_field_where t + {table=tbl; return=Db_names.ref; + where_field=(Escaping.escape_id ["name"; "label"]); + where_value=label} + (* Read references from tbl *) let read_refs t tblname = - let tbl = TableSet.find tblname (Database.tableset (get_database t)) in - Table.fold (fun r _ _ acc -> r :: acc) tbl [] - + let tbl = TableSet.find 
tblname (Database.tableset (get_database t)) in + Table.fold (fun r _ _ acc -> r :: acc) tbl [] + (* Return a list of all the refs for which the expression returns true. *) let find_refs_with_filter_internal db (tblname: string) (expr: Db_filter_types.expr) = - let tbl = TableSet.find tblname (Database.tableset db) in - let eval_val row = function - | Db_filter_types.Literal x -> x - | Db_filter_types.Field x -> Schema.Value.marshal (Row.find x row) in - Table.fold - (fun r _ row acc -> - if Db_filter.eval_expr (eval_val row) expr - then Schema.Value.Unsafe_cast.string (Row.find Db_names.ref row) :: acc else acc - ) tbl [] + let tbl = TableSet.find tblname (Database.tableset db) in + let eval_val row = function + | Db_filter_types.Literal x -> x + | Db_filter_types.Field x -> Schema.Value.marshal (Row.find x row) in + Table.fold + (fun r _ row acc -> + if Db_filter.eval_expr (eval_val row) expr + then Schema.Value.Unsafe_cast.string (Row.find Db_names.ref row) :: acc else acc + ) tbl [] let find_refs_with_filter t = find_refs_with_filter_internal (get_database t) - + let read_records_where t tbl expr = - let db = get_database t in - let reqd_refs = find_refs_with_filter_internal db tbl expr in - if !fist_delay_read_records_where then Thread.delay 0.5; - List.map (fun ref->ref, read_record_internal db tbl ref) reqd_refs - + let db = get_database t in + let reqd_refs = find_refs_with_filter_internal db tbl expr in + if !fist_delay_read_records_where then Thread.delay 0.5; + List.map (fun ref->ref, read_record_internal db tbl ref) reqd_refs + let process_structured_field_locked t (key,value) tblname fld objref proc_fn_selector = - (* Ensure that both keys and values are valid for UTF-8-encoded XML. 
*) - let key = ensure_utf8_xml key in - let value = ensure_utf8_xml value in - try - let tbl = TableSet.find tblname (Database.tableset (get_database t)) in - let row = Table.find objref tbl in - let existing_str = Row.find fld row in - let newval = match proc_fn_selector with - | AddSet -> add_to_set key existing_str - | RemoveSet -> remove_from_set key existing_str - | AddMap -> - begin - try - add_to_map key value existing_str - with Duplicate -> - error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; - raise (Duplicate_key (tblname,fld,objref,key)); - end - | RemoveMap -> remove_from_map key existing_str in - write_field_locked t tblname objref fld newval - with Not_found -> - raise (DBCache_NotFound ("missing row", tblname, objref)) - + (* Ensure that both keys and values are valid for UTF-8-encoded XML. *) + let key = ensure_utf8_xml key in + let value = ensure_utf8_xml value in + try + let tbl = TableSet.find tblname (Database.tableset (get_database t)) in + let row = Table.find objref tbl in + let existing_str = Row.find fld row in + let newval = match proc_fn_selector with + | AddSet -> add_to_set key existing_str + | RemoveSet -> remove_from_set key existing_str + | AddMap -> + begin + try + add_to_map key value existing_str + with Duplicate -> + error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; + raise (Duplicate_key (tblname,fld,objref,key)); + end + | RemoveMap -> remove_from_map key existing_str in + write_field_locked t tblname objref fld newval + with Not_found -> + raise (DBCache_NotFound ("missing row", tblname, objref)) + let process_structured_field t (key,value) tblname fld objref proc_fn_selector = - with_lock (fun () -> - process_structured_field_locked t (key,value) tblname fld objref proc_fn_selector) - + with_lock (fun () -> + process_structured_field_locked t (key,value) tblname fld objref proc_fn_selector) + (* 
-------------------------------------------------------------------- *) - + let load connections default_schema = - - (* We also consider populating from the HA metadata LUN and the general metadata LUN *) - let connections = - Parse_db_conf.make Xapi_globs.ha_metadata_db :: - (Parse_db_conf.make Xapi_globs.gen_metadata_db) :: connections in - - (* If we have a temporary_restore_path (backup uploaded in previous run of xapi process) then restore from that *) - let populate db = - match Db_connections.choose connections with - | Some c -> Backend_xml.populate default_schema c - | None -> db in (* empty *) - - let empty = Database.update_manifest (Manifest.update_schema (fun _ -> Some (default_schema.Schema.major_vsn, default_schema.Schema.minor_vsn))) (Database.make default_schema) in - let open Stdext.Fun in - let db = - ((Db_backend.blow_away_non_persistent_fields default_schema) - ++ Db_upgrade.generic_database_upgrade - ++ populate) empty in - - db - - + + (* We also consider populating from the HA metadata LUN and the general metadata LUN *) + let connections = + Parse_db_conf.make Xapi_globs.ha_metadata_db :: + (Parse_db_conf.make Xapi_globs.gen_metadata_db) :: connections in + + (* If we have a temporary_restore_path (backup uploaded in previous run of xapi process) then restore from that *) + let populate db = + match Db_connections.choose connections with + | Some c -> Backend_xml.populate default_schema c + | None -> db in (* empty *) + + let empty = Database.update_manifest (Manifest.update_schema (fun _ -> Some (default_schema.Schema.major_vsn, default_schema.Schema.minor_vsn))) (Database.make default_schema) in + let open Stdext.Fun in + let db = + ((Db_backend.blow_away_non_persistent_fields default_schema) + ++ Db_upgrade.generic_database_upgrade + ++ populate) empty in + + db + + let sync conns db = - (* Flush the in-memory cache to the redo-log *) - Redo_log.flush_db_to_all_active_redo_logs db; - (* and then to the filesystem *) - List.iter (fun c 
-> Db_connections.flush c db) conns - + (* Flush the in-memory cache to the redo-log *) + Redo_log.flush_db_to_all_active_redo_logs db; + (* and then to the filesystem *) + List.iter (fun c -> Db_connections.flush c db) conns + let flush_dirty dbconn = Db_connections.flush_dirty_and_maybe_exit dbconn None let flush_and_exit dbconn ret_code = ignore (Db_connections.flush_dirty_and_maybe_exit dbconn (Some ret_code)) - - + + let spawn_db_flush_threads() = - (* Spawn threads that flush cache to db connections at regular intervals *) - List.iter - (fun dbconn -> - let db_path = dbconn.Parse_db_conf.path in - ignore (Thread.create - (fun ()-> - Debug.with_thread_named ("dbflush [" ^ db_path ^ "]") - (fun () -> - Db_connections.inc_db_flush_thread_refcount(); - let my_writes_this_period = ref 0 in - - (* the collesce_period_start records the time of the last write *) - let coallesce_period_start = ref (Unix.gettimeofday()) in - let period_start = ref (Unix.gettimeofday()) in - - (* we set a coallesce period of min(5 mins, write_limit_period / write_limit_write_cycles) *) - (* if we're not write limiting then set the coallesce period to 5 minutes; otherwise set coallesce period to divide the - number of write cycles across the ... - *) - let coallesce_time = float_of_int (5*60) (* coallesce writes for 5 minutes to avoid serializing db to disk all the time. *) in - debug "In memory DB flushing thread created [%s]. %s" db_path - (if dbconn.Parse_db_conf.mode <> Parse_db_conf.No_limit then - "Write limited with coallesce_time="^(string_of_float coallesce_time) - else ""); - (* check if we are currently in a coallescing_period *) - let in_coallescing_period() = - (Unix.gettimeofday() -. !coallesce_period_start < coallesce_time) in - - while (true) do - try - begin - Thread.delay Db_backend.db_FLUSH_TIMER; - (* If I have some writing capacity left in this write period then consider doing a write; or - if the connection is not write-limited then consider doing a write too. 
- We also have to consider doing a write if exit_on_next_flush is set: because when this is - set (by a signal handler) we want to do a flush whether or not our write limit has been - exceeded. - *) - if !Db_connections.exit_on_next_flush (* always flush straight away; this request is urgent *) || - (* otherwise, we only write if (i) "coalesscing period has come to an end"; and (ii) "write limiting requirements are met": *) - ((not (in_coallescing_period())) (* see (i) above *) && - ((!my_writes_this_period < dbconn.Parse_db_conf.write_limit_write_cycles) || dbconn.Parse_db_conf.mode = Parse_db_conf.No_limit (* (ii) above *) - ) - ) - then - begin - (* debug "[%s] considering flush" db_path; *) - let was_anything_flushed = Stdext.Threadext.Mutex.execute Db_lock.global_flush_mutex (fun ()->flush_dirty dbconn) in - if was_anything_flushed then - begin - my_writes_this_period := !my_writes_this_period + 1; - (* when we do a write, reset the coallesce_period_start to now -- recall that this - variable tracks the time since last write *) - coallesce_period_start := Unix.gettimeofday() - end - end; - (* else debug "[%s] not flushing because write-limit exceeded" db_path; *) - (* Check to see if the current write period has finished yet.. *) - if (Unix.gettimeofday() -. !period_start > (float_of_int dbconn.Parse_db_conf.write_limit_period)) then - begin - (* debug "[%s] resetting write-limit counters: start of new period" db_path; *) - (* We're at the start of a new writing period! 
*) - period_start := Unix.gettimeofday(); - my_writes_this_period := 0; - end - (* else debug "[%s] not resetting write-limit counters: not in new period yet" db_path *) - end - with - e -> debug "Exception in DB flushing thread: %s" (Printexc.to_string e) - done) ()) ()) - ) (Db_conn_store.read_db_connections()) - - + (* Spawn threads that flush cache to db connections at regular intervals *) + List.iter + (fun dbconn -> + let db_path = dbconn.Parse_db_conf.path in + ignore (Thread.create + (fun ()-> + Debug.with_thread_named ("dbflush [" ^ db_path ^ "]") + (fun () -> + Db_connections.inc_db_flush_thread_refcount(); + let my_writes_this_period = ref 0 in + + (* the collesce_period_start records the time of the last write *) + let coallesce_period_start = ref (Unix.gettimeofday()) in + let period_start = ref (Unix.gettimeofday()) in + + (* we set a coallesce period of min(5 mins, write_limit_period / write_limit_write_cycles) *) + (* if we're not write limiting then set the coallesce period to 5 minutes; otherwise set coallesce period to divide the + number of write cycles across the ... + *) + let coallesce_time = float_of_int (5*60) (* coallesce writes for 5 minutes to avoid serializing db to disk all the time. *) in + debug "In memory DB flushing thread created [%s]. %s" db_path + (if dbconn.Parse_db_conf.mode <> Parse_db_conf.No_limit then + "Write limited with coallesce_time="^(string_of_float coallesce_time) + else ""); + (* check if we are currently in a coallescing_period *) + let in_coallescing_period() = + (Unix.gettimeofday() -. !coallesce_period_start < coallesce_time) in + + while (true) do + try + begin + Thread.delay Db_backend.db_FLUSH_TIMER; + (* If I have some writing capacity left in this write period then consider doing a write; or + if the connection is not write-limited then consider doing a write too. 
+ We also have to consider doing a write if exit_on_next_flush is set: because when this is + set (by a signal handler) we want to do a flush whether or not our write limit has been + exceeded. + *) + if !Db_connections.exit_on_next_flush (* always flush straight away; this request is urgent *) || + (* otherwise, we only write if (i) "coalesscing period has come to an end"; and (ii) "write limiting requirements are met": *) + ((not (in_coallescing_period())) (* see (i) above *) && + ((!my_writes_this_period < dbconn.Parse_db_conf.write_limit_write_cycles) || dbconn.Parse_db_conf.mode = Parse_db_conf.No_limit (* (ii) above *) + ) + ) + then + begin + (* debug "[%s] considering flush" db_path; *) + let was_anything_flushed = Stdext.Threadext.Mutex.execute Db_lock.global_flush_mutex (fun ()->flush_dirty dbconn) in + if was_anything_flushed then + begin + my_writes_this_period := !my_writes_this_period + 1; + (* when we do a write, reset the coallesce_period_start to now -- recall that this + variable tracks the time since last write *) + coallesce_period_start := Unix.gettimeofday() + end + end; + (* else debug "[%s] not flushing because write-limit exceeded" db_path; *) + (* Check to see if the current write period has finished yet.. *) + if (Unix.gettimeofday() -. !period_start > (float_of_int dbconn.Parse_db_conf.write_limit_period)) then + begin + (* debug "[%s] resetting write-limit counters: start of new period" db_path; *) + (* We're at the start of a new writing period! *) + period_start := Unix.gettimeofday(); + my_writes_this_period := 0; + end + (* else debug "[%s] not resetting write-limit counters: not in new period yet" db_path *) + end + with + e -> debug "Exception in DB flushing thread: %s" (Printexc.to_string e) + done) ()) ()) + ) (Db_conn_store.read_db_connections()) + + (* Called by server at start-of-day to initialiase cache. 
Populates cache and starts flushing threads *) let make t connections default_schema = - let db = load connections default_schema in - let db = Database.reindex db in - update_database t (fun _ -> db); + let db = load connections default_schema in + let db = Database.reindex db in + update_database t (fun _ -> db); + + spawn_db_flush_threads() - spawn_db_flush_threads() - (** Return an association list of table name * record count *) -let stats t = - TableSet.fold (fun name _ tbl acc -> - let size = Table.fold (fun _ _ _ acc -> acc + 1) tbl 0 in - (name, size) :: acc) - (Database.tableset (get_database t)) - [] +let stats t = + TableSet.fold (fun name _ tbl acc -> + let size = Table.fold (fun _ _ _ acc -> acc + 1) tbl 0 in + (name, size) :: acc) + (Database.tableset (get_database t)) + [] diff --git a/ocaml/database/db_cache_impl.mli b/ocaml/database/db_cache_impl.mli index 4304586f1ab..493c18f3c33 100644 --- a/ocaml/database/db_cache_impl.mli +++ b/ocaml/database/db_cache_impl.mli @@ -4,7 +4,7 @@ include Db_interface.DB_ACCESS val make : Db_ref.t -> Parse_db_conf.db_connection list -> Schema.t -> unit (** [flush_and_exit db code] flushes the specific backend [db] and exits - xapi with [code] *) + xapi with [code] *) val flush_and_exit : Parse_db_conf.db_connection -> int -> unit (** [sync db] forcibly flushes the database to disk *) diff --git a/ocaml/database/db_cache_test.ml b/ocaml/database/db_cache_test.ml index d1512247523..c69b77572b6 100644 --- a/ocaml/database/db_cache_test.ml +++ b/ocaml/database/db_cache_test.ml @@ -17,7 +17,7 @@ open Stdext.Fun let create_test_db () = let schema = Test_schemas.many_to_many in - let db = + let db = ((fun x -> x) ++ (Db_backend.blow_away_non_persistent_fields schema) ++ (Db_upgrade.generic_database_upgrade)) @@ -25,12 +25,12 @@ let create_test_db () = db -let check_many_to_many () = +let check_many_to_many () = let db = create_test_db () in (* make a foo with bars = [] *) (* make a bar with foos = [] *) (* add 'bar' to 
foo.bars *) - let db = + let db = ((fun x -> x) ++ (set_field "foo" "foo:1" "bars" (add_to_set "bar:1" (Schema.Value.Set []))) ++ (add_row "foo" "foo:1" (Row.add 0L Db_names.ref (Schema.Value.String "foo:1") (Row.add 0L "bars" (Schema.Value.Set []) Row.empty))) @@ -59,7 +59,7 @@ let check_many_to_many () = (* delete 'bar' *) let db = remove_row "bar" "bar:1" db in (* check that 'foo.bars' is empty *) - let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in + let foo_1 = Table.find "foo:1" (TableSet.find "foo" (Database.tableset db)) in let foo_bars = Row.find "bars" foo_1 in if foo_bars <> (Schema.Value.Set []) then failwith (Printf.sprintf "check_many_to_many: foo(foo:1).foos expected () got %s" (Sexplib.Sexp.to_string (Schema.Value.sexp_of_t foo_bars))); diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index cdeaa0dec29..0de98ad76ae 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -15,489 +15,489 @@ open Db_exn module Time = struct - type t = Generation.t + type t = Generation.t end module Stat = struct - type t = { - created: Time.t; - modified: Time.t; - deleted: Time.t; - } - let make x = { created = x; modified = x; deleted = 0L } + type t = { + created: Time.t; + modified: Time.t; + deleted: Time.t; + } + let make x = { created = x; modified = x; deleted = 0L } end (** Database tables, columns and rows are all indexed by string, each - using a specialised StringMap *) + using a specialised StringMap *) module StringMap = struct - include Map.Make(struct - type t = string - let compare = Pervasives.compare - end) - let update key default f t = - let v = try find key t with Not_found -> default in - add key (f v) t + include Map.Make(struct + type t = string + let compare = Pervasives.compare + end) + let update key default f t = + let v = try find key t with Not_found -> default in + add key (f v) t end module type VAL = sig - type t + type t end module type MAP = 
sig - type t - type value - val empty : t - val add: Time.t -> string -> value -> t -> t - val remove : Time.t -> string -> t -> t - val fold : (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b - val fold_over_recent : Time.t -> (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b - val find : string -> t -> value - val mem : string -> t -> bool - val iter : (string -> value -> unit) -> t -> unit - val update : int64 -> string -> value -> (value -> value) -> t -> t - val touch : int64 -> string -> value -> t -> t + type t + type value + val empty : t + val add: Time.t -> string -> value -> t -> t + val remove : Time.t -> string -> t -> t + val fold : (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b + val fold_over_recent : Time.t -> (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b + val find : string -> t -> value + val mem : string -> t -> bool + val iter : (string -> value -> unit) -> t -> unit + val update : int64 -> string -> value -> (value -> value) -> t -> t + val touch : int64 -> string -> value -> t -> t end (** A specialised StringMap whose range type is V.t, and which keeps a record of when records are created/updated *) module Make = functor(V: VAL) -> struct - type x = { - stat: Stat.t; - v : V.t - } - type map_t = x StringMap.t - let empty = StringMap.empty - let fold f = StringMap.fold (fun key x -> f key x.stat x.v) - let add generation key v = - let stat = Stat.make generation in - StringMap.add key { stat; v } - let find key map = (StringMap.find key map).v - let mem = StringMap.mem - let iter f = StringMap.iter (fun key x -> f key x.v) - let remove _ = StringMap.remove - let touch generation key default row = - let default = { stat = Stat.make generation; v = default } in - StringMap.update key default (fun x -> { x with stat = { x.stat with Stat.modified=generation } }) row - let update generation key default f row = - let default = { stat = Stat.make generation; v = default } in - let updatefn () = StringMap.update 
key default (fun x -> { stat = { x.stat with Stat.modified=generation }; v=f x.v}) row in - if mem key row - then - let old = find key row in - let newv = f old in - if newv == old - then row - else updatefn () - else - updatefn () - let fold_over_recent since f t initial = StringMap.fold (fun x y z -> if y.stat.Stat.modified > since then f x y.stat y.v z else z) t initial + type x = { + stat: Stat.t; + v : V.t + } + type map_t = x StringMap.t + let empty = StringMap.empty + let fold f = StringMap.fold (fun key x -> f key x.stat x.v) + let add generation key v = + let stat = Stat.make generation in + StringMap.add key { stat; v } + let find key map = (StringMap.find key map).v + let mem = StringMap.mem + let iter f = StringMap.iter (fun key x -> f key x.v) + let remove _ = StringMap.remove + let touch generation key default row = + let default = { stat = Stat.make generation; v = default } in + StringMap.update key default (fun x -> { x with stat = { x.stat with Stat.modified=generation } }) row + let update generation key default f row = + let default = { stat = Stat.make generation; v = default } in + let updatefn () = StringMap.update key default (fun x -> { stat = { x.stat with Stat.modified=generation }; v=f x.v}) row in + if mem key row + then + let old = find key row in + let newv = f old in + if newv == old + then row + else updatefn () + else + updatefn () + let fold_over_recent since f t initial = StringMap.fold (fun x y z -> if y.stat.Stat.modified > since then f x y.stat y.v z else z) t initial end module Row = struct - include Make(Schema.Value) - - type t=map_t - type value = Schema.Value.t - let find key t = - try find key t - with Not_found -> raise (DBCache_NotFound ("missing field", key, "")) - let add_defaults g (schema: Schema.Table.t) t = - List.fold_left (fun t c -> - if not(mem c.Schema.Column.name t) - then match c.Schema.Column.default with - | Some default -> add g c.Schema.Column.name default t - | None -> raise (DBCache_NotFound 
("missing field", c.Schema.Column.name, "")) - else t) t schema.Schema.Table.columns + include Make(Schema.Value) + + type t=map_t + type value = Schema.Value.t + let find key t = + try find key t + with Not_found -> raise (DBCache_NotFound ("missing field", key, "")) + let add_defaults g (schema: Schema.Table.t) t = + List.fold_left (fun t c -> + if not(mem c.Schema.Column.name t) + then match c.Schema.Column.default with + | Some default -> add g c.Schema.Column.name default t + | None -> raise (DBCache_NotFound ("missing field", c.Schema.Column.name, "")) + else t) t schema.Schema.Table.columns end module Table = struct - module StringRowMap = Make(Row) - - type t = { rows : StringRowMap.map_t; - deleted_len : int; - deleted : (Time.t * Time.t * string) list } - type value = Row.t - let add g key value t = {t with rows=StringRowMap.add g key value t.rows} - let empty = {rows=StringRowMap.empty; deleted_len = 1; deleted=[(0L,0L,"")] } - let fold f t acc = StringRowMap.fold f t.rows acc - let find key t = StringRowMap.find key t.rows - let mem key t = StringRowMap.mem key t.rows - let iter f t = StringRowMap.iter f t.rows - let remove g key t = - let upper_length_deleted_queue = 512 in - let lower_length_deleted_queue = 256 in - let created = (StringMap.find key t.rows).StringRowMap.stat.Stat.created in - let new_element = (created,g,key) in - let new_len,new_deleted = - if t.deleted_len + 1 < upper_length_deleted_queue - then t.deleted_len + 1, (new_element::t.deleted) - else lower_length_deleted_queue + 1, (new_element::(Stdext.Listext.List.take lower_length_deleted_queue t.deleted)) - in - {rows = StringRowMap.remove g key t.rows; - deleted_len = new_len; - deleted = new_deleted} - let touch g key default t = {t with rows = StringRowMap.touch g key default t.rows } - let update g key default f t = {t with rows = StringRowMap.update g key default f t.rows} - let fold_over_recent since f t acc = StringRowMap.fold_over_recent since f t.rows acc - - let 
fold_over_deleted since f t acc = - let rec loop xs acc = match xs with - | (created,deleted,r)::xs -> - let new_acc = - if (deleted > since) && (created <= since) - then (f r { Stat.created; modified = deleted; deleted } acc) - else acc - in - if deleted <= since then new_acc else loop xs new_acc - | [] -> - acc in - loop t.deleted acc + module StringRowMap = Make(Row) + + type t = { rows : StringRowMap.map_t; + deleted_len : int; + deleted : (Time.t * Time.t * string) list } + type value = Row.t + let add g key value t = {t with rows=StringRowMap.add g key value t.rows} + let empty = {rows=StringRowMap.empty; deleted_len = 1; deleted=[(0L,0L,"")] } + let fold f t acc = StringRowMap.fold f t.rows acc + let find key t = StringRowMap.find key t.rows + let mem key t = StringRowMap.mem key t.rows + let iter f t = StringRowMap.iter f t.rows + let remove g key t = + let upper_length_deleted_queue = 512 in + let lower_length_deleted_queue = 256 in + let created = (StringMap.find key t.rows).StringRowMap.stat.Stat.created in + let new_element = (created,g,key) in + let new_len,new_deleted = + if t.deleted_len + 1 < upper_length_deleted_queue + then t.deleted_len + 1, (new_element::t.deleted) + else lower_length_deleted_queue + 1, (new_element::(Stdext.Listext.List.take lower_length_deleted_queue t.deleted)) + in + {rows = StringRowMap.remove g key t.rows; + deleted_len = new_len; + deleted = new_deleted} + let touch g key default t = {t with rows = StringRowMap.touch g key default t.rows } + let update g key default f t = {t with rows = StringRowMap.update g key default f t.rows} + let fold_over_recent since f t acc = StringRowMap.fold_over_recent since f t.rows acc + + let fold_over_deleted since f t acc = + let rec loop xs acc = match xs with + | (created,deleted,r)::xs -> + let new_acc = + if (deleted > since) && (created <= since) + then (f r { Stat.created; modified = deleted; deleted } acc) + else acc + in + if deleted <= since then new_acc else loop xs new_acc + | 
[] -> + acc in + loop t.deleted acc end module TableSet = struct - include Make(Table) + include Make(Table) - type t=map_t - type value = Table.t - let find key t = - try find key t - with Not_found -> raise (DBCache_NotFound ("missing table", key, "")) + type t=map_t + type value = Table.t + let find key t = + try find key t + with Not_found -> raise (DBCache_NotFound ("missing table", key, "")) end type common_key = - | Ref of string - | Uuid of string + | Ref of string + | Uuid of string let string_of_common_key = function - | Ref x -> x - | Uuid x -> x + | Ref x -> x + | Uuid x -> x module KeyMap = struct - include Map.Make(struct - type t = common_key - let compare = Pervasives.compare - end) - let add_unique tblname fldname k v t = - if mem k t - then raise (Uniqueness_constraint_violation ( tblname, fldname, string_of_common_key k )) - else add k v t + include Map.Make(struct + type t = common_key + let compare = Pervasives.compare + end) + let add_unique tblname fldname k v t = + if mem k t + then raise (Uniqueness_constraint_violation ( tblname, fldname, string_of_common_key k )) + else add k v t end module Manifest = struct - type t = { - schema : (int * int) option; - generation_count : Generation.t - } + type t = { + schema : (int * int) option; + generation_count : Generation.t + } - let empty = { - schema = None; generation_count = Generation.null_generation - } + let empty = { + schema = None; generation_count = Generation.null_generation + } - let make schema_major_vsn schema_minor_vsn gen_count = { - schema = Some (schema_major_vsn, schema_minor_vsn); - generation_count = gen_count - } + let make schema_major_vsn schema_minor_vsn gen_count = { + schema = Some (schema_major_vsn, schema_minor_vsn); + generation_count = gen_count + } - let generation x = x.generation_count + let generation x = x.generation_count - let touch f x = { - x with generation_count = f x.generation_count - } + let touch f x = { + x with generation_count = f 
x.generation_count + } - let next = touch (Int64.add 1L) + let next = touch (Int64.add 1L) - let schema x = match x.schema with - | None -> 0, 0 - | Some (x, y) -> x, y + let schema x = match x.schema with + | None -> 0, 0 + | Some (x, y) -> x, y - let update_schema f x = { - x with schema = f x.schema - } + let update_schema f x = { + x with schema = f x.schema + } end (** The core database updates (RefreshRow and PreDelete is more of an 'event') *) type update = - | RefreshRow of string (* tblname *) * string (* objref *) - | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * Schema.Value.t (* oldval *) * Schema.Value.t (* newval *) - | PreDelete of string (* tblname *) * string (* objref *) - | Delete of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) - | Create of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) + | RefreshRow of string (* tblname *) * string (* objref *) + | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * Schema.Value.t (* oldval *) * Schema.Value.t (* newval *) + | PreDelete of string (* tblname *) * string (* objref *) + | Delete of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) + | Create of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) module Database = struct - type t = { - tables: TableSet.t; - manifest : Manifest.t; - schema: Schema.t; - keymap: (string * string) KeyMap.t; - callbacks: (string * (update -> t -> unit)) list - } - let update_manifest f x = - { x with manifest = f x.manifest } - - let manifest x = x.manifest - - let increment = update_manifest Manifest.next - - let tableset x = x.tables - - let schema x = x.schema - - let update f x = - { x with tables = f x.tables } - - let set_generation g = - update_manifest (Manifest.touch (fun _ -> g)) - - let update_tableset f x = - { x with tables = f 
x.tables } - - let update_keymap f x = - { x with keymap = f x.keymap } - - let register_callback name f x = - { x with callbacks = (name, f) :: x.callbacks } - - let unregister_callback name x = - { x with callbacks = List.filter (fun (x, _) -> x <> name) x.callbacks } - - let notify e db = - List.iter (fun (name, f) -> - try - f e db - with e -> - Printf.printf "Caught %s from database callback '%s'\n%!" (Printexc.to_string e) name; - () - ) db.callbacks - - let reindex x = - let g = x.manifest.Manifest.generation_count in - (* Recompute the keymap *) - let keymap = - TableSet.fold - (fun tblname _ tbl acc -> - Table.fold - (fun rf _ row acc -> - let acc = KeyMap.add_unique tblname Db_names.ref (Ref rf) (tblname, rf) acc in - if Row.mem Db_names.uuid row - then KeyMap.add_unique tblname Db_names.uuid (Uuid (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid row))) (tblname, rf) acc - else acc - ) - tbl acc) - x.tables KeyMap.empty in - (* For each of the one-to-many relationships, recompute the many end *) - let tables = - Schema.ForeignMap.fold - (fun one_tblname rels tables -> - List.fold_left (fun tables (one_fldname, many_tblname, many_fldname) -> - (* VBD.VM : Ref(VM) -> VM.VBDs : Set(Ref(VBD)) *) - let one_tbl = TableSet.find one_tblname tables in - let many_tbl = TableSet.find many_tblname tables in - (* Initialise all VM.VBDs = [] (otherwise VMs with no - VBDs may be missing a VBDs field altogether on - upgrade) *) - let many_tbl' = Table.fold - (fun vm _ row acc -> - let row' = Row.add g many_fldname (Schema.Value.Set []) row in - Table.add g vm row' acc) - many_tbl Table.empty in - - (* Build up a table of VM -> VBDs *) - - let vm_to_vbds = Table.fold - (fun vbd _ row acc -> - let vm = Schema.Value.Unsafe_cast.string (Row.find one_fldname row) in - let existing = if Schema.ForeignMap.mem vm acc then Schema.ForeignMap.find vm acc else [] in - Schema.ForeignMap.add vm (vbd :: existing) acc) - one_tbl Schema.ForeignMap.empty in - let many_tbl'' = 
Schema.ForeignMap.fold - (fun vm vbds acc -> - if not(Table.mem vm acc) - then acc - else - let row = Table.find vm acc in - let row' = Row.add g many_fldname (Schema.Value.Set vbds) row in - Table.add g vm row' acc) - vm_to_vbds many_tbl' in - TableSet.add g many_tblname many_tbl'' tables) - tables rels) - x.schema.Schema.one_to_many - x.tables in - - { x with keymap = keymap; tables = tables } - - - let table_of_ref rf db = fst (KeyMap.find (Ref rf) db.keymap) - let lookup_key key db = - if KeyMap.mem (Ref key) db.keymap - then Some (KeyMap.find (Ref key) db.keymap) - else None - - let make schema = { - tables = TableSet.empty; - manifest = Manifest.empty; - schema = schema; - keymap = KeyMap.empty; - callbacks = []; - } + type t = { + tables: TableSet.t; + manifest : Manifest.t; + schema: Schema.t; + keymap: (string * string) KeyMap.t; + callbacks: (string * (update -> t -> unit)) list + } + let update_manifest f x = + { x with manifest = f x.manifest } + + let manifest x = x.manifest + + let increment = update_manifest Manifest.next + + let tableset x = x.tables + + let schema x = x.schema + + let update f x = + { x with tables = f x.tables } + + let set_generation g = + update_manifest (Manifest.touch (fun _ -> g)) + + let update_tableset f x = + { x with tables = f x.tables } + + let update_keymap f x = + { x with keymap = f x.keymap } + + let register_callback name f x = + { x with callbacks = (name, f) :: x.callbacks } + + let unregister_callback name x = + { x with callbacks = List.filter (fun (x, _) -> x <> name) x.callbacks } + + let notify e db = + List.iter (fun (name, f) -> + try + f e db + with e -> + Printf.printf "Caught %s from database callback '%s'\n%!" 
(Printexc.to_string e) name; + () + ) db.callbacks + + let reindex x = + let g = x.manifest.Manifest.generation_count in + (* Recompute the keymap *) + let keymap = + TableSet.fold + (fun tblname _ tbl acc -> + Table.fold + (fun rf _ row acc -> + let acc = KeyMap.add_unique tblname Db_names.ref (Ref rf) (tblname, rf) acc in + if Row.mem Db_names.uuid row + then KeyMap.add_unique tblname Db_names.uuid (Uuid (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid row))) (tblname, rf) acc + else acc + ) + tbl acc) + x.tables KeyMap.empty in + (* For each of the one-to-many relationships, recompute the many end *) + let tables = + Schema.ForeignMap.fold + (fun one_tblname rels tables -> + List.fold_left (fun tables (one_fldname, many_tblname, many_fldname) -> + (* VBD.VM : Ref(VM) -> VM.VBDs : Set(Ref(VBD)) *) + let one_tbl = TableSet.find one_tblname tables in + let many_tbl = TableSet.find many_tblname tables in + (* Initialise all VM.VBDs = [] (otherwise VMs with no + VBDs may be missing a VBDs field altogether on + upgrade) *) + let many_tbl' = Table.fold + (fun vm _ row acc -> + let row' = Row.add g many_fldname (Schema.Value.Set []) row in + Table.add g vm row' acc) + many_tbl Table.empty in + + (* Build up a table of VM -> VBDs *) + + let vm_to_vbds = Table.fold + (fun vbd _ row acc -> + let vm = Schema.Value.Unsafe_cast.string (Row.find one_fldname row) in + let existing = if Schema.ForeignMap.mem vm acc then Schema.ForeignMap.find vm acc else [] in + Schema.ForeignMap.add vm (vbd :: existing) acc) + one_tbl Schema.ForeignMap.empty in + let many_tbl'' = Schema.ForeignMap.fold + (fun vm vbds acc -> + if not(Table.mem vm acc) + then acc + else + let row = Table.find vm acc in + let row' = Row.add g many_fldname (Schema.Value.Set vbds) row in + Table.add g vm row' acc) + vm_to_vbds many_tbl' in + TableSet.add g many_tblname many_tbl'' tables) + tables rels) + x.schema.Schema.one_to_many + x.tables in + + { x with keymap = keymap; tables = tables } + + + let 
table_of_ref rf db = fst (KeyMap.find (Ref rf) db.keymap) + let lookup_key key db = + if KeyMap.mem (Ref key) db.keymap + then Some (KeyMap.find (Ref key) db.keymap) + else None + + let make schema = { + tables = TableSet.empty; + manifest = Manifest.empty; + schema = schema; + keymap = KeyMap.empty; + callbacks = []; + } end (* Helper functions to deal with Sets and Maps *) let add_to_set key t = - let t = Schema.Value.Unsafe_cast.set t in - Schema.Value.Set (if List.mem key t then t else key :: t) + let t = Schema.Value.Unsafe_cast.set t in + Schema.Value.Set (if List.mem key t then t else key :: t) let remove_from_set key t = - let t = Schema.Value.Unsafe_cast.set t in - Schema.Value.Set (List.filter (fun x -> x <> key) t) + let t = Schema.Value.Unsafe_cast.set t in + Schema.Value.Set (List.filter (fun x -> x <> key) t) exception Duplicate let add_to_map key value t = - let t = Schema.Value.Unsafe_cast.pairs t in - if List.mem key (List.map fst t) then raise Duplicate; - Schema.Value.Pairs ((key, value) :: t) + let t = Schema.Value.Unsafe_cast.pairs t in + if List.mem key (List.map fst t) then raise Duplicate; + Schema.Value.Pairs ((key, value) :: t) let remove_from_map key t = - let t = Schema.Value.Unsafe_cast.pairs t in - Schema.Value.Pairs (List.filter (fun (k, _) -> k <> key) t) + let t = Schema.Value.Unsafe_cast.pairs t in + Schema.Value.Pairs (List.filter (fun (k, _) -> k <> key) t) let (++) f g x = f (g x) let id x = x let is_valid tblname objref db = - Table.mem objref (TableSet.find tblname (Database.tableset db)) + Table.mem objref (TableSet.find tblname (Database.tableset db)) let get_field tblname objref fldname db = - try - Row.find fldname (Table.find objref (TableSet.find tblname (Database.tableset db))) - with Not_found -> - raise (DBCache_NotFound ("missing row", tblname, objref)) + try + Row.find fldname (Table.find objref (TableSet.find tblname (Database.tableset db))) + with Not_found -> + raise (DBCache_NotFound ("missing row", tblname, 
objref)) let unsafe_set_field g tblname objref fldname newval = - (Database.update - ++ (TableSet.update g tblname Table.empty) - ++ (Table.update g objref Row.empty) - ++ (Row.update g fldname (Schema.Value.String ""))) - (fun _ -> newval) + (Database.update + ++ (TableSet.update g tblname Table.empty) + ++ (Table.update g objref Row.empty) + ++ (Row.update g fldname (Schema.Value.String ""))) + (fun _ -> newval) let update_one_to_many g tblname objref f db = - if not (is_valid tblname objref db) then db else - List.fold_left (fun db (one_fld, many_tbl, many_fld) -> - (* the value one_fld_val is the Ref _ *) - let one_fld_val = Schema.Value.Unsafe_cast.string (get_field tblname objref one_fld db) in - let valid = try ignore(Database.table_of_ref one_fld_val db); true with _ -> false in - if valid - then unsafe_set_field g many_tbl one_fld_val many_fld (f objref (get_field many_tbl one_fld_val many_fld db)) db - else db - ) db (Schema.one_to_many tblname (Database.schema db)) + if not (is_valid tblname objref db) then db else + List.fold_left (fun db (one_fld, many_tbl, many_fld) -> + (* the value one_fld_val is the Ref _ *) + let one_fld_val = Schema.Value.Unsafe_cast.string (get_field tblname objref one_fld db) in + let valid = try ignore(Database.table_of_ref one_fld_val db); true with _ -> false in + if valid + then unsafe_set_field g many_tbl one_fld_val many_fld (f objref (get_field many_tbl one_fld_val many_fld db)) db + else db + ) db (Schema.one_to_many tblname (Database.schema db)) let update_many_to_many g tblname objref f db = - if not (is_valid tblname objref db) then db else - List.fold_left (fun db (this_fld, other_tbl, other_fld) -> - let this_fld_refs = Schema.Value.Unsafe_cast.set (get_field tblname objref this_fld db) in - (* for each of this_fld_refs, apply f *) - List.fold_left (fun db other_ref -> - let valid = try ignore(Database.table_of_ref other_ref db); true with _ -> false in - if valid - then - let other_field = get_field other_tbl 
other_ref other_fld db in - unsafe_set_field g other_tbl other_ref other_fld (f objref other_field) db - else db) - db this_fld_refs - ) db (Schema.many_to_many tblname (Database.schema db)) + if not (is_valid tblname objref db) then db else + List.fold_left (fun db (this_fld, other_tbl, other_fld) -> + let this_fld_refs = Schema.Value.Unsafe_cast.set (get_field tblname objref this_fld db) in + (* for each of this_fld_refs, apply f *) + List.fold_left (fun db other_ref -> + let valid = try ignore(Database.table_of_ref other_ref db); true with _ -> false in + if valid + then + let other_field = get_field other_tbl other_ref other_fld db in + unsafe_set_field g other_tbl other_ref other_fld (f objref other_field) db + else db) + db this_fld_refs + ) db (Schema.many_to_many tblname (Database.schema db)) let set_field tblname objref fldname newval db = - if fldname = Db_names.ref - then failwith (Printf.sprintf "Cannot safely update field: %s" fldname); - let need_other_table_update = - let schema = Database.schema db in - match (Schema.one_to_many tblname schema, Schema.many_to_many tblname schema) with - | [],[] -> - false - | o2m,m2m -> - List.exists (fun (fld,tbl,otherfld) -> fld = fldname) o2m - || List.exists (fun (fld,tbl,otherfld) -> fld = fldname) m2m - in - - if need_other_table_update then begin - let g = Manifest.generation (Database.manifest db) in - (Database.increment - ++ (update_one_to_many g tblname objref add_to_set) - ++ (update_many_to_many g tblname objref add_to_set) - ++ ((Database.update - ++ (TableSet.update g tblname Table.empty) - ++ (Table.update g objref Row.empty) - ++ (Row.update g fldname (Schema.Value.String ""))) (fun _ -> newval)) - ++ (update_one_to_many g tblname objref remove_from_set) - ++ (update_many_to_many g tblname objref remove_from_set)) db - end else begin - let g = Manifest.generation (Database.manifest db) in - (Database.increment - ++ ((Database.update - ++ (TableSet.update g tblname Table.empty) - ++ (Table.update g 
objref Row.empty) - ++ (Row.update g fldname (Schema.Value.String ""))) - (fun _ -> newval))) db - end + if fldname = Db_names.ref + then failwith (Printf.sprintf "Cannot safely update field: %s" fldname); + let need_other_table_update = + let schema = Database.schema db in + match (Schema.one_to_many tblname schema, Schema.many_to_many tblname schema) with + | [],[] -> + false + | o2m,m2m -> + List.exists (fun (fld,tbl,otherfld) -> fld = fldname) o2m + || List.exists (fun (fld,tbl,otherfld) -> fld = fldname) m2m + in + + if need_other_table_update then begin + let g = Manifest.generation (Database.manifest db) in + (Database.increment + ++ (update_one_to_many g tblname objref add_to_set) + ++ (update_many_to_many g tblname objref add_to_set) + ++ ((Database.update + ++ (TableSet.update g tblname Table.empty) + ++ (Table.update g objref Row.empty) + ++ (Row.update g fldname (Schema.Value.String ""))) (fun _ -> newval)) + ++ (update_one_to_many g tblname objref remove_from_set) + ++ (update_many_to_many g tblname objref remove_from_set)) db + end else begin + let g = Manifest.generation (Database.manifest db) in + (Database.increment + ++ ((Database.update + ++ (TableSet.update g tblname Table.empty) + ++ (Table.update g objref Row.empty) + ++ (Row.update g fldname (Schema.Value.String ""))) + (fun _ -> newval))) db + end let touch tblname objref db = - let g = Manifest.generation (Database.manifest db) in - (* We update the generation twice so that we can return the lower count - for the "event.inject" API to guarantee that the token from a later - event.from will always compare as strictly greater. See the definition - of the event token datatype. 
*) - (Database.increment ++ Database.increment - ++ ((Database.update - ++ (TableSet.update g tblname Table.empty) - ++ (Table.touch g objref)) Row.empty - )) db + let g = Manifest.generation (Database.manifest db) in + (* We update the generation twice so that we can return the lower count + for the "event.inject" API to guarantee that the token from a later + event.from will always compare as strictly greater. See the definition + of the event token datatype. *) + (Database.increment ++ Database.increment + ++ ((Database.update + ++ (TableSet.update g tblname Table.empty) + ++ (Table.touch g objref)) Row.empty + )) db let add_row tblname objref newval db = - let g = db.Database.manifest.Manifest.generation_count in - (Database.increment - (* Update foreign Set(Ref _) fields *) - (* NB this requires the new row to exist already *) - ++ (update_one_to_many g tblname objref add_to_set) - ++ (update_many_to_many g tblname objref add_to_set) - ++ ((Database.update ++ (TableSet.update g tblname Table.empty) ++ (Table.update g objref newval)) - (fun _ -> newval)) - ++ (Database.update_keymap (KeyMap.add_unique tblname Db_names.ref (Ref objref) (tblname, objref))) - ++ (Database.update_keymap (fun m -> - if Row.mem Db_names.uuid newval - then KeyMap.add_unique tblname Db_names.uuid (Uuid (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid newval))) (tblname, objref) m - else m))) db + let g = db.Database.manifest.Manifest.generation_count in + (Database.increment + (* Update foreign Set(Ref _) fields *) + (* NB this requires the new row to exist already *) + ++ (update_one_to_many g tblname objref add_to_set) + ++ (update_many_to_many g tblname objref add_to_set) + ++ ((Database.update ++ (TableSet.update g tblname Table.empty) ++ (Table.update g objref newval)) + (fun _ -> newval)) + ++ (Database.update_keymap (KeyMap.add_unique tblname Db_names.ref (Ref objref) (tblname, objref))) + ++ (Database.update_keymap (fun m -> + if Row.mem Db_names.uuid newval + then 
KeyMap.add_unique tblname Db_names.uuid (Uuid (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid newval))) (tblname, objref) m + else m))) db let remove_row tblname objref db = - let uuid = - try - Some (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid (Table.find objref (TableSet.find tblname (Database.tableset db))))) - with _ -> None in - let g = db.Database.manifest.Manifest.generation_count in - (Database.increment - ++ ((Database.update ++ (TableSet.update g tblname Table.empty)) - (Table.remove g objref)) - (* Update foreign (Set(Ref _)) fields *) - (* NB this requires the original row to still exist *) - ++ (update_one_to_many g tblname objref remove_from_set) - ++ (update_many_to_many g tblname objref remove_from_set) - ++ (Database.update_keymap (KeyMap.remove (Ref objref))) - ++ (Database.update_keymap (fun m -> - match uuid with - | Some u -> KeyMap.remove (Uuid u) m - | None -> m))) db + let uuid = + try + Some (Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid (Table.find objref (TableSet.find tblname (Database.tableset db))))) + with _ -> None in + let g = db.Database.manifest.Manifest.generation_count in + (Database.increment + ++ ((Database.update ++ (TableSet.update g tblname Table.empty)) + (Table.remove g objref)) + (* Update foreign (Set(Ref _)) fields *) + (* NB this requires the original row to still exist *) + ++ (update_one_to_many g tblname objref remove_from_set) + ++ (update_many_to_many g tblname objref remove_from_set) + ++ (Database.update_keymap (KeyMap.remove (Ref objref))) + ++ (Database.update_keymap (fun m -> + match uuid with + | Some u -> KeyMap.remove (Uuid u) m + | None -> m))) db type where_record = { - table: string; (** table from which ... *) - return: string; (** we'd like to return this field... *) - where_field: string; (** where this other field... *) - where_value: string; (** contains this value *) + table: string; (** table from which ... *) + return: string; (** we'd like to return this field... 
*) + where_field: string; (** where this other field... *) + where_value: string; (** contains this value *) } with rpc type structured_op_t = - | AddSet - | RemoveSet - | AddMap - | RemoveMap + | AddSet + | RemoveSet + | AddMap + | RemoveMap with rpc diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index b7b84140888..028fa177893 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -13,135 +13,135 @@ *) module Time : sig - type t = Generation.t - (** A monotonically increasing counter associated with this database *) + type t = Generation.t + (** A monotonically increasing counter associated with this database *) end module Stat : sig - type t = { - created: Time.t; (** Time this value was created *) - modified: Time.t; (** Time this value was last modified *) - deleted: Time.t; (** Time this value was deleted (or 0L meaning it is still alive) *) - } - (** Metadata associated with a database value *) + type t = { + created: Time.t; (** Time this value was created *) + modified: Time.t; (** Time this value was last modified *) + deleted: Time.t; (** Time this value was deleted (or 0L meaning it is still alive) *) + } + (** Metadata associated with a database value *) end module type MAP = sig - type t - (** A map from string to some value *) - - type value - (** The type of the values in the map *) - - val empty : t - (** The empty map *) - - val add: Time.t -> string -> value -> t -> t - (** [add now key value map] returns a new map with [key] associated with [value], - with creation time [now] *) - - val remove : Time.t -> string -> t -> t - (** [remove now key t] removes the binding of [key] from [t]. 
*) - - val fold : (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b - (** [fold f t initial] folds [f key stats value acc] over the items in [t] *) - - val fold_over_recent : Time.t -> (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b - (** [fold_over_recent since f t initial] folds [f key stats value acc] over all the - items with a modified time larger than [since] *) - - val find : string -> t -> value - (** [find key t] returns the value associated with [key] in [t] or raises - [DBCache_NotFound] *) - - val mem : string -> t -> bool - (** [mem key t] returns true if [value] is associated with [key] in [t] or false - otherwise *) - - val iter : (string -> value -> unit) -> t -> unit - (** [iter f t] applies [f key value] to each binding in [t] *) - - val update : Time.t -> string -> value -> (value -> value) -> t -> t - (** [update now key default f t] returns a new map which is the same as [t] except: - if there is a value associated with [key] it is replaced with [f key] - or if there is no value associated with [key] then [default] is associated with [key]. - This function touches the modification time of [key] *unless* [f key] is physically - equal with the current value: in this case the modification time isn't bumped as - an optimisation. - *) - - val touch : Time.t -> string -> value -> t -> t - (** [touch now key default t] returns a new map which is the same as [t] except: - if there is a value associated with [t] then its modification time is set to [now]; - if there is no value associated with [t] then one is created with value [default]. 
- On exit there will be a binding of [key] whose modification time is [now] *) + type t + (** A map from string to some value *) + + type value + (** The type of the values in the map *) + + val empty : t + (** The empty map *) + + val add: Time.t -> string -> value -> t -> t + (** [add now key value map] returns a new map with [key] associated with [value], + with creation time [now] *) + + val remove : Time.t -> string -> t -> t + (** [remove now key t] removes the binding of [key] from [t]. *) + + val fold : (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b + (** [fold f t initial] folds [f key stats value acc] over the items in [t] *) + + val fold_over_recent : Time.t -> (string -> Stat.t -> value -> 'b -> 'b) -> t -> 'b -> 'b + (** [fold_over_recent since f t initial] folds [f key stats value acc] over all the + items with a modified time larger than [since] *) + + val find : string -> t -> value + (** [find key t] returns the value associated with [key] in [t] or raises + [DBCache_NotFound] *) + + val mem : string -> t -> bool + (** [mem key t] returns true if [value] is associated with [key] in [t] or false + otherwise *) + + val iter : (string -> value -> unit) -> t -> unit + (** [iter f t] applies [f key value] to each binding in [t] *) + + val update : Time.t -> string -> value -> (value -> value) -> t -> t + (** [update now key default f t] returns a new map which is the same as [t] except: + if there is a value associated with [key] it is replaced with [f key] + or if there is no value associated with [key] then [default] is associated with [key]. + This function touches the modification time of [key] *unless* [f key] is physically + equal with the current value: in this case the modification time isn't bumped as + an optimisation. 
+ *) + + val touch : Time.t -> string -> value -> t -> t + (** [touch now key default t] returns a new map which is the same as [t] except: + if there is a value associated with [t] then its modification time is set to [now]; + if there is no value associated with [t] then one is created with value [default]. + On exit there will be a binding of [key] whose modification time is [now] *) end module Row : sig - include MAP - with type value = Schema.Value.t + include MAP + with type value = Schema.Value.t - val add_defaults: Time.t -> Schema.Table.t -> t -> t - (** [add_defaults now schema t]: returns a row which is [t] extended to contain - all the columns specified in the schema, with default values set if not already - in [t]. If the schema is missing a default value then raises [DBCache_NotFound]: - this would happen if a client failed to provide a necessary field. *) + val add_defaults: Time.t -> Schema.Table.t -> t -> t + (** [add_defaults now schema t]: returns a row which is [t] extended to contain + all the columns specified in the schema, with default values set if not already + in [t]. If the schema is missing a default value then raises [DBCache_NotFound]: + this would happen if a client failed to provide a necessary field. *) end module Table : sig - include MAP - with type value = Row.t + include MAP + with type value = Row.t - val fold_over_deleted : Time.t -> (string -> Stat.t -> 'b -> 'b) -> t -> 'b -> 'b - (** [fold_over_deleted now f t initial] folds [f key stat acc] over the keys - which have been recently deleted. Note this is not guaranteed to remember - all events, so the list may be short. *) + val fold_over_deleted : Time.t -> (string -> Stat.t -> 'b -> 'b) -> t -> 'b -> 'b + (** [fold_over_deleted now f t initial] folds [f key stat acc] over the keys + which have been recently deleted. Note this is not guaranteed to remember + all events, so the list may be short. 
*) end module TableSet : MAP with type value = Table.t module Manifest : - sig - type t - val empty : t - val make : int -> int -> Generation.t -> t - val generation : t -> Generation.t - val touch : (Generation.t -> Generation.t) -> t -> t - val next : t -> t - val schema : t -> int * int - val update_schema : ((int * int) option -> (int * int) option) -> t -> t - end +sig + type t + val empty : t + val make : int -> int -> Generation.t -> t + val generation : t -> Generation.t + val touch : (Generation.t -> Generation.t) -> t -> t + val next : t -> t + val schema : t -> int * int + val update_schema : ((int * int) option -> (int * int) option) -> t -> t +end (** The core database updates (RefreshRow and PreDelete is more of an 'event') *) -type update = - | RefreshRow of string (* tblname *) * string (* objref *) - | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * Schema.Value.t (* oldval *) * Schema.Value.t (* newval *) - | PreDelete of string (* tblname *) * string (* objref *) - | Delete of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) - | Create of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) +type update = + | RefreshRow of string (* tblname *) * string (* objref *) + | WriteField of string (* tblname *) * string (* objref *) * string (* fldname *) * Schema.Value.t (* oldval *) * Schema.Value.t (* newval *) + | PreDelete of string (* tblname *) * string (* objref *) + | Delete of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) + | Create of string (* tblname *) * string (* objref *) * (string * Schema.Value.t) list (* values *) module Database : - sig - type t - val update_manifest : (Manifest.t -> Manifest.t) -> t -> t - val update_tableset : (TableSet.t -> TableSet.t) -> t -> t - val manifest : t -> Manifest.t - val tableset : t -> TableSet.t - val schema : t -> Schema.t - val increment : t -> t - 
val update : (TableSet.t -> TableSet.t) -> t -> t - val set_generation : Generation.t -> t -> t - val make : Schema.t -> t - - val table_of_ref : string -> t -> string - val lookup_key : string -> t -> (string * string) option - val reindex : t -> t - - val register_callback : string -> (update -> t -> unit) -> t -> t - val unregister_callback : string -> t -> t - val notify : update -> t -> unit - end +sig + type t + val update_manifest : (Manifest.t -> Manifest.t) -> t -> t + val update_tableset : (TableSet.t -> TableSet.t) -> t -> t + val manifest : t -> Manifest.t + val tableset : t -> TableSet.t + val schema : t -> Schema.t + val increment : t -> t + val update : (TableSet.t -> TableSet.t) -> t -> t + val set_generation : Generation.t -> t -> t + val make : Schema.t -> t + + val table_of_ref : string -> t -> string + val lookup_key : string -> t -> (string * string) option + val reindex : t -> t + + val register_callback : string -> (update -> t -> unit) -> t -> t + val unregister_callback : string -> t -> t + val notify : update -> t -> unit +end exception Duplicate val add_to_set : string -> Schema.Value.t -> Schema.Value.t @@ -156,18 +156,18 @@ val add_row : string -> string -> Row.t -> Database.t -> Database.t val touch : string -> string -> Database.t -> Database.t type where_record = { - table: string; (** table from which ... *) - return: string; (** we'd like to return this field... *) - where_field: string; (** where this other field... *) - where_value: string; (** contains this value *) + table: string; (** table from which ... *) + return: string; (** we'd like to return this field... *) + where_field: string; (** where this other field... 
*) + where_value: string; (** contains this value *) } val where_record_of_rpc: Rpc.t -> where_record val rpc_of_where_record: where_record -> Rpc.t -type structured_op_t = - | AddSet - | RemoveSet - | AddMap - | RemoveMap +type structured_op_t = + | AddSet + | RemoveSet + | AddMap + | RemoveMap val structured_op_t_of_rpc: Rpc.t -> structured_op_t val rpc_of_structured_op_t: structured_op_t -> Rpc.t diff --git a/ocaml/database/db_conn_store.ml b/ocaml/database/db_conn_store.ml index c77f7b2b91b..dcd3bddda9e 100644 --- a/ocaml/database/db_conn_store.ml +++ b/ocaml/database/db_conn_store.ml @@ -25,24 +25,24 @@ let initialise_db_connections dbs = (* create a lock for each of our db connections *) Threadext.Mutex.execute db_conn_locks_m (fun () -> - List.iter (fun dbconn->Hashtbl.replace db_conn_locks dbconn (Mutex.create())) dbs); + List.iter (fun dbconn->Hashtbl.replace db_conn_locks dbconn (Mutex.create())) dbs); db_connections := dbs - + let read_db_connections() = !db_connections let with_db_conn_lock db_conn f = let db_conn_m = Threadext.Mutex.execute db_conn_locks_m (fun () -> - try - Hashtbl.find db_conn_locks db_conn - with _ -> - (* If we don't have a lock already for this connection then go make one dynamically and use that from then on *) - begin - let new_dbconn_mutex = Mutex.create() in - Hashtbl.replace db_conn_locks db_conn new_dbconn_mutex; - new_dbconn_mutex - end + try + Hashtbl.find db_conn_locks db_conn + with _ -> + (* If we don't have a lock already for this connection then go make one dynamically and use that from then on *) + begin + let new_dbconn_mutex = Mutex.create() in + Hashtbl.replace db_conn_locks db_conn new_dbconn_mutex; + new_dbconn_mutex + end ) in Threadext.Mutex.execute db_conn_m (fun () -> diff --git a/ocaml/database/db_connections.ml b/ocaml/database/db_connections.ml index a24e3c3032d..91668fdd49e 100644 --- a/ocaml/database/db_connections.ml +++ b/ocaml/database/db_connections.ml @@ -19,27 +19,27 @@ let 
get_dbs_and_gen_counts() = List.map (fun conn->(Parse_db_conf.generation_read conn, conn)) (Db_conn_store.read_db_connections()) (** Returns true if the supplied connection actually exists, false otherwise. - Note that, although the two files should be present (or absent) together, - after upgrade we only have a database. In this case the generation - defaults back to 0L *) -let exists connection = - Sys.file_exists connection.Parse_db_conf.path - + Note that, although the two files should be present (or absent) together, + after upgrade we only have a database. In this case the generation + defaults back to 0L *) +let exists connection = + Sys.file_exists connection.Parse_db_conf.path + (* This returns the most recent of the db connections to populate from. It also initialises the in-memory generation count to the largest of the db connections' generation counts *) let choose connections = match List.filter exists connections with -| [] -> None -| (c :: cs) as connections -> - List.iter (fun c -> debug "Dbconf contains: %s (generation %Ld)" c.Parse_db_conf.path (Parse_db_conf.generation_read c)) connections; - let gen, most_recent = List.fold_left (fun (g, c) c' -> - let g' = Parse_db_conf.generation_read c' in - if g' > g then (g', c') else (g, c)) - (Parse_db_conf.generation_read c, c) cs in - debug "Most recent db is %s (generation %Ld)" most_recent.Parse_db_conf.path gen; - Some most_recent + | [] -> None + | (c :: cs) as connections -> + List.iter (fun c -> debug "Dbconf contains: %s (generation %Ld)" c.Parse_db_conf.path (Parse_db_conf.generation_read c)) connections; + let gen, most_recent = List.fold_left (fun (g, c) c' -> + let g' = Parse_db_conf.generation_read c' in + if g' > g then (g', c') else (g, c)) + (Parse_db_conf.generation_read c, c) cs in + debug "Most recent db is %s (generation %Ld)" most_recent.Parse_db_conf.path gen; + Some most_recent let preferred_write_db () = - List.hd (Db_conn_store.read_db_connections()) (* !!! 
FIX ME *) + List.hd (Db_conn_store.read_db_connections()) (* !!! FIX ME *) (* This is set by signal handlers. It instructs the db thread to call exit after the next flush *) let exit_on_next_flush = ref false @@ -72,39 +72,39 @@ let flush_dirty_and_maybe_exit dbconn exit_spec = Db_conn_store.with_db_conn_lock dbconn (fun () -> (* if we're being told to shutdown by signal handler then flush every connection - - the rationale is that we're not sure which db connections will be available on next restart *) + - the rationale is that we're not sure which db connections will be available on next restart *) if !exit_on_next_flush then - begin - let (_: bool) = Backend_xml.flush_dirty dbconn in - let refcount = dec_and_read_db_flush_thread_refcount() in - (* last flushing thread close the door on the way out.. *) - if refcount = 0 then - begin - debug "refcount is 0; exiting"; - pre_exit_hook(); - exit 0 - end - else - debug "refcount is %d; not exiting" refcount - end; - + begin + let (_: bool) = Backend_xml.flush_dirty dbconn in + let refcount = dec_and_read_db_flush_thread_refcount() in + (* last flushing thread close the door on the way out.. 
*) + if refcount = 0 then + begin + debug "refcount is 0; exiting"; + pre_exit_hook(); + exit 0 + end + else + debug "refcount is %d; not exiting" refcount + end; + let was_anything_flushed = Backend_xml.flush_dirty dbconn in - + (* exit if we've been told to by caller *) begin - match exit_spec with - None -> () - | (Some ret_code) -> pre_exit_hook(); exit ret_code + match exit_spec with + None -> () + | (Some ret_code) -> pre_exit_hook(); exit ret_code end; - was_anything_flushed + was_anything_flushed ) let flush dbconn db = - debug "About to flush database: %s" dbconn.Parse_db_conf.path; - Db_conn_store.with_db_conn_lock dbconn - (fun () -> - Backend_xml.flush dbconn db - ) + debug "About to flush database: %s" dbconn.Parse_db_conf.path; + Db_conn_store.with_db_conn_lock dbconn + (fun () -> + Backend_xml.flush dbconn db + ) diff --git a/ocaml/database/db_filter.ml b/ocaml/database/db_filter.ml index 9578f8b6d60..d4e1421b75b 100644 --- a/ocaml/database/db_filter.ml +++ b/ocaml/database/db_filter.ml @@ -22,7 +22,7 @@ let string_of_val = function | Field x -> "Field " ^ x | Literal x -> "Literal " ^ x -let rec string_of_expr = +let rec string_of_expr = let binexpr name a b = Printf.sprintf "%s (%s, %s)" name (string_of_expr a) (string_of_expr b) in let binval name a b = Printf.sprintf "%s (%s, %s)" name (string_of_val a) (string_of_val b) in function @@ -36,58 +36,58 @@ let rec string_of_expr = exception XML_unmarshall_error let val_of_xml xml = match (XMLRPC.From.array (fun x->x) xml) with - [a; s] -> - begin - match (XMLRPC.From.string a) with - "field" -> Field (XMLRPC.From.string s) - | "literal" -> Literal (XMLRPC.From.string s) - | _ -> raise XML_unmarshall_error - end - | _ -> raise XML_unmarshall_error + [a; s] -> + begin + match (XMLRPC.From.string a) with + "field" -> Field (XMLRPC.From.string s) + | "literal" -> Literal (XMLRPC.From.string s) + | _ -> raise XML_unmarshall_error + end + | _ -> raise XML_unmarshall_error let rec expr_of_xml xml = match 
(XMLRPC.From.array (fun x->x) xml) with - [x] -> - begin - match (XMLRPC.From.string x) with - "true" -> True - | "false" -> False - | _ -> raise XML_unmarshall_error - end - | [x;y] -> - begin - match (XMLRPC.From.string x) with - "not" -> Not (expr_of_xml y) - | _ -> raise XML_unmarshall_error - end - | [x;y;z] -> - begin - match (XMLRPC.From.string x) with - "and" -> And(expr_of_xml y, expr_of_xml z) - | "or" -> Or(expr_of_xml y, expr_of_xml z) - | "eq" -> Eq(val_of_xml y, val_of_xml z) - | _ -> raise XML_unmarshall_error - end - | _ -> raise XML_unmarshall_error + [x] -> + begin + match (XMLRPC.From.string x) with + "true" -> True + | "false" -> False + | _ -> raise XML_unmarshall_error + end + | [x;y] -> + begin + match (XMLRPC.From.string x) with + "not" -> Not (expr_of_xml y) + | _ -> raise XML_unmarshall_error + end + | [x;y;z] -> + begin + match (XMLRPC.From.string x) with + "and" -> And(expr_of_xml y, expr_of_xml z) + | "or" -> Or(expr_of_xml y, expr_of_xml z) + | "eq" -> Eq(val_of_xml y, val_of_xml z) + | _ -> raise XML_unmarshall_error + end + | _ -> raise XML_unmarshall_error let xml_of_val v = match v with - Field s -> XMLRPC.To.array [XMLRPC.To.string "field"; XMLRPC.To.string s] - | Literal s -> XMLRPC.To.array [XMLRPC.To.string "literal"; XMLRPC.To.string s] + Field s -> XMLRPC.To.array [XMLRPC.To.string "field"; XMLRPC.To.string s] + | Literal s -> XMLRPC.To.array [XMLRPC.To.string "literal"; XMLRPC.To.string s] let rec xml_of_expr e = match e with - True -> XMLRPC.To.array [XMLRPC.To.string "true"] - | False -> XMLRPC.To.array [XMLRPC.To.string "false"] - | Not e -> XMLRPC.To.array [XMLRPC.To.string "not";xml_of_expr e] - | And(a,b) -> XMLRPC.To.array [XMLRPC.To.string "and"; xml_of_expr a; xml_of_expr b] - | Or(a,b) -> XMLRPC.To.array [XMLRPC.To.string "or"; xml_of_expr a; xml_of_expr b] - | Eq(a,b) -> XMLRPC.To.array [XMLRPC.To.string "eq"; xml_of_val a; xml_of_val b] + True -> XMLRPC.To.array [XMLRPC.To.string "true"] + | False -> 
XMLRPC.To.array [XMLRPC.To.string "false"] + | Not e -> XMLRPC.To.array [XMLRPC.To.string "not";xml_of_expr e] + | And(a,b) -> XMLRPC.To.array [XMLRPC.To.string "and"; xml_of_expr a; xml_of_expr b] + | Or(a,b) -> XMLRPC.To.array [XMLRPC.To.string "or"; xml_of_expr a; xml_of_expr b] + | Eq(a,b) -> XMLRPC.To.array [XMLRPC.To.string "eq"; xml_of_val a; xml_of_val b] -(** Evaluate a predicate over a database row represented by a function +(** Evaluate a predicate over a database row represented by a function 'lookup_val' which knows how to return the contents of fields. *) -let eval_expr (lookup_val: _val -> string) = +let eval_expr (lookup_val: _val -> string) = let compare f _a _b = f (lookup_val _a) (lookup_val _b) in let rec f = function | True -> true @@ -101,9 +101,9 @@ let eval_expr (lookup_val: _val -> string) = exception Expression_error of (string * exn) (* A simple parser for the expression language: *) -let expr_of_string x = try - Db_filter_parse.exprstr Db_filter_lex.lexer - (Lexing.from_string x) +let expr_of_string x = try + Db_filter_parse.exprstr Db_filter_lex.lexer + (Lexing.from_string x) with e -> raise (Expression_error (x, e)) diff --git a/ocaml/database/db_filter_types.ml b/ocaml/database/db_filter_types.ml index 2413b29ca03..ebc78a49b2b 100644 --- a/ocaml/database/db_filter_types.ml +++ b/ocaml/database/db_filter_types.ml @@ -12,17 +12,17 @@ * GNU Lesser General Public License for more details. 
*) -type _val = - | Field of string - | Literal of string +type _val = + | Field of string + | Literal of string with rpc (** Represent a predicate: table row -> bool *) -type expr = - | True - | False - | Not of expr - | Eq of _val * _val - | And of expr * expr - | Or of expr * expr +type expr = + | True + | False + | Not of expr + | Eq of _val * _val + | And of expr * expr + | Or of expr * expr with rpc diff --git a/ocaml/database/db_interface.ml b/ocaml/database/db_interface.ml index dc70d3f036d..b990b7e0446 100644 --- a/ocaml/database/db_interface.ml +++ b/ocaml/database/db_interface.ml @@ -13,92 +13,92 @@ *) type response = - | String of string - | Bigbuf of Stdext.Bigbuffer.t + | String of string + | Bigbuf of Stdext.Bigbuffer.t (** A generic RPC interface *) -module type RPC = sig - - (** [initialise ()] should be called before [rpc] *) - val initialise : unit -> unit - - (** [rpc request] transmits [request] and receives a response *) - val rpc : string -> response +module type RPC = sig + + (** [initialise ()] should be called before [rpc] *) + val initialise : unit -> unit + + (** [rpc request] transmits [request] and receives a response *) + val rpc : string -> response end (** dictionary of regular fields x dictionary of associated set_ref values *) -type db_record = (string * string) list * (string * (string list)) list +type db_record = (string * string) list * (string * (string list)) list (** The client interface to the database *) module type DB_ACCESS = sig - (** [initialise ()] must be called before any other function in this - interface *) - val initialise : unit -> unit - - (** [get_table_from_ref ref] returns [Some tbl] if [ref] is a - valid reference; None otherwise *) - val get_table_from_ref : Db_ref.t -> string -> string option - - (** [is_valid_ref ref] returns true if [ref] is valid; false otherwise *) - val is_valid_ref : Db_ref.t -> string -> bool - - (** [read_refs tbl] returns a list of all references in table [tbl] *) - val 
read_refs : Db_ref.t -> string -> string list - - (** [find_refs_with_filter tbl expr] returns a list of all references - to rows which match [expr] *) - val find_refs_with_filter : - Db_ref.t -> string -> Db_filter_types.expr -> string list - - (** [read_field_where {tbl,return,where_field,where_value}] returns a - list of the [return] fields in table [tbl] where the [where_field] - equals [where_value] *) - val read_field_where : Db_ref.t -> Db_cache_types.where_record -> string list - - (** [db_get_by_uuid tbl uuid] returns the single object reference - associated with [uuid] *) - val db_get_by_uuid : Db_ref.t -> string -> string -> string - - (** [db_get_by_name_label tbl label] returns the list of object references - associated with [label] *) - val db_get_by_name_label : Db_ref.t -> string -> string -> string list - - (** [read_set_ref {tbl,return,where_field,where_value}] is identical - to [read_field_where ...]. *) - val read_set_ref : Db_ref.t -> Db_cache_types.where_record -> string list - - (** [create_row tbl kvpairs ref] create a new row in [tbl] with - key [ref] and contents [kvpairs] *) - val create_row : - Db_ref.t -> string -> (string * string) list -> string -> unit - - (** [delete_row context tbl ref] deletes row [ref] from table [tbl] *) - val delete_row : Db_ref.t -> string -> string -> unit - - (** [write_field context tbl ref fld val] changes field [fld] to [val] in - row [ref] in table [tbl] *) - val write_field : Db_ref.t -> string -> string -> string -> string -> unit - - (** [read_field context tbl ref fld] returns the value of field [fld] - in row [ref] in table [tbl] *) - val read_field : Db_ref.t -> string -> string -> string -> string - - (** [read_record tbl ref] returns - [ (field, value) ] * [ (set_ref fieldname * [ ref ]) ] *) - val read_record : Db_ref.t -> string -> string -> db_record - - (** [read_records_where tbl expr] returns a list of the values returned - by read_record that match the expression *) - val 
read_records_where : Db_ref.t -> string -> Db_filter_types.expr -> - (string * db_record) list - - (** [process_structured_field context kv tbl fld ref op] modifies the - value of field [fld] in row [ref] in table [tbl] according to [op] - which may be one of AddSet RemoveSet AddMap RemoveMap with - arguments [kv] *) - val process_structured_field : - Db_ref.t -> string * string -> - string -> string -> string -> Db_cache_types.structured_op_t -> unit + (** [initialise ()] must be called before any other function in this + interface *) + val initialise : unit -> unit + + (** [get_table_from_ref ref] returns [Some tbl] if [ref] is a + valid reference; None otherwise *) + val get_table_from_ref : Db_ref.t -> string -> string option + + (** [is_valid_ref ref] returns true if [ref] is valid; false otherwise *) + val is_valid_ref : Db_ref.t -> string -> bool + + (** [read_refs tbl] returns a list of all references in table [tbl] *) + val read_refs : Db_ref.t -> string -> string list + + (** [find_refs_with_filter tbl expr] returns a list of all references + to rows which match [expr] *) + val find_refs_with_filter : + Db_ref.t -> string -> Db_filter_types.expr -> string list + + (** [read_field_where {tbl,return,where_field,where_value}] returns a + list of the [return] fields in table [tbl] where the [where_field] + equals [where_value] *) + val read_field_where : Db_ref.t -> Db_cache_types.where_record -> string list + + (** [db_get_by_uuid tbl uuid] returns the single object reference + associated with [uuid] *) + val db_get_by_uuid : Db_ref.t -> string -> string -> string + + (** [db_get_by_name_label tbl label] returns the list of object references + associated with [label] *) + val db_get_by_name_label : Db_ref.t -> string -> string -> string list + + (** [read_set_ref {tbl,return,where_field,where_value}] is identical + to [read_field_where ...]. 
*) + val read_set_ref : Db_ref.t -> Db_cache_types.where_record -> string list + + (** [create_row tbl kvpairs ref] create a new row in [tbl] with + key [ref] and contents [kvpairs] *) + val create_row : + Db_ref.t -> string -> (string * string) list -> string -> unit + + (** [delete_row context tbl ref] deletes row [ref] from table [tbl] *) + val delete_row : Db_ref.t -> string -> string -> unit + + (** [write_field context tbl ref fld val] changes field [fld] to [val] in + row [ref] in table [tbl] *) + val write_field : Db_ref.t -> string -> string -> string -> string -> unit + + (** [read_field context tbl ref fld] returns the value of field [fld] + in row [ref] in table [tbl] *) + val read_field : Db_ref.t -> string -> string -> string -> string + + (** [read_record tbl ref] returns + [ (field, value) ] * [ (set_ref fieldname * [ ref ]) ] *) + val read_record : Db_ref.t -> string -> string -> db_record + + (** [read_records_where tbl expr] returns a list of the values returned + by read_record that match the expression *) + val read_records_where : Db_ref.t -> string -> Db_filter_types.expr -> + (string * db_record) list + + (** [process_structured_field context kv tbl fld ref op] modifies the + value of field [fld] in row [ref] in table [tbl] according to [op] + which may be one of AddSet RemoveSet AddMap RemoveMap with + arguments [kv] *) + val process_structured_field : + Db_ref.t -> string * string -> + string -> string -> string -> Db_cache_types.structured_op_t -> unit end diff --git a/ocaml/database/db_lock.ml b/ocaml/database/db_lock.ml index cd2d41cfa5c..c4af4243097 100644 --- a/ocaml/database/db_lock.ml +++ b/ocaml/database/db_lock.ml @@ -13,15 +13,15 @@ *) (* Lock shared between client/slave implementations *) - module D = Debug.Make(struct let name = "db_lock" end) - open D +module D = Debug.Make(struct let name = "db_lock" end) +open D - open Stdext.Threadext - open Stdext.Pervasiveext +open Stdext.Threadext +open Stdext.Pervasiveext (* Withlock 
takes dbcache_mutex, and ref-counts to allow the same thread to re-enter without blocking as many times as it wants. *) -let dbcache_mutex = Mutex.create() +let dbcache_mutex = Mutex.create() let time = ref 0.0 let n = ref 0 let maxtime = ref (neg_infinity) @@ -46,28 +46,28 @@ let with_lock f = allow_thread_through_dbcache_mutex := Some me; thread_reenter_count := 1; finally - f - (fun () -> - thread_reenter_count := !thread_reenter_count -1; - if !thread_reenter_count = 0 then - begin - allow_thread_through_dbcache_mutex := None; - Mutex.unlock dbcache_mutex - end - ) + f + (fun () -> + thread_reenter_count := !thread_reenter_count -1; + if !thread_reenter_count = 0 then + begin + allow_thread_through_dbcache_mutex := None; + Mutex.unlock dbcache_mutex + end + ) in - match !allow_thread_through_dbcache_mutex with - | None -> do_with_lock() - | (Some id) -> - if id=me then - begin - thread_reenter_count := !thread_reenter_count + 1; - finally - f - (fun () -> thread_reenter_count := !thread_reenter_count - 1) - end - else - do_with_lock() + match !allow_thread_through_dbcache_mutex with + | None -> do_with_lock() + | (Some id) -> + if id=me then + begin + thread_reenter_count := !thread_reenter_count + 1; + finally + f + (fun () -> thread_reenter_count := !thread_reenter_count - 1) + end + else + do_with_lock() end (* Global flush lock: all db flushes are performed holding this lock *) diff --git a/ocaml/database/db_ref.ml b/ocaml/database/db_ref.ml index ab7861e3327..f8905693b8e 100644 --- a/ocaml/database/db_ref.ml +++ b/ocaml/database/db_ref.ml @@ -12,21 +12,21 @@ * GNU Lesser General Public License for more details. 
*) -type t = - | In_memory of Db_cache_types.Database.t ref ref - | Remote +type t = + | In_memory of Db_cache_types.Database.t ref ref + | Remote exception Database_not_in_memory let in_memory (rf: Db_cache_types.Database.t ref ref) = In_memory rf let get_database = function - | In_memory x -> !(!(x)) - | Remote -> raise Database_not_in_memory + | In_memory x -> !(!(x)) + | Remote -> raise Database_not_in_memory let update_database t f = match t with - | In_memory x -> - let d : Db_cache_types.Database.t = f (get_database t) in - (!(x)) := d - | Remote -> raise Database_not_in_memory + | In_memory x -> + let d : Db_cache_types.Database.t = f (get_database t) in + (!(x)) := d + | Remote -> raise Database_not_in_memory diff --git a/ocaml/database/db_remote_cache_access_v1.ml b/ocaml/database/db_remote_cache_access_v1.ml index bf26d57ad34..6d9d9817c38 100644 --- a/ocaml/database/db_remote_cache_access_v1.ml +++ b/ocaml/database/db_remote_cache_access_v1.ml @@ -1,132 +1,132 @@ open Stdext.Threadext - + module DBCacheRemoteListener = struct - open Db_rpc_common_v1 - open Db_action_helper - open Db_cache - open Db_exn - - exception DBCacheListenerInvalidMessageReceived - exception DBCacheListenerUnknownMessageName of string - - module D = Debug.Make(struct let name = "db_server" end) - open D - - let ctr_mutex = Mutex.create() - let calls_processed = ref 0 - let total_recv_len = ref 0 - let total_transmit_len = ref 0 - - (* Performance counters for debugging *) - let update_lengths msg resp = - Mutex.lock ctr_mutex; - total_transmit_len := (!total_transmit_len) + (String.length (Xml.to_string_fmt resp)); - total_recv_len := (!total_recv_len) + (String.length (Xml.to_string_fmt msg)); - Mutex.unlock ctr_mutex - - let success xml = - let resp = - XMLRPC.To.array - [XMLRPC.To.string "success"; - xml] in - (* update_lengths xml resp; *) - (* let s = Xml.to_string_fmt resp in *) - (* debug "Resp [Len = %d]: %s" (String.length s) s; *) - resp - - let failure exn_name xml = - 
let resp = - XMLRPC.To.array - [XMLRPC.To.string "failure"; - XMLRPC.To.array - [XMLRPC.To.string exn_name; - xml]] in - (* update_lengths xml resp; *) - resp - - module DBCache : Db_interface.DB_ACCESS = Db_cache_impl - - (** Unmarshals the request, calls the DBCache function and marshals the result. - Note that, although the messages still contain the pool_secret for historical reasons, - access has already been applied by the RBAC code in Xapi_http.add_handler. *) - let process_xmlrpc xml = - Mutex.execute ctr_mutex - (fun () -> calls_processed := !calls_processed + 1); + open Db_rpc_common_v1 + open Db_action_helper + open Db_cache + open Db_exn + + exception DBCacheListenerInvalidMessageReceived + exception DBCacheListenerUnknownMessageName of string + + module D = Debug.Make(struct let name = "db_server" end) + open D + + let ctr_mutex = Mutex.create() + let calls_processed = ref 0 + let total_recv_len = ref 0 + let total_transmit_len = ref 0 + + (* Performance counters for debugging *) + let update_lengths msg resp = + Mutex.lock ctr_mutex; + total_transmit_len := (!total_transmit_len) + (String.length (Xml.to_string_fmt resp)); + total_recv_len := (!total_recv_len) + (String.length (Xml.to_string_fmt msg)); + Mutex.unlock ctr_mutex + + let success xml = + let resp = + XMLRPC.To.array + [XMLRPC.To.string "success"; + xml] in + (* update_lengths xml resp; *) + (* let s = Xml.to_string_fmt resp in *) + (* debug "Resp [Len = %d]: %s" (String.length s) s; *) + resp + + let failure exn_name xml = + let resp = + XMLRPC.To.array + [XMLRPC.To.string "failure"; + XMLRPC.To.array + [XMLRPC.To.string exn_name; + xml]] in + (* update_lengths xml resp; *) + resp + + module DBCache : Db_interface.DB_ACCESS = Db_cache_impl + + (** Unmarshals the request, calls the DBCache function and marshals the result. + Note that, although the messages still contain the pool_secret for historical reasons, + access has already been applied by the RBAC code in Xapi_http.add_handler. 
*) + let process_xmlrpc xml = + Mutex.execute ctr_mutex + (fun () -> calls_processed := !calls_processed + 1); - let fn_name, args = - match (XMLRPC.From.array (fun x->x) xml) with - [fn_name; _; args] -> - XMLRPC.From.string fn_name, args - | _ -> raise DBCacheListenerInvalidMessageReceived in - let t = Db_backend.make () in - try - match fn_name with - "get_table_from_ref" -> - let s = unmarshall_get_table_from_ref_args args in - success (marshall_get_table_from_ref_response (DBCache.get_table_from_ref t s)) - | "is_valid_ref" -> - let s = unmarshall_is_valid_ref_args args in - success (marshall_is_valid_ref_response (DBCache.is_valid_ref t s)) - | "read_refs" -> - let s = unmarshall_read_refs_args args in - success (marshall_read_refs_response (DBCache.read_refs t s)) - | "read_field_where" -> - let w = unmarshall_read_field_where_args args in - success (marshall_read_field_where_response (DBCache.read_field_where t w)) - | "read_set_ref" -> - let w = unmarshall_read_set_ref_args args in - success (marshall_read_set_ref_response (DBCache.read_field_where t w)) - | "create_row" -> - let (s1,ssl,s2) = unmarshall_create_row_args args in - success (marshall_create_row_response (DBCache.create_row t s1 ssl s2)) - | "delete_row" -> - let (s1,s2) = unmarshall_delete_row_args args in - success (marshall_delete_row_response (DBCache.delete_row t s1 s2)) - | "write_field" -> - let (s1,s2,s3,s4) = unmarshall_write_field_args args in - success (marshall_write_field_response (DBCache.write_field t s1 s2 s3 s4)) - | "read_field" -> - let (s1,s2,s3) = unmarshall_read_field_args args in - success (marshall_read_field_response (DBCache.read_field t s1 s2 s3)) - | "find_refs_with_filter" -> - let (s,e) = unmarshall_find_refs_with_filter_args args in - success (marshall_find_refs_with_filter_response (DBCache.find_refs_with_filter t s e)) - | "process_structured_field" -> - let (ss,s1,s2,s3,op) = unmarshall_process_structured_field_args args in - success 
(marshall_process_structured_field_response (DBCache.process_structured_field t ss s1 s2 s3 op)) - | "read_record" -> - let (s1,s2) = unmarshall_read_record_args args in - success (marshall_read_record_response (DBCache.read_record t s1 s2)) - | "read_records_where" -> - let (s,e) = unmarshall_read_records_where_args args in - success (marshall_read_records_where_response (DBCache.read_records_where t s e)) - | "db_get_by_uuid" -> - let (s,e) = unmarshall_db_get_by_uuid_args args in - success (marshall_db_get_by_uuid_response (DBCache.db_get_by_uuid t s e)) - | "db_get_by_name_label" -> - let (s,e) = unmarshall_db_get_by_name_label_args args in - success (marshall_db_get_by_name_label_response (DBCache.db_get_by_name_label t s e)) - | _ -> raise (DBCacheListenerUnknownMessageName fn_name) - with - Duplicate_key (c,f,u,k) -> - failure "duplicate_key_of" (marshall_4strings (c,f,u,k)) - | DBCache_NotFound (s1,s2,s3) -> - failure "dbcache_notfound" (marshall_3strings (s1,s2,s3)) - | Uniqueness_constraint_violation (s1,s2,s3) -> - failure "uniqueness_constraint_violation" (marshall_3strings (s1,s2,s3)) - | Read_missing_uuid (s1,s2,s3) -> - failure "read_missing_uuid" (marshall_3strings (s1,s2,s3)) - | Too_many_values (s1,s2,s3) -> - failure "too_many_values" (marshall_3strings (s1,s2,s3)) - | e -> raise e + let fn_name, args = + match (XMLRPC.From.array (fun x->x) xml) with + [fn_name; _; args] -> + XMLRPC.From.string fn_name, args + | _ -> raise DBCacheListenerInvalidMessageReceived in + let t = Db_backend.make () in + try + match fn_name with + "get_table_from_ref" -> + let s = unmarshall_get_table_from_ref_args args in + success (marshall_get_table_from_ref_response (DBCache.get_table_from_ref t s)) + | "is_valid_ref" -> + let s = unmarshall_is_valid_ref_args args in + success (marshall_is_valid_ref_response (DBCache.is_valid_ref t s)) + | "read_refs" -> + let s = unmarshall_read_refs_args args in + success (marshall_read_refs_response (DBCache.read_refs t s)) + | 
"read_field_where" -> + let w = unmarshall_read_field_where_args args in + success (marshall_read_field_where_response (DBCache.read_field_where t w)) + | "read_set_ref" -> + let w = unmarshall_read_set_ref_args args in + success (marshall_read_set_ref_response (DBCache.read_field_where t w)) + | "create_row" -> + let (s1,ssl,s2) = unmarshall_create_row_args args in + success (marshall_create_row_response (DBCache.create_row t s1 ssl s2)) + | "delete_row" -> + let (s1,s2) = unmarshall_delete_row_args args in + success (marshall_delete_row_response (DBCache.delete_row t s1 s2)) + | "write_field" -> + let (s1,s2,s3,s4) = unmarshall_write_field_args args in + success (marshall_write_field_response (DBCache.write_field t s1 s2 s3 s4)) + | "read_field" -> + let (s1,s2,s3) = unmarshall_read_field_args args in + success (marshall_read_field_response (DBCache.read_field t s1 s2 s3)) + | "find_refs_with_filter" -> + let (s,e) = unmarshall_find_refs_with_filter_args args in + success (marshall_find_refs_with_filter_response (DBCache.find_refs_with_filter t s e)) + | "process_structured_field" -> + let (ss,s1,s2,s3,op) = unmarshall_process_structured_field_args args in + success (marshall_process_structured_field_response (DBCache.process_structured_field t ss s1 s2 s3 op)) + | "read_record" -> + let (s1,s2) = unmarshall_read_record_args args in + success (marshall_read_record_response (DBCache.read_record t s1 s2)) + | "read_records_where" -> + let (s,e) = unmarshall_read_records_where_args args in + success (marshall_read_records_where_response (DBCache.read_records_where t s e)) + | "db_get_by_uuid" -> + let (s,e) = unmarshall_db_get_by_uuid_args args in + success (marshall_db_get_by_uuid_response (DBCache.db_get_by_uuid t s e)) + | "db_get_by_name_label" -> + let (s,e) = unmarshall_db_get_by_name_label_args args in + success (marshall_db_get_by_name_label_response (DBCache.db_get_by_name_label t s e)) + | _ -> raise (DBCacheListenerUnknownMessageName fn_name) + with + 
Duplicate_key (c,f,u,k) -> + failure "duplicate_key_of" (marshall_4strings (c,f,u,k)) + | DBCache_NotFound (s1,s2,s3) -> + failure "dbcache_notfound" (marshall_3strings (s1,s2,s3)) + | Uniqueness_constraint_violation (s1,s2,s3) -> + failure "uniqueness_constraint_violation" (marshall_3strings (s1,s2,s3)) + | Read_missing_uuid (s1,s2,s3) -> + failure "read_missing_uuid" (marshall_3strings (s1,s2,s3)) + | Too_many_values (s1,s2,s3) -> + failure "too_many_values" (marshall_3strings (s1,s2,s3)) + | e -> raise e end let handler req bio _ = - let fd = Buf_io.fd_of bio in (* fd only used for writing *) - let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in - let body_xml = Xml.parse_string body in - let reply_xml = DBCacheRemoteListener.process_xmlrpc body_xml in - let response = Xml.to_bigbuffer reply_xml in - Http_svr.response_fct req fd (Stdext.Bigbuffer.length response) - (fun fd -> Stdext.Bigbuffer.to_fct response (fun s -> ignore(Unix.write fd s 0 (String.length s)))) + let fd = Buf_io.fd_of bio in (* fd only used for writing *) + let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in + let body_xml = Xml.parse_string body in + let reply_xml = DBCacheRemoteListener.process_xmlrpc body_xml in + let response = Xml.to_bigbuffer reply_xml in + Http_svr.response_fct req fd (Stdext.Bigbuffer.length response) + (fun fd -> Stdext.Bigbuffer.to_fct response (fun s -> ignore(Unix.write fd s 0 (String.length s)))) diff --git a/ocaml/database/db_remote_cache_access_v2.ml b/ocaml/database/db_remote_cache_access_v2.ml index 37e30749d93..836b59d0a1d 100644 --- a/ocaml/database/db_remote_cache_access_v2.ml +++ b/ocaml/database/db_remote_cache_access_v2.ml @@ -18,63 +18,63 @@ open Db_rpc_common_v2 open Db_exn (** Convert a marshalled Request Rpc.t into a marshalled Response Rpc.t *) -let process_rpc (req: Rpc.t) = - let module DB = (Db_cache_impl : Db_interface.DB_ACCESS) in - let t = Db_backend.make () in - Response.rpc_of_t 
- (try - match Request.t_of_rpc req with - | Request.Get_table_from_ref x -> - Response.Get_table_from_ref (DB.get_table_from_ref t x) - | Request.Is_valid_ref x -> - Response.Is_valid_ref (DB.is_valid_ref t x) - | Request.Read_refs x -> - Response.Read_refs (DB.read_refs t x) - | Request.Find_refs_with_filter (x, e) -> - Response.Find_refs_with_filter (DB.find_refs_with_filter t x e) - | Request.Read_field_where w -> - Response.Read_field_where (DB.read_field_where t w) - | Request.Db_get_by_uuid (a, b) -> - Response.Db_get_by_uuid (DB.db_get_by_uuid t a b) - | Request.Db_get_by_name_label (a, b) -> - Response.Db_get_by_name_label (DB.db_get_by_name_label t a b) - | Request.Read_set_ref w -> - Response.Read_set_ref (DB.read_set_ref t w) - | Request.Create_row (a, b, c) -> - Response.Create_row (DB.create_row t a b c) - | Request.Delete_row (a, b) -> - Response.Delete_row (DB.delete_row t a b) - | Request.Write_field (a, b, c, d) -> - Response.Write_field (DB.write_field t a b c d) - | Request.Read_field (a, b, c) -> - Response.Read_field (DB.read_field t a b c) - | Request.Read_record (a, b) -> - let a', b' = DB.read_record t a b in - Response.Read_record (a', b') - | Request.Read_records_where (a, b) -> - Response.Read_records_where (DB.read_records_where t a b) - | Request.Process_structured_field (a, b, c, d, e) -> - Response.Process_structured_field (DB.process_structured_field t a b c d e) - with - | DBCache_NotFound (x,y,z) -> - Response.Dbcache_notfound (x, y, z) - | Duplicate_key (w,x,y,z) -> - Response.Duplicate_key_of (w, x, y, z) - | Uniqueness_constraint_violation (x,y,z) -> - Response.Uniqueness_constraint_violation (x, y, z) - | Read_missing_uuid (x,y,z) -> - Response.Read_missing_uuid (x, y, z) - | Too_many_values (x,y,z) -> - Response.Too_many_values (x, y, z) +let process_rpc (req: Rpc.t) = + let module DB = (Db_cache_impl : Db_interface.DB_ACCESS) in + let t = Db_backend.make () in + Response.rpc_of_t + (try + match Request.t_of_rpc req with + | 
Request.Get_table_from_ref x -> + Response.Get_table_from_ref (DB.get_table_from_ref t x) + | Request.Is_valid_ref x -> + Response.Is_valid_ref (DB.is_valid_ref t x) + | Request.Read_refs x -> + Response.Read_refs (DB.read_refs t x) + | Request.Find_refs_with_filter (x, e) -> + Response.Find_refs_with_filter (DB.find_refs_with_filter t x e) + | Request.Read_field_where w -> + Response.Read_field_where (DB.read_field_where t w) + | Request.Db_get_by_uuid (a, b) -> + Response.Db_get_by_uuid (DB.db_get_by_uuid t a b) + | Request.Db_get_by_name_label (a, b) -> + Response.Db_get_by_name_label (DB.db_get_by_name_label t a b) + | Request.Read_set_ref w -> + Response.Read_set_ref (DB.read_set_ref t w) + | Request.Create_row (a, b, c) -> + Response.Create_row (DB.create_row t a b c) + | Request.Delete_row (a, b) -> + Response.Delete_row (DB.delete_row t a b) + | Request.Write_field (a, b, c, d) -> + Response.Write_field (DB.write_field t a b c d) + | Request.Read_field (a, b, c) -> + Response.Read_field (DB.read_field t a b c) + | Request.Read_record (a, b) -> + let a', b' = DB.read_record t a b in + Response.Read_record (a', b') + | Request.Read_records_where (a, b) -> + Response.Read_records_where (DB.read_records_where t a b) + | Request.Process_structured_field (a, b, c, d, e) -> + Response.Process_structured_field (DB.process_structured_field t a b c d e) + with + | DBCache_NotFound (x,y,z) -> + Response.Dbcache_notfound (x, y, z) + | Duplicate_key (w,x,y,z) -> + Response.Duplicate_key_of (w, x, y, z) + | Uniqueness_constraint_violation (x,y,z) -> + Response.Uniqueness_constraint_violation (x, y, z) + | Read_missing_uuid (x,y,z) -> + Response.Read_missing_uuid (x, y, z) + | Too_many_values (x,y,z) -> + Response.Too_many_values (x, y, z) + + ) - ) - let handler req bio _ = - let fd = Buf_io.fd_of bio in (* fd only used for writing *) - let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in - let request_rpc = Jsonrpc.of_string body in - let 
reply_rpc = process_rpc request_rpc in - (* XXX: need to cope with > 16MiB responses *) - let response = Jsonrpc.to_string reply_rpc in - Http_svr.response_str req fd response + let fd = Buf_io.fd_of bio in (* fd only used for writing *) + let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in + let request_rpc = Jsonrpc.of_string body in + let reply_rpc = process_rpc request_rpc in + (* XXX: need to cope with > 16MiB responses *) + let response = Jsonrpc.to_string reply_rpc in + Http_svr.response_str req fd response diff --git a/ocaml/database/db_rpc_client_v1.ml b/ocaml/database/db_rpc_client_v1.ml index 4da61a3cd5d..6a28119d1e9 100644 --- a/ocaml/database/db_rpc_client_v1.ml +++ b/ocaml/database/db_rpc_client_v1.ml @@ -16,157 +16,157 @@ open Db_rpc_common_v1 open Db_exn module Make = functor(RPC: Db_interface.RPC) -> struct - exception Remote_db_server_returned_unknown_exception - - (* Process an exception returned from server, throwing local exception *) - let process_exception_xml xml = - match XMLRPC.From.array (fun x->x) xml with - [exn_name_xml; exn_params_xml] -> - let exn_name = XMLRPC.From.string exn_name_xml in - begin - match exn_name with - | "dbcache_notfound" -> - let (x,y,z) = unmarshall_3strings exn_params_xml in - raise (DBCache_NotFound (x,y,z)) - | "duplicate_key_of" -> - let (w,x,y,z) = unmarshall_4strings exn_params_xml in - raise (Duplicate_key (w,x,y,z)) - | "uniqueness_constraint_violation" -> - let (x,y,z) = unmarshall_3strings exn_params_xml in - raise (Uniqueness_constraint_violation (x,y,z)) - | "read_missing_uuid" -> - let (x,y,z) = unmarshall_3strings exn_params_xml in - raise (Read_missing_uuid (x,y,z)) - | "too_many_values" -> - let (x,y,z) = unmarshall_3strings exn_params_xml in - raise (Too_many_values (x,y,z)) - | _ -> raise DB_remote_marshall_error - end - | _ -> raise Remote_db_server_returned_unknown_exception - - - exception Remote_db_server_returned_bad_message - let do_remote_call marshall_args 
unmarshall_resp fn_name args = - let xml = marshall_args args in - let xml = XMLRPC.To.array [XMLRPC.To.string fn_name; XMLRPC.To.string "" (* unused *); xml] in - let resp = match RPC.rpc (Xml.to_string xml) with - | Db_interface.String s -> Xml.parse_string s - | Db_interface.Bigbuf b -> Xml.parse_bigbuffer b - in - match XMLRPC.From.array (fun x->x) resp with - [status_xml; resp_xml] -> - let status = XMLRPC.From.string status_xml in - if status="success" then unmarshall_resp resp_xml - else process_exception_xml resp_xml - | _ -> raise Remote_db_server_returned_bad_message - - let get_table_from_ref _ x = - do_remote_call - marshall_get_table_from_ref_args - unmarshall_get_table_from_ref_response - "get_table_from_ref" - x - - let is_valid_ref _ x = - do_remote_call - marshall_is_valid_ref_args - unmarshall_is_valid_ref_response - "is_valid_ref" - x - - let read_refs _ x = - do_remote_call - marshall_read_refs_args - unmarshall_read_refs_response - "read_refs" - x - - let read_field_where _ x = - do_remote_call - marshall_read_field_where_args - unmarshall_read_field_where_response - "read_field_where" - x - - - let db_get_by_uuid _ t u = - do_remote_call - marshall_db_get_by_uuid_args - unmarshall_db_get_by_uuid_response - "db_get_by_uuid" - (t,u) - - let db_get_by_name_label _ t l = - do_remote_call - marshall_db_get_by_name_label_args - unmarshall_db_get_by_name_label_response - "db_get_by_name_label" - (t,l) - - let read_set_ref _ x = - do_remote_call - marshall_read_set_ref_args - unmarshall_read_set_ref_response - "read_set_ref" - x - - - let create_row _ x y z = - do_remote_call - marshall_create_row_args - unmarshall_create_row_response - "create_row" - (x,y,z) - - let delete_row _ x y = - do_remote_call - marshall_delete_row_args - unmarshall_delete_row_response - "delete_row" - (x,y) - - let write_field _ a b c d = - do_remote_call - marshall_write_field_args - unmarshall_write_field_response - "write_field" - (a,b,c,d) - - let read_field _ x y z = - 
do_remote_call - marshall_read_field_args - unmarshall_read_field_response - "read_field" - (x,y,z) - - let find_refs_with_filter _ s e = - do_remote_call - marshall_find_refs_with_filter_args - unmarshall_find_refs_with_filter_response - "find_refs_with_filter" - (s,e) - - let read_record _ x y = - do_remote_call - marshall_read_record_args - unmarshall_read_record_response - "read_record" - (x,y) - - let read_records_where _ x e = - do_remote_call - marshall_read_records_where_args - unmarshall_read_records_where_response - "read_records_where" - (x,e) - - let process_structured_field _ a b c d e = - do_remote_call - marshall_process_structured_field_args - unmarshall_process_structured_field_response - "process_structured_field" - (a,b,c,d,e) - - let initialise = RPC.initialise + exception Remote_db_server_returned_unknown_exception + + (* Process an exception returned from server, throwing local exception *) + let process_exception_xml xml = + match XMLRPC.From.array (fun x->x) xml with + [exn_name_xml; exn_params_xml] -> + let exn_name = XMLRPC.From.string exn_name_xml in + begin + match exn_name with + | "dbcache_notfound" -> + let (x,y,z) = unmarshall_3strings exn_params_xml in + raise (DBCache_NotFound (x,y,z)) + | "duplicate_key_of" -> + let (w,x,y,z) = unmarshall_4strings exn_params_xml in + raise (Duplicate_key (w,x,y,z)) + | "uniqueness_constraint_violation" -> + let (x,y,z) = unmarshall_3strings exn_params_xml in + raise (Uniqueness_constraint_violation (x,y,z)) + | "read_missing_uuid" -> + let (x,y,z) = unmarshall_3strings exn_params_xml in + raise (Read_missing_uuid (x,y,z)) + | "too_many_values" -> + let (x,y,z) = unmarshall_3strings exn_params_xml in + raise (Too_many_values (x,y,z)) + | _ -> raise DB_remote_marshall_error + end + | _ -> raise Remote_db_server_returned_unknown_exception + + + exception Remote_db_server_returned_bad_message + let do_remote_call marshall_args unmarshall_resp fn_name args = + let xml = marshall_args args in + let xml 
= XMLRPC.To.array [XMLRPC.To.string fn_name; XMLRPC.To.string "" (* unused *); xml] in + let resp = match RPC.rpc (Xml.to_string xml) with + | Db_interface.String s -> Xml.parse_string s + | Db_interface.Bigbuf b -> Xml.parse_bigbuffer b + in + match XMLRPC.From.array (fun x->x) resp with + [status_xml; resp_xml] -> + let status = XMLRPC.From.string status_xml in + if status="success" then unmarshall_resp resp_xml + else process_exception_xml resp_xml + | _ -> raise Remote_db_server_returned_bad_message + + let get_table_from_ref _ x = + do_remote_call + marshall_get_table_from_ref_args + unmarshall_get_table_from_ref_response + "get_table_from_ref" + x + + let is_valid_ref _ x = + do_remote_call + marshall_is_valid_ref_args + unmarshall_is_valid_ref_response + "is_valid_ref" + x + + let read_refs _ x = + do_remote_call + marshall_read_refs_args + unmarshall_read_refs_response + "read_refs" + x + + let read_field_where _ x = + do_remote_call + marshall_read_field_where_args + unmarshall_read_field_where_response + "read_field_where" + x + + + let db_get_by_uuid _ t u = + do_remote_call + marshall_db_get_by_uuid_args + unmarshall_db_get_by_uuid_response + "db_get_by_uuid" + (t,u) + + let db_get_by_name_label _ t l = + do_remote_call + marshall_db_get_by_name_label_args + unmarshall_db_get_by_name_label_response + "db_get_by_name_label" + (t,l) + + let read_set_ref _ x = + do_remote_call + marshall_read_set_ref_args + unmarshall_read_set_ref_response + "read_set_ref" + x + + + let create_row _ x y z = + do_remote_call + marshall_create_row_args + unmarshall_create_row_response + "create_row" + (x,y,z) + + let delete_row _ x y = + do_remote_call + marshall_delete_row_args + unmarshall_delete_row_response + "delete_row" + (x,y) + + let write_field _ a b c d = + do_remote_call + marshall_write_field_args + unmarshall_write_field_response + "write_field" + (a,b,c,d) + + let read_field _ x y z = + do_remote_call + marshall_read_field_args + unmarshall_read_field_response 
+ "read_field" + (x,y,z) + + let find_refs_with_filter _ s e = + do_remote_call + marshall_find_refs_with_filter_args + unmarshall_find_refs_with_filter_response + "find_refs_with_filter" + (s,e) + + let read_record _ x y = + do_remote_call + marshall_read_record_args + unmarshall_read_record_response + "read_record" + (x,y) + + let read_records_where _ x e = + do_remote_call + marshall_read_records_where_args + unmarshall_read_records_where_response + "read_records_where" + (x,e) + + let process_structured_field _ a b c d e = + do_remote_call + marshall_process_structured_field_args + unmarshall_process_structured_field_response + "process_structured_field" + (a,b,c,d,e) + + let initialise = RPC.initialise end diff --git a/ocaml/database/db_rpc_client_v2.ml b/ocaml/database/db_rpc_client_v2.ml index d332a9968d3..3dc5dc16e89 100644 --- a/ocaml/database/db_rpc_client_v2.ml +++ b/ocaml/database/db_rpc_client_v2.ml @@ -18,99 +18,99 @@ open Db_rpc_common_v2 open Db_exn module Make = functor(RPC: Db_interface.RPC) -> struct - let initialise = RPC.initialise - let rpc x = - match RPC.rpc (Jsonrpc.to_string x) with - | Db_interface.String s -> Jsonrpc.of_string s - | Db_interface.Bigbuf b -> raise (Failure "Response too large - cannot convert bigbuffer to json!") - - let process (x: Request.t) = - let y : Response.t = Response.t_of_rpc (rpc (Request.rpc_of_t x)) in - match y with - | Response.Dbcache_notfound (x, y, z) -> - raise (DBCache_NotFound (x,y,z)) - | Response.Duplicate_key_of (w, x, y, z) -> - raise (Duplicate_key (w,x,y,z)) - | Response.Uniqueness_constraint_violation (x, y, z) -> - raise (Uniqueness_constraint_violation (x,y,z)) - | Response.Read_missing_uuid (x, y, z) -> - raise (Read_missing_uuid (x,y,z)) - | Response.Too_many_values (x, y, z) -> - raise (Too_many_values (x,y,z)) - | y -> y - - let get_table_from_ref _ x = - match process (Request.Get_table_from_ref x) with - | Response.Get_table_from_ref y -> y - | _ -> raise 
Remote_db_server_returned_bad_message - - let is_valid_ref _ x = - match process (Request.Is_valid_ref x) with - | Response.Is_valid_ref y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let read_refs _ x = - match process (Request.Read_refs x) with - | Response.Read_refs y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let read_field_where _ x = - match process (Request.Read_field_where x) with - | Response.Read_field_where y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let db_get_by_uuid _ t u = - match process (Request.Db_get_by_uuid (t, u)) with - | Response.Db_get_by_uuid y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let db_get_by_name_label _ t l = - match process (Request.Db_get_by_name_label (t, l)) with - | Response.Db_get_by_name_label y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let read_set_ref _ x = - match process (Request.Read_set_ref x) with - | Response.Read_set_ref y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let create_row _ x y z = - match process (Request.Create_row (x, y, z)) with - | Response.Create_row y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let delete_row _ x y = - match process (Request.Delete_row (x, y)) with - | Response.Delete_row y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let write_field _ a b c d = - match process (Request.Write_field (a, b, c, d)) with - | Response.Write_field y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let read_field _ x y z = - match process (Request.Read_field (x, y, z)) with - | Response.Read_field y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let find_refs_with_filter _ s e = - match process (Request.Find_refs_with_filter (s, e)) with - | Response.Find_refs_with_filter y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let read_record _ x y = - match process (Request.Read_record (x, y)) with - | Response.Read_record 
(x, y) -> x, y - | _ -> raise Remote_db_server_returned_bad_message - - let read_records_where _ x e = - match process (Request.Read_records_where (x, e)) with - | Response.Read_records_where y -> y - | _ -> raise Remote_db_server_returned_bad_message - - let process_structured_field _ a b c d e = - match process (Request.Process_structured_field(a, b, c, d, e)) with - | Response.Process_structured_field y -> y - | _ -> raise Remote_db_server_returned_bad_message + let initialise = RPC.initialise + let rpc x = + match RPC.rpc (Jsonrpc.to_string x) with + | Db_interface.String s -> Jsonrpc.of_string s + | Db_interface.Bigbuf b -> raise (Failure "Response too large - cannot convert bigbuffer to json!") + + let process (x: Request.t) = + let y : Response.t = Response.t_of_rpc (rpc (Request.rpc_of_t x)) in + match y with + | Response.Dbcache_notfound (x, y, z) -> + raise (DBCache_NotFound (x,y,z)) + | Response.Duplicate_key_of (w, x, y, z) -> + raise (Duplicate_key (w,x,y,z)) + | Response.Uniqueness_constraint_violation (x, y, z) -> + raise (Uniqueness_constraint_violation (x,y,z)) + | Response.Read_missing_uuid (x, y, z) -> + raise (Read_missing_uuid (x,y,z)) + | Response.Too_many_values (x, y, z) -> + raise (Too_many_values (x,y,z)) + | y -> y + + let get_table_from_ref _ x = + match process (Request.Get_table_from_ref x) with + | Response.Get_table_from_ref y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let is_valid_ref _ x = + match process (Request.Is_valid_ref x) with + | Response.Is_valid_ref y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let read_refs _ x = + match process (Request.Read_refs x) with + | Response.Read_refs y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let read_field_where _ x = + match process (Request.Read_field_where x) with + | Response.Read_field_where y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let db_get_by_uuid _ t u = + match process (Request.Db_get_by_uuid (t, u)) 
with + | Response.Db_get_by_uuid y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let db_get_by_name_label _ t l = + match process (Request.Db_get_by_name_label (t, l)) with + | Response.Db_get_by_name_label y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let read_set_ref _ x = + match process (Request.Read_set_ref x) with + | Response.Read_set_ref y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let create_row _ x y z = + match process (Request.Create_row (x, y, z)) with + | Response.Create_row y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let delete_row _ x y = + match process (Request.Delete_row (x, y)) with + | Response.Delete_row y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let write_field _ a b c d = + match process (Request.Write_field (a, b, c, d)) with + | Response.Write_field y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let read_field _ x y z = + match process (Request.Read_field (x, y, z)) with + | Response.Read_field y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let find_refs_with_filter _ s e = + match process (Request.Find_refs_with_filter (s, e)) with + | Response.Find_refs_with_filter y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let read_record _ x y = + match process (Request.Read_record (x, y)) with + | Response.Read_record (x, y) -> x, y + | _ -> raise Remote_db_server_returned_bad_message + + let read_records_where _ x e = + match process (Request.Read_records_where (x, e)) with + | Response.Read_records_where y -> y + | _ -> raise Remote_db_server_returned_bad_message + + let process_structured_field _ a b c d e = + match process (Request.Process_structured_field(a, b, c, d, e)) with + | Response.Process_structured_field y -> y + | _ -> raise Remote_db_server_returned_bad_message end diff --git a/ocaml/database/db_rpc_common_v1.ml b/ocaml/database/db_rpc_common_v1.ml index 0a9cb98e062..ce649775009 100644 --- 
a/ocaml/database/db_rpc_common_v1.ml +++ b/ocaml/database/db_rpc_common_v1.ml @@ -15,300 +15,300 @@ (** Marshall/unmarshall functions for relevant types to XMLRPC *) open Db_cache_types - + exception DB_remote_marshall_error - + let marshall_2strings (x,y) = - XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y] + XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y] let unmarshall_2strings xml = - match (XMLRPC.From.array (fun x->x) xml) with - [x1;x2] -> - (XMLRPC.From.string x1, - XMLRPC.From.string x2) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [x1;x2] -> + (XMLRPC.From.string x1, + XMLRPC.From.string x2) + | _ -> raise DB_remote_marshall_error + let marshall_4strings (x,y,w,z) = - XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y; XMLRPC.To.string w; XMLRPC.To.string z] + XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y; XMLRPC.To.string w; XMLRPC.To.string z] let unmarshall_4strings xml = - match (XMLRPC.From.array (fun x->x) xml) with - [x1;x2;x3;x4] -> - (XMLRPC.From.string x1, - XMLRPC.From.string x2, - XMLRPC.From.string x3, - XMLRPC.From.string x4) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [x1;x2;x3;x4] -> + (XMLRPC.From.string x1, + XMLRPC.From.string x2, + XMLRPC.From.string x3, + XMLRPC.From.string x4) + | _ -> raise DB_remote_marshall_error + let marshall_3strings (x,y,w) = - XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y; XMLRPC.To.string w] + XMLRPC.To.array [XMLRPC.To.string x; XMLRPC.To.string y; XMLRPC.To.string w] let unmarshall_3strings xml = - match (XMLRPC.From.array (fun x->x) xml) with - [x1;x2;x3] -> - (XMLRPC.From.string x1, - XMLRPC.From.string x2, - XMLRPC.From.string x3) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [x1;x2;x3] -> + (XMLRPC.From.string x1, + XMLRPC.From.string x2, + XMLRPC.From.string x3) + | _ -> raise DB_remote_marshall_error + let 
marshall_stringlist sl = - XMLRPC.To.array (List.map XMLRPC.To.string sl) + XMLRPC.To.array (List.map XMLRPC.To.string sl) let unmarshall_stringlist xml = - List.map XMLRPC.From.string (XMLRPC.From.array (fun x->x) xml) - + List.map XMLRPC.From.string (XMLRPC.From.array (fun x->x) xml) + let marshall_stringstringlist ssl = - XMLRPC.To.array (List.map marshall_2strings ssl) + XMLRPC.To.array (List.map marshall_2strings ssl) let unmarshall_stringstringlist xml = - List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) xml) - + List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) xml) + let marshall_stringopt x = - match x with - None -> XMLRPC.To.array [] - | (Some x) -> XMLRPC.To.array [XMLRPC.To.string x] + match x with + None -> XMLRPC.To.array [] + | (Some x) -> XMLRPC.To.array [XMLRPC.To.string x] let unmarshall_stringopt xml = - match (XMLRPC.From.array (fun x->x) xml) with - [] -> None - | [xml] -> Some (XMLRPC.From.string xml) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [] -> None + | [xml] -> Some (XMLRPC.From.string xml) + | _ -> raise DB_remote_marshall_error + let marshall_expr x = - Db_filter.xml_of_expr x + Db_filter.xml_of_expr x let unmarshall_expr xml = - Db_filter.expr_of_xml xml - + Db_filter.expr_of_xml xml + let marshall_structured_op x = - let str = - match x with - AddSet -> "addset" - | RemoveSet -> "removeset" - | AddMap -> "addmap" - | RemoveMap -> "removemap" in - XMLRPC.To.string str + let str = + match x with + AddSet -> "addset" + | RemoveSet -> "removeset" + | AddMap -> "addmap" + | RemoveMap -> "removemap" in + XMLRPC.To.string str let unmarshall_structured_op xml = - match (XMLRPC.From.string xml) with - "addset" -> AddSet - | "removeset" -> RemoveSet - | "addmap" -> AddMap - | "removemap" -> RemoveMap - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.string xml) with + "addset" -> AddSet + | "removeset" -> RemoveSet + | "addmap" -> AddMap + | "removemap" -> 
RemoveMap + | _ -> raise DB_remote_marshall_error + let marshall_where_rec r = - XMLRPC.To.array [XMLRPC.To.string r.table; - XMLRPC.To.string r.return; - XMLRPC.To.string r.where_field; - XMLRPC.To.string r.where_value] + XMLRPC.To.array [XMLRPC.To.string r.table; + XMLRPC.To.string r.return; + XMLRPC.To.string r.where_field; + XMLRPC.To.string r.where_value] let unmarshall_where_rec xml = - match (XMLRPC.From.array (fun x->x) xml) with - [t;r;wf;wv] -> - {table=XMLRPC.From.string t; return=XMLRPC.From.string r; - where_field=XMLRPC.From.string wf; where_value=XMLRPC.From.string wv} - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [t;r;wf;wv] -> + {table=XMLRPC.From.string t; return=XMLRPC.From.string r; + where_field=XMLRPC.From.string wf; where_value=XMLRPC.From.string wv} + | _ -> raise DB_remote_marshall_error + let marshall_unit () = - XMLRPC.To.string "" + XMLRPC.To.string "" let unmarshall_unit xml = - match XMLRPC.From.string xml with - "" -> () - | _ -> raise DB_remote_marshall_error - + match XMLRPC.From.string xml with + "" -> () + | _ -> raise DB_remote_marshall_error + (* get_table_from_ref *) let marshall_get_table_from_ref_args s = XMLRPC.To.string s let unmarshall_get_table_from_ref_args xml = XMLRPC.From.string xml let marshall_get_table_from_ref_response so = marshall_stringopt so let unmarshall_get_table_from_ref_response so = unmarshall_stringopt so - + (* is_valid_ref *) let marshall_is_valid_ref_args s = XMLRPC.To.string s let unmarshall_is_valid_ref_args xml = XMLRPC.From.string xml let marshall_is_valid_ref_response b = XMLRPC.To.boolean b let unmarshall_is_valid_ref_response xml = XMLRPC.From.boolean xml - + (* read_refs *) let marshall_read_refs_args s = XMLRPC.To.string s let unmarshall_read_refs_args s = XMLRPC.From.string s let marshall_read_refs_response sl = marshall_stringlist sl let unmarshall_read_refs_response xml = unmarshall_stringlist xml - + (* read_field_where *) let 
marshall_read_field_where_args w = marshall_where_rec w let unmarshall_read_field_where_args xml = unmarshall_where_rec xml let marshall_read_field_where_response sl = - marshall_stringlist sl + marshall_stringlist sl let unmarshall_read_field_where_response xml = - unmarshall_stringlist xml - + unmarshall_stringlist xml + (* db_get_by_uuid *) let marshall_db_get_by_uuid_args (s1,s2) = - marshall_2strings (s1,s2) + marshall_2strings (s1,s2) let unmarshall_db_get_by_uuid_args xml = - unmarshall_2strings xml + unmarshall_2strings xml let marshall_db_get_by_uuid_response s = - XMLRPC.To.string s + XMLRPC.To.string s let unmarshall_db_get_by_uuid_response xml = - XMLRPC.From.string xml - + XMLRPC.From.string xml + (* db_get_by_name_label *) let marshall_db_get_by_name_label_args (s1,s2) = - marshall_2strings (s1,s2) + marshall_2strings (s1,s2) let unmarshall_db_get_by_name_label_args xml = - unmarshall_2strings xml + unmarshall_2strings xml let marshall_db_get_by_name_label_response sl = - marshall_stringlist sl + marshall_stringlist sl let unmarshall_db_get_by_name_label_response xml = - unmarshall_stringlist xml - + unmarshall_stringlist xml + (* read_set_ref *) let marshall_read_set_ref_args w = marshall_where_rec w let unmarshall_read_set_ref_args xml = unmarshall_where_rec xml let marshall_read_set_ref_response sl = - marshall_stringlist sl + marshall_stringlist sl let unmarshall_read_set_ref_response xml = - unmarshall_stringlist xml - - + unmarshall_stringlist xml + + (* create_row *) let marshall_create_row_args (s1,ssl,s2) = - XMLRPC.To.array - [XMLRPC.To.string s1; - XMLRPC.To.array (List.map marshall_2strings ssl); - XMLRPC.To.string s2] + XMLRPC.To.array + [XMLRPC.To.string s1; + XMLRPC.To.array (List.map marshall_2strings ssl); + XMLRPC.To.string s2] let unmarshall_create_row_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s1_xml; ssl_xml; s2_xml] -> - (XMLRPC.From.string s1_xml, - List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) 
ssl_xml), - XMLRPC.From.string s2_xml) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [s1_xml; ssl_xml; s2_xml] -> + (XMLRPC.From.string s1_xml, + List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) ssl_xml), + XMLRPC.From.string s2_xml) + | _ -> raise DB_remote_marshall_error let marshall_create_row_response () = - marshall_unit () + marshall_unit () let unmarshall_create_row_response xml = - unmarshall_unit xml - + unmarshall_unit xml + (* delete_row *) let marshall_delete_row_args (s1,s2) = - XMLRPC.To.array - [XMLRPC.To.string s1; - XMLRPC.To.string s2] + XMLRPC.To.array + [XMLRPC.To.string s1; + XMLRPC.To.string s2] let unmarshall_delete_row_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s1_xml; s2_xml] -> - (XMLRPC.From.string s1_xml, XMLRPC.From.string s2_xml) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [s1_xml; s2_xml] -> + (XMLRPC.From.string s1_xml, XMLRPC.From.string s2_xml) + | _ -> raise DB_remote_marshall_error let marshall_delete_row_response () = - marshall_unit () + marshall_unit () let unmarshall_delete_row_response xml = - unmarshall_unit xml - + unmarshall_unit xml + (* write_field *) let marshall_write_field_args (s1,s2,s3,s4) = - XMLRPC.To.array - (List.map XMLRPC.To.string [s1;s2;s3;s4]) + XMLRPC.To.array + (List.map XMLRPC.To.string [s1;s2;s3;s4]) let unmarshall_write_field_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s1x;s2x;s3x;s4x] -> - (XMLRPC.From.string s1x, XMLRPC.From.string s2x, - XMLRPC.From.string s3x, XMLRPC.From.string s4x) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [s1x;s2x;s3x;s4x] -> + (XMLRPC.From.string s1x, XMLRPC.From.string s2x, + XMLRPC.From.string s3x, XMLRPC.From.string s4x) + | _ -> raise DB_remote_marshall_error let marshall_write_field_response () = - marshall_unit () + marshall_unit () let unmarshall_write_field_response xml = - 
unmarshall_unit xml - + unmarshall_unit xml + (* read_field *) let marshall_read_field_args (s1,s2,s3) = - XMLRPC.To.array - (List.map XMLRPC.To.string [s1;s2;s3]) + XMLRPC.To.array + (List.map XMLRPC.To.string [s1;s2;s3]) let unmarshall_read_field_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s1x;s2x;s3x] -> - (XMLRPC.From.string s1x, XMLRPC.From.string s2x, - XMLRPC.From.string s3x) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [s1x;s2x;s3x] -> + (XMLRPC.From.string s1x, XMLRPC.From.string s2x, + XMLRPC.From.string s3x) + | _ -> raise DB_remote_marshall_error let marshall_read_field_response s = - XMLRPC.To.string s + XMLRPC.To.string s let unmarshall_read_field_response xml = - XMLRPC.From.string xml - + XMLRPC.From.string xml + (* find_refs_with_filter *) let marshall_find_refs_with_filter_args (s,e) = - XMLRPC.To.array - [XMLRPC.To.string s; marshall_expr e] + XMLRPC.To.array + [XMLRPC.To.string s; marshall_expr e] let unmarshall_find_refs_with_filter_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s;e] -> (XMLRPC.From.string s, unmarshall_expr e) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [s;e] -> (XMLRPC.From.string s, unmarshall_expr e) + | _ -> raise DB_remote_marshall_error let marshall_find_refs_with_filter_response sl = - marshall_stringlist sl + marshall_stringlist sl let unmarshall_find_refs_with_filter_response xml = - unmarshall_stringlist xml - + unmarshall_stringlist xml + (* process_structured_field *) let marshall_process_structured_field_args (ss,s1,s2,s3,op) = - XMLRPC.To.array - [marshall_2strings ss; - XMLRPC.To.string s1; - XMLRPC.To.string s2; - XMLRPC.To.string s3; - marshall_structured_op op] + XMLRPC.To.array + [marshall_2strings ss; + XMLRPC.To.string s1; + XMLRPC.To.string s2; + XMLRPC.To.string s3; + marshall_structured_op op] let unmarshall_process_structured_field_args xml = - match (XMLRPC.From.array (fun x->x) 
xml) with - [ss_xml;s1_xml;s2_xml;s3_xml;op_xml] -> - (unmarshall_2strings ss_xml, - XMLRPC.From.string s1_xml, - XMLRPC.From.string s2_xml, - XMLRPC.From.string s3_xml, - unmarshall_structured_op op_xml) - | _ -> raise DB_remote_marshall_error + match (XMLRPC.From.array (fun x->x) xml) with + [ss_xml;s1_xml;s2_xml;s3_xml;op_xml] -> + (unmarshall_2strings ss_xml, + XMLRPC.From.string s1_xml, + XMLRPC.From.string s2_xml, + XMLRPC.From.string s3_xml, + unmarshall_structured_op op_xml) + | _ -> raise DB_remote_marshall_error let marshall_process_structured_field_response () = - marshall_unit () + marshall_unit () let unmarshall_process_structured_field_response xml = - unmarshall_unit xml - + unmarshall_unit xml + (* read_record *) let marshall_read_record_args = marshall_2strings let unmarshall_read_record_args = unmarshall_2strings let marshall_read_record_response (ssl, ssll) = - XMLRPC.To.array - [XMLRPC.To.array (List.map marshall_2strings ssl); - XMLRPC.To.array - (List.map - (fun (s,sl) -> - XMLRPC.To.array [XMLRPC.To.string s; - XMLRPC.To.array (List.map XMLRPC.To.string sl)]) ssll)] + XMLRPC.To.array + [XMLRPC.To.array (List.map marshall_2strings ssl); + XMLRPC.To.array + (List.map + (fun (s,sl) -> + XMLRPC.To.array [XMLRPC.To.string s; + XMLRPC.To.array (List.map XMLRPC.To.string sl)]) ssll)] let unmarshall_read_record_response xml = - match (XMLRPC.From.array (fun x->x) xml) with - [ssl_xml; ssll_xml] -> - (List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) ssl_xml), - List.map - (fun xml -> - match XMLRPC.From.array (fun x->x) xml with - [s_xml; sl_xml] -> (XMLRPC.From.string s_xml, unmarshall_stringlist sl_xml) - | _ -> raise DB_remote_marshall_error) - (XMLRPC.From.array (fun x->x) ssll_xml)) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [ssl_xml; ssll_xml] -> + (List.map unmarshall_2strings (XMLRPC.From.array (fun x->x) ssl_xml), + List.map + (fun xml -> + match XMLRPC.From.array (fun x->x) xml with 
+ [s_xml; sl_xml] -> (XMLRPC.From.string s_xml, unmarshall_stringlist sl_xml) + | _ -> raise DB_remote_marshall_error) + (XMLRPC.From.array (fun x->x) ssll_xml)) + | _ -> raise DB_remote_marshall_error + (* read_records_where *) let marshall_read_records_where_args (s,e) = - XMLRPC.To.array - [XMLRPC.To.string s; marshall_expr e] + XMLRPC.To.array + [XMLRPC.To.string s; marshall_expr e] let unmarshall_read_records_where_args xml = - match (XMLRPC.From.array (fun x->x) xml) with - [s_xml; expr_xml] -> - (XMLRPC.From.string s_xml, - unmarshall_expr expr_xml) - | _ -> raise DB_remote_marshall_error - + match (XMLRPC.From.array (fun x->x) xml) with + [s_xml; expr_xml] -> + (XMLRPC.From.string s_xml, + unmarshall_expr expr_xml) + | _ -> raise DB_remote_marshall_error + let marshall_read_records_where_response refs_and_recs_list = - XMLRPC.To.array - (List.map - (fun (ref,record)-> - XMLRPC.To.array - [XMLRPC.To.string ref; - marshall_read_record_response record]) refs_and_recs_list) + XMLRPC.To.array + (List.map + (fun (ref,record)-> + XMLRPC.To.array + [XMLRPC.To.string ref; + marshall_read_record_response record]) refs_and_recs_list) let unmarshall_read_records_where_response xml = - match (XMLRPC.From.array (fun x->x) xml) with - xml_refs_and_recs_list -> - List.map - (fun xml_ref_and_rec -> - match (XMLRPC.From.array (fun x->x) xml_ref_and_rec) with - [ref_xml; rec_xml] -> (XMLRPC.From.string ref_xml, unmarshall_read_record_response rec_xml) - | _ -> raise DB_remote_marshall_error) - xml_refs_and_recs_list - + match (XMLRPC.From.array (fun x->x) xml) with + xml_refs_and_recs_list -> + List.map + (fun xml_ref_and_rec -> + match (XMLRPC.From.array (fun x->x) xml_ref_and_rec) with + [ref_xml; rec_xml] -> (XMLRPC.From.string ref_xml, unmarshall_read_record_response rec_xml) + | _ -> raise DB_remote_marshall_error) + xml_refs_and_recs_list + diff --git a/ocaml/database/db_rpc_common_v2.ml b/ocaml/database/db_rpc_common_v2.ml index 35c8c811682..b9b426a7ba1 100644 --- 
a/ocaml/database/db_rpc_common_v2.ml +++ b/ocaml/database/db_rpc_common_v2.ml @@ -15,52 +15,52 @@ (** Marshall/unmarshall functions and types for db remote access protocol v2 *) module Request = struct - - (** All possible request messages *) - type t = - | Get_table_from_ref of string - | Is_valid_ref of string - | Read_refs of string - | Find_refs_with_filter of string * Db_filter_types.expr - | Read_field_where of Db_cache_types.where_record - | Db_get_by_uuid of string * string - | Db_get_by_name_label of string * string - | Read_set_ref of Db_cache_types.where_record - | Create_row of string * (string * string) list * string - | Delete_row of string * string - | Write_field of string * string * string * string - | Read_field of string * string * string - | Read_record of string * string - | Read_records_where of string * Db_filter_types.expr - | Process_structured_field of (string * string) * string * string * string * Db_cache_types.structured_op_t - with rpc + + (** All possible request messages *) + type t = + | Get_table_from_ref of string + | Is_valid_ref of string + | Read_refs of string + | Find_refs_with_filter of string * Db_filter_types.expr + | Read_field_where of Db_cache_types.where_record + | Db_get_by_uuid of string * string + | Db_get_by_name_label of string * string + | Read_set_ref of Db_cache_types.where_record + | Create_row of string * (string * string) list * string + | Delete_row of string * string + | Write_field of string * string * string * string + | Read_field of string * string * string + | Read_record of string * string + | Read_records_where of string * Db_filter_types.expr + | Process_structured_field of (string * string) * string * string * string * Db_cache_types.structured_op_t + with rpc end module Response = struct - (** All possible response messages *) - type t = - | Get_table_from_ref of string option - | Is_valid_ref of bool - | Read_refs of string list - | Find_refs_with_filter of string list - | Read_field_where of 
string list - | Db_get_by_uuid of string - | Db_get_by_name_label of string list - | Read_set_ref of string list - | Create_row of unit - | Delete_row of unit - | Write_field of unit - | Read_field of string - | Read_record of (string * string) list * (string * string list) list - | Read_records_where of (string * ((string * string) list * (string * string list) list )) list - | Process_structured_field of unit + (** All possible response messages *) + type t = + | Get_table_from_ref of string option + | Is_valid_ref of bool + | Read_refs of string list + | Find_refs_with_filter of string list + | Read_field_where of string list + | Db_get_by_uuid of string + | Db_get_by_name_label of string list + | Read_set_ref of string list + | Create_row of unit + | Delete_row of unit + | Write_field of unit + | Read_field of string + | Read_record of (string * string) list * (string * string list) list + | Read_records_where of (string * ((string * string) list * (string * string list) list )) list + | Process_structured_field of unit - | Dbcache_notfound of string * string * string - | Duplicate_key_of of string * string * string * string - | Uniqueness_constraint_violation of string * string * string - | Read_missing_uuid of string * string * string - | Too_many_values of string * string * string - with rpc + | Dbcache_notfound of string * string * string + | Duplicate_key_of of string * string * string * string + | Uniqueness_constraint_violation of string * string * string + | Read_missing_uuid of string * string * string + | Too_many_values of string * string * string + with rpc end diff --git a/ocaml/database/db_upgrade.ml b/ocaml/database/db_upgrade.ml index 1d4fe65e366..7e859e703e3 100644 --- a/ocaml/database/db_upgrade.ml +++ b/ocaml/database/db_upgrade.ml @@ -24,22 +24,22 @@ let generic_database_upgrade db = let created_table_names = Stdext.Listext.List.set_difference schema_table_names existing_table_names in let g = Manifest.generation (Database.manifest db) in 
let db = Database.update - (fun ts -> - List.fold_left (fun ts tblname -> - debug "Adding new database table: '%s'" tblname; - TableSet.add g tblname Table.empty ts) ts created_table_names) db in - + (fun ts -> + List.fold_left (fun ts tblname -> + debug "Adding new database table: '%s'" tblname; + TableSet.add g tblname Table.empty ts) ts created_table_names) db in + (* for each table, go through and fill in missing default values *) let open Stdext.Fun in List.fold_left - (fun db tblname -> - let tbl = TableSet.find tblname (Database.tableset db) in - let schema = Schema.table tblname (Database.schema db) in - let add_fields_to_row objref _ r tbl : Table.t = - let row = Row.add_defaults g schema r in - Table.add g objref row tbl in - let tbl = Table.fold add_fields_to_row tbl Table.empty in - let g = Manifest.generation (Database.manifest db) in - ((Database.update ++ (TableSet.update g tblname Table.empty)) (fun _ -> tbl)) db - ) db schema_table_names + (fun db tblname -> + let tbl = TableSet.find tblname (Database.tableset db) in + let schema = Schema.table tblname (Database.schema db) in + let add_fields_to_row objref _ r tbl : Table.t = + let row = Row.add_defaults g schema r in + Table.add g objref row tbl in + let tbl = Table.fold add_fields_to_row tbl Table.empty in + let g = Manifest.generation (Database.manifest db) in + ((Database.update ++ (TableSet.update g tblname Table.empty)) (fun _ -> tbl)) db + ) db schema_table_names diff --git a/ocaml/database/db_xml.ml b/ocaml/database/db_xml.ml index 27c76e13c98..e348d379568 100644 --- a/ocaml/database/db_xml.ml +++ b/ocaml/database/db_xml.ml @@ -31,9 +31,9 @@ let _generation_count = "generation_count" module To = struct (* Write out a key/value pair *) - let pair (output: Xmlm.output) (key: string) (v: string) = + let pair (output: Xmlm.output) (key: string) (v: string) = Xmlm.output output (`El_start (make_tag "pair" [ "key", key; "value", v ])); - Xmlm.output output `El_end + Xmlm.output output `El_end (* 
Write out a string *) let string (output: Xmlm.output) (key: string) (x: string) = pair output key x (* Write out an int *) @@ -42,14 +42,14 @@ module To = struct let int64 (output: Xmlm.output) (key: string) (x: Int64.t) = pair output key (Int64.to_string x) (* Marshal a whole database table to an Xmlm output abstraction *) - let table schema (output: Xmlm.output) name (tbl: Table.t) = - let record rf { Stat.created; modified } (row: Row.t) _ = - let preamble = - if persist_generation_counts - then [("__mtime",Generation.to_string modified); ("__ctime",Generation.to_string created); ("ref",rf)] - else [("ref",rf)] - in - let (tag: Xmlm.tag) = make_tag "row" (List.rev (Row.fold (fun k _ v acc -> (k, Xml_spaces.protect (Schema.Value.marshal v)) :: acc) row preamble)) in + let table schema (output: Xmlm.output) name (tbl: Table.t) = + let record rf { Stat.created; modified } (row: Row.t) _ = + let preamble = + if persist_generation_counts + then [("__mtime",Generation.to_string modified); ("__ctime",Generation.to_string created); ("ref",rf)] + else [("ref",rf)] + in + let (tag: Xmlm.tag) = make_tag "row" (List.rev (Row.fold (fun k _ v acc -> (k, Xml_spaces.protect (Schema.Value.marshal v)) :: acc) row preamble)) in Xmlm.output output (`El_start tag); Xmlm.output output `El_end in let tag = make_tag "table" [ "name", name ] in @@ -57,33 +57,33 @@ module To = struct (* we write a table entry whether or not the table persists, because populate happens to assume that all tables will be present. 
However, if the table is marked as "don't persist" then we don't write any row entries: *) - if Schema.is_table_persistent schema name - then Table.fold record tbl (); + if Schema.is_table_persistent schema name + then Table.fold record tbl (); Xmlm.output output `El_end - + (* Write out a manifest *) - let manifest (output: Xmlm.output) (manifest: Manifest.t) : unit = - Xmlm.output output (`El_start (make_tag "manifest" [])); - let major, minor = Manifest.schema manifest in - int output _schema_major_vsn major; - int output _schema_minor_vsn minor; - int64 output _generation_count (Manifest.generation manifest); - Xmlm.output output `El_end + let manifest (output: Xmlm.output) (manifest: Manifest.t) : unit = + Xmlm.output output (`El_start (make_tag "manifest" [])); + let major, minor = Manifest.schema manifest in + int output _schema_major_vsn major; + int output _schema_minor_vsn minor; + int64 output _generation_count (Manifest.generation manifest); + Xmlm.output output `El_end (* Write out a full database *) let database (output: Xmlm.output) db : unit = - Xmlm.output output (`Dtd None); + Xmlm.output output (`Dtd None); Xmlm.output output (`El_start (make_tag "database" [])); manifest output (Database.manifest db); TableSet.iter (table (Database.schema db) output) (Database.tableset db); Xmlm.output output `El_end - let fd (fd: Unix.file_descr) db : unit = + let fd (fd: Unix.file_descr) db : unit = let oc = Unix.out_channel_of_descr fd in database (Xmlm.make_output (`Channel oc)) db; flush oc - let file (filename: string) db : unit = + let file (filename: string) db : unit = let fdescr = Unix.openfile filename [ Unix.O_WRONLY; Unix.O_CREAT; Unix.O_TRUNC ] 0o600 in Stdext.Pervasiveext.finally (fun () -> fd fdescr db) @@ -92,99 +92,99 @@ end module From = struct - let database schema (input: Xmlm.input) = - let tags = Stack.create () in - let maybe_return f accu = - if Xmlm.eoi input then begin - if Stack.is_empty tags then - accu - else - raise 
(Unmarshall_error "Unexpected end of file") - end else - f accu in - let schema_vsn_of_manifest manifest = - let major_vsn = int_of_string (List.assoc _schema_major_vsn manifest) in - let minor_vsn = int_of_string (List.assoc _schema_minor_vsn manifest) in - (major_vsn, minor_vsn) in - let rec f ((tableset, table, tblname, manifest) as acc) = match Xmlm.input input with - (* On reading a start tag... *) - | `El_start (tag: Xmlm.tag) -> - Stack.push tag tags; - begin match tag with - | (_, ("database" | "manifest")), _ -> f acc - | (_, "table"), [ (_, "name"), tblname ] -> - f (tableset, Table.empty, tblname, manifest) - | (_, "row"), ((_, "ref"), rf) :: rest -> - (* Remove any other duplicate "ref"s which might have sneaked in there *) - let rest = List.filter (fun ((_,k), _) -> k <> "ref") rest in - let (ctime_l,rest) = List.partition (fun ((_, k), _) -> k="__ctime") rest in - let (mtime_l,rest) = List.partition (fun ((_, k), _) -> k="__mtime") rest in - let ctime = match ctime_l with | [(_,ctime_s)] -> Int64.of_string ctime_s | _ -> 0L in - let mtime = match mtime_l with | [(_,mtime_s)] -> Int64.of_string mtime_s | _ -> 0L in - let row = List.fold_left (fun row ((_, k), v) -> - let table_schema = Schema.Database.find tblname schema.Schema.database in - try - let column_schema = Schema.Table.find k table_schema in - let value = Schema.Value.unmarshal column_schema.Schema.Column.ty (Xml_spaces.unprotect v) in - let empty = column_schema.Schema.Column.empty in - Row.update mtime k empty (fun _ -> value) (Row.add ctime k value row) - with Not_found -> - (* This means there's an unexpected field, so we should normally fail. However, fields - * present in Tech Preview releases are permitted to disappear on upgrade, so suppress - * such errors on such upgrades. 
*) - let exc = Unmarshall_error (Printf.sprintf "Unexpected column in table %s: %s" tblname k) in - let (this_maj, this_min) = try - schema_vsn_of_manifest manifest - with Not_found -> - (* Probably the database didn't have a at the start. So at this point - * we don't know the schema version of the database we're loading. *) - D.error "Unmarshalling removed column %s from table %s but don't know schema version because manifest not yet read" k tblname; - raise exc - in - if List.mem (this_maj, this_min) Datamodel.tech_preview_releases then ( - (* Suppress error for fields that only temporarily existed in the datamodel *) - D.warn "Upgrading from Tech Preview schema %d.%d so removing deleted field %s from table %s" this_maj this_min k tblname; - row - ) else - (* For any genuinely unexpected fields, fail *) - raise exc - ) Row.empty rest in - f (tableset, (Table.update mtime rf Row.empty (fun _ -> row) (Table.add ctime rf row table)), tblname, manifest) - | (_, "pair"), [ (_, "key"), k; (_, "value"), v ] -> - f (tableset, table, tblname, (k, v) :: manifest) - | (_, name), _ -> - raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) - end - (* On reading an end tag... 
*) - | `El_end -> - let tag = Stack.pop tags in - begin match tag with - | (_, ("database" | "manifest" | "row" | "pair")), _ -> maybe_return f acc - | (_, "table"), [ (_, "name"), name ] -> - maybe_return f (TableSet.add 0L name table tableset, Table.empty, "", manifest) - | (_, name), _ -> - raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) - end - | _ -> f acc - in - let (ts, _, _, manifest) = f (TableSet.empty, Table.empty, "", []) in - let g = Int64.of_string (List.assoc _generation_count manifest) in - let (major_vsn, minor_vsn) = schema_vsn_of_manifest manifest in - let manifest = Manifest.make major_vsn minor_vsn g in - let open Stdext.Fun in - ((Database.update_manifest (fun _ -> manifest)) - ++ (Database.update_tableset (fun _ -> ts))) - (Database.make schema) + let database schema (input: Xmlm.input) = + let tags = Stack.create () in + let maybe_return f accu = + if Xmlm.eoi input then begin + if Stack.is_empty tags then + accu + else + raise (Unmarshall_error "Unexpected end of file") + end else + f accu in + let schema_vsn_of_manifest manifest = + let major_vsn = int_of_string (List.assoc _schema_major_vsn manifest) in + let minor_vsn = int_of_string (List.assoc _schema_minor_vsn manifest) in + (major_vsn, minor_vsn) in + let rec f ((tableset, table, tblname, manifest) as acc) = match Xmlm.input input with + (* On reading a start tag... 
*) + | `El_start (tag: Xmlm.tag) -> + Stack.push tag tags; + begin match tag with + | (_, ("database" | "manifest")), _ -> f acc + | (_, "table"), [ (_, "name"), tblname ] -> + f (tableset, Table.empty, tblname, manifest) + | (_, "row"), ((_, "ref"), rf) :: rest -> + (* Remove any other duplicate "ref"s which might have sneaked in there *) + let rest = List.filter (fun ((_,k), _) -> k <> "ref") rest in + let (ctime_l,rest) = List.partition (fun ((_, k), _) -> k="__ctime") rest in + let (mtime_l,rest) = List.partition (fun ((_, k), _) -> k="__mtime") rest in + let ctime = match ctime_l with | [(_,ctime_s)] -> Int64.of_string ctime_s | _ -> 0L in + let mtime = match mtime_l with | [(_,mtime_s)] -> Int64.of_string mtime_s | _ -> 0L in + let row = List.fold_left (fun row ((_, k), v) -> + let table_schema = Schema.Database.find tblname schema.Schema.database in + try + let column_schema = Schema.Table.find k table_schema in + let value = Schema.Value.unmarshal column_schema.Schema.Column.ty (Xml_spaces.unprotect v) in + let empty = column_schema.Schema.Column.empty in + Row.update mtime k empty (fun _ -> value) (Row.add ctime k value row) + with Not_found -> + (* This means there's an unexpected field, so we should normally fail. However, fields + * present in Tech Preview releases are permitted to disappear on upgrade, so suppress + * such errors on such upgrades. *) + let exc = Unmarshall_error (Printf.sprintf "Unexpected column in table %s: %s" tblname k) in + let (this_maj, this_min) = try + schema_vsn_of_manifest manifest + with Not_found -> + (* Probably the database didn't have a at the start. So at this point + * we don't know the schema version of the database we're loading. 
*) + D.error "Unmarshalling removed column %s from table %s but don't know schema version because manifest not yet read" k tblname; + raise exc + in + if List.mem (this_maj, this_min) Datamodel.tech_preview_releases then ( + (* Suppress error for fields that only temporarily existed in the datamodel *) + D.warn "Upgrading from Tech Preview schema %d.%d so removing deleted field %s from table %s" this_maj this_min k tblname; + row + ) else + (* For any genuinely unexpected fields, fail *) + raise exc + ) Row.empty rest in + f (tableset, (Table.update mtime rf Row.empty (fun _ -> row) (Table.add ctime rf row table)), tblname, manifest) + | (_, "pair"), [ (_, "key"), k; (_, "value"), v ] -> + f (tableset, table, tblname, (k, v) :: manifest) + | (_, name), _ -> + raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) + end + (* On reading an end tag... *) + | `El_end -> + let tag = Stack.pop tags in + begin match tag with + | (_, ("database" | "manifest" | "row" | "pair")), _ -> maybe_return f acc + | (_, "table"), [ (_, "name"), name ] -> + maybe_return f (TableSet.add 0L name table tableset, Table.empty, "", manifest) + | (_, name), _ -> + raise (Unmarshall_error (Printf.sprintf "Unexpected tag: %s" name)) + end + | _ -> f acc + in + let (ts, _, _, manifest) = f (TableSet.empty, Table.empty, "", []) in + let g = Int64.of_string (List.assoc _generation_count manifest) in + let (major_vsn, minor_vsn) = schema_vsn_of_manifest manifest in + let manifest = Manifest.make major_vsn minor_vsn g in + let open Stdext.Fun in + ((Database.update_manifest (fun _ -> manifest)) + ++ (Database.update_tableset (fun _ -> ts))) + (Database.make schema) let file schema xml_filename = - let input = open_in xml_filename in - Stdext.Pervasiveext.finally - (fun () -> database schema (Xmlm.make_input (`Channel input))) - (fun () -> close_in input) + let input = open_in xml_filename in + Stdext.Pervasiveext.finally + (fun () -> database schema (Xmlm.make_input (`Channel input))) 
+ (fun () -> close_in input) let channel schema inchan = - database schema (Xmlm.make_input (`Channel inchan)) + database schema (Xmlm.make_input (`Channel inchan)) end diff --git a/ocaml/database/eventgen.ml b/ocaml/database/eventgen.ml index 00d29db6622..2ae2ce121ae 100644 --- a/ocaml/database/eventgen.ml +++ b/ocaml/database/eventgen.ml @@ -18,15 +18,15 @@ type getrecord = unit -> Rpc.t let get_record_table : (string, __context:Context.t -> self:string -> getrecord ) Hashtbl.t = Hashtbl.create 20 -let find_get_record x ~__context ~self () : Rpc.t option = - if Hashtbl.mem get_record_table x - then Some (Hashtbl.find get_record_table x ~__context ~self ()) - else None +let find_get_record x ~__context ~self () : Rpc.t option = + if Hashtbl.mem get_record_table x + then Some (Hashtbl.find get_record_table x ~__context ~self ()) + else None - (* If a record is created or destroyed, then - for any (Ref _) field which is one end of a relationship, need to send - modified events for all those other objects. *) - (* we build a hashtable of these references and then look them up by object on each db write: *) +(* If a record is created or destroyed, then + for any (Ref _) field which is one end of a relationship, need to send + modified events for all those other objects. 
*) +(* we build a hashtable of these references and then look them up by object on each db write: *) let compute_object_references_to_follow (obj_name:string) = let api = Datamodel.all_api in let objs = Dm_api.objects_of_api api in @@ -34,18 +34,18 @@ let compute_object_references_to_follow (obj_name:string) = let relations = Dm_api.relations_of_api api in let symmetric = List.concat (List.map (fun (a, b) -> [ a, b; b, a ]) relations) in let set = Stdext.Listext.List.setify symmetric in - List.concat - (List.map - (function { Datamodel_types.ty = Datamodel_types.Ref x; - Datamodel_types.field_name = field_name } -> - let this_end = obj.Datamodel_types.name, field_name in - if List.mem_assoc this_end set - then begin - let other_end = List.assoc this_end set in - let other_obj = fst other_end in - [ other_obj, field_name ] - end else [] - | _ -> []) (Datamodel_utils.fields_of_obj obj)) + List.concat + (List.map + (function { Datamodel_types.ty = Datamodel_types.Ref x; + Datamodel_types.field_name = field_name } -> + let this_end = obj.Datamodel_types.name, field_name in + if List.mem_assoc this_end set + then begin + let other_end = List.assoc this_end set in + let other_obj = fst other_end in + [ other_obj, field_name ] + end else [] + | _ -> []) (Datamodel_utils.fields_of_obj obj)) let obj_references_table : (string, (string*string) list) Hashtbl.t = Hashtbl.create 30 (* populate obj references table *) @@ -60,137 +60,137 @@ let follow_references (obj_name:string) = Hashtbl.find obj_references_table obj_ (** Compute a set of modify events but skip any for objects which were missing (must have been dangling references) *) -let events_of_other_tbl_refs other_tbl_refs = - List.concat - (List.map (fun (tbl, fld, x) -> - try [ tbl, fld, x () ] - with _ -> - (* Probably means the reference was dangling *) - warn "skipping event for dangling reference %s: %s" tbl fld; - []) other_tbl_refs) +let events_of_other_tbl_refs other_tbl_refs = + List.concat + (List.map (fun 
(tbl, fld, x) -> + try [ tbl, fld, x () ] + with _ -> + (* Probably means the reference was dangling *) + warn "skipping event for dangling reference %s: %s" tbl fld; + []) other_tbl_refs) open Db_cache_types open Db_action_helper -let database_callback event db = - let context = Context.make "eventgen" in +let database_callback event db = + let context = Context.make "eventgen" in - let other_tbl_refs tblname = follow_references tblname in - let other_tbl_refs_for_this_field tblname fldname = - List.filter (fun (_,fld) -> fld=fldname) (other_tbl_refs tblname) in + let other_tbl_refs tblname = follow_references tblname in + let other_tbl_refs_for_this_field tblname fldname = + List.filter (fun (_,fld) -> fld=fldname) (other_tbl_refs tblname) in - let is_valid_ref = function - | Schema.Value.String r -> - begin - try - ignore(Database.table_of_ref r db); - true - with _ -> false - end - | _ -> false in + let is_valid_ref = function + | Schema.Value.String r -> + begin + try + ignore(Database.table_of_ref r db); + true + with _ -> false + end + | _ -> false in - match event with - | RefreshRow (tblname, objref) -> - (* Generate event *) - let snapshot = find_get_record tblname ~__context:context ~self:objref in - let record = snapshot() in - begin match record with - | None -> - error "Failed to send MOD event for %s %s" tblname objref; - Printf.printf "Failed to send MOD event for %s %s\n%!" 
tblname objref; - | Some record -> - events_notify ~snapshot:record tblname "mod" objref; - end - | WriteField (tblname, objref, fldname, oldval, newval) -> - let events_old_val = - if is_valid_ref oldval then - let oldval = Schema.Value.Unsafe_cast.string oldval in - events_of_other_tbl_refs - (List.map (fun (tbl,fld) -> - (tbl, oldval, find_get_record tbl ~__context:context ~self:oldval)) (other_tbl_refs_for_this_field tblname fldname)) - else [] in - let events_new_val = - if is_valid_ref newval then - let newval = Schema.Value.Unsafe_cast.string newval in - events_of_other_tbl_refs - (List.map (fun (tbl,fld) -> - (tbl, newval, find_get_record tbl ~__context:context ~self:newval)) (other_tbl_refs_for_this_field tblname fldname)) - else [] - in - (* Generate event *) - let snapshot = find_get_record tblname ~__context:context ~self:objref in - let record = snapshot() in - List.iter (function - | tbl, ref, None -> - error "Failed to send MOD event for %s %s" tbl ref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) events_old_val; - begin match record with - | None -> - error "Failed to send MOD event for %s %s" tblname objref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tblname objref; - | Some record -> - events_notify ~snapshot:record tblname "mod" objref; - end; - List.iter (function - | tbl, ref, None -> - error "Failed to send MOD event for %s %s" tbl ref; - Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) events_new_val; - | PreDelete(tblname, objref) -> - begin match find_get_record tblname ~__context:context ~self:objref () with - | None -> - error "Failed to generate DEL event for %s %s" tblname objref; - (* Printf.printf "Failed to generate DEL event for %s %s\n%!" 
tblname objref; *) - | Some snapshot -> - events_notify ~snapshot tblname "del" objref - end - | Delete(tblname, objref, kv) -> - let other_tbl_refs = follow_references tblname in - let other_tbl_refs = - List.fold_left (fun accu (remote_tbl,fld) -> - let fld_value = List.assoc fld kv in - if is_valid_ref fld_value then begin - let fld_value = Schema.Value.Unsafe_cast.string fld_value in - (remote_tbl, fld_value, find_get_record remote_tbl ~__context:context ~self:fld_value) :: accu - end else accu) - [] other_tbl_refs in - let other_tbl_ref_events = events_of_other_tbl_refs other_tbl_refs in - List.iter (function - | tbl, ref, None -> - error "Failed to generate MOD event on %s %s" tbl ref; -(* Printf.printf "Failed to generate MOD event on %s %s\n%!" tbl ref; *) - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) other_tbl_ref_events + match event with + | RefreshRow (tblname, objref) -> + (* Generate event *) + let snapshot = find_get_record tblname ~__context:context ~self:objref in + let record = snapshot() in + begin match record with + | None -> + error "Failed to send MOD event for %s %s" tblname objref; + Printf.printf "Failed to send MOD event for %s %s\n%!" 
tblname objref; + | Some record -> + events_notify ~snapshot:record tblname "mod" objref; + end + | WriteField (tblname, objref, fldname, oldval, newval) -> + let events_old_val = + if is_valid_ref oldval then + let oldval = Schema.Value.Unsafe_cast.string oldval in + events_of_other_tbl_refs + (List.map (fun (tbl,fld) -> + (tbl, oldval, find_get_record tbl ~__context:context ~self:oldval)) (other_tbl_refs_for_this_field tblname fldname)) + else [] in + let events_new_val = + if is_valid_ref newval then + let newval = Schema.Value.Unsafe_cast.string newval in + events_of_other_tbl_refs + (List.map (fun (tbl,fld) -> + (tbl, newval, find_get_record tbl ~__context:context ~self:newval)) (other_tbl_refs_for_this_field tblname fldname)) + else [] + in + (* Generate event *) + let snapshot = find_get_record tblname ~__context:context ~self:objref in + let record = snapshot() in + List.iter (function + | tbl, ref, None -> + error "Failed to send MOD event for %s %s" tbl ref; + Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) events_old_val; + begin match record with + | None -> + error "Failed to send MOD event for %s %s" tblname objref; + Printf.printf "Failed to send MOD event for %s %s\n%!" tblname objref; + | Some record -> + events_notify ~snapshot:record tblname "mod" objref; + end; + List.iter (function + | tbl, ref, None -> + error "Failed to send MOD event for %s %s" tbl ref; + Printf.printf "Failed to send MOD event for %s %s\n%!" tbl ref; + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) events_new_val; + | PreDelete(tblname, objref) -> + begin match find_get_record tblname ~__context:context ~self:objref () with + | None -> + error "Failed to generate DEL event for %s %s" tblname objref; + (* Printf.printf "Failed to generate DEL event for %s %s\n%!" 
tblname objref; *) + | Some snapshot -> + events_notify ~snapshot tblname "del" objref + end + | Delete(tblname, objref, kv) -> + let other_tbl_refs = follow_references tblname in + let other_tbl_refs = + List.fold_left (fun accu (remote_tbl,fld) -> + let fld_value = List.assoc fld kv in + if is_valid_ref fld_value then begin + let fld_value = Schema.Value.Unsafe_cast.string fld_value in + (remote_tbl, fld_value, find_get_record remote_tbl ~__context:context ~self:fld_value) :: accu + end else accu) + [] other_tbl_refs in + let other_tbl_ref_events = events_of_other_tbl_refs other_tbl_refs in + List.iter (function + | tbl, ref, None -> + error "Failed to generate MOD event on %s %s" tbl ref; + (* Printf.printf "Failed to generate MOD event on %s %s\n%!" tbl ref; *) + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) other_tbl_ref_events - | Create (tblname, new_objref, kv) -> - let snapshot = find_get_record tblname ~__context:context ~self:new_objref in - let other_tbl_refs = follow_references tblname in - let other_tbl_refs = - List.fold_left (fun accu (tbl,fld) -> - let fld_value = List.assoc fld kv in - if is_valid_ref fld_value then begin - let fld_value = Schema.Value.Unsafe_cast.string fld_value in - (tbl, fld_value, find_get_record tbl ~__context:context ~self:fld_value) :: accu - end else accu) - [] other_tbl_refs in - let other_tbl_events = events_of_other_tbl_refs other_tbl_refs in - begin match snapshot() with - | None -> - error "Failed to generate ADD event for %s %s" tblname new_objref; - (* Printf.printf "Failed to generate ADD event for %s %s\n%!" tblname new_objref; *) - | Some snapshot -> - events_notify ~snapshot tblname "add" new_objref; - end; - List.iter (function - | tbl, ref, None -> - error "Failed to generate MOD event for %s %s" tbl ref; - (* Printf.printf "Failed to generate MOD event for %s %s\n%!" 
tbl ref;*) - | tbl, ref, Some s -> - events_notify ~snapshot:s tbl "mod" ref - ) other_tbl_events + | Create (tblname, new_objref, kv) -> + let snapshot = find_get_record tblname ~__context:context ~self:new_objref in + let other_tbl_refs = follow_references tblname in + let other_tbl_refs = + List.fold_left (fun accu (tbl,fld) -> + let fld_value = List.assoc fld kv in + if is_valid_ref fld_value then begin + let fld_value = Schema.Value.Unsafe_cast.string fld_value in + (tbl, fld_value, find_get_record tbl ~__context:context ~self:fld_value) :: accu + end else accu) + [] other_tbl_refs in + let other_tbl_events = events_of_other_tbl_refs other_tbl_refs in + begin match snapshot() with + | None -> + error "Failed to generate ADD event for %s %s" tblname new_objref; + (* Printf.printf "Failed to generate ADD event for %s %s\n%!" tblname new_objref; *) + | Some snapshot -> + events_notify ~snapshot tblname "add" new_objref; + end; + List.iter (function + | tbl, ref, None -> + error "Failed to generate MOD event for %s %s" tbl ref; + (* Printf.printf "Failed to generate MOD event for %s %s\n%!" tbl ref;*) + | tbl, ref, Some s -> + events_notify ~snapshot:s tbl "mod" ref + ) other_tbl_events diff --git a/ocaml/database/master_connection.ml b/ocaml/database/master_connection.ml index 82f353c096b..a701af7b73e 100644 --- a/ocaml/database/master_connection.ml +++ b/ocaml/database/master_connection.ml @@ -25,9 +25,9 @@ module D = Debug.Make(struct let name = "master_connection" end) open D let my_connection : Stunnel.t option ref = ref None - + exception Cannot_connect_to_master - + (* kill the stunnel underlying the connection.. When the master dies then read/writes to the connection block for ages waiting for the TCP timeout. By killing the stunnel, we can get these calls to @@ -37,38 +37,38 @@ exception Cannot_connect_to_master the (now dead!) stunnel process. 
*) let force_connection_reset () = - (* Cleanup cached stunnel connections to the master, so that future API - calls won't be blocked. *) - if Pool_role.is_slave () then begin - let host = Pool_role.get_master_address () in - let port = !Xapi_globs.https_port in - (* We don't currently have a method to enumerate all the stunnel links - to an address in the cache. The easiest way is, for each valid config - combination, we pop out (remove) its links until Not_found is raised. - Here, we have two such combinations, i.e. verify_cert=true/false, as - host and port are fixed values. *) - let purge_stunnels verify_cert = - try - while true do - let st = Stunnel_cache.remove host port verify_cert in - try Stunnel.disconnect ~wait:false ~force:true st with _ -> () - done - with Not_found -> () in - purge_stunnels true; purge_stunnels false; - info "force_connection_reset: all cached connections to the master have been purged"; - end; - match !my_connection with - | None -> () - | Some st_proc -> - (info "stunnel reset pid=%d fd=%d" (Stunnel.getpid st_proc.Stunnel.pid) (Stdext.Unixext.int_of_file_descr st_proc.Stunnel.fd); - Unix.kill (Stunnel.getpid st_proc.Stunnel.pid) Sys.sigterm) + (* Cleanup cached stunnel connections to the master, so that future API + calls won't be blocked. *) + if Pool_role.is_slave () then begin + let host = Pool_role.get_master_address () in + let port = !Xapi_globs.https_port in + (* We don't currently have a method to enumerate all the stunnel links + to an address in the cache. The easiest way is, for each valid config + combination, we pop out (remove) its links until Not_found is raised. + Here, we have two such combinations, i.e. verify_cert=true/false, as + host and port are fixed values. 
*) + let purge_stunnels verify_cert = + try + while true do + let st = Stunnel_cache.remove host port verify_cert in + try Stunnel.disconnect ~wait:false ~force:true st with _ -> () + done + with Not_found -> () in + purge_stunnels true; purge_stunnels false; + info "force_connection_reset: all cached connections to the master have been purged"; + end; + match !my_connection with + | None -> () + | Some st_proc -> + (info "stunnel reset pid=%d fd=%d" (Stunnel.getpid st_proc.Stunnel.pid) (Stdext.Unixext.int_of_file_descr st_proc.Stunnel.fd); + Unix.kill (Stunnel.getpid st_proc.Stunnel.pid) Sys.sigterm) (* whenever a call is made that involves read/write to the master connection, a timestamp is written into this global: *) let last_master_connection_call : float option ref = ref None - (* the master_connection_watchdog uses this time to determine whether the master connection - should be reset *) - +(* the master_connection_watchdog uses this time to determine whether the master connection + should be reset *) + (* Set and unset the timestamp global. (No locking required since we are operating under mutual exclusion provided by the database lock here anyway) *) let with_timestamp f = @@ -76,7 +76,7 @@ let with_timestamp f = Stdext.Pervasiveext.finally f (fun ()->last_master_connection_call := None) - + (* call force_connection_reset if we detect that a master-connection is blocked for too long. One common way this can happen is if we end up blocked waiting for a TCP timeout when the master goes away unexpectedly... *) @@ -85,29 +85,29 @@ let my_watchdog : Thread.t option ref = ref None let start_master_connection_watchdog() = Mutex.execute watchdog_start_mutex (fun () -> - match !my_watchdog with - | None -> - my_watchdog := Some (Thread.create (fun () -> - while (true) do - try - begin - match !last_master_connection_call with - | None -> () - | Some t -> - let now = Unix.gettimeofday() in - let since_last_call = now -. 
t in - if since_last_call > !Xapi_globs.master_connection_reset_timeout then - begin - debug "Master connection timeout: forcibly resetting master connection"; - force_connection_reset() - end - end; - Thread.delay 10. - with _ -> () - done - ) ()) - | Some _ -> - () + match !my_watchdog with + | None -> + my_watchdog := Some (Thread.create (fun () -> + while (true) do + try + begin + match !last_master_connection_call with + | None -> () + | Some t -> + let now = Unix.gettimeofday() in + let since_last_call = now -. t in + if since_last_call > !Xapi_globs.master_connection_reset_timeout then + begin + debug "Master connection timeout: forcibly resetting master connection"; + force_connection_reset() + end + end; + Thread.delay 10. + with _ -> () + done + ) ()) + | Some _ -> + () ) module StunnelDebug=Debug.Make(struct let name="stunnel" end) @@ -122,18 +122,18 @@ let open_secure_connection () = let host = Pool_role.get_master_address () in let port = !Xapi_globs.https_port in let st_proc = Stunnel.connect ~use_fork_exec_helper:true - ~extended_diagnosis:true - ~write_to_log:(fun x -> debug "stunnel: %s\n" x) host port in + ~extended_diagnosis:true + ~write_to_log:(fun x -> debug "stunnel: %s\n" x) host port in let fd_closed = Thread.wait_timed_read st_proc.Stunnel.fd 5. 
in let proc_quit = try Unix.kill (Stunnel.getpid st_proc.Stunnel.pid) 0; false with e -> true in if not fd_closed && not proc_quit then begin - info "stunnel connected pid=%d fd=%d" (Stunnel.getpid st_proc.Stunnel.pid) (Stdext.Unixext.int_of_file_descr st_proc.Stunnel.fd); - my_connection := Some st_proc; - !on_database_connection_established () + info "stunnel connected pid=%d fd=%d" (Stunnel.getpid st_proc.Stunnel.pid) (Stdext.Unixext.int_of_file_descr st_proc.Stunnel.fd); + my_connection := Some st_proc; + !on_database_connection_established () end else begin - info "stunnel disconnected fd_closed=%s proc_quit=%s" (string_of_bool fd_closed) (string_of_bool proc_quit); - let () = try Stunnel.disconnect st_proc with _ -> () in - raise Goto_handler + info "stunnel disconnected fd_closed=%s proc_quit=%s" (string_of_bool fd_closed) (string_of_bool proc_quit); + let () = try Stunnel.disconnect st_proc with _ -> () in + raise Goto_handler end (* Do a db xml_rpc request, catching exception and trying to reopen the connection if it @@ -147,7 +147,7 @@ let restart_on_connection_timeout = ref true exception Content_length_required -let do_db_xml_rpc_persistent_with_reopen ~host ~path (req: string) : Db_interface.response = +let do_db_xml_rpc_persistent_with_reopen ~host ~path (req: string) : Db_interface.response = let time_call_started = Unix.gettimeofday() in let write_ok = ref false in let result = ref (Db_interface.String "") in @@ -155,107 +155,107 @@ let do_db_xml_rpc_persistent_with_reopen ~host ~path (req: string) : Db_interfac let backoff_delay = ref 2.0 in (* initial delay = 2s *) let update_backoff_delay () = backoff_delay := !backoff_delay *. 
2.0; - if !backoff_delay < 2.0 then backoff_delay := 2.0 + if !backoff_delay < 2.0 then backoff_delay := 2.0 else if !backoff_delay > 256.0 then backoff_delay := 256.0 - in + in while (not !write_ok) do begin try - let req_string = req in - let length = String.length req_string in - if length > Xapi_globs.http_limit_max_rpc_size - then raise Http_svr.Client_requested_size_over_limit; - (* The pool_secret is added here and checked by the Xapi_http.add_handler RBAC code. *) - let open Xmlrpc_client in - let request = xmlrpc - ~version:"1.1" ~frame:true ~keep_alive:true - ~length:(Int64.of_int length) - ~cookie:["pool_secret", !Xapi_globs.pool_secret] ~body:req path in - match !my_connection with - None -> raise Goto_handler - | (Some stunnel_proc) -> - let fd = stunnel_proc.Stunnel.fd in - with_timestamp (fun () -> - with_http request (fun (response, _) -> - (* XML responses must have a content-length because we cannot use the Xml.parse_in - in_channel function: the input channel will buffer an arbitrary amount of stuff - and we'll be out of sync with the next request. *) - let res = match response.Http.Response.content_length with - | None -> raise Content_length_required - | Some l -> begin - if (Int64.to_int l) <= Sys.max_string_length then - Db_interface.String (Stdext.Unixext.really_read_string fd (Int64.to_int l)) - else - let buf = Stdext.Bigbuffer.make () in - Stdext.Unixext.really_read_bigbuffer fd buf l; - Db_interface.Bigbuf buf - end - in - write_ok := true; - result := res (* yippeee! return and exit from while loop *) - ) fd - ) + let req_string = req in + let length = String.length req_string in + if length > Xapi_globs.http_limit_max_rpc_size + then raise Http_svr.Client_requested_size_over_limit; + (* The pool_secret is added here and checked by the Xapi_http.add_handler RBAC code. 
*) + let open Xmlrpc_client in + let request = xmlrpc + ~version:"1.1" ~frame:true ~keep_alive:true + ~length:(Int64.of_int length) + ~cookie:["pool_secret", !Xapi_globs.pool_secret] ~body:req path in + match !my_connection with + None -> raise Goto_handler + | (Some stunnel_proc) -> + let fd = stunnel_proc.Stunnel.fd in + with_timestamp (fun () -> + with_http request (fun (response, _) -> + (* XML responses must have a content-length because we cannot use the Xml.parse_in + in_channel function: the input channel will buffer an arbitrary amount of stuff + and we'll be out of sync with the next request. *) + let res = match response.Http.Response.content_length with + | None -> raise Content_length_required + | Some l -> begin + if (Int64.to_int l) <= Sys.max_string_length then + Db_interface.String (Stdext.Unixext.really_read_string fd (Int64.to_int l)) + else + let buf = Stdext.Bigbuffer.make () in + Stdext.Unixext.really_read_bigbuffer fd buf l; + Db_interface.Bigbuf buf + end + in + write_ok := true; + result := res (* yippeee! return and exit from while loop *) + ) fd + ) with | Http_svr.Client_requested_size_over_limit -> - error "Content length larger than known limit (%d)." Xapi_globs.http_limit_max_rpc_size; - debug "Re-raising exception to caller."; - raise Http_svr.Client_requested_size_over_limit + error "Content length larger than known limit (%d)." Xapi_globs.http_limit_max_rpc_size; + debug "Re-raising exception to caller."; + raise Http_svr.Client_requested_size_over_limit (* TODO: This http exception handler caused CA-36936 and can probably be removed now that there's backoff delay in the generic handler _ below *) | Http_client.Http_error (http_code,err_msg) -> - error "Received HTTP error %s (%s) from master. This suggests our master address is wrong. Sleeping for %.0fs and then restarting." 
http_code err_msg !Xapi_globs.permanent_master_failure_retry_interval; - Thread.delay !Xapi_globs.permanent_master_failure_retry_interval; - exit Xapi_globs.restart_return_code + error "Received HTTP error %s (%s) from master. This suggests our master address is wrong. Sleeping for %.0fs and then restarting." http_code err_msg !Xapi_globs.permanent_master_failure_retry_interval; + Thread.delay !Xapi_globs.permanent_master_failure_retry_interval; + exit Xapi_globs.restart_return_code | e -> - begin - error "Caught %s" (Printexc.to_string e); - (* RPC failed - there's no way we can recover from this so try reopening connection every 2s + backoff delay *) - begin - match !my_connection with - None -> () - | (Some st_proc) -> - my_connection := None; (* don't want to try closing multiple times *) - (try Stunnel.disconnect st_proc with _ -> ()) - end; - let time_sofar = Unix.gettimeofday() -. time_call_started in - if !connection_timeout < 0. then - begin - if not !surpress_no_timeout_logs then - begin - debug "Connection to master died. I will continue to retry indefinitely (supressing future logging of this message)."; - error "Connection to master died. I will continue to retry indefinitely (supressing future logging of this message)."; - end; - surpress_no_timeout_logs := true - end - else - debug "Connection to master died: time taken so far in this call '%f'; will %s" - time_sofar (if !connection_timeout < 0. - then "never timeout" - else Printf.sprintf "timeout after '%f'" !connection_timeout); - if time_sofar > !connection_timeout && !connection_timeout >= 0. then - begin - if !restart_on_connection_timeout then - begin - debug "Exceeded timeout for retrying master connection: restarting xapi"; - exit Xapi_globs.restart_return_code - end - else - begin - debug "Exceeded timeout for retrying master connection: raising Cannot_connect_to_master"; - raise Cannot_connect_to_master - end - end; - debug "Sleeping %f seconds before retrying master connection..." 
!backoff_delay; - Thread.delay !backoff_delay; - update_backoff_delay (); - try - open_secure_connection() - with _ -> () (* oh well, maybe nextime... *) - end + begin + error "Caught %s" (Printexc.to_string e); + (* RPC failed - there's no way we can recover from this so try reopening connection every 2s + backoff delay *) + begin + match !my_connection with + None -> () + | (Some st_proc) -> + my_connection := None; (* don't want to try closing multiple times *) + (try Stunnel.disconnect st_proc with _ -> ()) + end; + let time_sofar = Unix.gettimeofday() -. time_call_started in + if !connection_timeout < 0. then + begin + if not !surpress_no_timeout_logs then + begin + debug "Connection to master died. I will continue to retry indefinitely (supressing future logging of this message)."; + error "Connection to master died. I will continue to retry indefinitely (supressing future logging of this message)."; + end; + surpress_no_timeout_logs := true + end + else + debug "Connection to master died: time taken so far in this call '%f'; will %s" + time_sofar (if !connection_timeout < 0. + then "never timeout" + else Printf.sprintf "timeout after '%f'" !connection_timeout); + if time_sofar > !connection_timeout && !connection_timeout >= 0. then + begin + if !restart_on_connection_timeout then + begin + debug "Exceeded timeout for retrying master connection: restarting xapi"; + exit Xapi_globs.restart_return_code + end + else + begin + debug "Exceeded timeout for retrying master connection: raising Cannot_connect_to_master"; + raise Cannot_connect_to_master + end + end; + debug "Sleeping %f seconds before retrying master connection..." !backoff_delay; + Thread.delay !backoff_delay; + update_backoff_delay (); + try + open_secure_connection() + with _ -> () (* oh well, maybe nextime... 
*) + end end done; !result - + let execute_remote_fn string path = let host = Pool_role.get_master_address () in Db_lock.with_lock diff --git a/ocaml/database/redo_log.ml b/ocaml/database/redo_log.ml index 0b90858a8eb..cda8102378b 100644 --- a/ocaml/database/redo_log.ml +++ b/ocaml/database/redo_log.ml @@ -23,10 +23,10 @@ module R = Debug.Make(struct let name = "redo_log" end) let get_static_device reason = (* Specifically use Static_vdis_list rather than Static_vdis to avoid the - cyclic dependency caused by reference to Server_helpers in Static_vdis *) + cyclic dependency caused by reference to Server_helpers in Static_vdis *) let vdis = List.filter - (fun x -> x.Static_vdis_list.reason = reason && x.Static_vdis_list.currently_attached) - (Static_vdis_list.list ()) + (fun x -> x.Static_vdis_list.reason = reason && x.Static_vdis_list.currently_attached) + (Static_vdis_list.list ()) in (* Return the path to the first attached VDI which matches the reason *) R.debug "Found %d VDIs matching [%s]" (List.length vdis) reason; @@ -46,27 +46,27 @@ let redo_log_sm_config = [ "type", "raw" ] (* Encapsulate the state of a single redo_log instance. 
*) type redo_log = { - name: string; - marker: string; - read_only: bool; - enabled: bool ref; - device: string option ref; - currently_accessible: bool ref; - state_change_callback: (bool -> unit) option; - time_of_last_failure: float ref; - backoff_delay: int ref; - sock: Unix.file_descr option ref; - pid: (Forkhelpers.pidty * string * string) option ref; - dying_processes_mutex: Mutex.t; - num_dying_processes: int ref; + name: string; + marker: string; + read_only: bool; + enabled: bool ref; + device: string option ref; + currently_accessible: bool ref; + state_change_callback: (bool -> unit) option; + time_of_last_failure: float ref; + backoff_delay: int ref; + sock: Unix.file_descr option ref; + pid: (Forkhelpers.pidty * string * string) option ref; + dying_processes_mutex: Mutex.t; + num_dying_processes: int ref; } module RedoLogSet = Set.Make( - struct - type t = redo_log - let compare = fun log1 log2 -> compare log1.marker log2.marker - end -) + struct + type t = redo_log + let compare = fun log1 log2 -> compare log1.marker log2.marker + end + ) (* Keep a store of all redo_logs - this will make it easy to write to all the active ones. 
*) let all_redo_logs = ref (RedoLogSet.empty) @@ -99,31 +99,31 @@ let disable log = let redo_log_events = Event.new_channel () let cannot_connect_fn log = - if !(log.currently_accessible) then begin - R.debug "Signalling unable to access redo log"; - Event.sync (Event.send redo_log_events (log.name, false)); - Opt.iter (fun callback -> callback false) log.state_change_callback - end; - log.currently_accessible := false + if !(log.currently_accessible) then begin + R.debug "Signalling unable to access redo log"; + Event.sync (Event.send redo_log_events (log.name, false)); + Opt.iter (fun callback -> callback false) log.state_change_callback + end; + log.currently_accessible := false let can_connect_fn log = - if not !(log.currently_accessible) then begin - R.debug "Signalling redo log is healthy"; - Event.sync (Event.send redo_log_events (log.name, true)); - Opt.iter (fun callback -> callback true) log.state_change_callback - end; - log.currently_accessible := true + if not !(log.currently_accessible) then begin + R.debug "Signalling redo log is healthy"; + Event.sync (Event.send redo_log_events (log.name, true)); + Opt.iter (fun callback -> callback true) log.state_change_callback + end; + log.currently_accessible := true (* ----------------------------------------------------------- *) (* Functions relating to the serialisation of redo log entries *) (* The type of a delta, describing an incremental change to the database. 
*) type t = - (* (tblname, newobjref, (k,v) list) *) + (* (tblname, newobjref, (k,v) list) *) | CreateRow of string * string * (string*string) list - (* (tblname, objref) *) + (* (tblname, objref) *) | DeleteRow of string * string - (* (tblname, objref, fldname, newval) *) + (* (tblname, objref, fldname, newval) *) | WriteField of string * string * string * string (* First 9 bytes of encoding of entries is an ASCII string indicating the kind of record, from {"CreateRow", "DeleteRow", "WriteFiel"} *) @@ -139,7 +139,7 @@ let redo_log_entry_to_string r = | CreateRow(tbl, objref, kvs) -> Printf.sprintf "CreateRow%08d%s%08d%s%08d%s" (String.length tbl) tbl (String.length objref) objref (List.length kvs) (String.concat "" - (List.map (fun (k,v) -> Printf.sprintf "%08d%s%08d%s" (String.length k) k (String.length v) v) kvs) + (List.map (fun (k,v) -> Printf.sprintf "%08d%s%08d%s" (String.length k) k (String.length v) v) kvs) ) | DeleteRow(tbl, objref) -> Printf.sprintf "DeleteRow%08d%s%08d%s" (String.length tbl) tbl (String.length objref) objref @@ -171,11 +171,11 @@ let string_to_redo_log_entry str = (* Parse key-value pairs *) let parse_kvs n = let rec aux acc = function - | 0 -> acc - | n -> - let k = parse_length_and_string str pos in - let v = parse_length_and_string str pos in - aux ((k,v)::acc) (n-1) + | 0 -> acc + | n -> + let k = parse_length_and_string str pos in + let v = parse_length_and_string str pos in + aux ((k,v)::acc) (n-1) in aux [] n in let kvs = parse_kvs num_kvs in @@ -185,7 +185,7 @@ let string_to_redo_log_entry str = let tbl = parse_length_and_string str pos in let objref = parse_length_and_string str pos in DeleteRow (tbl, objref) - + | "WriteFiel" -> let tbl = parse_length_and_string str pos in let objref = parse_length_and_string str pos in @@ -272,13 +272,13 @@ let read_database f gen_count sock latest_response_time datasockpath = finally (fun () -> - (* Pass the gen_count and the socket's fd to f. 
f may raise Unixext.Timeout if it cannot complete before latest_response_time. *) - f gen_count datasock expected_length latest_response_time; + (* Pass the gen_count and the socket's fd to f. f may raise Unixext.Timeout if it cannot complete before latest_response_time. *) + f gen_count datasock expected_length latest_response_time; ) (fun () -> - (* Close the data socket *) - R.debug "Closing the data socket"; - Unix.close datasock + (* Close the data socket *) + R.debug "Closing the data socket"; + Unix.close datasock ) let read_delta f gen_count sock latest_response_time = @@ -312,14 +312,14 @@ let rec read_read_response sock fn_db fn_delta expected_gen_count latest_respons read_read_response sock fn_db fn_delta (Generation.add_int gen_count 1) latest_response_time datasockpath end | "end__" -> R.debug "Reached the end of the read response"; () - | "nack_" -> + | "nack_" -> (* Read the error message *) let error = read_length_and_string sock latest_response_time in R.warn "Read error received: [%s]" error; if error = Block_device_io_errors.timeout_error_msg then raise Unixext.Timeout else raise (RedoLogFailure error) | e -> raise (CommunicationsProblem ("unrecognised read response prefix ["^e^"]")) - + let action_empty sock datasockpath = R.debug "Performing empty"; (* Compute desired response time *) @@ -364,34 +364,34 @@ let action_write_db marker generation_count write_fn sock datasockpath = * listening on the socket. *) let datasock = connect datasockpath latest_response_time in - + finally (fun () -> - (* Send data straight down the data channel, then close it to send an EOF. *) - (* Ideally, we would check whether this completes before the latest_response_time. Could implement this by performing the write in a separate thread. 
*) - - try - write_fn datasock; - R.debug "Finished writing database to data socket"; - with - | Sys_error("Connection reset by peer") -> - (* CA-41914: Note that if the block_device_io process internally - * throws Timeout (or indeed any other exception), it will forcibly - * close this connection, we'll see a Sys_error("Connection reset by - * peer"). This can be safely suppressed because we'll hear all the - * gory details in the response we read over the control socket. *) - R.warn "I/O process forcibly closed the data socket while trying to write database to it. Await the response to see why it did that."; - | e -> - (* We'll re-raise other exceptions, though. *) - R.error "Got an unexpected exception while trying to write database to the data socket: %s. Re-raising." (Printexc.to_string e); - raise e + (* Send data straight down the data channel, then close it to send an EOF. *) + (* Ideally, we would check whether this completes before the latest_response_time. Could implement this by performing the write in a separate thread. *) + + try + write_fn datasock; + R.debug "Finished writing database to data socket"; + with + | Sys_error("Connection reset by peer") -> + (* CA-41914: Note that if the block_device_io process internally + * throws Timeout (or indeed any other exception), it will forcibly + * close this connection, we'll see a Sys_error("Connection reset by + * peer"). This can be safely suppressed because we'll hear all the + * gory details in the response we read over the control socket. *) + R.warn "I/O process forcibly closed the data socket while trying to write database to it. Await the response to see why it did that."; + | e -> + (* We'll re-raise other exceptions, though. *) + R.error "Got an unexpected exception while trying to write database to the data socket: %s. Re-raising." 
(Printexc.to_string e); + raise e ) (fun () -> - (* Ensure the data socket is closed even if exception is thrown from write_fn *) - R.info "Closing data socket"; - Unix.close datasock; + (* Ensure the data socket is closed even if exception is thrown from write_fn *) + R.info "Closing data socket"; + Unix.close datasock; ); - + (* Read response *) let response_length = 12 in R.debug "Reading response..."; @@ -487,29 +487,29 @@ let shutdown log = (* Now we can forget about the communication channel to the process *) log.sock := None; end; - + (* Terminate the child process *) - let ipid = Forkhelpers.getpid p in + let ipid = Forkhelpers.getpid p in R.info "Killing I/O process with pid %d" ipid; Unix.kill ipid Sys.sigkill; (* Wait for the process to die. This is done in a separate thread in case it does not respond to the signal immediately. *) ignore (Thread.create (fun () -> - R.debug "Waiting for I/O process with pid %d to die..." ipid; - Mutex.execute log.dying_processes_mutex - (fun () -> log.num_dying_processes := !(log.num_dying_processes) + 1); - ignore(Forkhelpers.waitpid p); - R.debug "Finished waiting for process %d" ipid; - Mutex.execute log.dying_processes_mutex - (fun () -> log.num_dying_processes := !(log.num_dying_processes) - 1) - ) ()); + R.debug "Waiting for I/O process with pid %d to die..." 
ipid; + Mutex.execute log.dying_processes_mutex + (fun () -> log.num_dying_processes := !(log.num_dying_processes) + 1); + ignore(Forkhelpers.waitpid p); + R.debug "Finished waiting for process %d" ipid; + Mutex.execute log.dying_processes_mutex + (fun () -> log.num_dying_processes := !(log.num_dying_processes) - 1) + ) ()); (* Forget about that process *) log.pid := None; - + (* Attempt to remove the sockets *) List.iter (fun sockpath -> - R.debug "Removing socket %s" sockpath; - Unixext.unlink_safe sockpath - ) [ctrlsockpath; datasockpath] + R.debug "Removing socket %s" sockpath; + Unixext.unlink_safe sockpath + ) [ctrlsockpath; datasockpath] end; with _ -> () (* ignore any errors *) end @@ -561,7 +561,7 @@ let startup log = end end; match !(log.pid) with - | Some (_, ctrlsockpath, _) -> + | Some (_, ctrlsockpath, _) -> begin match !(log.sock) with | Some _ -> () (* We're already connected *) @@ -572,33 +572,33 @@ let startup log = let s = connect ctrlsockpath latest_connect_time in finally (fun () -> - try - begin - (* Check that we connected okay by reading the startup message *) - let response_length = 12 in - let response = Unixext.time_limited_read s response_length latest_connect_time in - match response with - | "connect|ack_" -> - R.info "Connect was successful"; - (* Save the socket. This defers the responsibility for closing it to shutdown(). *) - log.sock := Some s - | "connect|nack" -> - (* Read the error message *) - let error = read_length_and_string s latest_connect_time in - R.warn "Connect was unsuccessful: [%s]" error; - broken log; - | e -> - R.warn "Received unexpected connect response: [%s]" e; - broken log - end - with Unixext.Timeout -> R.warn "Timed out waiting to connect"; broken log - ) - (fun () -> - (* If the socket s has been opened, but sock hasn't been set then close it here. 
*) - match !(log.sock) with - | Some _ -> () - | None -> ignore_exn (fun () -> Unix.close s) - ) + try + begin + (* Check that we connected okay by reading the startup message *) + let response_length = 12 in + let response = Unixext.time_limited_read s response_length latest_connect_time in + match response with + | "connect|ack_" -> + R.info "Connect was successful"; + (* Save the socket. This defers the responsibility for closing it to shutdown(). *) + log.sock := Some s + | "connect|nack" -> + (* Read the error message *) + let error = read_length_and_string s latest_connect_time in + R.warn "Connect was unsuccessful: [%s]" error; + broken log; + | e -> + R.warn "Received unexpected connect response: [%s]" e; + broken log + end + with Unixext.Timeout -> R.warn "Timed out waiting to connect"; broken log + ) + (fun () -> + (* If the socket s has been opened, but sock hasn't been set then close it here. *) + match !(log.sock) with + | Some _ -> () + | None -> ignore_exn (fun () -> Unix.close s) + ) end | None -> () (* don't attempt to connect *) with TooManyProcesses -> @@ -613,7 +613,7 @@ let switch log vdi_reason = (* Given a socket, execute a function and catch exceptions. 
*) let perform_action f desc sock log = try - match !(log.pid) with + match !(log.pid) with | None -> () | Some (_, _, datasockpath) -> R.debug "About to perform action %s" desc; @@ -658,40 +658,40 @@ let connect_and_perform_action f desc log = let redo_log_creation_mutex = Mutex.create () let create ~name ~state_change_callback ~read_only = - let instance = { - name = name; - marker = Uuid.to_string (Uuid.make_uuid ()); - read_only = read_only; - enabled = ref false; - device = ref None; - currently_accessible = ref true; - state_change_callback = state_change_callback; - time_of_last_failure = ref 0.; - backoff_delay = ref Xapi_globs.redo_log_initial_backoff_delay; - sock = ref None; - pid = ref None; - dying_processes_mutex = Mutex.create (); - num_dying_processes = ref 0; - } in - Mutex.execute redo_log_creation_mutex - (fun () -> all_redo_logs := RedoLogSet.add instance !all_redo_logs); - instance + let instance = { + name = name; + marker = Uuid.to_string (Uuid.make_uuid ()); + read_only = read_only; + enabled = ref false; + device = ref None; + currently_accessible = ref true; + state_change_callback = state_change_callback; + time_of_last_failure = ref 0.; + backoff_delay = ref Xapi_globs.redo_log_initial_backoff_delay; + sock = ref None; + pid = ref None; + dying_processes_mutex = Mutex.create (); + num_dying_processes = ref 0; + } in + Mutex.execute redo_log_creation_mutex + (fun () -> all_redo_logs := RedoLogSet.add instance !all_redo_logs); + instance let delete log = - shutdown log; - disable log; - Mutex.execute redo_log_creation_mutex - (fun () -> all_redo_logs := RedoLogSet.remove log !all_redo_logs) + shutdown log; + disable log; + Mutex.execute redo_log_creation_mutex + (fun () -> all_redo_logs := RedoLogSet.remove log !all_redo_logs) (* -------------------------------------------------------- *) (* Helper functions for interacting with multiple redo_logs *) let with_active_redo_logs f = - Mutex.execute redo_log_creation_mutex - (fun () -> - let 
active_redo_logs = - RedoLogSet.filter (fun log -> (is_enabled log) && not(log.read_only)) !(all_redo_logs) - in - RedoLogSet.iter f active_redo_logs) + Mutex.execute redo_log_creation_mutex + (fun () -> + let active_redo_logs = + RedoLogSet.filter (fun log -> (is_enabled log) && not(log.read_only)) !(all_redo_logs) + in + RedoLogSet.iter f active_redo_logs) (* --------------------------------------------------------------- *) (* Functions which interact with the redo log on the block device. *) @@ -742,46 +742,46 @@ let empty log = (* Flush the database to the given redo_log instance. *) let flush_db_to_redo_log db log = - R.info "Flushing database to redo_log [%s]" log.name; - let write_db_to_fd = (fun out_fd -> Db_xml.To.fd out_fd db) in - write_db (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) write_db_to_fd log; - !(log.currently_accessible) + R.info "Flushing database to redo_log [%s]" log.name; + let write_db_to_fd = (fun out_fd -> Db_xml.To.fd out_fd db) in + write_db (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) write_db_to_fd log; + !(log.currently_accessible) (* Write the given database to all active redo_logs *) let flush_db_to_all_active_redo_logs db = - R.info "Flushing database to all active redo-logs"; - with_active_redo_logs (fun log -> - ignore(flush_db_to_redo_log db log)) + R.info "Flushing database to all active redo-logs"; + with_active_redo_logs (fun log -> + ignore(flush_db_to_redo_log db log)) (* Write a delta to all active redo_logs *) let database_callback event db = - let to_write = - match event with - | Db_cache_types.RefreshRow (tblname, objref) -> - None - | Db_cache_types.WriteField (tblname, objref, fldname, oldval, newval) -> - R.debug "WriteField(%s, %s, %s, %s, %s)" tblname objref fldname (Schema.Value.marshal oldval) (Schema.Value.marshal newval); - if Schema.is_field_persistent (Db_cache_types.Database.schema db) tblname fldname - then Some (WriteField(tblname, objref, 
fldname, Schema.Value.marshal newval)) - else None - | Db_cache_types.PreDelete (tblname, objref) -> - None - | Db_cache_types.Delete (tblname, objref, _) -> - if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname - then Some (DeleteRow(tblname, objref)) - else None - | Db_cache_types.Create (tblname, objref, kvs) -> - if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname - then Some (CreateRow(tblname, objref, (List.map (fun (k, v) -> k, Schema.Value.marshal v) kvs))) - else None - in - - Opt.iter (fun entry -> - with_active_redo_logs (fun log -> - write_delta (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) entry - (fun () -> - (* the function which will be invoked if a database write is required instead of a delta *) - ignore(flush_db_to_redo_log db log)) - log - ) - ) to_write + let to_write = + match event with + | Db_cache_types.RefreshRow (tblname, objref) -> + None + | Db_cache_types.WriteField (tblname, objref, fldname, oldval, newval) -> + R.debug "WriteField(%s, %s, %s, %s, %s)" tblname objref fldname (Schema.Value.marshal oldval) (Schema.Value.marshal newval); + if Schema.is_field_persistent (Db_cache_types.Database.schema db) tblname fldname + then Some (WriteField(tblname, objref, fldname, Schema.Value.marshal newval)) + else None + | Db_cache_types.PreDelete (tblname, objref) -> + None + | Db_cache_types.Delete (tblname, objref, _) -> + if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname + then Some (DeleteRow(tblname, objref)) + else None + | Db_cache_types.Create (tblname, objref, kvs) -> + if Schema.is_table_persistent (Db_cache_types.Database.schema db) tblname + then Some (CreateRow(tblname, objref, (List.map (fun (k, v) -> k, Schema.Value.marshal v) kvs))) + else None + in + + Opt.iter (fun entry -> + with_active_redo_logs (fun log -> + write_delta (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db)) entry + (fun () -> + (* the function 
which will be invoked if a database write is required instead of a delta *) + ignore(flush_db_to_redo_log db log)) + log + ) + ) to_write diff --git a/ocaml/database/redo_log.mli b/ocaml/database/redo_log.mli index 67573cb1b3b..c606400f041 100644 --- a/ocaml/database/redo_log.mli +++ b/ocaml/database/redo_log.mli @@ -17,26 +17,26 @@ val get_static_device : string -> string option (** Finds an attached metadata VDI with a given reason *) -val minimum_vdi_size : int64 +val minimum_vdi_size : int64 (** Minimum size for redo log VDI *) val redo_log_sm_config : (string * string) list (** SM config for redo log VDI *) (** {redo_log data type} *) type redo_log = { - name: string; - marker: string; - read_only: bool; - enabled: bool ref; - device: string option ref; - currently_accessible: bool ref; - state_change_callback: (bool -> unit) option; - time_of_last_failure: float ref; - backoff_delay: int ref; - sock: Unix.file_descr option ref; - pid: (Forkhelpers.pidty * string * string) option ref; - dying_processes_mutex: Mutex.t; - num_dying_processes: int ref; + name: string; + marker: string; + read_only: bool; + enabled: bool ref; + device: string option ref; + currently_accessible: bool ref; + state_change_callback: (bool -> unit) option; + time_of_last_failure: float ref; + backoff_delay: int ref; + sock: Unix.file_descr option ref; + pid: (Forkhelpers.pidty * string * string) option ref; + dying_processes_mutex: Mutex.t; + num_dying_processes: int ref; } (** {2 Enabling and disabling writing} *) @@ -79,14 +79,14 @@ val with_active_redo_logs : (redo_log -> unit) -> unit (** The type of a delta, describing an incremental change to the database. *) type t = | CreateRow of string * string * (string*string) list - (** [CreateRow (tblname, newobjref, [(k1,v1); ...])] - represents the creation of a row in table [tblname], with key [newobjref], and columns [[k1; ...]] having values [[v1; ...]]. 
*) + (** [CreateRow (tblname, newobjref, [(k1,v1); ...])] + represents the creation of a row in table [tblname], with key [newobjref], and columns [[k1; ...]] having values [[v1; ...]]. *) | DeleteRow of string * string - (** [DeleteRow (tblname, objref)] - represents the deletion of a row in table [tblname] with key [objref]. *) + (** [DeleteRow (tblname, objref)] + represents the deletion of a row in table [tblname] with key [objref]. *) | WriteField of string * string * string * string - (** [WriteField (tblname, objref, fldname, newval)] - represents the write to the field with name [fldname] of a row in table [tblname] with key [objref], overwriting its value with [newval]. *) + (** [WriteField (tblname, objref, fldname, newval)] + represents the write to the field with name [fldname] of a row in table [tblname] with key [objref], overwriting its value with [newval]. *) val write_db : Generation.t -> (Unix.file_descr -> unit) -> redo_log -> unit (** Write a database. diff --git a/ocaml/database/ref_index.ml b/ocaml/database/ref_index.ml index 7646355ff6b..9f5e7fb3296 100644 --- a/ocaml/database/ref_index.ml +++ b/ocaml/database/ref_index.ml @@ -19,22 +19,22 @@ open Db_cache_types open Stdext type indexrec = { - name_label:string option; - uuid: string; - _ref:string + name_label:string option; + uuid: string; + _ref:string } let string_of (x: indexrec) = Printf.sprintf "%s%s" x.uuid (Opt.default "" (Opt.map (fun name -> Printf.sprintf " (%s)" name) x.name_label)) let lookup key = - let t = Db_backend.make () in - let db = Db_ref.get_database t in - let r (tblname, objref) = - let row = Table.find objref (TableSet.find tblname (Database.tableset db)) in { - name_label = (try Some (Schema.Value.Unsafe_cast.string (Row.find Db_names.name_label row)) with _ -> None); - uuid = Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid row); - _ref = Schema.Value.Unsafe_cast.string (Row.find Db_names.ref row); - } in - Opt.map r (Database.lookup_key key db) + let t = 
Db_backend.make () in + let db = Db_ref.get_database t in + let r (tblname, objref) = + let row = Table.find objref (TableSet.find tblname (Database.tableset db)) in { + name_label = (try Some (Schema.Value.Unsafe_cast.string (Row.find Db_names.name_label row)) with _ -> None); + uuid = Schema.Value.Unsafe_cast.string (Row.find Db_names.uuid row); + _ref = Schema.Value.Unsafe_cast.string (Row.find Db_names.ref row); + } in + Opt.map r (Database.lookup_key key db) diff --git a/ocaml/database/ref_index.mli b/ocaml/database/ref_index.mli index 37dd07b01c7..16cd44cf3ff 100644 --- a/ocaml/database/ref_index.mli +++ b/ocaml/database/ref_index.mli @@ -12,9 +12,9 @@ * GNU Lesser General Public License for more details. *) type indexrec = { - name_label: string option; - uuid: string; - _ref: string + name_label: string option; + uuid: string; + _ref: string } val string_of : indexrec -> string val lookup : string (* ref or uuid *) -> indexrec option diff --git a/ocaml/database/schema.ml b/ocaml/database/schema.ml index d0b32b6aa0c..ca8d613299b 100644 --- a/ocaml/database/schema.ml +++ b/ocaml/database/schema.ml @@ -14,87 +14,87 @@ open Sexplib.Std module Type = struct - type t = - | String - | Set (* of strings *) - | Pairs (* of string * string *) - with sexp - - exception Error of t * t - let _ = Printexc.register_printer (function - | Error (expected, actual) -> - Some (Printf.sprintf "Schema.Type.Error: expected %s; received %s" - (Sexplib.Sexp.to_string_hum (sexp_of_t expected)) - (Sexplib.Sexp.to_string_hum (sexp_of_t actual))) - | _ -> None - ) + type t = + | String + | Set (* of strings *) + | Pairs (* of string * string *) + with sexp + + exception Error of t * t + let _ = Printexc.register_printer (function + | Error (expected, actual) -> + Some (Printf.sprintf "Schema.Type.Error: expected %s; received %s" + (Sexplib.Sexp.to_string_hum (sexp_of_t expected)) + (Sexplib.Sexp.to_string_hum (sexp_of_t actual))) + | _ -> None + ) end module Value = struct - type t = - 
| String of string - | Set of string list - | Pairs of (string * string) list - with sexp - - let marshal = function - | String x -> x - | Set xs -> String_marshall_helper.set (fun x -> x) xs - | Pairs xs -> String_marshall_helper.map (fun x -> x) (fun x -> x) xs - - let unmarshal ty x = match ty with - | Type.String -> String x - | Type.Set -> Set (String_unmarshall_helper.set (fun x -> x) x) - | Type.Pairs -> Pairs (String_unmarshall_helper.map (fun x -> x) (fun x -> x) x) - - module Unsafe_cast = struct - let string = function - | String x -> x - | Set _ -> raise (Type.Error(Type.String, Type.Set)) - | Pairs _ -> raise (Type.Error(Type.String, Type.Pairs)) - - let set = function - | Set xs -> xs - | String _ -> raise (Type.Error(Type.Set, Type.String)) - | Pairs _ -> raise (Type.Error(Type.Set, Type.Pairs)) - - let pairs = function - | Pairs x -> x - | String _ -> raise (Type.Error(Type.Pairs, Type.String)) - | Set _ -> raise (Type.Error(Type.Pairs, Type.Set)) - end + type t = + | String of string + | Set of string list + | Pairs of (string * string) list + with sexp + + let marshal = function + | String x -> x + | Set xs -> String_marshall_helper.set (fun x -> x) xs + | Pairs xs -> String_marshall_helper.map (fun x -> x) (fun x -> x) xs + + let unmarshal ty x = match ty with + | Type.String -> String x + | Type.Set -> Set (String_unmarshall_helper.set (fun x -> x) x) + | Type.Pairs -> Pairs (String_unmarshall_helper.map (fun x -> x) (fun x -> x) x) + + module Unsafe_cast = struct + let string = function + | String x -> x + | Set _ -> raise (Type.Error(Type.String, Type.Set)) + | Pairs _ -> raise (Type.Error(Type.String, Type.Pairs)) + + let set = function + | Set xs -> xs + | String _ -> raise (Type.Error(Type.Set, Type.String)) + | Pairs _ -> raise (Type.Error(Type.Set, Type.Pairs)) + + let pairs = function + | Pairs x -> x + | String _ -> raise (Type.Error(Type.Pairs, Type.String)) + | Set _ -> raise (Type.Error(Type.Pairs, Type.Set)) + end end module Column 
= struct - type t = { - name: string; - persistent: bool; (** see is_field_persistent *) - empty: Value.t; (** fresh value used when loading non-persistent fields *) - default: Value.t option; (** if column is missing, this is default value is used *) - ty: Type.t; (** the type of the value in the column *) - issetref: bool; (** only so we can special case set refs in the interface *) - } with sexp + type t = { + name: string; + persistent: bool; (** see is_field_persistent *) + empty: Value.t; (** fresh value used when loading non-persistent fields *) + default: Value.t option; (** if column is missing, this is default value is used *) + ty: Type.t; (** the type of the value in the column *) + issetref: bool; (** only so we can special case set refs in the interface *) + } with sexp end module Table = struct - type t = { - name: string; - columns: Column.t list; - persistent: bool; - } with sexp - let find name t = List.find (fun col -> col.Column.name = name) t.columns + type t = { + name: string; + columns: Column.t list; + persistent: bool; + } with sexp + let find name t = List.find (fun col -> col.Column.name = name) t.columns end -type relationship = - | OneToMany of string * string * string * string - with sexp +type relationship = + | OneToMany of string * string * string * string +with sexp module Database = struct - type t = { - tables: Table.t list; - } with sexp + type t = { + tables: Table.t list; + } with sexp - let find name t = List.find (fun tbl -> tbl.Table.name = name) t.tables + let find name t = List.find (fun tbl -> tbl.Table.name = name) t.tables end (** indexed by table name, a list of (this field, foreign table, foreign field) *) @@ -102,72 +102,72 @@ type foreign = (string * string * string) list with sexp module ForeignMap = struct - include Map.Make(struct - type t = string - let compare = Pervasives.compare - end) - - type t' = (string * foreign) list - with sexp - - type m = foreign t - let sexp_of_m t : Sexplib.Sexp.t = - let t' = 
fold (fun key foreign acc -> (key, foreign) :: acc) t [] in - sexp_of_t' t' - - let m_of_sexp sexp : m = - let t' = t'_of_sexp sexp in - List.fold_left (fun acc (key, foreign) -> add key foreign acc) empty t' + include Map.Make(struct + type t = string + let compare = Pervasives.compare + end) + + type t' = (string * foreign) list + with sexp + + type m = foreign t + let sexp_of_m t : Sexplib.Sexp.t = + let t' = fold (fun key foreign acc -> (key, foreign) :: acc) t [] in + sexp_of_t' t' + + let m_of_sexp sexp : m = + let t' = t'_of_sexp sexp in + List.fold_left (fun acc (key, foreign) -> add key foreign acc) empty t' end type t = { - major_vsn: int; - minor_vsn: int; - database: Database.t; - (** indexed by table name, a list of (this field, foreign table, foreign field) *) - one_to_many: ForeignMap.m; - many_to_many: ForeignMap.m; + major_vsn: int; + minor_vsn: int; + database: Database.t; + (** indexed by table name, a list of (this field, foreign table, foreign field) *) + one_to_many: ForeignMap.m; + many_to_many: ForeignMap.m; } with sexp let database x = x.database -let table tblname x = - try - Database.find tblname (database x) - with Not_found as e -> - Printf.printf "Failed to find table: %s\n%!" tblname; - raise e +let table tblname x = + try + Database.find tblname (database x) + with Not_found as e -> + Printf.printf "Failed to find table: %s\n%!" 
tblname; + raise e let empty = { - major_vsn = 0; - minor_vsn = 0; - database = { Database.tables = [] }; - one_to_many = ForeignMap.empty; - many_to_many = ForeignMap.empty; + major_vsn = 0; + minor_vsn = 0; + database = { Database.tables = [] }; + one_to_many = ForeignMap.empty; + many_to_many = ForeignMap.empty; } -let is_table_persistent schema tblname = - (table tblname schema).Table.persistent +let is_table_persistent schema tblname = + (table tblname schema).Table.persistent -let is_field_persistent schema tblname fldname = - let tbl = table tblname schema in - let col = Table.find fldname tbl in - tbl.Table.persistent && col.Column.persistent +let is_field_persistent schema tblname fldname = + let tbl = table tblname schema in + let col = Table.find fldname tbl in + tbl.Table.persistent && col.Column.persistent -let table_names schema = - List.map (fun t -> t.Table.name) (database schema).Database.tables +let table_names schema = + List.map (fun t -> t.Table.name) (database schema).Database.tables module D=Debug.Make(struct let name="xapi" end) open D -let one_to_many tblname schema = - (* If there is no entry in the map it means that the table has no one-to-many relationships *) - try - ForeignMap.find tblname schema.one_to_many - with Not_found -> [] - -let many_to_many tblname schema = - (* If there is no entry in the map it means that the table has no many-to-many relationships *) - try - ForeignMap.find tblname schema.many_to_many - with Not_found -> [] +let one_to_many tblname schema = + (* If there is no entry in the map it means that the table has no one-to-many relationships *) + try + ForeignMap.find tblname schema.one_to_many + with Not_found -> [] + +let many_to_many tblname schema = + (* If there is no entry in the map it means that the table has no many-to-many relationships *) + try + ForeignMap.find tblname schema.many_to_many + with Not_found -> [] diff --git a/ocaml/database/string_marshall_helper.ml 
b/ocaml/database/string_marshall_helper.ml index 99dd772a81e..42392badb5a 100644 --- a/ocaml/database/string_marshall_helper.ml +++ b/ocaml/database/string_marshall_helper.ml @@ -19,7 +19,7 @@ open D (* We need to ensure that there are no nasty characters in the set/map marshalling below. The autogenerated db_actions.ml file, calls these directly in a few scenarios, including when someone does a Db.*_set of a structure field (i.e. set or map). The bug that triggered - this realisation was: CA-22302. *) + this realisation was: CA-22302. *) let ensure_utf8_xml string = let length = String.length string in @@ -32,7 +32,7 @@ let set f ks = SExpr.string_of (SExpr.Node (List.map (fun x -> SExpr.String (ensure_utf8_xml (f x))) ks)) -let map f g kv = +let map f g kv = SExpr.string_of - (SExpr.Node (List.map (fun (k, v) -> - SExpr.Node [ SExpr.String (ensure_utf8_xml (f k)); SExpr.String (ensure_utf8_xml (g v)) ]) kv)) + (SExpr.Node (List.map (fun (k, v) -> + SExpr.Node [ SExpr.String (ensure_utf8_xml (f k)); SExpr.String (ensure_utf8_xml (g v)) ]) kv)) diff --git a/ocaml/database/string_unmarshall_helper.ml b/ocaml/database/string_unmarshall_helper.ml index d90e0846086..40e9317d98a 100644 --- a/ocaml/database/string_unmarshall_helper.ml +++ b/ocaml/database/string_unmarshall_helper.ml @@ -15,18 +15,18 @@ exception Failure of string -let set f (m: string) = +let set f (m: string) = match SExpr_TS.of_string m with | SExpr.Node xs -> - List.map (function SExpr.String x -> f x - | _ -> raise (Failure m)) xs + List.map (function SExpr.String x -> f x + | _ -> raise (Failure m)) xs | _ -> raise (Failure m) -let map f g (m: string) = +let map f g (m: string) = match SExpr_TS.of_string m with | SExpr.Node xs -> - List.map (function SExpr.Node [ SExpr.String k; SExpr.String v ] -> f k, g v - | _ -> raise (Failure m)) xs + List.map (function SExpr.Node [ SExpr.String k; SExpr.String v ] -> f k, g v + | _ -> raise (Failure m)) xs | _ -> raise (Failure m) diff --git 
a/ocaml/database/test_schemas.ml b/ocaml/database/test_schemas.ml index 90452e57a0c..38509aab50a 100644 --- a/ocaml/database/test_schemas.ml +++ b/ocaml/database/test_schemas.ml @@ -1,121 +1,121 @@ -let schema = - let _ref = { - Schema.Column.name = Db_names.ref; - persistent = true; - empty = Schema.Value.String ""; - default = None; - ty = Schema.Type.String; - issetref = false; - } in - let uuid = { - Schema.Column.name = Db_names.uuid; - persistent = true; - empty = Schema.Value.String ""; - default = None; - ty = Schema.Type.String; - issetref = false; - } in - let name_label = { - Schema.Column.name = Db_names.name_label; - persistent = true; - empty = Schema.Value.String ""; - default = None; - ty = Schema.Type.String; - issetref = false; - } in - let name_description = { - Schema.Column.name = "name__description"; - persistent = true; - empty = Schema.Value.String ""; - default = None; - ty = Schema.Type.String; - issetref = false; - } in - let vbds = { - Schema.Column.name = "VBDs"; - persistent = false; - empty = Schema.Value.Set []; - default = Some(Schema.Value.Set []); - ty = Schema.Type.Set; - issetref = true; - } in - let other_config = { - Schema.Column.name = "other_config"; - persistent = false; - empty = Schema.Value.Pairs []; - default = Some(Schema.Value.Pairs []); - ty = Schema.Type.Pairs; - issetref = false; - } in - let pp = { - Schema.Column.name = "protection_policy"; - persistent = true; - empty = Schema.Value.String ""; - default = Some(Schema.Value.String "OpaqueRef:NULL"); - ty = Schema.Type.String; - issetref = false; - } in - let tags = { - Schema.Column.name = "tags"; - persistent = true; - empty = Schema.Value.Set []; - default = Some(Schema.Value.Set []); - ty = Schema.Type.Set; - issetref = false; - } in - let vm = { - Schema.Column.name = "VM"; - persistent = true; - empty = Schema.Value.String ""; - default = None; - ty = Schema.Type.String; - issetref = false; - } in +let schema = + let _ref = { + Schema.Column.name = 
Db_names.ref; + persistent = true; + empty = Schema.Value.String ""; + default = None; + ty = Schema.Type.String; + issetref = false; + } in + let uuid = { + Schema.Column.name = Db_names.uuid; + persistent = true; + empty = Schema.Value.String ""; + default = None; + ty = Schema.Type.String; + issetref = false; + } in + let name_label = { + Schema.Column.name = Db_names.name_label; + persistent = true; + empty = Schema.Value.String ""; + default = None; + ty = Schema.Type.String; + issetref = false; + } in + let name_description = { + Schema.Column.name = "name__description"; + persistent = true; + empty = Schema.Value.String ""; + default = None; + ty = Schema.Type.String; + issetref = false; + } in + let vbds = { + Schema.Column.name = "VBDs"; + persistent = false; + empty = Schema.Value.Set []; + default = Some(Schema.Value.Set []); + ty = Schema.Type.Set; + issetref = true; + } in + let other_config = { + Schema.Column.name = "other_config"; + persistent = false; + empty = Schema.Value.Pairs []; + default = Some(Schema.Value.Pairs []); + ty = Schema.Type.Pairs; + issetref = false; + } in + let pp = { + Schema.Column.name = "protection_policy"; + persistent = true; + empty = Schema.Value.String ""; + default = Some(Schema.Value.String "OpaqueRef:NULL"); + ty = Schema.Type.String; + issetref = false; + } in + let tags = { + Schema.Column.name = "tags"; + persistent = true; + empty = Schema.Value.Set []; + default = Some(Schema.Value.Set []); + ty = Schema.Type.Set; + issetref = false; + } in + let vm = { + Schema.Column.name = "VM"; + persistent = true; + empty = Schema.Value.String ""; + default = None; + ty = Schema.Type.String; + issetref = false; + } in - let vm_table = { - Schema.Table.name = "VM"; - columns = [ _ref; uuid; name_label; vbds; pp; name_description; tags; other_config ]; - persistent = true; - } in - let vbd_table = { - Schema.Table.name = "VBD"; - columns = [ _ref; uuid; vm ]; - persistent = true; - } in - let database = { - 
Schema.Database.tables = [ vm_table; vbd_table ]; - } in - let one_to_many = Schema.ForeignMap.add "VBD" [ "VM", "VM", "VBDs" ] (Schema.ForeignMap.empty) in - { - - Schema.major_vsn = 1; - minor_vsn = 1; - database = database; - (** indexed by table name, a list of (this field, foreign table, foreign field) *) - one_to_many = one_to_many; - many_to_many = Schema.ForeignMap.empty; - } + let vm_table = { + Schema.Table.name = "VM"; + columns = [ _ref; uuid; name_label; vbds; pp; name_description; tags; other_config ]; + persistent = true; + } in + let vbd_table = { + Schema.Table.name = "VBD"; + columns = [ _ref; uuid; vm ]; + persistent = true; + } in + let database = { + Schema.Database.tables = [ vm_table; vbd_table ]; + } in + let one_to_many = Schema.ForeignMap.add "VBD" [ "VM", "VM", "VBDs" ] (Schema.ForeignMap.empty) in + { + + Schema.major_vsn = 1; + minor_vsn = 1; + database = database; + (** indexed by table name, a list of (this field, foreign table, foreign field) *) + one_to_many = one_to_many; + many_to_many = Schema.ForeignMap.empty; + } let many_to_many = - let bar_column = { Schema.Column.name = "bars"; - persistent = false; - empty = Schema.Value.Pairs []; - default = None; - ty = Schema.Type.Pairs; - issetref = false; - } in - let foo_column = { bar_column with Schema.Column.name = "foos" } in - let foo_table = { Schema.Table.name = "foo"; columns = [ bar_column ]; persistent = true } in - let bar_table = { Schema.Table.name = "bar"; columns = [ foo_column ]; persistent = true } in - - let database = { Schema.Database.tables = [ foo_table; bar_table ] } in - let many_to_many = - Schema.ForeignMap.add "foo" [ "bars", "bar", "foos" ] - (Schema.ForeignMap.add "bar" [ "foos", "foo", "bars" ] - Schema.ForeignMap.empty) in - let schema = { Schema.empty with - Schema.database = database; - many_to_many = many_to_many - } in - schema + let bar_column = { Schema.Column.name = "bars"; + persistent = false; + empty = Schema.Value.Pairs []; + default = None; + 
ty = Schema.Type.Pairs; + issetref = false; + } in + let foo_column = { bar_column with Schema.Column.name = "foos" } in + let foo_table = { Schema.Table.name = "foo"; columns = [ bar_column ]; persistent = true } in + let bar_table = { Schema.Table.name = "bar"; columns = [ foo_column ]; persistent = true } in + + let database = { Schema.Database.tables = [ foo_table; bar_table ] } in + let many_to_many = + Schema.ForeignMap.add "foo" [ "bars", "bar", "foos" ] + (Schema.ForeignMap.add "bar" [ "foos", "foo", "bars" ] + Schema.ForeignMap.empty) in + let schema = { Schema.empty with + Schema.database = database; + many_to_many = many_to_many + } in + schema diff --git a/ocaml/database/unit_test_marshall.ml b/ocaml/database/unit_test_marshall.ml index c04733beea4..9c4f1541f7f 100644 --- a/ocaml/database/unit_test_marshall.ml +++ b/ocaml/database/unit_test_marshall.ml @@ -28,11 +28,11 @@ let gen_random_string() = let rec fillstr l = if l=len then () else begin - String.set string l (ranchar()); - fillstr (l+1) + String.set string l (ranchar()); + fillstr (l+1) end in - fillstr 0; - string + fillstr 0; + string let gen_random_string_option() = if (Random.int 2)=0 then @@ -44,12 +44,12 @@ let gen_random_bool() = (Random.int 2)=0 let gen_random_list f = - let len = Random.int 50 in - let rec makesl x = - match x with - 0 -> [] - | n -> (f())::(makesl (n-1)) in - makesl len + let len = Random.int 50 in + let rec makesl x = + match x with + 0 -> [] + | n -> (f())::(makesl (n-1)) in + makesl len let gen_random_where () = {table=gen_random_string(); @@ -60,11 +60,11 @@ let gen_random_where () = exception RandomRangeError let gen_random_structured_op () = match (Random.int 4) with - 0 -> AddSet - | 1 -> RemoveSet - | 2 -> AddMap - | 3 -> RemoveMap - | _ -> raise RandomRangeError (* should never be thrown *) + 0 -> AddSet + | 1 -> RemoveSet + | 2 -> AddMap + | 3 -> RemoveMap + | _ -> raise RandomRangeError (* should never be thrown *) let gen_random_2string() = 
(gen_random_string(), gen_random_string()) @@ -79,12 +79,12 @@ let gen_random_4string() = (* test marshall unmarshall is id *) let tm u m x = let s = m x in - print_string (Xml.to_string_fmt s); - print_string "\n\n"; - (u s)=x + print_string (Xml.to_string_fmt s); + print_string "\n\n"; + (u s)=x let test_gtfr_args() = - tm + tm unmarshall_get_table_from_ref_args marshall_get_table_from_ref_args (gen_random_string()) @@ -208,8 +208,8 @@ let test_readrec_response() = let test_exp = And(Eq(Field "asd", Literal "qwe"), Or(Eq(Field "asd", Literal "qwe"), - And(Eq(Field "asd", Literal "qwe"), - Not False))) + And(Eq(Field "asd", Literal "qwe"), + Not False))) let test_find_refs_with_filter_args() = tm diff --git a/ocaml/database/xml_spaces.ml b/ocaml/database/xml_spaces.ml index b2ad569bdcd..bc03514082d 100644 --- a/ocaml/database/xml_spaces.ml +++ b/ocaml/database/xml_spaces.ml @@ -15,73 +15,73 @@ let protect_char = '%' type change = - | No_change - | Replace of char * char (* replace 2 chars by 2 other chars *) - | Compress of char (* replace 2 chars by 1 char *) - | Expand of char * char (* replace the first char of 2 chars by 2 chars *) + | No_change + | Replace of char * char (* replace 2 chars by 2 other chars *) + | Compress of char (* replace 2 chars by 1 char *) + | Expand of char * char (* replace the first char of 2 chars by 2 chars *) (* remark: every real operation makes the map2 function to create a new buffer (even the for the replace operation). *) (* Indeed, we want to keep the string immutable in xapi, in order to avoid nasty string modifications. 
*) let map2_unlikely f s = - let changed = ref false in - let m = ref 0 in - let i = ref 0 in - let buf = Buffer.create 0 in - let length_s = String.length s in - let aux c d = - match f (c, d) with - | No_change -> () - | Replace (c,d) -> - changed := true; - Buffer.add_substring buf s !m (!i - !m); - Buffer.add_char buf c; - Buffer.add_char buf d; - incr i; - m := !i + 1 - | Compress char -> - changed := true; - Buffer.add_substring buf s !m (!i - !m); - Buffer.add_char buf char; - incr i; - m := !i + 1 - | Expand (c,d) -> - changed := true; - Buffer.add_substring buf s !m (!i - !m); - Buffer.add_char buf c; - Buffer.add_char buf d; - m := !i + 1 - in - (* main loop *) - while !i <= length_s - 2 do - aux s.[!i] (Some s.[!i+1]); - incr i; - done; - (* process the last character *) - if !i = length_s - 1 - then aux s.[!i] None; - if !changed then begin - Buffer.add_substring buf s !m (String.length s - !m); - Buffer.contents buf - end else - s + let changed = ref false in + let m = ref 0 in + let i = ref 0 in + let buf = Buffer.create 0 in + let length_s = String.length s in + let aux c d = + match f (c, d) with + | No_change -> () + | Replace (c,d) -> + changed := true; + Buffer.add_substring buf s !m (!i - !m); + Buffer.add_char buf c; + Buffer.add_char buf d; + incr i; + m := !i + 1 + | Compress char -> + changed := true; + Buffer.add_substring buf s !m (!i - !m); + Buffer.add_char buf char; + incr i; + m := !i + 1 + | Expand (c,d) -> + changed := true; + Buffer.add_substring buf s !m (!i - !m); + Buffer.add_char buf c; + Buffer.add_char buf d; + m := !i + 1 + in + (* main loop *) + while !i <= length_s - 2 do + aux s.[!i] (Some s.[!i+1]); + incr i; + done; + (* process the last character *) + if !i = length_s - 1 + then aux s.[!i] None; + if !changed then begin + Buffer.add_substring buf s !m (String.length s - !m); + Buffer.contents buf + end else + s let protect_fn = function - | ' ', Some ' ' -> Replace (protect_char, '_' ) - | '\t', _ -> Expand 
(protect_char, 't' ) - | '\n', _ -> Expand (protect_char, 'n' ) - | '\r', _ -> Expand (protect_char, 'r' ) - | c, _ when c = protect_char -> - Expand (protect_char, protect_char) - | _ , _ -> No_change + | ' ', Some ' ' -> Replace (protect_char, '_' ) + | '\t', _ -> Expand (protect_char, 't' ) + | '\n', _ -> Expand (protect_char, 'n' ) + | '\r', _ -> Expand (protect_char, 'r' ) + | c, _ when c = protect_char -> + Expand (protect_char, protect_char) + | _ , _ -> No_change let unprotect_fn = function - | c, Some '_' when c=protect_char -> Replace (' ', ' ') - | c, Some 't' when c=protect_char -> Compress '\t' - | c, Some 'n' when c=protect_char -> Compress '\n' - | c, Some 'r' when c=protect_char -> Compress '\r' - | c, Some d when c=protect_char && d=protect_char -> - Compress protect_char - | _ , _ -> No_change + | c, Some '_' when c=protect_char -> Replace (' ', ' ') + | c, Some 't' when c=protect_char -> Compress '\t' + | c, Some 'n' when c=protect_char -> Compress '\n' + | c, Some 'r' when c=protect_char -> Compress '\r' + | c, Some d when c=protect_char && d=protect_char -> + Compress protect_char + | _ , _ -> No_change let protect = map2_unlikely protect_fn -let unprotect = map2_unlikely unprotect_fn +let unprotect = map2_unlikely unprotect_fn diff --git a/ocaml/db_process/xapi-db-upgrade-4.2.ml b/ocaml/db_process/xapi-db-upgrade-4.2.ml index e64a28f6213..c28cd2d3452 100644 --- a/ocaml/db_process/xapi-db-upgrade-4.2.ml +++ b/ocaml/db_process/xapi-db-upgrade-4.2.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(* +(* * Helpers for upgrading from a normal XML database to a protected XML database, where multiple spaces, '\t', '\n', '\r' and '\' * are protected inside attributes. *) @@ -19,33 +19,33 @@ (* We assume here that the upgrade done by host-installer creates a XML file where only double quotes are used around attributes. 
*) (* This is normally done internally by the miami version of xapi-db-process, so it should be OK. *) let perform_inside_quotes_fn f inside_quotes = function - | ('"', _) -> inside_quotes := not (!inside_quotes); - Xml_spaces.No_change - | c when !inside_quotes -> f c - | _ -> Xml_spaces.No_change + | ('"', _) -> inside_quotes := not (!inside_quotes); + Xml_spaces.No_change + | c when !inside_quotes -> f c + | _ -> Xml_spaces.No_change let (_:unit) = - (* By default, we upgrade the database, ie. we protect multiple spaces and so on. *) - (* However, in case something went wrong, we can downgrade the database using '--downgrade' argument *) - let fn_to_apply = - let inside_quotes = ref false in - if Array.length Sys.argv = 2 && Sys.argv.(1) = "--downgrade" - then begin - Printf.printf "Warning: unprotecting characters of the XML database\n%!"; - perform_inside_quotes_fn Xml_spaces.unprotect_fn inside_quotes; - end else - perform_inside_quotes_fn Xml_spaces.protect_fn inside_quotes - in - let paths = Parse_db_conf.parse_db_conf !Xapi_globs.db_conf_path in - List.iter - (fun dbconn -> - let path = dbconn.Parse_db_conf.path in - (* first, fit the database into memory *) - let state_db = Unixext.string_of_file path in - (* second, save the original database in a backup file, using a timestamp *) - Unixext.write_string_to_file (path ^ ".prev_version." ^ (string_of_float (Unix.gettimeofday()))) state_db; - (* finally, transform the database and replace the database file by a new one *) - let result = Xml_spaces.map2_unlikely fn_to_apply state_db in - (* Remark: Unixext.write_string_to_file is atomic *) - Unixext.write_string_to_file path result) - paths + (* By default, we upgrade the database, ie. we protect multiple spaces and so on. 
*) + (* However, in case something went wrong, we can downgrade the database using '--downgrade' argument *) + let fn_to_apply = + let inside_quotes = ref false in + if Array.length Sys.argv = 2 && Sys.argv.(1) = "--downgrade" + then begin + Printf.printf "Warning: unprotecting characters of the XML database\n%!"; + perform_inside_quotes_fn Xml_spaces.unprotect_fn inside_quotes; + end else + perform_inside_quotes_fn Xml_spaces.protect_fn inside_quotes + in + let paths = Parse_db_conf.parse_db_conf !Xapi_globs.db_conf_path in + List.iter + (fun dbconn -> + let path = dbconn.Parse_db_conf.path in + (* first, fit the database into memory *) + let state_db = Unixext.string_of_file path in + (* second, save the original database in a backup file, using a timestamp *) + Unixext.write_string_to_file (path ^ ".prev_version." ^ (string_of_float (Unix.gettimeofday()))) state_db; + (* finally, transform the database and replace the database file by a new one *) + let result = Xml_spaces.map2_unlikely fn_to_apply state_db in + (* Remark: Unixext.write_string_to_file is atomic *) + Unixext.write_string_to_file path result) + paths diff --git a/ocaml/db_process/xapi_db_process.ml b/ocaml/db_process/xapi_db_process.ml index 8015238aa81..05d77d90100 100644 --- a/ocaml/db_process/xapi_db_process.ml +++ b/ocaml/db_process/xapi_db_process.ml @@ -29,9 +29,9 @@ let fatal_error s = print_string s; print_string "\n"; exit 1 - + type operation = Write_database | Read_gencount | Read_hostiqn | Write_hostiqn - | Am_i_in_the_database | Unknown of string + | Am_i_in_the_database | Unknown of string let parse_operation s = match (String.lowercase s) with | "write_db" -> Write_database @@ -42,8 +42,8 @@ let parse_operation s = | s -> (Unknown s) let initialise_db_connections() = - let dbs = Parse_db_conf.parse_db_conf - (if !config="" then !Xapi_globs.db_conf_path else !config) in + let dbs = Parse_db_conf.parse_db_conf + (if !config="" then !Xapi_globs.db_conf_path else !config) in 
Db_conn_store.initialise_db_connections dbs; dbs @@ -53,19 +53,19 @@ let read_in_database() = Db_cache_impl.make (Db_backend.make ()) connections Schema.empty let write_out_databases() = - Db_cache_impl.sync (Db_conn_store.read_db_connections ()) (Db_ref.get_database (Db_backend.make ())) + Db_cache_impl.sync (Db_conn_store.read_db_connections ()) (Db_ref.get_database (Db_backend.make ())) (* should never be thrown due to checking argument at start *) exception UnknownFormat - + let write_out_database filename = print_string ("Dumping database to: "^filename^"\n"); Db_cache_impl.sync [ { - Parse_db_conf.dummy_conf with - Parse_db_conf.path=filename; - Parse_db_conf.mode=Parse_db_conf.No_limit; - Parse_db_conf.compress=(!compress) + Parse_db_conf.dummy_conf with + Parse_db_conf.path=filename; + Parse_db_conf.mode=Parse_db_conf.No_limit; + Parse_db_conf.compress=(!compress) } ] (Db_ref.get_database (Db_backend.make ())) let help_pad = " " @@ -88,11 +88,11 @@ let do_write_database() = begin read_in_database(); if !xmltostdout then - Db_xml.To.fd (Unix.descr_of_out_channel stdout) (Db_ref.get_database (Db_backend.make())) + Db_xml.To.fd (Unix.descr_of_out_channel stdout) (Db_ref.get_database (Db_backend.make())) else write_out_database !filename end - + let find_my_host_row() = Xapi_inventory.read_inventory (); let localhost_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in @@ -106,10 +106,10 @@ let _other_config = "other_config" let do_read_hostiqn() = read_in_database(); match find_my_host_row() with - | None -> failwith "No row for localhost" - | Some (_, row) -> - let other_config = Schema.Value.Unsafe_cast.pairs (Row.find Db_names.other_config row) in - Printf.printf "%s" (List.assoc _iscsi_iqn other_config) + | None -> failwith "No row for localhost" + | Some (_, row) -> + let other_config = Schema.Value.Unsafe_cast.pairs (Row.find Db_names.other_config row) in + Printf.printf "%s" (List.assoc _iscsi_iqn other_config) let do_write_hostiqn() = if 
!iqn = "" then @@ -117,35 +117,35 @@ let do_write_hostiqn() = let new_iqn = !iqn in read_in_database(); match find_my_host_row() with - | None -> failwith "No row for localhost" - | Some (r, row) -> - (* read other_config from my row, replace host_iqn if already there, add it if its not there and write back *) - let other_config = Schema.Value.Unsafe_cast.pairs (Row.find Db_names.other_config row) in - let other_config = - if List.mem_assoc _iscsi_iqn other_config then - (* replace if key already exists *) - List.map (fun (k,v) ->k, if k=_iscsi_iqn then new_iqn else v) other_config - else - (* ... otherwise add new key/value pair *) - (_iscsi_iqn,new_iqn)::other_config in - let other_config = Schema.Value.Pairs other_config in - Db_ref.update_database (Db_backend.make ()) (set_field Db_names.host r Db_names.other_config other_config); - write_out_databases() - -let do_am_i_in_the_database () = - read_in_database(); - Printf.printf "%b" (find_my_host_row () <> None) + | None -> failwith "No row for localhost" + | Some (r, row) -> + (* read other_config from my row, replace host_iqn if already there, add it if its not there and write back *) + let other_config = Schema.Value.Unsafe_cast.pairs (Row.find Db_names.other_config row) in + let other_config = + if List.mem_assoc _iscsi_iqn other_config then + (* replace if key already exists *) + List.map (fun (k,v) ->k, if k=_iscsi_iqn then new_iqn else v) other_config + else + (* ... 
otherwise add new key/value pair *) + (_iscsi_iqn,new_iqn)::other_config in + let other_config = Schema.Value.Pairs other_config in + Db_ref.update_database (Db_backend.make ()) (set_field Db_names.host r Db_names.other_config other_config); + write_out_databases() + +let do_am_i_in_the_database () = + read_in_database(); + Printf.printf "%b" (find_my_host_row () <> None) let _ = init_logs(); Arg.parse ([ - "-compress", Arg.Set compress, "whether to compress the XML output"; - "-config", Arg.Set_string config, "config file to read"; - "-filename", Arg.Set_string filename, "filename to write to"; - "-xmltostdout", Arg.Set xmltostdout, "write XML db to stdout [compress/filename ignored if this option is present]"; - "-operation", Arg.Set_string operation, "operation to perform:\n"^operation_list^"\n"^help_pad^"(defaults to write_db if no operation specified)"; - "-hostiqn", Arg.Set_string iqn, "hostiqn value" - ]) + "-compress", Arg.Set compress, "whether to compress the XML output"; + "-config", Arg.Set_string config, "config file to read"; + "-filename", Arg.Set_string filename, "filename to write to"; + "-xmltostdout", Arg.Set xmltostdout, "write XML db to stdout [compress/filename ignored if this option is present]"; + "-operation", Arg.Set_string operation, "operation to perform:\n"^operation_list^"\n"^help_pad^"(defaults to write_db if no operation specified)"; + "-hostiqn", Arg.Set_string iqn, "hostiqn value" + ]) (fun x -> print_string ("Warning, ignoring unknown argument: "^x)) "XE database tool"; if !operation = "" then @@ -153,12 +153,12 @@ let _ = info "xapi-db-process executed: operation='%s'" !operation; match parse_operation !operation with | Write_database -> - do_write_database() + do_write_database() | Read_hostiqn -> - do_read_hostiqn() + do_read_hostiqn() | Write_hostiqn -> - do_write_hostiqn() + do_write_hostiqn() | Am_i_in_the_database -> - do_am_i_in_the_database() + do_am_i_in_the_database() | _ -> - error "unknown operation %s" !operation + 
error "unknown operation %s" !operation diff --git a/ocaml/doc/jsapi.ml b/ocaml/doc/jsapi.ml index 8fd6fbb0be6..8236fe1b0dc 100644 --- a/ocaml/doc/jsapi.ml +++ b/ocaml/doc/jsapi.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) - + open Datamodel_types type change_t = lifecycle_change * string * string @@ -19,57 +19,57 @@ and changes_t = change_t list with rpc let _ = - let api = (Datamodel.all_api) in - let objs = Dm_api.objects_of_api api in - let create_json obj = - let name = obj.name in - let s = Jsonrpc.to_string (rpc_of_obj obj) in - Stdext.Unixext.write_string_to_file ("api/" ^ name ^ ".json") ("clsdoc = " ^ s); - name - in - let names = List.map create_json objs in - let class_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") names) in - Stdext.Unixext.write_string_to_file "api/index.json" ("classes = [" ^ class_list ^ "]"); - - let changes_in_release rel = - let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in - let obj_changes : changes_t = - List.map (fun (transition, release, doc) -> - (transition, obj.name, if doc = "" && transition = Published then obj.description else doc) - ) changes in - - let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in - List.map (fun (transition, release, doc) -> - (transition, m.msg_name, if doc = "" && transition = Published then m.msg_doc else doc) - ) changes - in - let msgs = List.filter (fun m -> not m.msg_hide_from_docs) obj.messages in - let msg_changes : changes_t = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in - - let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in - let field_name = String.concat "_" f.full_name in - List.map (fun (transition, release, doc) -> - (transition, field_name, if doc = "" 
&& transition = Published then f.field_description else doc) - ) changes - in - let rec flatten_contents contents = - List.fold_left (fun l -> function - | Field f -> f :: l - | Namespace (name, contents) -> flatten_contents contents @ l - ) [] contents - in - let fields = flatten_contents obj.contents in - let fields = List.filter (fun f -> not f.internal_only) fields in - let field_changes : changes_t = List.fold_left (fun l f -> l @ (changes_for_field f)) [] fields in - - "{'cls': '" ^ obj.name ^ "', 'obj_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t obj_changes) ^ ", 'field_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t field_changes) ^ ", 'msg_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t msg_changes) ^ "}" - in - let release_info = String.concat ", " (List.map search_obj objs) in - Stdext.Unixext.write_string_to_file ("api/" ^ rel ^ ".json") ("release_info = [" ^ release_info ^ "]") - in - List.iter changes_in_release release_order; - let release_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") release_order) in - Stdext.Unixext.write_string_to_file "api/releases.json" ("releases = [" ^ release_list ^ "]"); + let api = (Datamodel.all_api) in + let objs = Dm_api.objects_of_api api in + let create_json obj = + let name = obj.name in + let s = Jsonrpc.to_string (rpc_of_obj obj) in + Stdext.Unixext.write_string_to_file ("api/" ^ name ^ ".json") ("clsdoc = " ^ s); + name + in + let names = List.map create_json objs in + let class_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") names) in + Stdext.Unixext.write_string_to_file "api/index.json" ("classes = [" ^ class_list ^ "]"); + + let changes_in_release rel = + let search_obj obj = + let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in + let obj_changes : changes_t = + List.map (fun (transition, release, doc) -> + (transition, obj.name, if doc = "" && transition = Published then obj.description else doc) + ) changes in + + let 
changes_for_msg m = + let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in + List.map (fun (transition, release, doc) -> + (transition, m.msg_name, if doc = "" && transition = Published then m.msg_doc else doc) + ) changes + in + let msgs = List.filter (fun m -> not m.msg_hide_from_docs) obj.messages in + let msg_changes : changes_t = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in + + let changes_for_field f = + let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in + let field_name = String.concat "_" f.full_name in + List.map (fun (transition, release, doc) -> + (transition, field_name, if doc = "" && transition = Published then f.field_description else doc) + ) changes + in + let rec flatten_contents contents = + List.fold_left (fun l -> function + | Field f -> f :: l + | Namespace (name, contents) -> flatten_contents contents @ l + ) [] contents + in + let fields = flatten_contents obj.contents in + let fields = List.filter (fun f -> not f.internal_only) fields in + let field_changes : changes_t = List.fold_left (fun l f -> l @ (changes_for_field f)) [] fields in + + "{'cls': '" ^ obj.name ^ "', 'obj_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t obj_changes) ^ ", 'field_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t field_changes) ^ ", 'msg_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t msg_changes) ^ "}" + in + let release_info = String.concat ", " (List.map search_obj objs) in + Stdext.Unixext.write_string_to_file ("api/" ^ rel ^ ".json") ("release_info = [" ^ release_info ^ "]") + in + List.iter changes_in_release release_order; + let release_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") release_order) in + Stdext.Unixext.write_string_to_file "api/releases.json" ("releases = [" ^ release_list ^ "]"); diff --git a/ocaml/events/event_listen.ml b/ocaml/events/event_listen.ml index 9306e821bb8..d2be829752c 100644 --- 
a/ocaml/events/event_listen.ml +++ b/ocaml/events/event_listen.ml @@ -20,9 +20,9 @@ let password = ref "" (* The interface to the ocaml client bindings requires a function which performs the XMLRPC call: *) let rpc xml = - let open Xmlrpc_client in - let http = xmlrpc ~version:"1.0" "/" in - XMLRPC_protocol.rpc ~srcstr:"event_listen" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http xml + let open Xmlrpc_client in + let http = xmlrpc ~version:"1.0" "/" in + XMLRPC_protocol.rpc ~srcstr:"event_listen" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http xml open Client open Printf @@ -39,7 +39,7 @@ let _ = "Subscribe to an event stream and print the results"; Printf.printf "Connecting to Host: %s; Port: %d; Username: %s" !host !port !username; - + (* Interesting event stuff starts here: *) let session_id = Client.Session.login_with_password ~rpc ~uname:!username ~pwd:!password ~version:"1.2" ~originator:"event_listen" in Client.Event.register ~rpc ~session_id ~classes:["*"]; diff --git a/ocaml/gpg/gpg.ml b/ocaml/gpg/gpg.ml index 5ad6a80c23d..e06a37b8a1b 100644 --- a/ocaml/gpg/gpg.ml +++ b/ocaml/gpg/gpg.ml @@ -37,70 +37,70 @@ let parse_gpg_status status_data = if status_contains validsig then let validsigline = List.find (fun s -> String.startswith validsig s) lines in match String.split ' ' validsigline with - _::_::fingerprint::_ -> Some fingerprint - | _ -> None + _::_::fingerprint::_ -> Some fingerprint + | _ -> None else - None + None let simple_checksum file = Digest.to_hex (Digest.file file) let common ty filename signature size f = let tmp_file, tmp_oc = Filename.open_temp_file ~mode:[Open_binary] "gpg" "" in let result_in = Unix.descr_of_out_channel tmp_oc in - let result_out = Unix.openfile tmp_file [Unix.O_RDONLY] 0o0 in - Unix.unlink tmp_file; + let result_out = Unix.openfile tmp_file [Unix.O_RDONLY] 0o0 in + Unix.unlink tmp_file; (* no need to close the 'tmp_oc' -> closing the fd is enough *) let status_out, status_in = Unix.pipe() in let status_in_uuid 
= Uuid.to_string (Uuid.make_uuid ()) in (* from the parent's PoV *) let fds_to_close = ref [ result_out; result_in; status_out; status_in ] in - let close' fd = - if List.mem fd !fds_to_close + let close' fd = + if List.mem fd !fds_to_close then (Unix.close fd; fds_to_close := List.filter (fun x -> x <> fd) !fds_to_close) in let gpg_pub_keyring = Filename.concat !Xapi_globs.gpg_homedir "pubring.gpg" in let gpg_args = match ty with | `signed_cleartext -> - [ - "--homedir"; !Xapi_globs.gpg_homedir; - "--no-default-keyring"; - "--keyring"; gpg_pub_keyring; - "--status-fd"; status_in_uuid; - "--decrypt"; filename - ] + [ + "--homedir"; !Xapi_globs.gpg_homedir; + "--no-default-keyring"; + "--keyring"; gpg_pub_keyring; + "--status-fd"; status_in_uuid; + "--decrypt"; filename + ] | `detached_signature -> - [ - filename; Int64.to_string size; - "--homedir"; !Xapi_globs.gpg_homedir; - "--no-default-keyring"; - "--keyring"; gpg_pub_keyring; - "--status-fd"; status_in_uuid; - "--verify"; signature - ] + [ + filename; Int64.to_string size; + "--homedir"; !Xapi_globs.gpg_homedir; + "--no-default-keyring"; + "--keyring"; gpg_pub_keyring; + "--status-fd"; status_in_uuid; + "--verify"; signature + ] in finally (* make sure I close all my open fds in the end *) (fun () -> - (* Capture stderr output for logging *) - match Forkhelpers.with_logfile_fd "gpg" - (fun log_fd -> - let pid = Forkhelpers.safe_close_and_exec None (Some result_in) (Some log_fd) [(status_in_uuid,status_in)] - gpg_binary_path gpg_args in - (* parent *) - List.iter close' [ result_in; status_in ]; - finally (* always waitpid eventually *) - (fun () -> - let gpg_status = Unixext.string_of_fd status_out in - let fingerprint = parse_gpg_status gpg_status in - f fingerprint result_out) - (fun () -> Forkhelpers.waitpid_fail_if_bad_exit pid)) with - | Forkhelpers.Success(_, x) -> debug "gpg subprocess succeeded"; x - | Forkhelpers.Failure(log, Forkhelpers.Subprocess_failed 2) -> - (* Happens when gpg cannot find a 
readable signature *) - raise InvalidSignature - | Forkhelpers.Failure(log, exn) -> - debug "Error from gpg: %s" log; - raise exn) + (* Capture stderr output for logging *) + match Forkhelpers.with_logfile_fd "gpg" + (fun log_fd -> + let pid = Forkhelpers.safe_close_and_exec None (Some result_in) (Some log_fd) [(status_in_uuid,status_in)] + gpg_binary_path gpg_args in + (* parent *) + List.iter close' [ result_in; status_in ]; + finally (* always waitpid eventually *) + (fun () -> + let gpg_status = Unixext.string_of_fd status_out in + let fingerprint = parse_gpg_status gpg_status in + f fingerprint result_out) + (fun () -> Forkhelpers.waitpid_fail_if_bad_exit pid)) with + | Forkhelpers.Success(_, x) -> debug "gpg subprocess succeeded"; x + | Forkhelpers.Failure(log, Forkhelpers.Subprocess_failed 2) -> + (* Happens when gpg cannot find a readable signature *) + raise InvalidSignature + | Forkhelpers.Failure(log, exn) -> + debug "Error from gpg: %s" log; + raise exn) (fun () -> List.iter Unix.close !fds_to_close) let with_signed_cleartext filename f = diff --git a/ocaml/graph/graph.ml b/ocaml/graph/graph.ml index 2cfca111717..4b75a2f8924 100644 --- a/ocaml/graph/graph.ml +++ b/ocaml/graph/graph.ml @@ -20,36 +20,36 @@ open Datamodel_types (** Return all references contained within a getrecord response of type cls *) -let refs_of_record cls record = +let refs_of_record cls record = let obj = Dm_api.get_obj_by_name Datamodel.all_api ~objname:cls in let fields = Datamodel_utils.fields_of_obj obj in let rec refs_of ty xml = match ty with | Ref _ -> [ XMLRPC.From.string xml ] | Set t -> List.concat (API.Legacy.From.set (refs_of t) xml) | Map(k, v) -> - let pairs = API.Legacy.From.map (fun x -> x) (refs_of v) xml in - let vs = List.concat (List.map snd pairs) in - begin match k with - | Ref _ -> List.map fst pairs @ vs - | _ -> vs - end + let pairs = API.Legacy.From.map (fun x -> x) (refs_of v) xml in + let vs = List.concat (List.map snd pairs) in + begin match k with + | 
Ref _ -> List.map fst pairs @ vs + | _ -> vs + end | _ -> [] in let pairs = XMLRPC.From.structure record in - let refs_of_field fld = + let refs_of_field fld = let field_name = String.concat "_" fld.full_name in if not(List.mem_assoc field_name pairs) then [] (* internal? *) else refs_of fld.ty (List.assoc field_name pairs) in List.concat (List.map refs_of_field fields) -let name_label_of_record cls record = - let pairs = XMLRPC.From.structure record in - if List.mem_assoc "name_label" pairs - then XMLRPC.From.string (List.assoc "name_label" pairs) - else "unknown " ^ cls +let name_label_of_record cls record = + let pairs = XMLRPC.From.structure record in + if List.mem_assoc "name_label" pairs + then XMLRPC.From.string (List.assoc "name_label" pairs) + else "unknown " ^ cls -let all_classes = List.map (fun x -> x.name) - (Dm_api.objects_of_api Datamodel.all_api) +let all_classes = List.map (fun x -> x.name) + (Dm_api.objects_of_api Datamodel.all_api) open XMLRPC let do_rpc rpc name args = @@ -60,7 +60,7 @@ let do_rpc rpc name args = | Success [x] -> x | _ -> assert false -let get_all rpc session_id cls = +let get_all rpc session_id cls = let name = Printf.sprintf "%s.get_all_records_where" cls in let args = [ To.string (Ref.string_of session_id); To.string "true" ] in API.Legacy.From.map (fun x -> x) (fun x -> x) (do_rpc rpc name args) @@ -74,12 +74,12 @@ module NodeSet = Set.Make( let compare a b = compare a.id b.id end) module EdgeSet = Set.Make( - struct + struct type t = edge let compare x y = if x.a = y.a then compare x.b y.b else compare x.a y.a end) -let node_of_id nodes id = +let node_of_id nodes id = let one = NodeSet.filter (fun x -> x.id = id) nodes in NodeSet.choose one @@ -91,23 +91,23 @@ let colour_of_cls = function | "VDI" -> "orange" | _ -> "white" -let output_dot nodes edges oc = +let output_dot nodes edges oc = let labels = NodeSet.fold (fun x acc -> x :: acc) nodes [] in - let edges = EdgeSet.fold (fun x acc -> - try - ignore(node_of_id nodes 
x.a); - ignore(node_of_id nodes x.b); - x :: acc - with Not_found -> acc - ) edges [] in - let output = + let edges = EdgeSet.fold (fun x acc -> + try + ignore(node_of_id nodes x.a); + ignore(node_of_id nodes x.b); + x :: acc + with Not_found -> acc + ) edges [] in + let output = [ "digraph g{"; ] @ - (List.map (fun x -> Printf.sprintf "node [label=\"%s\" style=filled fillcolor=%s]; \"%s\";" x.label (colour_of_cls x.cls) x.id) labels) + (List.map (fun x -> Printf.sprintf "node [label=\"%s\" style=filled fillcolor=%s]; \"%s\";" x.label (colour_of_cls x.cls) x.id) labels) @ - (List.map (fun x -> Printf.sprintf "\"%s\" -> \"%s\";" x.a x.b) edges) + (List.map (fun x -> Printf.sprintf "\"%s\" -> \"%s\";" x.a x.b) edges) @ [ - "}"; - ] in + "}"; + ] in List.iter (fun x -> output_string oc x; output_string oc "\n") output let nodes = ref NodeSet.empty @@ -124,13 +124,13 @@ let all = ref false let singleton = ref false (* The interface to the ocaml client bindings requires a function which performs the XMLRPC call: *) -let rpc xml = - let open Xmlrpc_client in - XML_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http:(xmlrpc ~version:"1.0" "/") xml - -let newrpc xml = - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http:(xmlrpc ~version:"1.0" "/") xml +let rpc xml = + let open Xmlrpc_client in + XML_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http:(xmlrpc ~version:"1.0" "/") xml + +let newrpc xml = + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http:(xmlrpc ~version:"1.0" "/") xml let _ = let wanted = ref [] in @@ -154,24 +154,24 @@ let _ = let classes = List.filter (fun x -> !all || (List.mem x !wanted)) classes in - List.iter + List.iter (fun cls -> let all = get_all rpc session_id cls in - List.iter (fun (x, xr) -> - let node = { id = x; cls = cls; label = name_label_of_record cls 
xr } in - nodes := NodeSet.add node !nodes; - let links = refs_of_record cls xr in - List.iter (fun y -> edges := EdgeSet.add { a = x; b = y } !edges) links - ) all) classes; + List.iter (fun (x, xr) -> + let node = { id = x; cls = cls; label = name_label_of_record cls xr } in + nodes := NodeSet.add node !nodes; + let links = refs_of_record cls xr in + List.iter (fun y -> edges := EdgeSet.add { a = x; b = y } !edges) links + ) all) classes; (* Filter all singleton nodes *) - let is_connected edges nodes node = + let is_connected edges nodes node = let node_exists nodes id = try ignore(node_of_id nodes id); true with _ -> false in - EdgeSet.fold (fun edge acc -> - (edge.a = node.id && node_exists nodes edge.b) - || - (edge.b = node.id && node_exists nodes edge.a) - || - acc) edges false in + EdgeSet.fold (fun edge acc -> + (edge.a = node.id && node_exists nodes edge.b) + || + (edge.b = node.id && node_exists nodes edge.a) + || + acc) edges false in let nodes = NodeSet.filter (fun x -> !singleton || is_connected !edges !nodes x) !nodes in output_dot nodes !edges stdout (* diff --git a/ocaml/idl/api_errors.ml b/ocaml/idl/api_errors.ml index 6892214e6b0..6ee6517adf6 100644 --- a/ocaml/idl/api_errors.ml +++ b/ocaml/idl/api_errors.ml @@ -15,13 +15,13 @@ exception Server_error of string * string list let to_string = function | Server_error (name, args) -> - Printf.sprintf "Server_error(%s, [ %a ])" name (fun () -> String.concat "; ") args + Printf.sprintf "Server_error(%s, [ %a ])" name (fun () -> String.concat "; ") args | e -> Printexc.to_string e let _ = Printexc.register_printer (function - | Server_error(code, params) as e -> Some (to_string e) - | _ -> None) + | Server_error(code, params) as e -> Some (to_string e) + | _ -> None) let message_deprecated = "MESSAGE_DEPRECATED" let message_removed = "MESSAGE_REMOVED" @@ -249,7 +249,7 @@ let sr_operation_not_supported = "SR_OPERATION_NOT_SUPPORTED" let sr_not_sharable = "SR_NOT_SHARABLE" let sr_indestructible = 
"SR_INDESTRUCTIBLE" let clustered_sr_degraded = "CLUSTERED_SR_DEGRADED" - + let sm_plugin_communication_failure = "SM_PLUGIN_COMMUNICATION_FAILURE" let pbd_exists = "PBD_EXISTS" diff --git a/ocaml/idl/api_lowlevel.ml b/ocaml/idl/api_lowlevel.ml index 8df37a2afed..0ac403b6548 100644 --- a/ocaml/idl/api_lowlevel.ml +++ b/ocaml/idl/api_lowlevel.ml @@ -17,7 +17,7 @@ module DT = Datamodel_types type field_op = Get | Set | Add | Remove type obj_op = Make | Delete | GetAll -type operation = +type operation = | Field of field_op * DT.obj * DT.field | Object of obj_op * DT.obj | Msg of DT.obj * DT.message @@ -28,33 +28,33 @@ let obj_of_operation = function | Msg (x, _) -> x (** Computes the RPC wire name of an operation *) -let wire_name_of_operation ~sync operation = - (if sync +let wire_name_of_operation ~sync operation = + (if sync then "" else "Async.") ^ - String.capitalize ((obj_of_operation operation).DT.name) ^ "." ^ - (match operation with - | Field(op, obj, fld) -> - (match op with - | Get -> "get_" | Set -> "set_" - | Add -> "add_" | Remove -> "remove_") ^ - (String.concat "__" fld.DT.full_name) - | Object(Make, obj) -> "make" - | Object(Delete, obj) -> "delete" - | Object(GetAll, _) -> failwith "GetAll not implemented yet" - | Msg(obj, msg) -> "do_" ^ msg.DT.msg_name) + String.capitalize ((obj_of_operation operation).DT.name) ^ "." ^ + (match operation with + | Field(op, obj, fld) -> + (match op with + | Get -> "get_" | Set -> "set_" + | Add -> "add_" | Remove -> "remove_") ^ + (String.concat "__" fld.DT.full_name) + | Object(Make, obj) -> "make" + | Object(Delete, obj) -> "delete" + | Object(GetAll, _) -> failwith "GetAll not implemented yet" + | Msg(obj, msg) -> "do_" ^ msg.DT.msg_name) (** A flat list of all the possible operations concerning an object. 
Ideally filter the datamodel on release (opensource, closed) first and then filter this according to the needs of the specific backend *) -let operations_of_obj (x: DT.obj) : operation list = +let operations_of_obj (x: DT.obj) : operation list = let rec of_contents = function | DT.Namespace(_, xs) -> List.concat (List.map of_contents xs) - | DT.Field y -> List.map (fun tag -> Field(tag, x, y)) - [ Get; Set; Add; Remove ] in + | DT.Field y -> List.map (fun tag -> Field(tag, x, y)) + [ Get; Set; Add; Remove ] in let fields = List.concat (List.map of_contents x.DT.contents) in - let objects = List.map (fun tag -> Object(tag, x)) - [ Make; Delete; GetAll ] in + let objects = List.map (fun tag -> Object(tag, x)) + [ Make; Delete; GetAll ] in let msg = List.map (fun msg -> Msg(x, msg)) x.DT.messages in objects @ fields @ msg @@ -65,19 +65,19 @@ let filter (operation: operation -> bool) (api: t) = List.map (fun (obj, ops) -> obj, List.filter operation ops) api let operations_which_make_sense = function - (* cannot atomically set all values in a set or a map *) + (* cannot atomically set all values in a set or a map *) | Field(Set, _, ({ DT.ty = DT.Set _ } | { DT.ty = DT.Map(_,_)})) -> false - (* Set(Ref _) values are stored as foreign keys in other tables *) + (* Set(Ref _) values are stored as foreign keys in other tables *) | Field((Add | Remove), _, { DT.ty = DT.Set (DT.Ref _) }) -> false - (* Add/Remove from 'normal' sets and maps is fine *) + (* Add/Remove from 'normal' sets and maps is fine *) | Field((Add | Remove), _, ({ DT.ty = DT.Set _ }|{ DT.ty = DT.Map(_,_) }) ) -> true - (* Add/Remove from anything else is bad *) + (* Add/Remove from anything else is bad *) | Field((Add | Remove), _, _) -> false - + | _ -> true -let of_api (api: Dm_api.api) : t = +let of_api (api: Dm_api.api) : t = let objects = Dm_api.objects_of_api api in let api = List.map (fun obj -> obj, operations_of_obj obj) objects in filter operations_which_make_sense api diff --git 
a/ocaml/idl/api_lowlevel.mli b/ocaml/idl/api_lowlevel.mli index 77f8ef36def..6d64031f0ce 100644 --- a/ocaml/idl/api_lowlevel.mli +++ b/ocaml/idl/api_lowlevel.mli @@ -18,7 +18,7 @@ type field_op = Get | Set | Add | Remove (** Operations generated for each object can be any of these *) type obj_op = Make | Delete | GetAll -(** Represents an individual operation (on the wire), generating from +(** Represents an individual operation (on the wire), generating from either a field, an object or a message *) type operation = Field of field_op * Datamodel_types.obj * Datamodel_types.field @@ -31,7 +31,7 @@ val obj_of_operation : operation -> Datamodel_types.obj (** Returns the XMLRPC wire name of the operation (eg Async.VM.do_clean_shutdown) *) val wire_name_of_operation : sync:bool -> operation -> string -(** A 'lowlevel api' consists of an association list of objects and their +(** A 'lowlevel api' consists of an association list of objects and their generated operations *) type t = (Datamodel_types.obj * operation list) list diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 4736c33036e..b5ea3f26402 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -91,8 +91,8 @@ let last_release_schema_minor_vsn = dundee_release_schema_minor_vsn (* List of tech-preview releases. Fields in these releases are not guaranteed to be retained when * upgrading to a full release. 
*) let tech_preview_releases = [ - vgpu_tech_preview_release_schema_major_vsn, vgpu_tech_preview_release_schema_minor_vsn; - dundee_tech_preview_release_schema_major_vsn, dundee_tech_preview_release_schema_minor_vsn; + vgpu_tech_preview_release_schema_major_vsn, vgpu_tech_preview_release_schema_minor_vsn; + dundee_tech_preview_release_schema_major_vsn, dundee_tech_preview_release_schema_minor_vsn; ] (** Bindings for currently specified releases *) @@ -159,28 +159,28 @@ let role_vm_power_admin = "vm-power-admin" let role_vm_admin = "vm-admin" let role_vm_operator = "vm-operator" let role_read_only = "read-only" -let roles_all = - [ (* in decreasing total linear order of privileges *) - role_pool_admin; - role_pool_operator; - role_vm_power_admin; - role_vm_admin; - role_vm_operator; - role_read_only - ] +let roles_all = + [ (* in decreasing total linear order of privileges *) + role_pool_admin; + role_pool_operator; + role_vm_power_admin; + role_vm_admin; + role_vm_operator; + role_read_only + ] let role_description = [ - role_pool_admin,"The Pool Administrator role has full access to all features and settings, including accessing Dom0 and managing subjects, roles and external authentication"; - role_pool_operator,"The Pool Operator role manages host- and pool-wide resources, including setting up storage, creating resource pools and managing patches, high availability (HA) and workload balancing (WLB)"; - role_vm_power_admin,"The VM Power Administrator role has full access to VM and template management and can choose where to start VMs and use the dynamic memory control and VM snapshot features"; - role_vm_admin,"The VM Administrator role can manage VMs and templates"; - role_vm_operator,"The VM Operator role can use VMs and interact with VM consoles"; - role_read_only,"The Read-Only role can log in with basic read-only access"; + role_pool_admin,"The Pool Administrator role has full access to all features and settings, including accessing Dom0 and managing 
subjects, roles and external authentication"; + role_pool_operator,"The Pool Operator role manages host- and pool-wide resources, including setting up storage, creating resource pools and managing patches, high availability (HA) and workload balancing (WLB)"; + role_vm_power_admin,"The VM Power Administrator role has full access to VM and template management and can choose where to start VMs and use the dynamic memory control and VM snapshot features"; + role_vm_admin,"The VM Administrator role can manage VMs and templates"; + role_vm_operator,"The VM Operator role can use VMs and interact with VM consoles"; + role_read_only,"The Read-Only role can log in with basic read-only access"; ] (* obtain all roles with at least the specified role privileges *) -let roles_gte role = - let rec gte = function []->failwith "invalid role" - |x::xs->if x=role then x::[] else x::gte xs in - gte roles_all +let roles_gte role = + let rec gte = function []->failwith "invalid role" + |x::xs->if x=role then x::[] else x::gte xs in + gte roles_all (* shortcuts to subsets of greater than or equal roles *) let _R_LOCAL_ROOT_ONLY = Some([]) (* only local root, emergency and pool-secret *) let _R_POOL_ADMIN = Some(roles_gte role_pool_admin) @@ -213,64 +213,64 @@ let get_product_releases in_product_since = in go_through_release_order release_order let dundee_plus_release = - { internal = get_product_releases rel_dundee_plus - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal = get_product_releases rel_dundee_plus + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let dundee_release = - { internal = get_product_releases rel_dundee - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal = get_product_releases rel_dundee + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let cream_release = - { internal = get_product_releases rel_cream - ; opensource=get_oss_releases None - ; 
internal_deprecated_since=None - } + { internal = get_product_releases rel_cream + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let creedence_release = - { internal = get_product_releases rel_creedence - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal = get_product_releases rel_creedence + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let clearwater_felton_release = - { internal=get_product_releases rel_clearwater_felton - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases rel_clearwater_felton + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let vgpu_productisation_release = - { internal=get_product_releases rel_vgpu_productisation - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases rel_vgpu_productisation + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let vgpu_tech_preview_release = - { internal=get_product_releases rel_vgpu_tech_preview - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases rel_vgpu_tech_preview + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let clearwater_release = - { internal=get_product_releases rel_clearwater - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases rel_clearwater + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let tampa_release = - { internal=get_product_releases rel_tampa - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases rel_tampa + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let boston_release = - { internal=get_product_releases rel_boston - ; opensource=get_oss_releases None - ; internal_deprecated_since=None 
- } + { internal=get_product_releases rel_boston + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let cowley_release = { internal=get_product_releases rel_cowley @@ -279,141 +279,141 @@ let cowley_release = } let midnight_ride_release = - { internal=get_product_releases "midnight-ride" - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases "midnight-ride" + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let george_release = - { internal=get_product_releases "george" - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases "george" + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let orlando_release = - { internal=get_product_releases "orlando" - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases "orlando" + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let miami_symc_release = - { internal=get_product_releases "symc" - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases "symc" + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let miami_release = - { internal=get_product_releases "miami" - ; opensource=get_oss_releases None - ; internal_deprecated_since=None - } + { internal=get_product_releases "miami" + ; opensource=get_oss_releases None + ; internal_deprecated_since=None + } let rio_release = - { internal=get_product_releases "rio" - ; opensource=get_oss_releases (Some "3.0.3") - ; internal_deprecated_since=None - } + { internal=get_product_releases "rio" + ; opensource=get_oss_releases (Some "3.0.3") + ; internal_deprecated_since=None + } let get_published lifecycle = - try - let _, published, _ = List.find (fun (t, _, _) -> t = Published) lifecycle in - Some published - with Not_found -> None + try + let _, 
published, _ = List.find (fun (t, _, _) -> t = Published) lifecycle in + Some published + with Not_found -> None let get_deprecated lifecycle = - try - let _, deprecated, _ = List.find (fun (t, _, _) -> t = Deprecated) lifecycle in - Some deprecated - with Not_found -> None + try + let _, deprecated, _ = List.find (fun (t, _, _) -> t = Deprecated) lifecycle in + Some deprecated + with Not_found -> None let call ~name ?(doc="") ?(in_oss_since=Some "3.0.3") ?in_product_since ?internal_deprecated_since - ?result ?(flags=[`Session;`Async]) - ?(effect=true) ?(tag=Custom) ?(errs=[]) ?(custom_marshaller=false) ?(db_only=false) - ?(no_current_operations=false) ?(secret=false) ?(hide_from_docs=false) - ?(pool_internal=false) - ~allowed_roles - ?(map_keys_roles=[]) - ?(params=[]) ?versioned_params ?lifecycle ?(doc_tags=[]) - ?forward_to () = - (* if you specify versioned_params then these get put in the params field of the message record; - * otherwise params go in with no default values and param_release=call_release... 
- *) - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for message '" ^ name ^ "' not specified"); - let lifecycle = match lifecycle with - | None -> - let published = match in_product_since with - | None -> [] - | Some rel -> [Published, rel, doc] - in - let deprecated = match internal_deprecated_since with - | None -> [] - | Some rel -> [Deprecated, rel, ""] - in - published @ deprecated - | Some l -> l - in - let call_release = - { - internal = (match get_published lifecycle with - | Some published -> get_product_releases published - | None -> ["closed"]); - opensource = get_oss_releases in_oss_since; - internal_deprecated_since = get_deprecated lifecycle; - } - in - { - msg_name = name; - msg_params = - (match versioned_params with - | None -> - List.map (fun (ptype, pname, pdoc) -> {param_type=ptype; param_name=pname; - param_doc=pdoc; param_release=call_release; param_default=None}) params - | Some ps -> ps); - msg_result = result; msg_doc = doc; - msg_session = List.mem `Session flags; msg_async = List.mem `Async flags; - msg_db_only = db_only; - msg_release = call_release; - msg_lifecycle = lifecycle; - msg_has_effect = effect; msg_tag = tag; msg_obj_name=""; - msg_force_custom = None; - msg_errors = List.map (Hashtbl.find errors) errs; msg_secret = secret; - msg_custom_marshaller = custom_marshaller; - msg_no_current_operations = no_current_operations; - msg_hide_from_docs = hide_from_docs; - msg_pool_internal = pool_internal; - msg_allowed_roles = allowed_roles; - msg_map_keys_roles = map_keys_roles; - msg_doc_tags = doc_tags; - msg_forward_to = forward_to; - } + ?result ?(flags=[`Session;`Async]) + ?(effect=true) ?(tag=Custom) ?(errs=[]) ?(custom_marshaller=false) ?(db_only=false) + ?(no_current_operations=false) ?(secret=false) ?(hide_from_docs=false) + ?(pool_internal=false) + ~allowed_roles + ?(map_keys_roles=[]) + ?(params=[]) ?versioned_params ?lifecycle ?(doc_tags=[]) + ?forward_to () = + (* if you specify 
versioned_params then these get put in the params field of the message record; + * otherwise params go in with no default values and param_release=call_release... + *) + if lifecycle = None && in_product_since = None then + failwith ("Lifecycle for message '" ^ name ^ "' not specified"); + let lifecycle = match lifecycle with + | None -> + let published = match in_product_since with + | None -> [] + | Some rel -> [Published, rel, doc] + in + let deprecated = match internal_deprecated_since with + | None -> [] + | Some rel -> [Deprecated, rel, ""] + in + published @ deprecated + | Some l -> l + in + let call_release = + { + internal = (match get_published lifecycle with + | Some published -> get_product_releases published + | None -> ["closed"]); + opensource = get_oss_releases in_oss_since; + internal_deprecated_since = get_deprecated lifecycle; + } + in + { + msg_name = name; + msg_params = + (match versioned_params with + | None -> + List.map (fun (ptype, pname, pdoc) -> {param_type=ptype; param_name=pname; + param_doc=pdoc; param_release=call_release; param_default=None}) params + | Some ps -> ps); + msg_result = result; msg_doc = doc; + msg_session = List.mem `Session flags; msg_async = List.mem `Async flags; + msg_db_only = db_only; + msg_release = call_release; + msg_lifecycle = lifecycle; + msg_has_effect = effect; msg_tag = tag; msg_obj_name=""; + msg_force_custom = None; + msg_errors = List.map (Hashtbl.find errors) errs; msg_secret = secret; + msg_custom_marshaller = custom_marshaller; + msg_no_current_operations = no_current_operations; + msg_hide_from_docs = hide_from_docs; + msg_pool_internal = pool_internal; + msg_allowed_roles = allowed_roles; + msg_map_keys_roles = map_keys_roles; + msg_doc_tags = doc_tags; + msg_forward_to = forward_to; + } let errnames_of_call c = - List.map (fun e -> e.err_name) c.msg_errors - -let assert_operation_valid enum cls self = call - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"assert_operation_valid" - 
~doc:"Check to see whether this operation is acceptable in the current state of the system, raising an error if the operation is invalid for some reason" - ~params:[Ref cls, self, "reference to the object"; - enum, "op", "proposed operation" ] - ~allowed_roles:_R_POOL_ADMIN - () + List.map (fun e -> e.err_name) c.msg_errors + +let assert_operation_valid enum cls self = call + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"assert_operation_valid" + ~doc:"Check to see whether this operation is acceptable in the current state of the system, raising an error if the operation is invalid for some reason" + ~params:[Ref cls, self, "reference to the object"; + enum, "op", "proposed operation" ] + ~allowed_roles:_R_POOL_ADMIN + () let update_allowed_operations enum cls self = call - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"update_allowed_operations" - ~doc:"Recomputes the list of acceptable operations" - ~params:[Ref cls, self, "reference to the object"] - ~allowed_roles:_R_POOL_ADMIN - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"update_allowed_operations" + ~doc:"Recomputes the list of acceptable operations" + ~params:[Ref cls, self, "reference to the object"] + ~allowed_roles:_R_POOL_ADMIN + () (** Compute an enum constant corresponding to an operation, for current_operations, allowed_operations.*) -let operation_enum x = - x.msg_name, Printf.sprintf "refers to the operation \"%s\"" x.msg_name +let operation_enum x = + x.msg_name, Printf.sprintf "refers to the operation \"%s\"" x.msg_name let error name params ?(doc="") () = Hashtbl.add errors name { @@ -430,7 +430,7 @@ let message name ?(doc="") () = let _ = (* Internal *) - error Api_errors.internal_error ["message"] + error Api_errors.internal_error ["message"] ~doc:"The server failed to handle your request, due to an internal error. The given message may give details useful for debugging the problem." 
(); error Api_errors.message_deprecated [] @@ -470,10 +470,10 @@ let _ = ~doc:"This license file is no longer accepted. Please upgrade to the new licensing system." (); error Api_errors.activation_while_not_free [] ~doc:"An activation key can only be applied when the edition is set to 'free'." (); - + error Api_errors.feature_restricted [] ~doc:"The use of this feature is restricted." (); - + error Api_errors.cannot_contact_host ["host"] ~doc:"Cannot forward messages because the host cannot be contacted. The host may be switched off or there may be network connectivity problems." (); @@ -485,10 +485,10 @@ let _ = ~doc:"You tried to add a key-value pair to a map, but that key is already there." (); error Api_errors.xmlrpc_unmarshal_failure [ "expected"; "received" ] ~doc:"The server failed to unmarshal the XMLRPC message; it was expecting one element and received something else." (); - - error Api_errors.message_method_unknown ["method"] + + error Api_errors.message_method_unknown ["method"] ~doc:"You tried to call a method that does not exist. The method name that you used is echoed." (); - error Api_errors.message_parameter_count_mismatch ["method"; "expected"; "received"] + error Api_errors.message_parameter_count_mismatch ["method"; "expected"; "received"] ~doc:"You tried to call a method with the incorrect number of parameters. The fully-qualified method name that you used, and the number of received and expected parameters are returned." (); error Api_errors.value_not_supported ["field"; "value"; "reason"] ~doc:"You attempted to set a value that is not supported by this implementation. The fully-qualified field name and the value that you tried to set are returned. Also returned is a developer-only diagnostic reason." (); @@ -500,10 +500,10 @@ let _ = ~doc:"You attempted an operation that was not allowed." 
(); error Api_errors.operation_blocked ["ref"; "code"] ~doc:"You attempted an operation that was explicitly blocked (see the blocked_operations field of the given object)." (); - error Api_errors.not_implemented ["function"] + error Api_errors.not_implemented ["function"] ~doc:"The function is not implemented" (); error Api_errors.unimplemented_in_sm_backend ["message"] - ~doc:"You have attempted a function which is not implemented" (); + ~doc:"You have attempted a function which is not implemented" (); (* DB errors *) error Api_errors.handle_invalid ["class"; "handle"] ~doc:"You gave an invalid object reference. The object may have recently been deleted. The class parameter gives the type of reference given, and the handle parameter echoes the bad value given." (); @@ -513,9 +513,9 @@ let _ = ~doc:"A VDI with the specified location already exists within the SR" (); (* Session errors *) - error Api_errors.session_authentication_failed [] + error Api_errors.session_authentication_failed [] ~doc:"The credentials given by the user are incorrect, so access has been denied, and you have not been issued a session handle." (); - error Api_errors.session_invalid ["handle"] + error Api_errors.session_invalid ["handle"] ~doc:"You gave an invalid session reference. It may have been invalidated by a server restart, or timed out. You should get a new session handle, using one of the session.login_ calls. This error does not invalidate the current connection. The handle parameter echoes the bad value given." (); error Api_errors.change_password_rejected [ "msg" ] ~doc:"The system rejected the password change request; perhaps the new password was too short?" (); @@ -526,7 +526,7 @@ let _ = error Api_errors.network_already_connected ["network"; "connected PIF"] ~doc:"You tried to create a PIF, but the network you tried to attach it to is already attached to some other PIF, and so the creation failed." 
(); error Api_errors.cannot_destroy_system_network [ "network" ] - ~doc:"You tried to destroy a system network: these cannot be destroyed." (); + ~doc:"You tried to destroy a system network: these cannot be destroyed." (); error Api_errors.pif_is_physical ["PIF"] ~doc:"You tried to destroy a PIF, but it represents an aspect of the physical host configuration, and so cannot be destroyed. The parameter echoes the PIF handle you gave." (); error Api_errors.pif_is_vlan ["PIF"] @@ -596,15 +596,15 @@ let _ = error Api_errors.vlan_tag_invalid ["VLAN"] ~doc:"You tried to create a VLAN, but the tag you gave was invalid -- it must be between 0 and 4094. The parameter echoes the VLAN tag you gave." (); - error Api_errors.network_contains_vif ["vifs"] + error Api_errors.network_contains_vif ["vifs"] ~doc:"The network contains active VIFs and cannot be deleted." (); - error Api_errors.network_contains_pif ["pifs"] + error Api_errors.network_contains_pif ["pifs"] ~doc:"The network contains active PIFs and cannot be deleted." (); - error Api_errors.gpu_group_contains_vgpu ["vgpus"] + error Api_errors.gpu_group_contains_vgpu ["vgpus"] ~doc:"The GPU group contains active VGPUs and cannot be deleted." (); - error Api_errors.gpu_group_contains_pgpu ["pgpus"] + error Api_errors.gpu_group_contains_pgpu ["pgpus"] ~doc:"The GPU group contains active PGPUs and cannot be deleted." (); - error Api_errors.gpu_group_contains_no_pgpus ["gpu_group"] + error Api_errors.gpu_group_contains_no_pgpus ["gpu_group"] ~doc:"The GPU group does not contain any PGPUs." (); error Api_errors.device_attach_timeout [ "type"; "ref" ] ~doc:"A timeout happened while attempting to attach a device to a VM." (); @@ -638,7 +638,7 @@ let _ = ~doc:"You tried to create a VLAN or tunnel on top of a tunnel access PIF - use the underlying transport PIF instead." (); error Api_errors.pif_tunnel_still_exists ["PIF"] ~doc:"Operation cannot proceed while a tunnel exists on this interface." 
(); - error Api_errors.bridge_not_available [ "bridge" ] + error Api_errors.bridge_not_available [ "bridge" ] ~doc:"Could not find bridge required by VM." (); (* VM specific errors *) error Api_errors.vm_is_protected [ "vm" ] @@ -652,7 +652,7 @@ let _ = error Api_errors.vm_duplicate_vbd_device [ "vm"; "vbd"; "device" ] ~doc:"The specified VM has a duplicate VBD device and cannot be started." (); error Api_errors.illegal_vbd_device [ "vbd"; "device" ] - ~doc:"The specified VBD device is not recognized: please use a non-negative integer" (); + ~doc:"The specified VBD device is not recognized: please use a non-negative integer" (); error Api_errors.vm_not_resident_here [ "vm"; "host" ] ~doc:"The specified VM is not currently resident on the specified host." (); error Api_errors.domain_exists [ "vm"; "domid" ] @@ -660,7 +660,7 @@ let _ = error Api_errors.cannot_reset_control_domain [ "vm" ] ~doc:"The power-state of a control domain cannot be reset." (); error Api_errors.not_system_domain [ "vm" ] - ~doc:"The given VM is not registered as a system domain. This operation can only be performed on a registered system domain." (); + ~doc:"The given VM is not registered as a system domain. This operation can only be performed on a registered system domain." (); error Api_errors.vm_cannot_delete_default_template ["vm"] ~doc:"You cannot delete the specified default template." (); error Api_errors.vm_bad_power_state ["vm"; "expected"; "actual"] @@ -670,15 +670,15 @@ let _ = error Api_errors.vm_old_pv_drivers [ "vm"; "major"; "minor" ] ~doc:"You attempted an operation on a VM which requires a more recent version of the PV drivers. Please upgrade your PV drivers." (); error Api_errors.vm_lacks_feature_shutdown [ "vm" ] - ~doc:"You attempted an operation which needs the cooperative shutdown feature on a VM which lacks it." (); + ~doc:"You attempted an operation which needs the cooperative shutdown feature on a VM which lacks it." 
(); error Api_errors.vm_lacks_feature_vcpu_hotplug [ "vm" ] - ~doc:"You attempted an operation which needs the VM hotplug-vcpu feature on a VM which lacks it." (); + ~doc:"You attempted an operation which needs the VM hotplug-vcpu feature on a VM which lacks it." (); error Api_errors.vm_lacks_feature_suspend [ "vm" ] - ~doc:"You attempted an operation which needs the VM cooperative suspend feature on a VM which lacks it." (); + ~doc:"You attempted an operation which needs the VM cooperative suspend feature on a VM which lacks it." (); error Api_errors.vm_lacks_feature_static_ip_setting [ "vm" ] - ~doc:"You attempted an operation which needs the VM static-ip-setting feature on a VM which lacks it." (); + ~doc:"You attempted an operation which needs the VM static-ip-setting feature on a VM which lacks it." (); error Api_errors.vm_lacks_feature [ "vm" ] - ~doc:"You attempted an operation on a VM which lacks the feature." (); + ~doc:"You attempted an operation on a VM which lacks the feature." 
(); error Api_errors.vm_is_template ["vm"] ~doc:"The operation attempted is not valid for a template VM" (); error Api_errors.other_operation_in_progress ["class"; "object"] @@ -744,9 +744,9 @@ let _ = ~doc:"An error occured while restoring the memory image of the specified virtual machine" (); error Api_errors.vm_pv_drivers_in_use [ "vm" ] ~doc:"VM PV drivers still in use" (); - (* VM appliance errors *) - error Api_errors.operation_partially_failed [ "operation" ] - ~doc:"Some VMs belonging to the appliance threw an exception while carrying out the specified operation" (); + (* VM appliance errors *) + error Api_errors.operation_partially_failed [ "operation" ] + ~doc:"Some VMs belonging to the appliance threw an exception while carrying out the specified operation" (); (* Host errors *) error Api_errors.host_offline [ "host" ] @@ -939,7 +939,7 @@ let _ = error Api_errors.vm_host_incompatible_version ["host"; "vm"] ~doc:"This VM operation cannot be performed on an older-versioned host during an upgrade." (); error Api_errors.vm_host_incompatible_virtual_hardware_platform_version ["host"; "host_versions"; "vm"; "vm_version"] - ~doc:"You attempted to run a VM on a host that cannot provide the VM's required Virtual Hardware Platform version." (); + ~doc:"You attempted to run a VM on a host that cannot provide the VM's required Virtual Hardware Platform version." (); error Api_errors.vm_has_pci_attached ["vm"] ~doc:"This operation could not be performed, because the VM has one or more PCI devices passed through." 
(); error Api_errors.vm_has_vgpu ["vm"] @@ -967,19 +967,19 @@ let _ = error Api_errors.vm_shutdown_timeout [ "vm"; "timeout" ] ~doc:"VM failed to shutdown before the timeout expired" (); error Api_errors.vm_crashed [ "vm" ] - ~doc:"The VM crashed" (); + ~doc:"The VM crashed" (); error Api_errors.vm_rebooted [ "vm" ] - ~doc:"The VM unexpectedly rebooted" (); + ~doc:"The VM unexpectedly rebooted" (); error Api_errors.vm_halted [ "vm" ] - ~doc:"The VM unexpectedly halted" (); + ~doc:"The VM unexpectedly halted" (); error Api_errors.bootloader_failed [ "vm"; "msg" ] ~doc:"The bootloader returned an error" (); error Api_errors.unknown_bootloader [ "vm"; "bootloader" ] ~doc:"The requested bootloader is unknown" (); - error Api_errors.failed_to_start_emulator [ "vm"; "name"; "msg" ] - ~doc:"An emulator required to run this VM failed to start" (); - error Api_errors.vm_attached_to_more_than_one_vdi_with_timeoffset_marked_as_reset_on_boot [ "vm" ] - ~doc:"You attempted to start a VM that's attached to more than one VDI with a timeoffset marked as reset-on-boot." (); + error Api_errors.failed_to_start_emulator [ "vm"; "name"; "msg" ] + ~doc:"An emulator required to run this VM failed to start" (); + error Api_errors.vm_attached_to_more_than_one_vdi_with_timeoffset_marked_as_reset_on_boot [ "vm" ] + ~doc:"You attempted to start a VM that's attached to more than one VDI with a timeoffset marked as reset-on-boot." (); error Api_errors.vms_failed_to_cooperate [ ] ~doc:"The given VMs failed to release memory when instructed to do so" (); @@ -995,11 +995,11 @@ let _ = ~doc:"An SR with that uuid already exists." (); error Api_errors.sr_no_pbds ["sr"] ~doc:"The SR has no attached PBDs" (); - error Api_errors.sr_full ["requested";"maximum"] + error Api_errors.sr_full ["requested";"maximum"] ~doc:"The SR is full. 
Requested new size exceeds the maximum size" (); error Api_errors.pbd_exists ["sr";"host";"pbd"] ~doc:"A PBD already exists connecting the SR to the host" (); - error Api_errors.sr_has_pbd ["sr"] + error Api_errors.sr_has_pbd ["sr"] ~doc:"The SR is still connected to a host via a PBD. It cannot be destroyed or forgotten." (); error Api_errors.sr_has_multiple_pbds [ "PBD" ] ~doc:"The SR.shared flag cannot be set to false while the SR remains connected to multiple hosts" (); @@ -1022,7 +1022,7 @@ let _ = error Api_errors.vdi_is_not_iso [ "vdi"; "type" ] ~doc:"This operation can only be performed on CD VDIs (iso files or CDROM drives)" (); error Api_errors.host_cd_drive_empty [ ] - ~doc:"The host CDROM drive does not contain a valid CD" (); + ~doc:"The host CDROM drive does not contain a valid CD" (); error Api_errors.vdi_in_use [ "vdi"; "operation" ] ~doc:"This operation cannot be performed because this VDI is in use by some other operation" (); error Api_errors.vdi_not_available [ "vdi" ] @@ -1039,8 +1039,8 @@ let _ = ~doc:"This VDI was not mapped to a destination SR in VM.migrate_send operation" () ; error Api_errors.vdi_copy_failed [] ~doc:"The VDI copy action has failed" (); - error Api_errors.vdi_on_boot_mode_incompatible_with_operation [] - ~doc:"This operation is not permitted on VMs containing VDIs in the 'on-boot=reset' mode" (); + error Api_errors.vdi_on_boot_mode_incompatible_with_operation [] + ~doc:"This operation is not permitted on VMs containing VDIs in the 'on-boot=reset' mode" (); error Api_errors.cannot_create_state_file [] ~doc:"An HA statefile could not be created, perhaps because no SR with the appropriate capability was found." (); error Api_errors.vif_not_in_map [ "vif" ] @@ -1051,7 +1051,7 @@ let _ = error Api_errors.sr_not_empty [ ] ~doc:"The SR operation cannot be performed because the SR is not empty." 
(); error Api_errors.sr_device_in_use [ ] - ~doc:"The SR operation cannot be performed because a device underlying the SR is in use by the host." (); + ~doc:"The SR operation cannot be performed because a device underlying the SR is in use by the host." (); error Api_errors.sr_not_sharable [ "sr"; "host" ] ~doc:"The PBD could not be plugged because the SR is in use by another host and is not marked as sharable." (); error Api_errors.sr_indestructible ["sr"] @@ -1063,12 +1063,12 @@ let _ = error Api_errors.sm_plugin_communication_failure ["sm"] ~doc:"The SM plugin did not respond to a query." (); - - error Api_errors.device_already_attached ["device"] + + error Api_errors.device_already_attached ["device"] ~doc:"The device is already attached to a VM" (); - error Api_errors.device_already_detached ["device"] + error Api_errors.device_already_detached ["device"] ~doc:"The device is not currently attached" (); - error Api_errors.device_already_exists ["device"] + error Api_errors.device_already_exists ["device"] ~doc:"A device with the name given already exists on the selected VM" (); error Api_errors.invalid_device ["device"] ~doc:"The device name is invalid" (); @@ -1083,7 +1083,7 @@ let _ = (* Import export errors *) error Api_errors.import_incompatible_version [ ] - ~doc:"The import failed because this export has been created by a different (incompatible) product version" (); + ~doc:"The import failed because this export has been created by a different (incompatible) product version" (); error Api_errors.import_error_generic [ "msg" ] ~doc:"The VM could not be imported." 
(); error Api_errors.import_error_premature_eof [] @@ -1101,7 +1101,7 @@ let _ = (* Restore errors *) error Api_errors.restore_incompatible_version [ ] - ~doc:"The restore could not be performed because this backup has been created by a different (incompatible) product version" (); + ~doc:"The restore could not be performed because this backup has been created by a different (incompatible) product version" (); error Api_errors.restore_target_missing_device [ "device" ] ~doc:"The restore could not be performed because a network interface is missing" (); error Api_errors.restore_target_mgmt_if_not_in_backup [ ] @@ -1116,13 +1116,13 @@ let _ = - (* Event errors *) + (* Event errors *) error Api_errors.events_lost [] ~doc:"Some events have been lost from the queue and cannot be retrieved." (); error Api_errors.event_subscription_parse_failure [ "subscription" ] - ~doc:"The server failed to parse your event subscription. Valid values include: *, class-name, class-name/object-reference." (); + ~doc:"The server failed to parse your event subscription. Valid values include: *, class-name, class-name/object-reference." (); error Api_errors.event_from_token_parse_failure [ "token" ] - ~doc:"The event.from token could not be parsed. Valid values include: '', and a value returned from a previous event.from call." (); + ~doc:"The event.from token could not be parsed. Valid values include: '', and a value returned from a previous event.from call." (); error Api_errors.session_not_registered ["handle"] ~doc:"This session is not registered to receive events. You must call event.register before event.next. The session handle you are using is echoed." (); @@ -1153,7 +1153,7 @@ let _ = error Api_errors.patch_apply_failed [ "output" ] ~doc:"The patch apply failed. Please see attached output." (); error Api_errors.patch_apply_failed_backup_files_exist [ "output" ] - ~doc:"The patch apply failed: there are backup files created while applying patch. 
Please remove these backup files before applying patch again." (); + ~doc:"The patch apply failed: there are backup files created while applying patch. Please remove these backup files before applying patch again." (); error Api_errors.patch_precheck_failed_unknown_error [ "patch"; "info" ] ~doc:"The patch precheck stage failed with an unknown error. See attached info for more details." (); error Api_errors.patch_precheck_failed_prerequisite_missing [ "patch"; "prerequisite_patch_uuid_list" ] @@ -1213,7 +1213,7 @@ let _ = error Api_errors.ha_abort_new_master [ "reason" ] ~doc:"This host cannot accept the proposed new master setting at this time." (); - + error Api_errors.ha_no_plan [ ] ~doc:"Cannot find a plan for placement of VMs as there are no other hosts available." (); @@ -1234,9 +1234,9 @@ let _ = ~doc:"This operation cannot be performed because it would invalidate VM failover planning such that the system would be unable to guarantee to restart protected VMs after a Host failure." (); - error Api_errors.ha_cannot_change_bond_status_of_mgmt_iface [ ] - ~doc:"This operation cannot be performed because creating or deleting a bond involving the management interface is not allowed while HA is on. In order to do that, disable HA, create or delete the bond then re-enable HA." - (); + error Api_errors.ha_cannot_change_bond_status_of_mgmt_iface [ ] + ~doc:"This operation cannot be performed because creating or deleting a bond involving the management interface is not allowed while HA is on. In order to do that, disable HA, create or delete the bond then re-enable HA." + (); error Api_errors.incompatible_statefile_sr ["SR type"] ~doc:"The specified SR is incompatible with the selected HA cluster stack." @@ -1252,7 +1252,7 @@ let _ = error Api_errors.system_status_retrieval_failed ["reason"] ~doc:"Retrieving system status from the host failed. A diagnostic reason suitable for support organisations is also returned." 
(); - + error Api_errors.system_status_must_use_tar_on_oem [] ~doc:"You must use tar output to retrieve system status from an OEM host." (); @@ -1295,68 +1295,68 @@ let _ = error Api_errors.ssl_verify_error ["reason"] ~doc:"The remote system's SSL certificate failed to verify against our certificate library." (); - - error Api_errors.cannot_enable_redo_log ["reason"] - ~doc:"Could not enable redo log." (); - error Api_errors.redo_log_is_enabled [] - ~doc:"The operation could not be performed because a redo log is enabled on the Pool." (); - + error Api_errors.cannot_enable_redo_log ["reason"] + ~doc:"Could not enable redo log." (); + + error Api_errors.redo_log_is_enabled [] + ~doc:"The operation could not be performed because a redo log is enabled on the Pool." (); + error Api_errors.vm_bios_strings_already_set [] ~doc:"The BIOS strings for this VM have already been set and cannot be changed anymore." (); - + (* CPU feature masking (a.k.a. Intel FlexMigration or AMD Extended Migration technology) *) - + error Api_errors.invalid_feature_string ["details"] - ~doc:"The given feature string is not valid." (); - + ~doc:"The given feature string is not valid." (); + error Api_errors.cpu_feature_masking_not_supported ["details"] - ~doc:"The CPU does not support masking of features." (); + ~doc:"The CPU does not support masking of features." (); error Api_errors.feature_requires_hvm ["details"] ~doc:"The VM is set up to use a feature that requires it to boot as HVM." (); - (* Disaster recovery errors *) - error Api_errors.vdi_contains_metadata_of_this_pool ["vdi"; "pool"] - ~doc:"The VDI could not be opened for metadata recovery as it contains the current pool's metadata." (); + (* Disaster recovery errors *) + error Api_errors.vdi_contains_metadata_of_this_pool ["vdi"; "pool"] + ~doc:"The VDI could not be opened for metadata recovery as it contains the current pool's metadata." 
(); - error Api_errors.no_more_redo_logs_allowed [] - ~doc:"The upper limit of active redo log instances was reached." (); + error Api_errors.no_more_redo_logs_allowed [] + ~doc:"The upper limit of active redo log instances was reached." (); - error Api_errors.could_not_import_database ["reason"] - ~doc:"An error occurred while attempting to import a database from a metadata VDI" (); + error Api_errors.could_not_import_database ["reason"] + ~doc:"An error occurred while attempting to import a database from a metadata VDI" (); - error Api_errors.vm_incompatible_with_this_host ["vm"; "host"; "reason"] - ~doc:"The VM is incompatible with the CPU features of this host." (); + error Api_errors.vm_incompatible_with_this_host ["vm"; "host"; "reason"] + ~doc:"The VM is incompatible with the CPU features of this host." (); - error Api_errors.cannot_destroy_disaster_recovery_task ["reason"] - ~doc:"The disaster recovery task could not be cleanly destroyed." (); + error Api_errors.cannot_destroy_disaster_recovery_task ["reason"] + ~doc:"The disaster recovery task could not be cleanly destroyed." (); - error Api_errors.vm_is_part_of_an_appliance ["vm"; "appliance"] - ~doc:"This operation is not allowed as the VM is part of an appliance." (); + error Api_errors.vm_is_part_of_an_appliance ["vm"; "appliance"] + ~doc:"This operation is not allowed as the VM is part of an appliance." (); - error Api_errors.vm_to_import_is_not_newer_version ["vm"; "existing_version"; "version_to_import"] - ~doc:"The VM cannot be imported unforced because it is either the same version or an older version of an existing VM." (); + error Api_errors.vm_to_import_is_not_newer_version ["vm"; "existing_version"; "version_to_import"] + ~doc:"The VM cannot be imported unforced because it is either the same version or an older version of an existing VM." 
(); - error Api_errors.vm_call_plugin_rate_limit ["VM"; "interval"; "wait"] - ~doc:"There is a minimal interval required between consecutive plugin calls made on the same VM, please wait before retry." (); + error Api_errors.vm_call_plugin_rate_limit ["VM"; "interval"; "wait"] + ~doc:"There is a minimal interval required between consecutive plugin calls made on the same VM, please wait before retry." (); - error Api_errors.vm_is_immobile ["VM"] - ~doc:"The VM is configured in a way that prevents it from being mobile." () + error Api_errors.vm_is_immobile ["VM"] + ~doc:"The VM is configured in a way that prevents it from being mobile." () let _ = - message (fst Api_messages.ha_pool_overcommitted) ~doc:"Pool has become overcommitted: it can no longer guarantee to restart protected VMs if the configured number of hosts fail." (); - message (fst Api_messages.ha_statefile_lost) ~doc:"Host lost access to HA storage heartbeat" (); - message (fst Api_messages.ha_heartbeat_approaching_timeout) ~doc:"HA network heartbeat almost timed-out" (); - message (fst Api_messages.ha_statefile_approaching_timeout) ~doc:"HA storage heartbeat almost timed-out" (); - message (fst Api_messages.ha_xapi_healthcheck_approaching_timeout) ~doc:"HA xapi healthcheck almost timed-out" (); - message (fst Api_messages.ha_network_bonding_error) ~doc:"HA network heartbeat interface bonding error" (); - message (fst Api_messages.vif_qos_failed) ~doc:"Applying QoS to VIF failed." (); - message (fst Api_messages.vbd_qos_failed) ~doc:"Applying QoS to VBD failed." (); - message (fst Api_messages.vcpu_qos_failed) ~doc:"Applying QoS to VCPU failed." (); - message (fst Api_messages.pool_master_transition) ~doc:"Host has become the new Pool master." (); - message (fst Api_messages.pbd_plug_failed_on_server_start) ~doc:"Host failed to attach one or more Storage Repositories." 
(); - () + message (fst Api_messages.ha_pool_overcommitted) ~doc:"Pool has become overcommitted: it can no longer guarantee to restart protected VMs if the configured number of hosts fail." (); + message (fst Api_messages.ha_statefile_lost) ~doc:"Host lost access to HA storage heartbeat" (); + message (fst Api_messages.ha_heartbeat_approaching_timeout) ~doc:"HA network heartbeat almost timed-out" (); + message (fst Api_messages.ha_statefile_approaching_timeout) ~doc:"HA storage heartbeat almost timed-out" (); + message (fst Api_messages.ha_xapi_healthcheck_approaching_timeout) ~doc:"HA xapi healthcheck almost timed-out" (); + message (fst Api_messages.ha_network_bonding_error) ~doc:"HA network heartbeat interface bonding error" (); + message (fst Api_messages.vif_qos_failed) ~doc:"Applying QoS to VIF failed." (); + message (fst Api_messages.vbd_qos_failed) ~doc:"Applying QoS to VBD failed." (); + message (fst Api_messages.vcpu_qos_failed) ~doc:"Applying QoS to VCPU failed." (); + message (fst Api_messages.pool_master_transition) ~doc:"Host has become the new Pool master." (); + message (fst Api_messages.pbd_plug_failed_on_server_start) ~doc:"Host failed to attach one or more Storage Repositories." 
(); + () (* ------------------------------------------------------------------------------------------------------------ Session Management @@ -1365,144 +1365,144 @@ let _ = (* Session.Login *) let session_login = call ~flags:[] - ~name:"login_with_password" - ~in_product_since:rel_rio - ~doc:"Attempt to authenticate the user, returning a session reference if successful" - ~result:(Ref _session,"reference of newly created session") - ~versioned_params: - [{param_type=String; param_name="uname"; param_doc="Username for login."; param_release=rio_release; param_default=None}; - {param_type=String; param_name="pwd"; param_doc="Password for login."; param_release=rio_release; param_default=None}; - {param_type=String; param_name="version"; param_doc="Client API version."; param_release=miami_release; param_default=Some (VString "1.1")}; - {param_type=String; param_name="originator"; param_doc="Key string for distinguishing different API users sharing the same login name."; param_release=clearwater_release; param_default=Some (VString "")} - ] - ~errs:[Api_errors.session_authentication_failed; Api_errors.host_is_slave] - ~secret:true - ~allowed_roles:_R_ALL (*any static role can try to create a user session*) - () + ~name:"login_with_password" + ~in_product_since:rel_rio + ~doc:"Attempt to authenticate the user, returning a session reference if successful" + ~result:(Ref _session,"reference of newly created session") + ~versioned_params: + [{param_type=String; param_name="uname"; param_doc="Username for login."; param_release=rio_release; param_default=None}; + {param_type=String; param_name="pwd"; param_doc="Password for login."; param_release=rio_release; param_default=None}; + {param_type=String; param_name="version"; param_doc="Client API version."; param_release=miami_release; param_default=Some (VString "1.1")}; + {param_type=String; param_name="originator"; param_doc="Key string for distinguishing different API users sharing the same login name."; 
param_release=clearwater_release; param_default=Some (VString "")} + ] + ~errs:[Api_errors.session_authentication_failed; Api_errors.host_is_slave] + ~secret:true + ~allowed_roles:_R_ALL (*any static role can try to create a user session*) + () let slave_login = call ~flags:[] - ~name:"slave_login" - ~doc:"Attempt to authenticate to the pool master by presenting the slave's host ref and pool secret" - ~result:(Ref _session,"ID of newly created session") - ~params:[ - Ref _host, "host", "Host id of slave"; - String, "psecret", "Pool secret" - ] - ~in_oss_since:None - ~in_product_since:rel_rio - ~secret:true - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session !!! *) - () + ~name:"slave_login" + ~doc:"Attempt to authenticate to the pool master by presenting the slave's host ref and pool secret" + ~result:(Ref _session,"ID of newly created session") + ~params:[ + Ref _host, "host", "Host id of slave"; + String, "psecret", "Pool secret" + ] + ~in_oss_since:None + ~in_product_since:rel_rio + ~secret:true + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session !!! *) + () let slave_local_login = call ~flags:[] - ~in_product_since:rel_miami - ~name:"slave_local_login" - ~doc:"Authenticate locally against a slave in emergency mode. Note the resulting sessions are only good for use on this host." - ~result:(Ref _session,"ID of newly created session") - ~params:[ - String, "psecret", "Pool secret" - ] - ~in_oss_since:None - ~secret:true - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session*) - () + ~in_product_since:rel_miami + ~name:"slave_local_login" + ~doc:"Authenticate locally against a slave in emergency mode. Note the resulting sessions are only good for use on this host." 
+ ~result:(Ref _session,"ID of newly created session") + ~params:[ + String, "psecret", "Pool secret" + ] + ~in_oss_since:None + ~secret:true + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN (*system can create a slave session*) + () let slave_local_login_with_password = call ~flags:[] - ~in_product_since:rel_miami - ~name:"slave_local_login_with_password" - ~doc:"Authenticate locally against a slave in emergency mode. Note the resulting sessions are only good for use on this host." - ~result:(Ref _session,"ID of newly created session") - ~params:[ - String, "uname", "Username for login."; - String, "pwd", "Password for login."; - ] - ~in_oss_since:None - ~secret:true - ~allowed_roles:_R_POOL_ADMIN (*only root can do an emergency slave login*) - () + ~in_product_since:rel_miami + ~name:"slave_local_login_with_password" + ~doc:"Authenticate locally against a slave in emergency mode. Note the resulting sessions are only good for use on this host." + ~result:(Ref _session,"ID of newly created session") + ~params:[ + String, "uname", "Username for login."; + String, "pwd", "Password for login."; + ] + ~in_oss_since:None + ~secret:true + ~allowed_roles:_R_POOL_ADMIN (*only root can do an emergency slave login*) + () let session_create_from_db_file = call - ~lifecycle:[Published, rel_dundee, ""] - ~name:"create_from_db_file" - ~params:[String, "filename", "Database dump filename."] - ~result:(Ref _session, "ID of newly created session") - ~in_oss_since:None - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~lifecycle:[Published, rel_dundee, ""] + ~name:"create_from_db_file" + ~params:[String, "filename", "Database dump filename."] + ~result:(Ref _session, "ID of newly created session") + ~in_oss_since:None + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let local_logout = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"local_logout" - ~doc:"Log out of local session." 
- ~params:[] - ~in_oss_since:None - ~allowed_roles:_R_POOL_ADMIN (*system can destroy a local session*) - () + ~in_product_since:rel_miami + ~name:"local_logout" + ~doc:"Log out of local session." + ~params:[] + ~in_oss_since:None + ~allowed_roles:_R_POOL_ADMIN (*system can destroy a local session*) + () (* Session.Logout *) - + let session_logout = call ~flags:[`Session] - ~in_product_since:rel_rio - ~name:"logout" - ~doc:"Log out of a session" - ~params:[] - ~allowed_roles:_R_ALL (*any role can destroy a known user session*) - () + ~in_product_since:rel_rio + ~name:"logout" + ~doc:"Log out of a session" + ~params:[] + ~allowed_roles:_R_ALL (*any role can destroy a known user session*) + () let session_chpass = call ~flags:[`Session] - ~name:"change_password" - ~doc:"Change the account password; if your session is authenticated with root priviledges then the old_pwd is validated and the new_pwd is set regardless" - ~params:[ - String, "old_pwd", "Old password for account"; - String, "new_pwd", "New password for account" - ] - ~in_product_since:rel_rio - ~in_oss_since:None - ~allowed_roles:_R_LOCAL_ROOT_ONLY (*not even pool-admin can change passwords, only root*) - () + ~name:"change_password" + ~doc:"Change the account password; if your session is authenticated with root priviledges then the old_pwd is validated and the new_pwd is set regardless" + ~params:[ + String, "old_pwd", "Old password for account"; + String, "new_pwd", "New password for account" + ] + ~in_product_since:rel_rio + ~in_oss_since:None + ~allowed_roles:_R_LOCAL_ROOT_ONLY (*not even pool-admin can change passwords, only root*) + () (* static function for class session *) let session_get_all_subject_identifiers = call - ~name:"get_all_subject_identifiers" - ~doc:"Return a list of all the user subject-identifiers of all existing sessions" - ~result:(Set (String), "The list of user subject-identifiers of all existing sessions") - ~params:[] - ~in_product_since:rel_george - ~in_oss_since:None - 
~allowed_roles:_R_ALL - () + ~name:"get_all_subject_identifiers" + ~doc:"Return a list of all the user subject-identifiers of all existing sessions" + ~result:(Set (String), "The list of user subject-identifiers of all existing sessions") + ~params:[] + ~in_product_since:rel_george + ~in_oss_since:None + ~allowed_roles:_R_ALL + () (* static function for class session *) let session_logout_subject_identifier = call - ~name:"logout_subject_identifier" - ~doc:"Log out all sessions associated to a user subject-identifier, except the session associated with the context calling this function" - ~params:[ - String, "subject_identifier", "User subject-identifier of the sessions to be destroyed" - ] - ~in_product_since:rel_george - ~in_oss_since:None - ~allowed_roles:_R_POOL_OP - () + ~name:"logout_subject_identifier" + ~doc:"Log out all sessions associated to a user subject-identifier, except the session associated with the context calling this function" + ~params:[ + String, "subject_identifier", "User subject-identifier of the sessions to be destroyed" + ] + ~in_product_since:rel_george + ~in_oss_since:None + ~allowed_roles:_R_POOL_OP + () (* ------------------------------------------------------------------------------------------------------------ Asynchronous Task Management ------------------------------------------------------------------------------------------------------------ *) let cancel_result = Enum ("cancel_result", - [ "OK", "OK"; - "Failed", "Not OK" ]) + [ "OK", "OK"; + "Failed", "Not OK" ]) (* ------------------------------------------------------------------------------------------------------------ - RRD Consolidation function specification + RRD Consolidation function specification ------------------------------------------------------------------------------------------------------------ *) let rrd_cf_type = Enum ("rrd_cf_type", - [ "Average", "Average"; - "Min", "Minimum"; - "Max", "Maximum"; - "Last", "Last value" ]) + [ "Average", "Average"; + 
"Min", "Minimum"; + "Max", "Maximum"; + "Last", "Last value" ]) (* ------------------------------------------------------------------------------------------------------------ @@ -1512,1651 +1512,1651 @@ let rrd_cf_type = Enum ("rrd_cf_type", (* Install and UnInstall correspond to autogenerate create/delete functions *) let vm_get_boot_record = call - ~name:"get_boot_record" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Returns a record describing the VM's dynamic state, initialised when the VM boots and updated to reflect runtime configuration changes e.g. CPU hotplug" - ~result:(Record _vm, "A record describing the VM") - ~params:[Ref _vm, "self", "The VM whose boot-time state to return"] - ~errs:[] - ~flags:[`Session] (* no async *) - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_boot_record" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Returns a record describing the VM's dynamic state, initialised when the VM boots and updated to reflect runtime configuration changes e.g. 
CPU hotplug" + ~result:(Record _vm, "A record describing the VM") + ~params:[Ref _vm, "self", "The VM whose boot-time state to return"] + ~errs:[] + ~flags:[`Session] (* no async *) + ~allowed_roles:_R_READ_ONLY + () let vm_get_data_sources = call - ~name:"get_data_sources" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"" - ~result:(Set (Record _data_source), "A set of data sources") - ~params:[Ref _vm, "self", "The VM to interrogate"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_data_sources" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"" + ~result:(Set (Record _data_source), "A set of data sources") + ~params:[Ref _vm, "self", "The VM to interrogate"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let vm_record_data_source = call - ~name:"record_data_source" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Start recording the specified data source" - ~params:[Ref _vm, "self", "The VM"; - String, "data_source", "The data source to record"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"record_data_source" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Start recording the specified data source" + ~params:[Ref _vm, "self", "The VM"; + String, "data_source", "The data source to record"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_VM_ADMIN + () let vm_query_data_source = call - ~name:"query_data_source" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Query the latest value of the specified data source" - ~params:[Ref _vm, "self", "The VM"; - String, "data_source", "The data source to query"] - ~result:(Float,"The latest value, averaged over the last 5 seconds") - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"query_data_source" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Query the latest value of the specified data source" + ~params:[Ref _vm, "self", "The VM"; + String, 
"data_source", "The data source to query"] + ~result:(Float,"The latest value, averaged over the last 5 seconds") + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let vm_forget_data_source_archives = call - ~name:"forget_data_source_archives" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Forget the recorded statistics related to the specified data source" - ~params:[Ref _vm, "self", "The VM"; - String, "data_source", "The data source whose archives are to be forgotten"] - ~flags:[`Session] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"forget_data_source_archives" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Forget the recorded statistics related to the specified data source" + ~params:[Ref _vm, "self", "The VM"; + String, "data_source", "The data source whose archives are to be forgotten"] + ~flags:[`Session] + ~allowed_roles:_R_VM_ADMIN + () let vm_set_ha_always_run = call - ~name:"set_ha_always_run" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Set the value of the ha_always_run" - ~params:[Ref _vm, "self", "The VM"; - Bool, "value", "The value"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - ~internal_deprecated_since:rel_boston - () + ~name:"set_ha_always_run" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Set the value of the ha_always_run" + ~params:[Ref _vm, "self", "The VM"; + Bool, "value", "The value"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + ~internal_deprecated_since:rel_boston + () let vm_set_ha_restart_priority = call - ~name:"set_ha_restart_priority" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Set the value of the ha_restart_priority field" - ~params:[Ref _vm, "self", "The VM"; - String, "value", "The value"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_ha_restart_priority" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Set the value of the ha_restart_priority field" + ~params:[Ref _vm, "self", "The VM"; + String, 
"value", "The value"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () (* VM.Clone *) let vm_clone = call - ~name:"clone" - ~in_product_since:rel_rio - ~doc:"Clones the specified VM, making a new VM. Clone automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write). This function can only be called when the VM is in the Halted State." - ~result:(Ref _vm, "The reference of the newly created VM.") - ~params:[ - Ref _vm, "vm", "The VM to be cloned"; - String, "new_name", "The name of the cloned VM" - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed - ;Api_errors.license_restriction - ] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"clone" + ~in_product_since:rel_rio + ~doc:"Clones the specified VM, making a new VM. Clone automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write). This function can only be called when the VM is in the Halted State." + ~result:(Ref _vm, "The reference of the newly created VM.") + ~params:[ + Ref _vm, "vm", "The VM to be cloned"; + String, "new_name", "The name of the cloned VM" + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed + ;Api_errors.license_restriction + ] + ~allowed_roles:_R_VM_ADMIN + () (* VM.Copy *) let vm_copy = call - ~name:"copy" - ~lifecycle:[ - Published, rel_rio, "Copies a VM to an SR. There must be a host that can see both the source and destination SRs simultaneously"; - Extended, rel_cowley, "The copy can now be performed between any two SRs." ] - ~doc:"Copied the specified VM, making a new VM. Unlike clone, copy does not exploits the capabilities of the underlying storage repository in which the VM's disk images are stored. Instead, copy guarantees that the disk images of the newly created VM will be 'full disks' - i.e. not part of a CoW chain. 
This function can only be called when the VM is in the Halted State." - ~result:(Ref _vm, "The reference of the newly created VM.") - ~params:[ - Ref _vm, "vm", "The VM to be copied"; - String, "new_name", "The name of the copied VM"; - Ref _sr, "sr", "An SR to copy all the VM's disks into (if an invalid reference then it uses the existing SRs)"; - ] - ~errs:(errnames_of_call vm_clone) - ~allowed_roles:_R_VM_ADMIN - () + ~name:"copy" + ~lifecycle:[ + Published, rel_rio, "Copies a VM to an SR. There must be a host that can see both the source and destination SRs simultaneously"; + Extended, rel_cowley, "The copy can now be performed between any two SRs." ] + ~doc:"Copied the specified VM, making a new VM. Unlike clone, copy does not exploits the capabilities of the underlying storage repository in which the VM's disk images are stored. Instead, copy guarantees that the disk images of the newly created VM will be 'full disks' - i.e. not part of a CoW chain. This function can only be called when the VM is in the Halted State." + ~result:(Ref _vm, "The reference of the newly created VM.") + ~params:[ + Ref _vm, "vm", "The VM to be copied"; + String, "new_name", "The name of the copied VM"; + Ref _sr, "sr", "An SR to copy all the VM's disks into (if an invalid reference then it uses the existing SRs)"; + ] + ~errs:(errnames_of_call vm_clone) + ~allowed_roles:_R_VM_ADMIN + () (* VM.snapshot *) let vm_snapshot_with_quiesce = call - ~name:"snapshot_with_quiesce" - ~in_product_since: rel_orlando - ~doc:"Snapshots the specified VM with quiesce, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write)." 
- ~result: (Ref _vm, "The reference of the newly created VM.") - ~params:[ - Ref _vm, "vm", "The VM to be snapshotted"; - String, "new_name", "The name of the snapshotted VM" - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed; - Api_errors.vm_snapshot_with_quiesce_failed; - Api_errors.vm_snapshot_with_quiesce_timeout; - Api_errors.vm_snapshot_with_quiesce_plugin_does_not_respond; - Api_errors.vm_snapshot_with_quiesce_not_supported ] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"snapshot_with_quiesce" + ~in_product_since: rel_orlando + ~doc:"Snapshots the specified VM with quiesce, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write)." + ~result: (Ref _vm, "The reference of the newly created VM.") + ~params:[ + Ref _vm, "vm", "The VM to be snapshotted"; + String, "new_name", "The name of the snapshotted VM" + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed; + Api_errors.vm_snapshot_with_quiesce_failed; + Api_errors.vm_snapshot_with_quiesce_timeout; + Api_errors.vm_snapshot_with_quiesce_plugin_does_not_respond; + Api_errors.vm_snapshot_with_quiesce_not_supported ] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_update_snapshot_metadata = call - ~name:"update_snapshot_metadata" - ~in_product_since: rel_george - ~internal_deprecated_since:rel_midnight_ride - ~doc:"" - ~hide_from_docs:true - ~params:[ - Ref _vm, "vm", "The VM to update"; - Ref _vm, "snapshot_of", ""; - DateTime, "snapshot_time", ""; - String, "transportable_snapshot_id", "" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"update_snapshot_metadata" + ~in_product_since: rel_george + ~internal_deprecated_since:rel_midnight_ride + ~doc:"" + ~hide_from_docs:true + ~params:[ + Ref _vm, "vm", "The VM to update"; + Ref _vm, "snapshot_of", ""; + DateTime, "snapshot_time", ""; + String, "transportable_snapshot_id", 
"" ] + ~allowed_roles:_R_POOL_OP + () let vm_snapshot = call - ~name:"snapshot" - ~in_product_since: rel_orlando - ~doc:"Snapshots the specified VM, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write)." - ~result: (Ref _vm, "The reference of the newly created VM.") - ~params:[ - Ref _vm, "vm", "The VM to be snapshotted"; - String, "new_name", "The name of the snapshotted VM" - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed] - ~allowed_roles:_R_VM_POWER_ADMIN - ~doc_tags:[Snapshots] - () + ~name:"snapshot" + ~in_product_since: rel_orlando + ~doc:"Snapshots the specified VM, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write)." + ~result: (Ref _vm, "The reference of the newly created VM.") + ~params:[ + Ref _vm, "vm", "The VM to be snapshotted"; + String, "new_name", "The name of the snapshotted VM" + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed] + ~allowed_roles:_R_VM_POWER_ADMIN + ~doc_tags:[Snapshots] + () let vm_revert = call - ~name:"revert" - ~in_product_since: rel_midnight_ride - ~doc:"Reverts the specified VM to a previous state." - ~params:[Ref _vm, "snapshot", "The snapshotted state that we revert to"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; - Api_errors.sr_full; Api_errors.vm_revert_failed ] - ~allowed_roles:_R_VM_POWER_ADMIN - ~doc_tags:[Snapshots] - () + ~name:"revert" + ~in_product_since: rel_midnight_ride + ~doc:"Reverts the specified VM to a previous state." 
+ ~params:[Ref _vm, "snapshot", "The snapshotted state that we revert to"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; + Api_errors.sr_full; Api_errors.vm_revert_failed ] + ~allowed_roles:_R_VM_POWER_ADMIN + ~doc_tags:[Snapshots] + () let vm_checkpoint = call - ~name:"checkpoint" - ~in_product_since: rel_midnight_ride - ~doc:"Checkpoints the specified VM, making a new VM. Checkpoint automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write) and saves the memory image as well." - ~result: (Ref _vm, "The reference of the newly created VM.") - ~params:[ - Ref _vm, "vm", "The VM to be checkpointed"; - String, "new_name", "The name of the checkpointed VM" - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed; - Api_errors.vm_checkpoint_suspend_failed; Api_errors.vm_checkpoint_resume_failed] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"checkpoint" + ~in_product_since: rel_midnight_ride + ~doc:"Checkpoints the specified VM, making a new VM. Checkpoint automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write) and saves the memory image as well." + ~result: (Ref _vm, "The reference of the newly created VM.") + ~params:[ + Ref _vm, "vm", "The VM to be checkpointed"; + String, "new_name", "The name of the checkpointed VM" + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.sr_full; Api_errors.operation_not_allowed; + Api_errors.vm_checkpoint_suspend_failed; Api_errors.vm_checkpoint_resume_failed] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_create_template = call - ~name:"create_template" - ~hide_from_docs:true - ~internal_deprecated_since:rel_midnight_ride - ~in_product_since:rel_midnight_ride - ~doc:"Deprecated: use VM.clone or VM.copy instead." 
- ~result:(Ref _vm, "") - ~params:[ - Ref _vm, "vm", ""; - String, "new_name", "" - ] - ~errs:[] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"create_template" + ~hide_from_docs:true + ~internal_deprecated_since:rel_midnight_ride + ~in_product_since:rel_midnight_ride + ~doc:"Deprecated: use VM.clone or VM.copy instead." + ~result:(Ref _vm, "") + ~params:[ + Ref _vm, "vm", ""; + String, "new_name", "" + ] + ~errs:[] + ~allowed_roles:_R_VM_ADMIN + () let vm_import_convert = call - ~name:"import_convert" - ~in_product_since:rel_tampa - ~doc:"Import using a conversion service." - ~params:[ - String, "type", "Type of the conversion"; - String, "username", "Admin username on the host"; - String, "password", "Password on the host"; - Ref _sr, "sr", "The destination SR"; - Map(String, String), "remote_config", "Remote configuration options" - ] - ~errs:[] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"import_convert" + ~in_product_since:rel_tampa + ~doc:"Import using a conversion service." + ~params:[ + String, "type", "Type of the conversion"; + String, "username", "Admin username on the host"; + String, "password", "Password on the host"; + Ref _sr, "sr", "The destination SR"; + Map(String, String), "remote_config", "Remote configuration options" + ] + ~errs:[] + ~allowed_roles:_R_VM_ADMIN + () (* VM.Provision -- causes the template's disks to be instantiated *) let vm_provision = call - ~name:"provision" - ~doc:"Inspects the disk configuration contained within the VM's other_config, creates VDIs and VBDs and then executes any applicable post-install script." - ~params:[ - Ref _vm, "vm", "The VM to be provisioned"; - ] - ~in_oss_since:None - ~in_product_since:rel_rio - ~errs:(errnames_of_call vm_clone) - ~allowed_roles:_R_VM_ADMIN - () + ~name:"provision" + ~doc:"Inspects the disk configuration contained within the VM's other_config, creates VDIs and VBDs and then executes any applicable post-install script." 
+ ~params:[ + Ref _vm, "vm", "The VM to be provisioned"; + ] + ~in_oss_since:None + ~in_product_since:rel_rio + ~errs:(errnames_of_call vm_clone) + ~allowed_roles:_R_VM_ADMIN + () (* VM.Start *) let vm_start = call - ~name:"start" - ~in_product_since:rel_rio - ~doc:"Start the specified VM. This function can only be called with the VM is in the Halted State." - ~params:[ - Ref _vm, "vm", "The VM to start"; - Bool, "start_paused", "Instantiate VM in paused state if set to true."; - Bool, "force", "Attempt to force the VM to start. If this flag is false then the VM may fail pre-boot safety checks (e.g. if the CPU the VM last booted on looks substantially different to the current one)"; - ] - ~errs:[ - Api_errors.vm_bad_power_state; - Api_errors.vm_hvm_required; - Api_errors.vm_is_template; - Api_errors.other_operation_in_progress; - Api_errors.operation_not_allowed; - Api_errors.bootloader_failed; - Api_errors.unknown_bootloader; - Api_errors.no_hosts_available; - Api_errors.license_restriction; - ] - ~allowed_roles:_R_VM_OP - () + ~name:"start" + ~in_product_since:rel_rio + ~doc:"Start the specified VM. This function can only be called with the VM is in the Halted State." + ~params:[ + Ref _vm, "vm", "The VM to start"; + Bool, "start_paused", "Instantiate VM in paused state if set to true."; + Bool, "force", "Attempt to force the VM to start. If this flag is false then the VM may fail pre-boot safety checks (e.g. 
if the CPU the VM last booted on looks substantially different to the current one)"; + ] + ~errs:[ + Api_errors.vm_bad_power_state; + Api_errors.vm_hvm_required; + Api_errors.vm_is_template; + Api_errors.other_operation_in_progress; + Api_errors.operation_not_allowed; + Api_errors.bootloader_failed; + Api_errors.unknown_bootloader; + Api_errors.no_hosts_available; + Api_errors.license_restriction; + ] + ~allowed_roles:_R_VM_OP + () let vm_assert_can_boot_here = call - ~name:"assert_can_boot_here" - ~in_product_since:rel_rio - ~doc:"Returns an error if the VM could not boot on this host for some reason" - ~params:[ - Ref _vm, "self", "The VM"; - Ref _host, "host", "The host"; - ] - ~allowed_roles:_R_READ_ONLY - ~errs:[ - Api_errors.host_not_enough_free_memory; - Api_errors.vm_requires_sr; - Api_errors.vm_host_incompatible_version; - Api_errors.vm_host_incompatible_virtual_hardware_platform_version; - ] - ~doc_tags:[Memory] - () + ~name:"assert_can_boot_here" + ~in_product_since:rel_rio + ~doc:"Returns an error if the VM could not boot on this host for some reason" + ~params:[ + Ref _vm, "self", "The VM"; + Ref _host, "host", "The host"; + ] + ~allowed_roles:_R_READ_ONLY + ~errs:[ + Api_errors.host_not_enough_free_memory; + Api_errors.vm_requires_sr; + Api_errors.vm_host_incompatible_version; + Api_errors.vm_host_incompatible_virtual_hardware_platform_version; + ] + ~doc_tags:[Memory] + () let vm_assert_agile = call - ~name:"assert_agile" - ~in_product_since:rel_orlando - ~doc:"Returns an error if the VM is not considered agile e.g. because it is tied to a resource local to a host" - ~params:[Ref _vm, "self", "The VM"] - ~allowed_roles:_R_READ_ONLY - () + ~name:"assert_agile" + ~in_product_since:rel_orlando + ~doc:"Returns an error if the VM is not considered agile e.g. 
because it is tied to a resource local to a host" + ~params:[Ref _vm, "self", "The VM"] + ~allowed_roles:_R_READ_ONLY + () let vm_get_possible_hosts = call - ~name:"get_possible_hosts" - ~in_product_since:rel_rio - ~doc:"Return the list of hosts on which this VM may run." - ~params:[Ref _vm, "vm", "The VM" ] - ~result:(Set (Ref _host), "The possible hosts") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_possible_hosts" + ~in_product_since:rel_rio + ~doc:"Return the list of hosts on which this VM may run." + ~params:[Ref _vm, "vm", "The VM" ] + ~result:(Set (Ref _host), "The possible hosts") + ~allowed_roles:_R_READ_ONLY + () let vm_retrieve_wlb_recommendations = call - ~name:"retrieve_wlb_recommendations" - ~in_product_since:rel_george - ~doc:"Returns mapping of hosts to ratings, indicating the suitability of starting the VM at that location according to wlb. Rating is replaced with an error if the VM cannot boot there." - ~params:[Ref _vm, "vm", "The VM";] - ~result:(Map (Ref _host, Set(String)), "The potential hosts and their corresponding recommendations or errors") - ~allowed_roles:_R_READ_ONLY - () - + ~name:"retrieve_wlb_recommendations" + ~in_product_since:rel_george + ~doc:"Returns mapping of hosts to ratings, indicating the suitability of starting the VM at that location according to wlb. Rating is replaced with an error if the VM cannot boot there." + ~params:[Ref _vm, "vm", "The VM";] + ~result:(Map (Ref _host, Set(String)), "The potential hosts and their corresponding recommendations or errors") + ~allowed_roles:_R_READ_ONLY + () + let vm_maximise_memory = call - ~in_product_since:rel_miami - ~name:"maximise_memory" - ~doc:"Returns the maximum amount of guest memory which will fit, together with overheads, in the supplied amount of physical memory. If 'exact' is true then an exact calculation is performed using the VM's current settings. 
If 'exact' is false then a more conservative approximation is used" - ~params:[Ref _vm, "self", "The VM"; - Int, "total", "Total amount of physical RAM to fit within"; - Bool, "approximate", "If false the limit is calculated with the guest's current exact configuration. Otherwise a more approximate calculation is performed"; - ] - ~result:(Int, "The maximum possible static-max") - ~allowed_roles:_R_READ_ONLY - ~doc_tags:[Memory] - () + ~in_product_since:rel_miami + ~name:"maximise_memory" + ~doc:"Returns the maximum amount of guest memory which will fit, together with overheads, in the supplied amount of physical memory. If 'exact' is true then an exact calculation is performed using the VM's current settings. If 'exact' is false then a more conservative approximation is used" + ~params:[Ref _vm, "self", "The VM"; + Int, "total", "Total amount of physical RAM to fit within"; + Bool, "approximate", "If false the limit is calculated with the guest's current exact configuration. Otherwise a more approximate calculation is performed"; + ] + ~result:(Int, "The maximum possible static-max") + ~allowed_roles:_R_READ_ONLY + ~doc_tags:[Memory] + () let vm_get_allowed_VBD_devices = call ~flags:[`Session] ~no_current_operations:true - ~in_product_since:rel_rio - ~name:"get_allowed_VBD_devices" - ~doc:"Returns a list of the allowed values that a VBD device field can take" - ~params:[Ref _vm,"vm","The VM to query"] - ~result:(Set String, "The allowed values") - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_rio + ~name:"get_allowed_VBD_devices" + ~doc:"Returns a list of the allowed values that a VBD device field can take" + ~params:[Ref _vm,"vm","The VM to query"] + ~result:(Set String, "The allowed values") + ~allowed_roles:_R_READ_ONLY + () let vm_get_allowed_VIF_devices = call ~flags:[`Session] ~no_current_operations:true - ~in_product_since:rel_rio - ~name:"get_allowed_VIF_devices" - ~doc:"Returns a list of the allowed values that a VIF device field can take" - 
~params:[Ref _vm,"vm","The VM to query"] - ~result:(Set String, "The allowed values") - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_rio + ~name:"get_allowed_VIF_devices" + ~doc:"Returns a list of the allowed values that a VIF device field can take" + ~params:[Ref _vm,"vm","The VM to query"] + ~result:(Set String, "The allowed values") + ~allowed_roles:_R_READ_ONLY + () (* VM.atomic_set_resident_on *) (* an internal call that sets resident_on and clears the scheduled_to_be_resident_on atomically *) let vm_atomic_set_resident_on = call - ~in_product_since:rel_rio - ~pool_internal:true - ~hide_from_docs:true - ~name:"atomic_set_resident_on" - ~doc:"" - ~params:[Ref _vm, "vm", "The VM to modify"; - Ref _host, "host", "The host to set resident_on to" - ] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_rio + ~pool_internal:true + ~hide_from_docs:true + ~name:"atomic_set_resident_on" + ~doc:"" + ~params:[Ref _vm, "vm", "The VM to modify"; + Ref _host, "host", "The host to set resident_on to" + ] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let vm_compute_memory_overhead = call - ~in_product_since:rel_midnight_ride - ~name:"compute_memory_overhead" - ~doc:"Computes the virtualization memory overhead of a VM." - ~params:[Ref _vm, "vm", "The VM for which to compute the memory overhead"] - ~pool_internal:false - ~hide_from_docs:false - ~result:(Int, "the virtualization memory overhead of the VM.") - ~allowed_roles:_R_READ_ONLY - ~doc_tags:[Memory] - () + ~in_product_since:rel_midnight_ride + ~name:"compute_memory_overhead" + ~doc:"Computes the virtualization memory overhead of a VM." 
+ ~params:[Ref _vm, "vm", "The VM for which to compute the memory overhead"] + ~pool_internal:false + ~hide_from_docs:false + ~result:(Int, "the virtualization memory overhead of the VM.") + ~allowed_roles:_R_READ_ONLY + ~doc_tags:[Memory] + () let vm_set_memory_dynamic_max = call ~flags:[`Session] - ~in_product_since:rel_midnight_ride - ~name:"set_memory_dynamic_max" - ~doc:"Set the value of the memory_dynamic_max field" - ~params:[ - Ref _vm, "self", "The VM to modify"; - Int, "value", "The new value of memory_dynamic_max"; - ] - ~allowed_roles:_R_VM_POWER_ADMIN - ~errs:[] - ~doc_tags:[Memory] - () + ~in_product_since:rel_midnight_ride + ~name:"set_memory_dynamic_max" + ~doc:"Set the value of the memory_dynamic_max field" + ~params:[ + Ref _vm, "self", "The VM to modify"; + Int, "value", "The new value of memory_dynamic_max"; + ] + ~allowed_roles:_R_VM_POWER_ADMIN + ~errs:[] + ~doc_tags:[Memory] + () let vm_set_memory_dynamic_min = call ~flags:[`Session] - ~in_product_since:rel_midnight_ride - ~name:"set_memory_dynamic_min" - ~doc:"Set the value of the memory_dynamic_min field" - ~params:[ - Ref _vm, "self", "The VM to modify"; - Int, "value", "The new value of memory_dynamic_min"; - ] - ~allowed_roles:_R_VM_POWER_ADMIN - ~errs:[] - ~doc_tags:[Memory] - () + ~in_product_since:rel_midnight_ride + ~name:"set_memory_dynamic_min" + ~doc:"Set the value of the memory_dynamic_min field" + ~params:[ + Ref _vm, "self", "The VM to modify"; + Int, "value", "The new value of memory_dynamic_min"; + ] + ~allowed_roles:_R_VM_POWER_ADMIN + ~errs:[] + ~doc_tags:[Memory] + () let vm_set_memory_dynamic_range = call - ~name:"set_memory_dynamic_range" - ~in_product_since:rel_midnight_ride - ~doc:"Set the minimum and maximum amounts of physical memory the VM is \ - allowed to use." 
- ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[ - Ref _vm, "self", "The VM"; - Int, "min", "The new minimum value"; - Int, "max", "The new maximum value"; - ] - ~doc_tags:[Memory] - () + ~name:"set_memory_dynamic_range" + ~in_product_since:rel_midnight_ride + ~doc:"Set the minimum and maximum amounts of physical memory the VM is \ + allowed to use." + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[ + Ref _vm, "self", "The VM"; + Int, "min", "The new minimum value"; + Int, "max", "The new maximum value"; + ] + ~doc_tags:[Memory] + () (* When HA is enabled we need to prevent memory *) (* changes which will break the recovery plan. *) let vm_set_memory_static_max = call ~flags:[`Session] - ~in_product_since:rel_orlando - ~name:"set_memory_static_max" - ~doc:"Set the value of the memory_static_max field" - ~errs:[Api_errors.ha_operation_would_break_failover_plan] - ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[ - Ref _vm, "self", "The VM to modify"; - Int, "value", "The new value of memory_static_max"; - ] - ~doc_tags:[Memory] - () + ~in_product_since:rel_orlando + ~name:"set_memory_static_max" + ~doc:"Set the value of the memory_static_max field" + ~errs:[Api_errors.ha_operation_would_break_failover_plan] + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[ + Ref _vm, "self", "The VM to modify"; + Int, "value", "The new value of memory_static_max"; + ] + ~doc_tags:[Memory] + () let vm_set_memory_static_min = call ~flags:[`Session] - ~in_product_since:rel_midnight_ride - ~name:"set_memory_static_min" - ~doc:"Set the value of the memory_static_min field" - ~errs:[] - ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[ - Ref _vm, "self", "The VM to modify"; - Int, "value", "The new value of memory_static_min"; - ] - ~doc_tags:[Memory] - () + ~in_product_since:rel_midnight_ride + ~name:"set_memory_static_min" + ~doc:"Set the value of the memory_static_min field" + ~errs:[] + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[ + Ref _vm, "self", "The VM to modify"; + Int, "value", "The new value of 
memory_static_min"; + ] + ~doc_tags:[Memory] + () let vm_set_memory_static_range = call - ~name:"set_memory_static_range" - ~in_product_since:rel_midnight_ride - ~doc:"Set the static (ie boot-time) range of virtual memory that the VM is \ - allowed to use." - ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[Ref _vm, "self", "The VM"; - Int, "min", "The new minimum value"; - Int, "max", "The new maximum value"; - ] - ~doc_tags:[Memory] - () + ~name:"set_memory_static_range" + ~in_product_since:rel_midnight_ride + ~doc:"Set the static (ie boot-time) range of virtual memory that the VM is \ + allowed to use." + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[Ref _vm, "self", "The VM"; + Int, "min", "The new minimum value"; + Int, "max", "The new maximum value"; + ] + ~doc_tags:[Memory] + () let vm_set_memory_limits = call - ~name:"set_memory_limits" - ~in_product_since:rel_midnight_ride - ~doc:"Set the memory limits of this VM." - ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[Ref _vm, "self", "The VM"; - Int, "static_min", "The new value of memory_static_min."; - Int, "static_max", "The new value of memory_static_max."; - Int, "dynamic_min", "The new value of memory_dynamic_min."; - Int, "dynamic_max", "The new value of memory_dynamic_max."; - ] - ~doc_tags:[Memory] - () + ~name:"set_memory_limits" + ~in_product_since:rel_midnight_ride + ~doc:"Set the memory limits of this VM." + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[Ref _vm, "self", "The VM"; + Int, "static_min", "The new value of memory_static_min."; + Int, "static_max", "The new value of memory_static_max."; + Int, "dynamic_min", "The new value of memory_dynamic_min."; + Int, "dynamic_max", "The new value of memory_dynamic_max."; + ] + ~doc_tags:[Memory] + () let vm_set_memory = call - ~name:"set_memory" - ~in_product_since:rel_ely - ~doc:"Set the memory allocation of this VM. Sets all of memory_static_max, memory_dynamic_min, and memory_dynamic_max to the given value, and leaves memory_static_min untouched." 
- ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[ - Ref _vm, "self", "The VM"; - Int, "value", "The new memory allocation (bytes)."; - ] - ~doc_tags:[Memory] - () + ~name:"set_memory" + ~in_product_since:rel_ely + ~doc:"Set the memory allocation of this VM. Sets all of memory_static_max, memory_dynamic_min, and memory_dynamic_max to the given value, and leaves memory_static_min untouched." + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[ + Ref _vm, "self", "The VM"; + Int, "value", "The new memory allocation (bytes)."; + ] + ~doc_tags:[Memory] + () let vm_set_memory_target_live = call - ~name:"set_memory_target_live" - ~in_product_since:rel_rio - ~internal_deprecated_since:rel_midnight_ride - ~doc:"Set the memory target for a running VM" - ~allowed_roles:_R_VM_POWER_ADMIN - ~params:[ - Ref _vm, "self", "The VM"; - Int, "target", "The target in bytes"; - ] - ~doc_tags:[Memory] - () + ~name:"set_memory_target_live" + ~in_product_since:rel_rio + ~internal_deprecated_since:rel_midnight_ride + ~doc:"Set the memory target for a running VM" + ~allowed_roles:_R_VM_POWER_ADMIN + ~params:[ + Ref _vm, "self", "The VM"; + Int, "target", "The target in bytes"; + ] + ~doc_tags:[Memory] + () let vm_wait_memory_target_live = call - ~name:"wait_memory_target_live" - ~in_product_since:rel_orlando - ~internal_deprecated_since:rel_midnight_ride - ~doc:"Wait for a running VM to reach its current memory target" - ~allowed_roles:_R_READ_ONLY - ~params:[ - Ref _vm, "self", "The VM"; - ] - ~doc_tags:[Memory] - () + ~name:"wait_memory_target_live" + ~in_product_since:rel_orlando + ~internal_deprecated_since:rel_midnight_ride + ~doc:"Wait for a running VM to reach its current memory target" + ~allowed_roles:_R_READ_ONLY + ~params:[ + Ref _vm, "self", "The VM"; + ] + ~doc_tags:[Memory] + () let vm_get_cooperative = call - ~name:"get_cooperative" - ~in_product_since:rel_midnight_ride - ~internal_deprecated_since:rel_tampa - ~doc:"Return true if the VM is currently 'co-operative' i.e. 
is expected to reach a balloon target and actually has done" - ~params:[ - Ref _vm, "self", "The VM"; - ] - ~result:(Bool, "true if the VM is currently 'co-operative'; false otherwise") - ~allowed_roles:_R_READ_ONLY - ~doc_tags:[Memory] - () + ~name:"get_cooperative" + ~in_product_since:rel_midnight_ride + ~internal_deprecated_since:rel_tampa + ~doc:"Return true if the VM is currently 'co-operative' i.e. is expected to reach a balloon target and actually has done" + ~params:[ + Ref _vm, "self", "The VM"; + ] + ~result:(Bool, "true if the VM is currently 'co-operative'; false otherwise") + ~allowed_roles:_R_READ_ONLY + ~doc_tags:[Memory] + () let vm_query_services = call - ~name:"query_services" - ~in_product_since:rel_tampa - ~doc:"Query the system services advertised by this VM and register them. This can only be applied to a system domain." - ~params:[ - Ref _vm, "self", "The VM"; - ] - ~result:(Map(String, String), "map of service type to name") - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"query_services" + ~in_product_since:rel_tampa + ~doc:"Query the system services advertised by this VM and register them. This can only be applied to a system domain." + ~params:[ + Ref _vm, "self", "The VM"; + ] + ~result:(Map(String, String), "map of service type to name") + ~allowed_roles:_R_POOL_ADMIN + () (* VM.StartOn *) let vm_start_on = call - ~in_product_since:rel_rio - ~name:"start_on" - ~doc:"Start the specified VM on a particular host. This function can only be called with the VM is in the Halted State." - ~in_oss_since:None - ~params:[Ref _vm, "vm", "The VM to start"; - Ref _host, "host", "The Host on which to start the VM"; - Bool, "start_paused", "Instantiate VM in paused state if set to true."; - Bool, "force", "Attempt to force the VM to start. If this flag is false then the VM may fail pre-boot safety checks (e.g. 
if the CPU the VM last booted on looks substantially different to the current one)"; - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.vm_is_template; Api_errors.other_operation_in_progress; - Api_errors.operation_not_allowed; - Api_errors.bootloader_failed; - Api_errors.unknown_bootloader; -] - ~allowed_roles:_R_VM_POWER_ADMIN - () - -(* VM.Pause *) + ~in_product_since:rel_rio + ~name:"start_on" + ~doc:"Start the specified VM on a particular host. This function can only be called with the VM is in the Halted State." + ~in_oss_since:None + ~params:[Ref _vm, "vm", "The VM to start"; + Ref _host, "host", "The Host on which to start the VM"; + Bool, "start_paused", "Instantiate VM in paused state if set to true."; + Bool, "force", "Attempt to force the VM to start. If this flag is false then the VM may fail pre-boot safety checks (e.g. if the CPU the VM last booted on looks substantially different to the current one)"; + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.vm_is_template; Api_errors.other_operation_in_progress; + Api_errors.operation_not_allowed; + Api_errors.bootloader_failed; + Api_errors.unknown_bootloader; + ] + ~allowed_roles:_R_VM_POWER_ADMIN + () + +(* VM.Pause *) let vm_pause = call - ~in_product_since:rel_rio - ~name:"pause" - ~doc:"Pause the specified VM. This can only be called when the specified VM is in the Running state." - ~params:[Ref _vm, "vm", "The VM to pause"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"pause" + ~doc:"Pause the specified VM. This can only be called when the specified VM is in the Running state." 
+ ~params:[Ref _vm, "vm", "The VM to pause"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.UnPause *) let vm_unpause = call - ~in_product_since:rel_rio - ~name:"unpause" - ~doc:"Resume the specified VM. This can only be called when the specified VM is in the Paused state." - ~params:[Ref _vm, "vm", "The VM to unpause"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"unpause" + ~doc:"Resume the specified VM. This can only be called when the specified VM is in the Paused state." + ~params:[Ref _vm, "vm", "The VM to unpause"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.CleanShutdown *) let vm_cleanShutdown = call - ~in_product_since:rel_rio - ~name:"clean_shutdown" - ~doc:"Attempt to cleanly shutdown the specified VM. (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state." - ~params:[Ref _vm, "vm", "The VM to shutdown"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"clean_shutdown" + ~doc:"Attempt to cleanly shutdown the specified VM. (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state." 
+ ~params:[Ref _vm, "vm", "The VM to shutdown"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.CleanReboot *) let vm_cleanReboot = call - ~in_product_since:rel_rio - ~name:"clean_reboot" - ~doc:"Attempt to cleanly shutdown the specified VM (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state." - ~params:[Ref _vm, "vm", "The VM to shutdown"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"clean_reboot" + ~doc:"Attempt to cleanly shutdown the specified VM (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state." + ~params:[Ref _vm, "vm", "The VM to shutdown"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.HardShutdown *) let vm_hardShutdown = call - ~in_product_since:rel_rio - ~name:"hard_shutdown" - ~doc:"Stop executing the specified VM without attempting a clean shutdown." - ~params:[Ref _vm, "vm", "The VM to destroy"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"hard_shutdown" + ~doc:"Stop executing the specified VM without attempting a clean shutdown." 
+ ~params:[Ref _vm, "vm", "The VM to destroy"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.Shutdown *) let vm_shutdown = call - ~in_product_since:rel_clearwater - ~name:"shutdown" - ~doc:"Attempts to first clean shutdown a VM and if it should fail then perform a hard shutdown on it." - ~params:[Ref _vm, "vm", "The VM to shutdown"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_clearwater + ~name:"shutdown" + ~doc:"Attempts to first clean shutdown a VM and if it should fail then perform a hard shutdown on it." + ~params:[Ref _vm, "vm", "The VM to shutdown"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.PowerStateReset *) let vm_stateReset = call - ~in_product_since:rel_rio - ~name:"power_state_reset" - ~doc:"Reset the power-state of the VM to halted in the database only. (Used to recover from slave failures in pooling scenarios by resetting the power-states of VMs running on dead slaves to halted.) This is a potentially dangerous operation; use with care." - ~params:[Ref _vm, "vm", "The VM to reset"] - ~errs:[] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_rio + ~name:"power_state_reset" + ~doc:"Reset the power-state of the VM to halted in the database only. (Used to recover from slave failures in pooling scenarios by resetting the power-states of VMs running on dead slaves to halted.) This is a potentially dangerous operation; use with care." 
+ ~params:[Ref _vm, "vm", "The VM to reset"] + ~errs:[] + ~allowed_roles:_R_POOL_OP + () (* VM.HardReboot *) let vm_hardReboot = call - ~in_product_since:rel_rio - ~name:"hard_reboot" - ~doc:"Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM." - ~params:[Ref _vm, "vm", "The VM to reboot"] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"hard_reboot" + ~doc:"Stop executing the specified VM without attempting a clean shutdown and immediately restart the VM." + ~params:[Ref _vm, "vm", "The VM to reboot"] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () let vm_hardReboot_internal = call - ~in_product_since:rel_orlando - ~name:"hard_reboot_internal" - ~doc:"Internal function which immediately restarts the specified VM." - ~params:[Ref _vm, "vm", "The VM to reboot"] - ~pool_internal:true - ~hide_from_docs:true - ~internal_deprecated_since:rel_midnight_ride - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () - + ~in_product_since:rel_orlando + ~name:"hard_reboot_internal" + ~doc:"Internal function which immediately restarts the specified VM." + ~params:[Ref _vm, "vm", "The VM to reboot"] + ~pool_internal:true + ~hide_from_docs:true + ~internal_deprecated_since:rel_midnight_ride + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () + (* VM.Hibernate *) - + let vm_suspend = call - ~in_product_since:rel_rio - ~name:"suspend" - ~doc:"Suspend the specified VM to disk. This can only be called when the specified VM is in the Running state." 
- ~params:[Ref _vm, "vm", "The VM to suspend"] - (* Bool, "live", "If set to true, perform a live hibernate; otherwise suspend the VM before commencing hibernate" *) - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; - Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~in_product_since:rel_rio + ~name:"suspend" + ~doc:"Suspend the specified VM to disk. This can only be called when the specified VM is in the Running state." + ~params:[Ref _vm, "vm", "The VM to suspend"] + (* Bool, "live", "If set to true, perform a live hibernate; otherwise suspend the VM before commencing hibernate" *) + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.operation_not_allowed; + Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () (* VM.clsp -- clone suspended, undocumented API for VMLogix *) let csvm = call - ~name:"csvm" - ~in_product_since:rel_rio - ~doc:"undocumented. internal use only. This call is deprecated." - ~params:[Ref _vm, "vm", ""] - ~result:(Ref _vm, "") - ~errs:(errnames_of_call vm_clone) - ~hide_from_docs:true - ~internal_deprecated_since:rel_miami - ~allowed_roles:_R_VM_ADMIN - () - + ~name:"csvm" + ~in_product_since:rel_rio + ~doc:"undocumented. internal use only. This call is deprecated." + ~params:[Ref _vm, "vm", ""] + ~result:(Ref _vm, "") + ~errs:(errnames_of_call vm_clone) + ~hide_from_docs:true + ~internal_deprecated_since:rel_miami + ~allowed_roles:_R_VM_ADMIN + () + (* VM.UnHibernate *) - + let vm_resume = call - ~name:"resume" - ~in_product_since:rel_rio - ~doc:"Awaken the specified VM and resume it. This can only be called when the specified VM is in the Suspended state." - ~params:[Ref _vm, "vm", "The VM to resume"; - Bool, "start_paused", "Resume VM in paused state if set to true."; - Bool, "force", "Attempt to force the VM to resume. If this flag is false then the VM may fail pre-resume safety checks (e.g. 
if the CPU the VM was running on looks substantially different to the current one)"; - ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] - ~allowed_roles:_R_VM_OP - () + ~name:"resume" + ~in_product_since:rel_rio + ~doc:"Awaken the specified VM and resume it. This can only be called when the specified VM is in the Suspended state." + ~params:[Ref _vm, "vm", "The VM to resume"; + Bool, "start_paused", "Resume VM in paused state if set to true."; + Bool, "force", "Attempt to force the VM to resume. If this flag is false then the VM may fail pre-resume safety checks (e.g. if the CPU the VM was running on looks substantially different to the current one)"; + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] + ~allowed_roles:_R_VM_OP + () let vm_resume_on = call - ~name:"resume_on" - ~in_product_since:rel_rio - ~doc:"Awaken the specified VM and resume it on a particular Host. This can only be called when the specified VM is in the Suspended state." - ~in_oss_since:None - ~params:[Ref _vm, "vm", "The VM to resume"; - Ref _host, "host", "The Host on which to resume the VM"; - Bool, "start_paused", "Resume VM in paused state if set to true."; - Bool, "force", "Attempt to force the VM to resume. If this flag is false then the VM may fail pre-resume safety checks (e.g. if the CPU the VM was running on looks substantially different to the current one)"; -] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"resume_on" + ~in_product_since:rel_rio + ~doc:"Awaken the specified VM and resume it on a particular Host. This can only be called when the specified VM is in the Suspended state." 
+ ~in_oss_since:None + ~params:[Ref _vm, "vm", "The VM to resume"; + Ref _host, "host", "The Host on which to resume the VM"; + Bool, "start_paused", "Resume VM in paused state if set to true."; + Bool, "force", "Attempt to force the VM to resume. If this flag is false then the VM may fail pre-resume safety checks (e.g. if the CPU the VM was running on looks substantially different to the current one)"; + ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.operation_not_allowed; Api_errors.vm_is_template] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_pool_migrate = call - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"pool_migrate" - ~doc:"Migrate a VM to another Host." - ~params:[Ref _vm, "vm", "The VM to migrate"; - Ref _host, "host", "The target host"; - Map(String, String), "options", "Extra configuration operations" ] - ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.vm_is_template; Api_errors.operation_not_allowed; Api_errors.vm_migrate_failed] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"pool_migrate" + ~doc:"Migrate a VM to another Host." + ~params:[Ref _vm, "vm", "The VM to migrate"; + Ref _host, "host", "The target host"; + Map(String, String), "options", "Extra configuration operations" ] + ~errs:[Api_errors.vm_bad_power_state; Api_errors.other_operation_in_progress; Api_errors.vm_is_template; Api_errors.operation_not_allowed; Api_errors.vm_migrate_failed] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_pool_migrate_complete = call - ~in_oss_since:None - ~in_product_since:rel_tampa - ~name:"pool_migrate_complete" - ~doc:"Tell a destination host that migration is complete." 
- ~params:[Ref _vm, "vm", "The VM which has finished migrating"; - Ref _host, "host", "The target host" ] - ~hide_from_docs:true - ~pool_internal:false (* needed for cross-pool migrate too *) - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~in_oss_since:None + ~in_product_since:rel_tampa + ~name:"pool_migrate_complete" + ~doc:"Tell a destination host that migration is complete." + ~params:[Ref _vm, "vm", "The VM which has finished migrating"; + Ref _host, "host", "The target host" ] + ~hide_from_docs:true + ~pool_internal:false (* needed for cross-pool migrate too *) + ~allowed_roles:_R_VM_POWER_ADMIN + () let host_migrate_receive = call - ~in_oss_since:None - ~in_product_since:rel_tampa - ~name:"migrate_receive" - ~doc:"Prepare to receive a VM, returning a token which can be passed to VM.migrate." - ~params:[Ref _host, "host", "The target host"; - Ref _network, "network", "The network through which migration traffic should be received."; - Map(String, String), "options", "Extra configuration operations" ] - ~result:(Map(String,String), "A value which should be passed to VM.migrate") - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~in_oss_since:None + ~in_product_since:rel_tampa + ~name:"migrate_receive" + ~doc:"Prepare to receive a VM, returning a token which can be passed to VM.migrate." 
+ ~params:[Ref _host, "host", "The target host"; + Ref _network, "network", "The network through which migration traffic should be received."; + Map(String, String), "options", "Extra configuration operations" ] + ~result:(Map(String,String), "A value which should be passed to VM.migrate") + ~allowed_roles:_R_VM_POWER_ADMIN + () let set_vcpus_number_live = call - ~name:"set_VCPUs_number_live" - ~in_product_since:rel_rio - ~doc:"Set the number of VCPUs for a running VM" - ~params:[Ref _vm, "self", "The VM"; - Int, "nvcpu", "The number of VCPUs"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_VCPUs_number_live" + ~in_product_since:rel_rio + ~doc:"Set the number of VCPUs for a running VM" + ~params:[Ref _vm, "self", "The VM"; + Int, "nvcpu", "The number of VCPUs"] + ~allowed_roles:_R_VM_ADMIN + () let vm_set_VCPUs_max = call ~flags:[`Session] - ~name:"set_VCPUs_max" - ~in_product_since:rel_midnight_ride - ~doc:"Set the maximum number of VCPUs for a halted VM" - ~params:[Ref _vm, "self", "The VM"; - Int, "value", "The new maximum number of VCPUs"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_VCPUs_max" + ~in_product_since:rel_midnight_ride + ~doc:"Set the maximum number of VCPUs for a halted VM" + ~params:[Ref _vm, "self", "The VM"; + Int, "value", "The new maximum number of VCPUs"] + ~allowed_roles:_R_VM_ADMIN + () let vm_set_VCPUs_at_startup = call ~flags:[`Session] - ~name:"set_VCPUs_at_startup" - ~in_product_since:rel_midnight_ride - ~doc:"Set the number of startup VCPUs for a halted VM" - ~params:[Ref _vm, "self", "The VM"; - Int, "value", "The new maximum number of VCPUs"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_VCPUs_at_startup" + ~in_product_since:rel_midnight_ride + ~doc:"Set the number of startup VCPUs for a halted VM" + ~params:[Ref _vm, "self", "The VM"; + Int, "value", "The new maximum number of VCPUs"] + ~allowed_roles:_R_VM_ADMIN + () let vm_set_HVM_shadow_multiplier = call ~flags:[`Session] - ~name:"set_HVM_shadow_multiplier" - 
~in_product_since:rel_midnight_ride - ~doc:"Set the shadow memory multiplier on a halted VM" - ~params:[Ref _vm, "self", "The VM"; - Float, "value", "The new shadow memory multiplier to set"] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"set_HVM_shadow_multiplier" + ~in_product_since:rel_midnight_ride + ~doc:"Set the shadow memory multiplier on a halted VM" + ~params:[Ref _vm, "self", "The VM"; + Float, "value", "The new shadow memory multiplier to set"] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_set_shadow_multiplier_live = call - ~name:"set_shadow_multiplier_live" - ~in_product_since:rel_rio - ~doc:"Set the shadow memory multiplier on a running VM" - ~params:[Ref _vm, "self", "The VM"; - Float, "multiplier", "The new shadow memory multiplier to set"] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"set_shadow_multiplier_live" + ~in_product_since:rel_rio + ~doc:"Set the shadow memory multiplier on a running VM" + ~params:[Ref _vm, "self", "The VM"; + Float, "multiplier", "The new shadow memory multiplier to set"] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_add_to_VCPUs_params_live = call - ~name:"add_to_VCPUs_params_live" - ~in_product_since:rel_rio - ~doc:"Add the given key-value pair to VM.VCPUs_params, and apply that value on the running VM" - ~params:[Ref _vm, "self", "The VM"; - String, "key", "The key"; - String, "value", "The value"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"add_to_VCPUs_params_live" + ~in_product_since:rel_rio + ~doc:"Add the given key-value pair to VM.VCPUs_params, and apply that value on the running VM" + ~params:[Ref _vm, "self", "The VM"; + String, "key", "The key"; + String, "value", "The value"] + ~allowed_roles:_R_VM_ADMIN + () let vm_send_sysrq = call - ~name:"send_sysrq" - ~in_product_since:rel_rio - ~doc:"Send the given key as a sysrq to this VM. The key is specified as a single character (a String of length 1). This can only be called when the specified VM is in the Running state." 
- ~params:[Ref _vm, "vm", "The VM"; - String, "key", "The key to send"] - ~errs:[Api_errors.vm_bad_power_state] - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"send_sysrq" + ~in_product_since:rel_rio + ~doc:"Send the given key as a sysrq to this VM. The key is specified as a single character (a String of length 1). This can only be called when the specified VM is in the Running state." + ~params:[Ref _vm, "vm", "The VM"; + String, "key", "The key to send"] + ~errs:[Api_errors.vm_bad_power_state] + ~allowed_roles:_R_POOL_ADMIN + () let vm_send_trigger = call - ~name:"send_trigger" - ~in_product_since:rel_rio - ~doc:"Send the named trigger to this VM. This can only be called when the specified VM is in the Running state." - ~params:[Ref _vm, "vm", "The VM"; - String, "trigger", "The trigger to send"] - ~errs:[Api_errors.vm_bad_power_state] - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"send_trigger" + ~in_product_since:rel_rio + ~doc:"Send the named trigger to this VM. This can only be called when the specified VM is in the Running state." + ~params:[Ref _vm, "vm", "The VM"; + String, "trigger", "The trigger to send"] + ~errs:[Api_errors.vm_bad_power_state] + ~allowed_roles:_R_POOL_ADMIN + () let vm_migrate_send = call - ~name: "migrate_send" - ~in_product_since:rel_tampa - ~doc: "Migrate the VM to another host. This can only be called when the specified VM is in the Running state." 
- ~params:[Ref _vm, "vm", "The VM"; - Map(String,String), "dest", "The result of a Host.migrate_receive call."; - Bool, "live", "Live migration"; - Map (Ref _vdi, Ref _sr), "vdi_map", "Map of source VDI to destination SR"; - Map (Ref _vif, Ref _network), "vif_map", "Map of source VIF to destination network"; - Map (String, String), "options", "Other parameters"] - ~result:(Ref _vm, "The reference of the newly created VM in the destination pool") - ~errs:[Api_errors.vm_bad_power_state; Api_errors.license_restriction] - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name: "migrate_send" + ~in_product_since:rel_tampa + ~doc: "Migrate the VM to another host. This can only be called when the specified VM is in the Running state." + ~params:[Ref _vm, "vm", "The VM"; + Map(String,String), "dest", "The result of a Host.migrate_receive call."; + Bool, "live", "Live migration"; + Map (Ref _vdi, Ref _sr), "vdi_map", "Map of source VDI to destination SR"; + Map (Ref _vif, Ref _network), "vif_map", "Map of source VIF to destination network"; + Map (String, String), "options", "Other parameters"] + ~result:(Ref _vm, "The reference of the newly created VM in the destination pool") + ~errs:[Api_errors.vm_bad_power_state; Api_errors.license_restriction] + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_assert_can_migrate = call - ~name:"assert_can_migrate" - ~in_product_since:rel_tampa - ~doc:"Assert whether a VM can be migrated to the specified destination." 
- ~params:[ - Ref _vm, "vm", "The VM"; - Map(String,String), "dest", "The result of a VM.migrate_receive call."; - Bool, "live", "Live migration"; - Map (Ref _vdi, Ref _sr), "vdi_map", "Map of source VDI to destination SR"; - Map (Ref _vif, Ref _network), "vif_map", "Map of source VIF to destination network"; - Map (String, String), "options", "Other parameters" ] - ~allowed_roles:_R_VM_POWER_ADMIN - ~errs:[Api_errors.license_restriction] - () + ~name:"assert_can_migrate" + ~in_product_since:rel_tampa + ~doc:"Assert whether a VM can be migrated to the specified destination." + ~params:[ + Ref _vm, "vm", "The VM"; + Map(String,String), "dest", "The result of a VM.migrate_receive call."; + Bool, "live", "Live migration"; + Map (Ref _vdi, Ref _sr), "vdi_map", "Map of source VDI to destination SR"; + Map (Ref _vif, Ref _network), "vif_map", "Map of source VIF to destination network"; + Map (String, String), "options", "Other parameters" ] + ~allowed_roles:_R_VM_POWER_ADMIN + ~errs:[Api_errors.license_restriction] + () let vm_s3_suspend = call - ~name: "s3_suspend" - ~in_product_since:rel_midnight_ride - ~doc:"Try to put the VM into ACPI S3 state" - ~params:[Ref _vm, "vm", "The VM"] - ~hide_from_docs:true - ~allowed_roles:_R_VM_OP - () + ~name: "s3_suspend" + ~in_product_since:rel_midnight_ride + ~doc:"Try to put the VM into ACPI S3 state" + ~params:[Ref _vm, "vm", "The VM"] + ~hide_from_docs:true + ~allowed_roles:_R_VM_OP + () + +let vm_s3_resume = call + ~name: "s3_resume" + ~in_product_since:rel_midnight_ride + ~doc:"Try to resume the VM from ACPI S3 state" + ~params:[Ref _vm, "vm", "The VM"] + ~hide_from_docs:true + ~allowed_roles:_R_VM_OP + () -let vm_s3_resume = call - ~name: "s3_resume" - ~in_product_since:rel_midnight_ride - ~doc:"Try to resume the VM from ACPI S3 state" - ~params:[Ref _vm, "vm", "The VM"] - ~hide_from_docs:true - ~allowed_roles:_R_VM_OP - () - let vm_create_new_blob = call - ~name: "create_new_blob" - ~in_product_since:rel_orlando - 
~doc:"Create a placeholder for a named binary blob of data that is associated with this VM" - ~versioned_params: - [{param_type=Ref _vm; param_name="vm"; param_doc="The VM"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} - ] - ~result:(Ref _blob, "The reference of the blob, needed for populating its data") - ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name: "create_new_blob" + ~in_product_since:rel_orlando + ~doc:"Create a placeholder for a named binary blob of data that is associated with this VM" + ~versioned_params: + [{param_type=Ref _vm; param_name="vm"; param_doc="The VM"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. 
Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} + ] + ~result:(Ref _blob, "The reference of the blob, needed for populating its data") + ~allowed_roles:_R_VM_POWER_ADMIN + () let vm_copy_bios_strings = call - ~name: "copy_bios_strings" - ~in_product_since:rel_midnight_ride - ~doc:"Copy the BIOS strings from the given host to this VM" - ~params:[Ref _vm, "vm", "The VM to modify"; - Ref _host, "host", "The host to copy the BIOS strings from";] - ~allowed_roles:_R_VM_ADMIN - () + ~name: "copy_bios_strings" + ~in_product_since:rel_midnight_ride + ~doc:"Copy the BIOS strings from the given host to this VM" + ~params:[Ref _vm, "vm", "The VM to modify"; + Ref _host, "host", "The host to copy the BIOS strings from";] + ~allowed_roles:_R_VM_ADMIN + () let vm_set_protection_policy = call - ~name:"set_protection_policy" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Set the value of the protection_policy field" - ~params:[Ref _vm, "self", "The VM"; - Ref _vmpp, "value", "The value"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_protection_policy" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Set the value of the protection_policy field" + ~params:[Ref _vm, "self", "The VM"; + Ref _vmpp, "value", "The value"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () let vm_set_start_delay = call - ~name:"set_start_delay" - ~in_product_since:rel_boston - ~doc:"Set this VM's start delay in seconds" - ~params:[Ref _vm, "self", "The VM"; - Int, "value", "This VM's start delay in seconds"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_start_delay" + ~in_product_since:rel_boston + ~doc:"Set this VM's start delay in seconds" + ~params:[Ref _vm, "self", "The VM"; + Int, "value", "This VM's start delay in seconds"] + 
~allowed_roles:_R_POOL_OP + () let vm_set_shutdown_delay = call - ~name:"set_shutdown_delay" - ~in_product_since:rel_boston - ~doc:"Set this VM's shutdown delay in seconds" - ~params:[Ref _vm, "self", "The VM"; - Int, "value", "This VM's shutdown delay in seconds"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_shutdown_delay" + ~in_product_since:rel_boston + ~doc:"Set this VM's shutdown delay in seconds" + ~params:[Ref _vm, "self", "The VM"; + Int, "value", "This VM's shutdown delay in seconds"] + ~allowed_roles:_R_POOL_OP + () let vm_set_order = call - ~name:"set_order" - ~in_product_since:rel_boston - ~doc:"Set this VM's boot order" - ~params:[Ref _vm, "self", "The VM"; - Int, "value", "This VM's boot order"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_order" + ~in_product_since:rel_boston + ~doc:"Set this VM's boot order" + ~params:[Ref _vm, "self", "The VM"; + Int, "value", "This VM's boot order"] + ~allowed_roles:_R_POOL_OP + () let vm_set_suspend_VDI = call - ~name:"set_suspend_VDI" - ~in_product_since:rel_boston - ~doc:"Set this VM's suspend VDI, which must be indentical to its current one" - ~params:[Ref _vm, "self", "The VM"; - Ref _vdi, "value", "The suspend VDI uuid"] - ~allowed_roles:_R_POOL_OP - () - + ~name:"set_suspend_VDI" + ~in_product_since:rel_boston + ~doc:"Set this VM's suspend VDI, which must be indentical to its current one" + ~params:[Ref _vm, "self", "The VM"; + Ref _vdi, "value", "The suspend VDI uuid"] + ~allowed_roles:_R_POOL_OP + () + let vm_assert_can_be_recovered = call - ~name:"assert_can_be_recovered" - ~in_product_since:rel_boston - ~doc:"Assert whether all SRs required to recover this VM are available." 
- ~params:[Ref _vm, "self", "The VM to recover"; - Ref _session, "session_to", "The session to which the VM is to be recovered."] - ~errs:[Api_errors.vm_is_part_of_an_appliance; Api_errors.vm_requires_sr] - ~allowed_roles:_R_READ_ONLY - () + ~name:"assert_can_be_recovered" + ~in_product_since:rel_boston + ~doc:"Assert whether all SRs required to recover this VM are available." + ~params:[Ref _vm, "self", "The VM to recover"; + Ref _session, "session_to", "The session to which the VM is to be recovered."] + ~errs:[Api_errors.vm_is_part_of_an_appliance; Api_errors.vm_requires_sr] + ~allowed_roles:_R_READ_ONLY + () let vm_get_SRs_required_for_recovery = call - ~name:"get_SRs_required_for_recovery" - ~in_product_since:rel_creedence - ~doc:"List all the SR's that are required for the VM to be recovered" - ~params:[Ref _vm , "self" , "The VM for which the SRs have to be recovered"; - Ref _session , "session_to" , "The session to which the SRs of the VM have to be recovered."] - ~result:(Set(Ref _sr),"refs for SRs required to recover the VM") - ~errs:[] - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_SRs_required_for_recovery" + ~in_product_since:rel_creedence + ~doc:"List all the SR's that are required for the VM to be recovered" + ~params:[Ref _vm , "self" , "The VM for which the SRs have to be recovered"; + Ref _session , "session_to" , "The session to which the SRs of the VM have to be recovered."] + ~result:(Set(Ref _sr),"refs for SRs required to recover the VM") + ~errs:[] + ~allowed_roles:_R_READ_ONLY + () let vm_recover = call - ~name:"recover" - ~in_product_since:rel_boston - ~doc:"Recover the VM" - ~params:[Ref _vm, "self", "The VM to recover"; - Ref _session, "session_to", "The session to which the VM is to be recovered."; - Bool, "force", "Whether the VM should replace newer versions of itself."] - ~allowed_roles:_R_READ_ONLY - () + ~name:"recover" + ~in_product_since:rel_boston + ~doc:"Recover the VM" + ~params:[Ref _vm, "self", "The VM to recover"; + Ref 
_session, "session_to", "The session to which the VM is to be recovered."; + Bool, "force", "Whether the VM should replace newer versions of itself."] + ~allowed_roles:_R_READ_ONLY + () let vm_set_appliance = call - ~name:"set_appliance" - ~in_product_since:rel_boston - ~doc:"Assign this VM to an appliance." - ~params:[Ref _vm, "self", "The VM to assign to an appliance."; - Ref _vm_appliance, "value", "The appliance to which this VM should be assigned."] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_appliance" + ~in_product_since:rel_boston + ~doc:"Assign this VM to an appliance." + ~params:[Ref _vm, "self", "The VM to assign to an appliance."; + Ref _vm_appliance, "value", "The appliance to which this VM should be assigned."] + ~allowed_roles:_R_POOL_OP + () let vm_call_plugin = call - ~name:"call_plugin" - ~in_product_since:rel_cream - ~doc:"Call a XenAPI plugin on this vm" - ~params:[Ref _vm, "vm", "The vm"; - String, "plugin", "The name of the plugin"; - String, "fn", "The name of the function within the plugin"; - Map(String, String), "args", "Arguments for the function"] - ~result:(String, "Result from the plugin") - ~allowed_roles:_R_VM_OP - () + ~name:"call_plugin" + ~in_product_since:rel_cream + ~doc:"Call a XenAPI plugin on this vm" + ~params:[Ref _vm, "vm", "The vm"; + String, "plugin", "The name of the plugin"; + String, "fn", "The name of the function within the plugin"; + Map(String, String), "args", "Arguments for the function"] + ~result:(String, "Result from the plugin") + ~allowed_roles:_R_VM_OP + () let vm_set_has_vendor_device = call - ~name:"set_has_vendor_device" - ~in_product_since:rel_dundee - ~doc:"Controls whether, when the VM starts in HVM mode, its virtual hardware will include the emulated PCI device for which drivers may be available through Windows Update. Usually this should never be changed on a VM on which Windows has been installed: changing it on such a VM is likely to lead to a crash on next start." 
- ~params:[Ref _vm, "self", "The VM on which to set this flag"; - Bool, "value", "True to provide the vendor PCI device."] - ~allowed_roles:_R_VM_ADMIN - ~doc_tags:[Windows] - () + ~name:"set_has_vendor_device" + ~in_product_since:rel_dundee + ~doc:"Controls whether, when the VM starts in HVM mode, its virtual hardware will include the emulated PCI device for which drivers may be available through Windows Update. Usually this should never be changed on a VM on which Windows has been installed: changing it on such a VM is likely to lead to a crash on next start." + ~params:[Ref _vm, "self", "The VM on which to set this flag"; + Bool, "value", "True to provide the vendor PCI device."] + ~allowed_roles:_R_VM_ADMIN + ~doc_tags:[Windows] + () let vm_import = call - ~name:"import" - ~in_product_since:rel_dundee - ~doc:"Import an XVA from a URI" - ~params:[String, "url", "The URL of the XVA file"; - Ref _sr, "sr", "The destination SR for the disks"; - Bool, "full_restore", "Perform a full restore"; - Bool, "force", "Force the import" - ] - ~result:(Set(Ref _vm), "Imported VM reference") - ~allowed_roles:_R_POOL_OP - () + ~name:"import" + ~in_product_since:rel_dundee + ~doc:"Import an XVA from a URI" + ~params:[String, "url", "The URL of the XVA file"; + Ref _sr, "sr", "The destination SR for the disks"; + Bool, "full_restore", "Perform a full restore"; + Bool, "force", "Force the import" + ] + ~result:(Set(Ref _vm), "Imported VM reference") + ~allowed_roles:_R_POOL_OP + () (* ------------------------------------------------------------------------------------------------------------ Host Management ------------------------------------------------------------------------------------------------------------ *) let host_ha_disable_failover_decisions = call - ~in_product_since:rel_orlando - ~name:"ha_disable_failover_decisions" - ~doc:"Prevents future failover decisions happening on this node. 
This function should only be used as part of a controlled shutdown of the HA system." - ~params:[Ref _host, "host", "The Host to disable failover decisions for"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_disable_failover_decisions" + ~doc:"Prevents future failover decisions happening on this node. This function should only be used as part of a controlled shutdown of the HA system." + ~params:[Ref _host, "host", "The Host to disable failover decisions for"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_ha_disarm_fencing = call - ~in_product_since:rel_orlando - ~name:"ha_disarm_fencing" - ~doc:"Disarms the fencing function of the HA subsystem. This function is extremely dangerous and should only be used as part of a controlled shutdown of the HA system." - ~params:[Ref _host, "host", "The Host to disarm"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_disarm_fencing" + ~doc:"Disarms the fencing function of the HA subsystem. This function is extremely dangerous and should only be used as part of a controlled shutdown of the HA system." + ~params:[Ref _host, "host", "The Host to disarm"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_ha_stop_daemon = call - ~in_product_since:rel_orlando - ~name:"ha_stop_daemon" - ~doc:"Stops the HA daemon. This function is extremely dangerous and should only be used as part of a controlled shutdown of the HA system." - ~params:[Ref _host, "host", "The Host whose daemon should be stopped"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_stop_daemon" + ~doc:"Stops the HA daemon. This function is extremely dangerous and should only be used as part of a controlled shutdown of the HA system." 
+ ~params:[Ref _host, "host", "The Host whose daemon should be stopped"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_ha_release_resources = call - ~in_product_since:rel_orlando - ~name:"ha_release_resources" - ~doc:"Cleans up any resources on the host associated with this HA instance." - ~params:[Ref _host, "host", "The Host whose resources should be cleaned up"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_release_resources" + ~doc:"Cleans up any resources on the host associated with this HA instance." + ~params:[Ref _host, "host", "The Host whose resources should be cleaned up"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_local_assert_healthy = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"local_assert_healthy" - ~doc:"Returns nothing if this host is healthy, otherwise it throws an error explaining why the host is unhealthy" - ~params:[] - ~pool_internal:true - ~hide_from_docs:true - ~errs:[ Api_errors.host_still_booting; - Api_errors.host_has_no_management_ip; - Api_errors.host_master_cannot_talk_back; - Api_errors.host_unknown_to_master; - Api_errors.host_broken; - Api_errors.license_restriction; - Api_errors.license_does_not_support_pooling; - Api_errors.ha_should_be_fenced; - ] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"local_assert_healthy" + ~doc:"Returns nothing if this host is healthy, otherwise it throws an error explaining why the host is unhealthy" + ~params:[] + ~pool_internal:true + ~hide_from_docs:true + ~errs:[ Api_errors.host_still_booting; + Api_errors.host_has_no_management_ip; + Api_errors.host_master_cannot_talk_back; + Api_errors.host_unknown_to_master; + Api_errors.host_broken; + Api_errors.license_restriction; + Api_errors.license_does_not_support_pooling; + Api_errors.ha_should_be_fenced; + ] + 
~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_preconfigure_ha = call - ~in_product_since:rel_miami - ~name:"preconfigure_ha" - ~doc:"Attach statefiles, generate config files but do not start the xHA daemon." - ~params:[Ref _host, "host", "The Host to modify"; - Set(Ref _vdi), "statefiles", "Set of statefile VDIs to use"; - Ref _vdi, "metadata_vdi", "VDI to use for Pool metadata"; - String, "generation", "UUID identifying this HA instance"; - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"preconfigure_ha" + ~doc:"Attach statefiles, generate config files but do not start the xHA daemon." + ~params:[Ref _host, "host", "The Host to modify"; + Set(Ref _vdi), "statefiles", "Set of statefile VDIs to use"; + Ref _vdi, "metadata_vdi", "VDI to use for Pool metadata"; + String, "generation", "UUID identifying this HA instance"; + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_ha_join_liveset = call - ~in_product_since:rel_orlando - ~name:"ha_join_liveset" - ~doc:"Block until this host joins the liveset." - ~params:[Ref _host, "host", "The Host whose HA daemon to start"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_join_liveset" + ~doc:"Block until this host joins the liveset." + ~params:[Ref _host, "host", "The Host whose HA daemon to start"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_ha_wait_for_shutdown_via_statefile = call - ~in_product_since:rel_orlando - ~name:"ha_wait_for_shutdown_via_statefile" - ~doc:"Block until this host xHA daemon exits after having seen the invalid statefile. 
If the host loses statefile access then throw an exception" - ~params:[Ref _host, "host", "The Host whose HA subsystem to query"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_orlando + ~name:"ha_wait_for_shutdown_via_statefile" + ~doc:"Block until this host xHA daemon exits after having seen the invalid statefile. If the host loses statefile access then throw an exception" + ~params:[Ref _host, "host", "The Host whose HA subsystem to query"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () (* let host_query_ha = call ~flags:[`Session] ~in_product_since:rel_miami ~name:"query_ha" ~doc:"Return the local HA configuration as seen by this host" ~params:[] - ~custom_marshaller:true + ~custom_marshaller:true ~pool_internal:true ~hide_from_docs:true () *) let host_request_backup = call ~flags:[`Session] - ~name:"request_backup" - ~in_product_since:rel_rio - ~doc:"Request this host performs a database backup" - ~params:[Ref _host, "host", "The Host to send the request to"; - Int, "generation", "The generation count of the master's database"; - Bool, "force", "If this is true then the client _has_ to take a backup, otherwise it's just an 'offer'" - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"request_backup" + ~in_product_since:rel_rio + ~doc:"Request this host performs a database backup" + ~params:[Ref _host, "host", "The Host to send the request to"; + Int, "generation", "The generation count of the master's database"; + Bool, "force", "If this is true then the client _has_ to take a backup, otherwise it's just an 'offer'" + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_request_config_file_sync = call ~flags:[`Session] - ~name:"request_config_file_sync" - ~in_product_since:rel_rio - ~doc:"Request this host syncs dom0 config files" - ~params:[Ref _host, "host", "The Host 
to send the request to"; - String, "hash", "The hash of the master's dom0 config files package" - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"request_config_file_sync" + ~in_product_since:rel_rio + ~doc:"Request this host syncs dom0 config files" + ~params:[Ref _host, "host", "The Host to send the request to"; + String, "hash", "The hash of the master's dom0 config files package" + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () (* Since there are no async versions, no tasks are generated (!) this is important otherwise the call would block doing a Db.Task.create *) let host_propose_new_master = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"propose_new_master" - ~doc:"First phase of a two-phase commit protocol to set the new master. If the host has already committed to another configuration or if the proposed new master is not in this node's membership set then the call will return an exception." - ~params:[String, "address", "The address of the Host which is proposed as the new master"; - Bool, "manual", "True if this call is being invoked by the user manually, false if automatic"; - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"propose_new_master" + ~doc:"First phase of a two-phase commit protocol to set the new master. If the host has already committed to another configuration or if the proposed new master is not in this node's membership set then the call will return an exception." 
+ ~params:[String, "address", "The address of the Host which is proposed as the new master"; + Bool, "manual", "True if this call is being invoked by the user manually, false if automatic"; + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_abort_new_master = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"abort_new_master" - ~doc:"Causes the new master transaction to abort" - ~params:[String, "address", "The address of the Host which is proposed as the new master"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"abort_new_master" + ~doc:"Causes the new master transaction to abort" + ~params:[String, "address", "The address of the Host which is proposed as the new master"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_commit_new_master = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"commit_new_master" - ~doc:"Second phase of a two-phase commit protocol to set the new master." - ~params:[String, "address", "The address of the Host which should be committed as the new master"] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"commit_new_master" + ~doc:"Second phase of a two-phase commit protocol to set the new master." + ~params:[String, "address", "The address of the Host which should be committed as the new master"] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_compute_free_memory = call - ~in_product_since:rel_orlando - ~name:"compute_free_memory" - ~doc:"Computes the amount of free memory on the host." 
- ~params:[Ref _host, "host", "The host to send the request to"] - ~pool_internal:false - ~hide_from_docs:false - ~result:(Int, "the amount of free memory on the host.") - ~allowed_roles:_R_READ_ONLY - ~doc_tags:[Memory] - () + ~in_product_since:rel_orlando + ~name:"compute_free_memory" + ~doc:"Computes the amount of free memory on the host." + ~params:[Ref _host, "host", "The host to send the request to"] + ~pool_internal:false + ~hide_from_docs:false + ~result:(Int, "the amount of free memory on the host.") + ~allowed_roles:_R_READ_ONLY + ~doc_tags:[Memory] + () let host_compute_memory_overhead = call - ~in_product_since:rel_midnight_ride - ~name:"compute_memory_overhead" - ~doc:"Computes the virtualization memory overhead of a host." - ~params:[Ref _host, "host", "The host for which to compute the memory overhead"] - ~pool_internal:false - ~hide_from_docs:false - ~result:(Int, "the virtualization memory overhead of the host.") - ~allowed_roles:_R_READ_ONLY - ~doc_tags:[Memory] - () + ~in_product_since:rel_midnight_ride + ~name:"compute_memory_overhead" + ~doc:"Computes the virtualization memory overhead of a host." 
+ ~params:[Ref _host, "host", "The host for which to compute the memory overhead"] + ~pool_internal:false + ~hide_from_docs:false + ~result:(Int, "the virtualization memory overhead of the host.") + ~allowed_roles:_R_READ_ONLY + ~doc_tags:[Memory] + () (* Diagnostics see if host is in emergency mode *) let host_is_in_emergency_mode = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"is_in_emergency_mode" - ~doc:"Diagnostics call to discover if host is in emergency mode" - ~params:[] - ~pool_internal:false - ~hide_from_docs:true - ~result:(Bool, "true if host is in emergency mode") - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_miami + ~name:"is_in_emergency_mode" + ~doc:"Diagnostics call to discover if host is in emergency mode" + ~params:[] + ~pool_internal:false + ~hide_from_docs:true + ~result:(Bool, "true if host is in emergency mode") + ~allowed_roles:_R_READ_ONLY + () (* Signal that the management IP address or hostname has been changed beneath us. *) let host_signal_networking_change = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"signal_networking_change" - ~doc:"Signals that the management IP address or hostname has been changed beneath us." - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~doc_tags:[Networking] - () + ~in_product_since:rel_miami + ~name:"signal_networking_change" + ~doc:"Signals that the management IP address or hostname has been changed beneath us." 
+ ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~doc_tags:[Networking] + () let host_notify = call - ~in_product_since:rel_miami - ~name:"notify" - ~doc:"Notify an event" - ~params:[String, "ty", "type of the notification"; - String, "params", "arguments of the notification (can be empty)"; ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_miami + ~name:"notify" + ~doc:"Notify an event" + ~params:[String, "ty", "type of the notification"; + String, "params", "arguments of the notification (can be empty)"; ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_syslog_reconfigure = call - ~in_product_since:rel_miami - ~name:"syslog_reconfigure" - ~doc:"Re-configure syslog logging" - ~params:[Ref _host, "host", "Tell the host to reread its Host.logging parameters and reconfigure itself accordingly"] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"syslog_reconfigure" + ~doc:"Re-configure syslog logging" + ~params:[Ref _host, "host", "Tell the host to reread its Host.logging parameters and reconfigure itself accordingly"] + ~allowed_roles:_R_POOL_OP + () let host_management_reconfigure = call - ~in_product_since:rel_miami - ~name:"management_reconfigure" - ~doc:"Reconfigure the management network interface" - ~params:[ - Ref _pif, "pif", "reference to a PIF object corresponding to the management interface"; - ] - ~allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - () + ~in_product_since:rel_miami + ~name:"management_reconfigure" + ~doc:"Reconfigure the management network interface" + ~params:[ + Ref _pif, "pif", "reference to a PIF object corresponding to the management interface"; + ] + ~allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + () let host_local_management_reconfigure = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"local_management_reconfigure" - ~doc:"Reconfigure the management 
network interface. Should only be used if Host.management_reconfigure is impossible because the network configuration is broken." - ~params:[ - String, "interface", "name of the interface to use as a management interface"; - ] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"local_management_reconfigure" + ~doc:"Reconfigure the management network interface. Should only be used if Host.management_reconfigure is impossible because the network configuration is broken." + ~params:[ + String, "interface", "name of the interface to use as a management interface"; + ] + ~allowed_roles:_R_POOL_OP + () let host_ha_xapi_healthcheck = call ~flags:[`Session] - ~in_product_since:rel_orlando - ~name:"ha_xapi_healthcheck" - ~doc:"Returns true if xapi appears to be functioning normally." - ~result:(Bool, "true if xapi is functioning normally.") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN - () + ~in_product_since:rel_orlando + ~name:"ha_xapi_healthcheck" + ~doc:"Returns true if xapi appears to be functioning normally." 
+ ~result:(Bool, "true if xapi is functioning normally.") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN + () let host_management_disable = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"management_disable" - ~doc:"Disable the management network interface" - ~params:[] - ~allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - () + ~in_product_since:rel_miami + ~name:"management_disable" + ~doc:"Disable the management network interface" + ~params:[] + ~allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + () let host_get_management_interface = call - ~lifecycle:[Prototyped, rel_tampa, ""] - ~name:"get_management_interface" - ~doc:"Returns the management interface for the specified host" - ~params:[Ref _host, "host", "Which host's management interface is required"] - ~result:(Ref _pif, "The management interface for the host") - ~allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - () + ~lifecycle:[Prototyped, rel_tampa, ""] + ~name:"get_management_interface" + ~doc:"Returns the management interface for the specified host" + ~params:[Ref _host, "host", "Which host's management interface is required"] + ~result:(Ref _pif, "The management interface for the host") + ~allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + () (* Simple host evacuate message for Miami. Not intended for HA *) let host_assert_can_evacuate = call - ~in_product_since:rel_miami - ~name:"assert_can_evacuate" - ~doc:"Check this host can be evacuated." - ~params:[Ref _host, "host", "The host to evacuate"] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"assert_can_evacuate" + ~doc:"Check this host can be evacuated." 
+ ~params:[Ref _host, "host", "The host to evacuate"] + ~allowed_roles:_R_POOL_OP + () (* New Orlando message which aims to make the GUI less brittle (unexpected errors will trigger a VM suspend) and sensitive to HA planning constraints *) let host_get_vms_which_prevent_evacuation = call - ~in_product_since:rel_orlando - ~name:"get_vms_which_prevent_evacuation" - ~doc:"Return a set of VMs which prevent the host being evacuated, with per-VM error codes" - ~params:[Ref _host, "self", "The host to query"] - ~result:(Map(Ref _vm, Set(String)), "VMs which block evacuation together with reasons") - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_orlando + ~name:"get_vms_which_prevent_evacuation" + ~doc:"Return a set of VMs which prevent the host being evacuated, with per-VM error codes" + ~params:[Ref _host, "self", "The host to query"] + ~result:(Map(Ref _vm, Set(String)), "VMs which block evacuation together with reasons") + ~allowed_roles:_R_READ_ONLY + () let host_evacuate = call - ~in_product_since:rel_miami - ~name:"evacuate" - ~doc:"Migrate all VMs off of this host, where possible." - ~params:[Ref _host, "host", "The host to evacuate"] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"evacuate" + ~doc:"Migrate all VMs off of this host, where possible." 
+ ~params:[Ref _host, "host", "The host to evacuate"] + ~allowed_roles:_R_POOL_OP + () let host_get_uncooperative_resident_VMs = call - ~in_product_since:rel_midnight_ride - ~internal_deprecated_since:rel_tampa - ~name:"get_uncooperative_resident_VMs" - ~doc:"Return a set of VMs which are not co-operating with the host's memory control system" - ~params:[Ref _host, "self", "The host to query"] - ~result:((Set(Ref _vm)), "VMs which are not co-operating") - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_midnight_ride + ~internal_deprecated_since:rel_tampa + ~name:"get_uncooperative_resident_VMs" + ~doc:"Return a set of VMs which are not co-operating with the host's memory control system" + ~params:[Ref _host, "self", "The host to query"] + ~result:((Set(Ref _vm)), "VMs which are not co-operating") + ~allowed_roles:_R_READ_ONLY + () let host_get_uncooperative_domains = call - ~in_product_since:rel_midnight_ride - ~internal_deprecated_since:rel_tampa - ~name:"get_uncooperative_domains" - ~doc:"Return the set of domain uuids which are not co-operating with the host's memory control system" - ~params:[Ref _host, "self", "The host to query"] - ~result:((Set(String)), "UUIDs of domains which are not co-operating") - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_product_since:rel_midnight_ride + ~internal_deprecated_since:rel_tampa + ~name:"get_uncooperative_domains" + ~doc:"Return the set of domain uuids which are not co-operating with the host's memory control system" + ~params:[Ref _host, "self", "The host to query"] + ~result:((Set(String)), "UUIDs of domains which are not co-operating") + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_retrieve_wlb_evacuate_recommendations = call - ~name:"retrieve_wlb_evacuate_recommendations" - ~in_product_since:rel_george - ~doc:"Retrieves recommended host migrations to perform when evacuating the host from the wlb server. 
If a VM cannot be migrated from the host the reason is listed instead of a recommendation." - ~params:[Ref _host, "self", "The host to query"] - ~result:(Map(Ref _vm, Set(String)), "VMs and the reasons why they would block evacuation, or their target host recommended by the wlb server") - ~allowed_roles:_R_READ_ONLY - () + ~name:"retrieve_wlb_evacuate_recommendations" + ~in_product_since:rel_george + ~doc:"Retrieves recommended host migrations to perform when evacuating the host from the wlb server. If a VM cannot be migrated from the host the reason is listed instead of a recommendation." + ~params:[Ref _host, "self", "The host to query"] + ~result:(Map(Ref _vm, Set(String)), "VMs and the reasons why they would block evacuation, or their target host recommended by the wlb server") + ~allowed_roles:_R_READ_ONLY + () (* Host.Disable *) let host_disable = call - ~in_product_since:rel_rio - ~name:"disable" - ~doc:"Puts the host into a state in which no new VMs can be started. Currently active VMs on the host continue to execute." - ~params:[Ref _host, "host", "The Host to disable"] - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_rio + ~name:"disable" + ~doc:"Puts the host into a state in which no new VMs can be started. Currently active VMs on the host continue to execute." + ~params:[Ref _host, "host", "The Host to disable"] + ~allowed_roles:_R_POOL_OP + () (* Host.Enable *) let host_enable = call - ~name:"enable" - ~in_product_since:rel_rio - ~doc:"Puts the host into a state in which new VMs can be started." - ~params:[Ref _host, "host", "The Host to enable"] - ~allowed_roles:_R_POOL_OP - () + ~name:"enable" + ~in_product_since:rel_rio + ~doc:"Puts the host into a state in which new VMs can be started." + ~params:[Ref _host, "host", "The Host to enable"] + ~allowed_roles:_R_POOL_OP + () (* Host.Shutdown *) let host_shutdown = call - ~name:"shutdown" - ~in_product_since:rel_rio - ~doc:"Shutdown the host. 
(This function can only be called if there are no currently running VMs on the host and it is disabled.)" - ~params:[Ref _host, "host", "The Host to shutdown"] - ~allowed_roles:_R_POOL_OP - () + ~name:"shutdown" + ~in_product_since:rel_rio + ~doc:"Shutdown the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)" + ~params:[Ref _host, "host", "The Host to shutdown"] + ~allowed_roles:_R_POOL_OP + () (* Host.reboot *) let host_reboot = call - ~name:"reboot" - ~in_product_since:rel_rio - ~doc:"Reboot the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)" - ~params:[Ref _host, "host", "The Host to reboot"] - ~allowed_roles:_R_POOL_OP - () + ~name:"reboot" + ~in_product_since:rel_rio + ~doc:"Reboot the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)" + ~params:[Ref _host, "host", "The Host to reboot"] + ~allowed_roles:_R_POOL_OP + () (* Host.power_on *) let host_power_on = call - ~name:"power_on" - ~in_product_since:rel_orlando - ~doc:"Attempt to power-on the host (if the capability exists)." - ~params:[Ref _host, "host", "The Host to power on"] - ~allowed_roles:_R_POOL_OP - () + ~name:"power_on" + ~in_product_since:rel_orlando + ~doc:"Attempt to power-on the host (if the capability exists)." + ~params:[Ref _host, "host", "The Host to power on"] + ~allowed_roles:_R_POOL_OP + () let host_restart_agent = call - ~name:"restart_agent" - ~in_product_since:rel_rio - ~doc:"Restarts the agent after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called." 
- ~params:[Ref _host, "host", "The Host on which you want to restart the agent"] - ~allowed_roles:_R_POOL_OP - () + ~name:"restart_agent" + ~in_product_since:rel_rio + ~doc:"Restarts the agent after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called." + ~params:[Ref _host, "host", "The Host on which you want to restart the agent"] + ~allowed_roles:_R_POOL_OP + () let host_shutdown_agent = call - ~name:"shutdown_agent" - ~in_product_since:rel_orlando - ~doc:"Shuts the agent down after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called." - ~params:[] - ~flags:[`Session] (* no async *) - ~allowed_roles:_R_POOL_OP - () + ~name:"shutdown_agent" + ~in_product_since:rel_orlando + ~doc:"Shuts the agent down after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called." + ~params:[] + ~flags:[`Session] (* no async *) + ~allowed_roles:_R_POOL_OP + () let host_dmesg = call - ~name:"dmesg" - ~in_product_since:rel_rio - ~doc:"Get the host xen dmesg." - ~params:[Ref _host, "host", "The Host to query"] - ~result:(String, "dmesg string") - ~allowed_roles:_R_POOL_OP - () + ~name:"dmesg" + ~in_product_since:rel_rio + ~doc:"Get the host xen dmesg." + ~params:[Ref _host, "host", "The Host to query"] + ~result:(String, "dmesg string") + ~allowed_roles:_R_POOL_OP + () let host_dmesg_clear = call - ~name:"dmesg_clear" - ~in_product_since:rel_rio - ~doc:"Get the host xen dmesg, and clear the buffer." 
- ~params:[Ref _host, "host", "The Host to query"] - ~result:(String, "dmesg string") - ~allowed_roles:_R_POOL_OP - () + ~name:"dmesg_clear" + ~in_product_since:rel_rio + ~doc:"Get the host xen dmesg, and clear the buffer." + ~params:[Ref _host, "host", "The Host to query"] + ~result:(String, "dmesg string") + ~allowed_roles:_R_POOL_OP + () let host_get_log = call - ~name:"get_log" - ~in_product_since:rel_rio - ~doc:"Get the host's log file" - ~params:[Ref _host, "host", "The Host to query"] - ~result:(String, "The contents of the host's primary log file") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_log" + ~in_product_since:rel_rio + ~doc:"Get the host's log file" + ~params:[Ref _host, "host", "The Host to query"] + ~result:(String, "The contents of the host's primary log file") + ~allowed_roles:_R_READ_ONLY + () let host_send_debug_keys = call - ~name:"send_debug_keys" - ~in_product_since:rel_rio - ~doc:"Inject the given string as debugging keys into Xen" - ~params:[Ref _host, "host", "The host"; - String, "keys", "The keys to send"] - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"send_debug_keys" + ~in_product_since:rel_rio + ~doc:"Inject the given string as debugging keys into Xen" + ~params:[Ref _host, "host", "The host"; + String, "keys", "The keys to send"] + ~allowed_roles:_R_POOL_ADMIN + () let host_get_data_sources = call - ~name:"get_data_sources" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"" - ~result:(Set (Record _data_source), "A set of data sources") - ~params:[Ref _host, "host", "The host to interrogate"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_data_sources" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"" + ~result:(Set (Record _data_source), "A set of data sources") + ~params:[Ref _host, "host", "The host to interrogate"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let host_record_data_source = call - ~name:"record_data_source" - ~in_oss_since:None - 
~in_product_since:rel_orlando - ~doc:"Start recording the specified data source" - ~params:[Ref _host, "host", "The host"; - String, "data_source", "The data source to record"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"record_data_source" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Start recording the specified data source" + ~params:[Ref _host, "host", "The host"; + String, "data_source", "The data source to record"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () let host_query_data_source = call - ~name:"query_data_source" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Query the latest value of the specified data source" - ~params:[Ref _host, "host", "The host"; - String, "data_source", "The data source to query"] - ~result:(Float,"The latest value, averaged over the last 5 seconds") - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"query_data_source" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Query the latest value of the specified data source" + ~params:[Ref _host, "host", "The host"; + String, "data_source", "The data source to query"] + ~result:(Float,"The latest value, averaged over the last 5 seconds") + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let host_attach_static_vdis = call - ~name:"attach_static_vdis" - ~in_product_since:rel_midnight_ride - ~doc:"Statically attach VDIs on a host." - ~params:[Ref _host, "host", "The Host to modify"; - Map(Ref _vdi, String), "vdi_reason_map", "List of VDI+reason pairs to attach" - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"attach_static_vdis" + ~in_product_since:rel_midnight_ride + ~doc:"Statically attach VDIs on a host." 
+ ~params:[Ref _host, "host", "The Host to modify"; + Map(Ref _vdi, String), "vdi_reason_map", "List of VDI+reason pairs to attach" + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_detach_static_vdis = call - ~name:"detach_static_vdis" - ~in_product_since:rel_midnight_ride - ~doc:"Detach static VDIs from a host." - ~params:[Ref _host, "host", "The Host to modify"; - Set(Ref _vdi), "vdis", "Set of VDIs to detach"; - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"detach_static_vdis" + ~in_product_since:rel_midnight_ride + ~doc:"Detach static VDIs from a host." + ~params:[Ref _host, "host", "The Host to modify"; + Set(Ref _vdi), "vdis", "Set of VDIs to detach"; + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_declare_dead = call - ~name:"declare_dead" - ~in_product_since:rel_clearwater - ~doc:"Declare that a host is dead. This is a dangerous operation, and should only be called if the administrator is absolutely sure the host is definitely dead" - ~params:[Ref _host, "host", "The Host to declare is dead"] - ~allowed_roles:_R_POOL_OP - () + ~name:"declare_dead" + ~in_product_since:rel_clearwater + ~doc:"Declare that a host is dead. 
This is a dangerous operation, and should only be called if the administrator is absolutely sure the host is definitely dead" + ~params:[Ref _host, "host", "The Host to declare is dead"] + ~allowed_roles:_R_POOL_OP + () let host_forget_data_source_archives = call - ~name:"forget_data_source_archives" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~doc:"Forget the recorded statistics related to the specified data source" - ~params:[Ref _host, "host", "The host"; - String, "data_source", "The data source whose archives are to be forgotten"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"forget_data_source_archives" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~doc:"Forget the recorded statistics related to the specified data source" + ~params:[Ref _host, "host", "The host"; + String, "data_source", "The data source whose archives are to be forgotten"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () let host_get_diagnostic_timing_stats = call ~flags:[`Session] - ~in_product_since:rel_miami - ~name:"get_diagnostic_timing_stats" - ~doc:"Return timing statistics for diagnostic purposes" - ~params:[Ref _host, "host", "The host to interrogate"] - ~result:(Map(String, String), "population name to summary map") - ~hide_from_docs:true - ~allowed_roles:_R_READ_ONLY - () + ~in_product_since:rel_miami + ~name:"get_diagnostic_timing_stats" + ~doc:"Return timing statistics for diagnostic purposes" + ~params:[Ref _host, "host", "The host to interrogate"] + ~result:(Map(String, String), "population name to summary map") + ~hide_from_docs:true + ~allowed_roles:_R_READ_ONLY + () let host_create_new_blob = call - ~name: "create_new_blob" - ~in_product_since:rel_orlando - ~doc:"Create a placeholder for a named binary blob of data that is associated with this host" - ~versioned_params: - [{param_type=Ref _host; param_name="host"; param_doc="The host"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="name"; 
param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)}] - ~result:(Ref _blob, "The reference of the blob, needed for populating its data") - ~allowed_roles:_R_POOL_OP - () + ~name: "create_new_blob" + ~in_product_since:rel_orlando + ~doc:"Create a placeholder for a named binary blob of data that is associated with this host" + ~versioned_params: + [{param_type=Ref _host; param_name="host"; param_doc="The host"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. 
Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)}] + ~result:(Ref _blob, "The reference of the blob, needed for populating its data") + ~allowed_roles:_R_POOL_OP + () let host_call_plugin = call - ~name:"call_plugin" - ~in_product_since:rel_orlando - ~doc:"Call a XenAPI plugin on this host" - ~params:[Ref _host, "host", "The host"; - String, "plugin", "The name of the plugin"; - String, "fn", "The name of the function within the plugin"; - Map(String, String), "args", "Arguments for the function";] - ~result:(String, "Result from the plugin") - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"call_plugin" + ~in_product_since:rel_orlando + ~doc:"Call a XenAPI plugin on this host" + ~params:[Ref _host, "host", "The host"; + String, "plugin", "The name of the plugin"; + String, "fn", "The name of the function within the plugin"; + Map(String, String), "args", "Arguments for the function";] + ~result:(String, "Result from the plugin") + ~allowed_roles:_R_POOL_ADMIN + () let host_has_extension = call - ~name:"has_extension" - ~in_product_since:rel_dundee_plus - ~doc:"Return true if the extension is available on the host" - ~params:[Ref _host, "host", "The host"; - String, "name", "The name of the API call";] - ~result:(Bool, "True if the extension exists, false otherwise") - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"has_extension" + ~in_product_since:rel_dundee_plus + ~doc:"Return true if the extension is available on the host" + ~params:[Ref _host, "host", "The host"; + String, "name", "The name of the API call";] + ~result:(Bool, "True if the extension exists, false otherwise") + ~allowed_roles:_R_POOL_ADMIN + () let host_call_extension = call - ~name:"call_extension" - ~in_product_since:rel_dundee_plus - ~doc:"Call a XenAPI extension on this host" - 
~params:[Ref _host, "host", "The host"; - String, "call", "Rpc call for the extension";] - ~result:(String, "Result from the extension") - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"call_extension" + ~in_product_since:rel_dundee_plus + ~doc:"Call a XenAPI extension on this host" + ~params:[Ref _host, "host", "The host"; + String, "call", "Rpc call for the extension";] + ~result:(String, "Result from the extension") + ~allowed_roles:_R_POOL_ADMIN + () let host_enable_binary_storage = call - ~name:"enable_binary_storage" - ~in_product_since:rel_orlando - ~hide_from_docs:true - ~pool_internal:true - ~doc:"Enable binary storage on a particular host, for storing RRDs, messages and blobs" - ~params:[Ref _host, "host", "The host"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"enable_binary_storage" + ~in_product_since:rel_orlando + ~hide_from_docs:true + ~pool_internal:true + ~doc:"Enable binary storage on a particular host, for storing RRDs, messages and blobs" + ~params:[Ref _host, "host", "The host"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_disable_binary_storage = call - ~name:"disable_binary_storage" - ~in_product_since:rel_orlando - ~hide_from_docs:true - ~pool_internal:true - ~doc:"Disable binary storage on a particular host, deleting stored RRDs, messages and blobs" - ~params:[Ref _host, "host", "The host"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"disable_binary_storage" + ~in_product_since:rel_orlando + ~hide_from_docs:true + ~pool_internal:true + ~doc:"Disable binary storage on a particular host, deleting stored RRDs, messages and blobs" + ~params:[Ref _host, "host", "The host"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_update_pool_secret = call - ~name:"update_pool_secret" - ~in_product_since:rel_midnight_ride - ~hide_from_docs:true - ~pool_internal:true - ~doc:"" - ~params:[ - Ref _host, "host", "The host"; - String, "pool_secret", "The new pool secret" ] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"update_pool_secret" + 
~in_product_since:rel_midnight_ride + ~hide_from_docs:true + ~pool_internal:true + ~doc:"" + ~params:[ + Ref _host, "host", "The host"; + String, "pool_secret", "The new pool secret" ] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_update_master = call - ~name:"update_master" - ~in_product_since:rel_midnight_ride - ~hide_from_docs:true - ~pool_internal:true - ~doc:"" - ~params:[ - Ref _host, "host", "The host"; - String, "master_address", "The new master address" ] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"update_master" + ~in_product_since:rel_midnight_ride + ~hide_from_docs:true + ~pool_internal:true + ~doc:"" + ~params:[ + Ref _host, "host", "The host"; + String, "master_address", "The new master address" ] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_set_localdb_key = call - ~name:"set_localdb_key" - ~in_product_since:rel_midnight_ride - ~doc:"Set a key in the local DB of the host." - ~params:[Ref _host, "host", "The Host to modify"; - String, "key", "Key to change"; - String, "value", "Value to set" - ] - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"set_localdb_key" + ~in_product_since:rel_midnight_ride + ~doc:"Set a key in the local DB of the host." + ~params:[Ref _host, "host", "The Host to modify"; + String, "key", "Key to change"; + String, "value", "Value to set" + ] + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_refresh_pack_info = call - ~name:"refresh_pack_info" - ~in_product_since:rel_midnight_ride - ~doc:"Refresh the list of installed Supplemental Packs." - ~params:[Ref _host, "host", "The Host to modify"] - ~allowed_roles:_R_POOL_OP - () + ~name:"refresh_pack_info" + ~in_product_since:rel_midnight_ride + ~doc:"Refresh the list of installed Supplemental Packs." 
+ ~params:[Ref _host, "host", "The Host to modify"] + ~allowed_roles:_R_POOL_OP + () (* ------------------------------------------------------------------------------------------------------------ VDI Management @@ -3165,169 +3165,169 @@ let host_refresh_pack_info = call (* VDI.Snapshot *) let vdi_snapshot = call - ~name:"snapshot" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params: - [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to snapshot"; param_release=rio_release; param_default=None}; - {param_type=Map (String, String); param_name="driver_params"; param_doc="Optional parameters that can be passed through to backend driver in order to specify storage-type-specific snapshot options"; param_release=miami_release; param_default=Some (VMap [])} - ] - ~doc:"Take a read-only snapshot of the VDI, returning a reference to the snapshot. If any driver_params are specified then these are passed through to the storage-specific substrate driver that takes the snapshot. NB the snapshot lives in the same Storage Repository as its parent." - ~result:(Ref _vdi, "The ID of the newly created VDI.") - ~allowed_roles:_R_VM_ADMIN - ~doc_tags:[Snapshots] - () + ~name:"snapshot" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params: + [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to snapshot"; param_release=rio_release; param_default=None}; + {param_type=Map (String, String); param_name="driver_params"; param_doc="Optional parameters that can be passed through to backend driver in order to specify storage-type-specific snapshot options"; param_release=miami_release; param_default=Some (VMap [])} + ] + ~doc:"Take a read-only snapshot of the VDI, returning a reference to the snapshot. If any driver_params are specified then these are passed through to the storage-specific substrate driver that takes the snapshot. NB the snapshot lives in the same Storage Repository as its parent." 
+ ~result:(Ref _vdi, "The ID of the newly created VDI.") + ~allowed_roles:_R_VM_ADMIN + ~doc_tags:[Snapshots] + () let vdi_clone = call - ~name:"clone" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "vdi", "The VDI to clone"] - ~versioned_params: - [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to clone"; param_release=rio_release; param_default=None}; - {param_type=Map (String, String); param_name="driver_params"; param_doc="Optional parameters that are passed through to the backend driver in order to specify storage-type-specific clone options"; param_release=miami_release; param_default=Some (VMap [])} - ] - ~doc:"Take an exact copy of the VDI and return a reference to the new disk. If any driver_params are specified then these are passed through to the storage-specific substrate driver that implements the clone operation. NB the clone lives in the same Storage Repository as its parent." - ~result:(Ref _vdi, "The ID of the newly created VDI.") - ~allowed_roles:_R_VM_ADMIN - ~doc_tags:[Snapshots] - () + ~name:"clone" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "vdi", "The VDI to clone"] + ~versioned_params: + [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to clone"; param_release=rio_release; param_default=None}; + {param_type=Map (String, String); param_name="driver_params"; param_doc="Optional parameters that are passed through to the backend driver in order to specify storage-type-specific clone options"; param_release=miami_release; param_default=Some (VMap [])} + ] + ~doc:"Take an exact copy of the VDI and return a reference to the new disk. If any driver_params are specified then these are passed through to the storage-specific substrate driver that implements the clone operation. NB the clone lives in the same Storage Repository as its parent." 
+ ~result:(Ref _vdi, "The ID of the newly created VDI.") + ~allowed_roles:_R_VM_ADMIN + ~doc_tags:[Snapshots] + () let vdi_resize = call - ~name:"resize" - ~in_product_since:rel_rio - ~in_oss_since:None - ~params:[Ref _vdi, "vdi", "The VDI to resize"; Int, "size", "The new size of the VDI" ] - ~doc:"Resize the VDI." - ~allowed_roles:_R_VM_ADMIN - () + ~name:"resize" + ~in_product_since:rel_rio + ~in_oss_since:None + ~params:[Ref _vdi, "vdi", "The VDI to resize"; Int, "size", "The new size of the VDI" ] + ~doc:"Resize the VDI." + ~allowed_roles:_R_VM_ADMIN + () let vdi_resize_online = call - ~name:"resize_online" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "vdi", "The VDI to resize"; Int, "size", "The new size of the VDI" ] - ~doc:"Resize the VDI which may or may not be attached to running guests." - ~allowed_roles:_R_VM_ADMIN - () + ~name:"resize_online" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "vdi", "The VDI to resize"; Int, "size", "The new size of the VDI" ] + ~doc:"Resize the VDI which may or may not be attached to running guests." + ~allowed_roles:_R_VM_ADMIN + () let vdi_copy = call - ~name:"copy" - ~lifecycle:[ - Published, rel_rio, "Copies a VDI to an SR. There must be a host that can see both the source and destination SRs simultaneously"; - Extended, rel_cowley, "The copy can now be performed between any two SRs."; - Extended, rel_clearwater_felton, "The copy can now be performed into a pre-created VDI. 
It is now possible to request copying only changed blocks from a base VDI"; ] - ~in_oss_since:None - ~versioned_params: - [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to copy"; param_release=rio_release; param_default=None}; - {param_type=Ref _sr; param_name="sr"; param_doc="The destination SR (only required if the destination VDI is not specified"; param_release=rio_release; param_default=Some (VString Ref.(string_of null))}; - {param_type=Ref _vdi; param_name="base_vdi"; param_doc="The base VDI (only required if copying only changed blocks, by default all blocks will be copied)"; param_release=clearwater_felton_release; param_default=Some (VRef Ref.(string_of null))}; - {param_type=Ref _vdi; param_name="into_vdi"; param_doc="The destination VDI to copy blocks into (if omitted then a destination SR must be provided and a fresh VDI will be created)"; param_release=clearwater_felton_release; param_default=Some (VString Ref.(string_of null))}; - ] - ~doc:"Copy either a full VDI or the block differences between two VDIs into either a fresh VDI or an existing VDI." - ~errs:[Api_errors.vdi_readonly; Api_errors.vdi_too_small; Api_errors.vdi_not_sparse] - ~result:(Ref _vdi, "The reference of the VDI where the blocks were written.") - ~allowed_roles:_R_VM_ADMIN - () + ~name:"copy" + ~lifecycle:[ + Published, rel_rio, "Copies a VDI to an SR. There must be a host that can see both the source and destination SRs simultaneously"; + Extended, rel_cowley, "The copy can now be performed between any two SRs."; + Extended, rel_clearwater_felton, "The copy can now be performed into a pre-created VDI. 
It is now possible to request copying only changed blocks from a base VDI"; ] + ~in_oss_since:None + ~versioned_params: + [{param_type=Ref _vdi; param_name="vdi"; param_doc="The VDI to copy"; param_release=rio_release; param_default=None}; + {param_type=Ref _sr; param_name="sr"; param_doc="The destination SR (only required if the destination VDI is not specified"; param_release=rio_release; param_default=Some (VString Ref.(string_of null))}; + {param_type=Ref _vdi; param_name="base_vdi"; param_doc="The base VDI (only required if copying only changed blocks, by default all blocks will be copied)"; param_release=clearwater_felton_release; param_default=Some (VRef Ref.(string_of null))}; + {param_type=Ref _vdi; param_name="into_vdi"; param_doc="The destination VDI to copy blocks into (if omitted then a destination SR must be provided and a fresh VDI will be created)"; param_release=clearwater_felton_release; param_default=Some (VString Ref.(string_of null))}; + ] + ~doc:"Copy either a full VDI or the block differences between two VDIs into either a fresh VDI or an existing VDI." + ~errs:[Api_errors.vdi_readonly; Api_errors.vdi_too_small; Api_errors.vdi_not_sparse] + ~result:(Ref _vdi, "The reference of the VDI where the blocks were written.") + ~allowed_roles:_R_VM_ADMIN + () let vdi_pool_migrate = call - ~name:"pool_migrate" - ~in_oss_since:None - ~in_product_since:rel_tampa - ~params:[ Ref _vdi, "vdi", "The VDI to migrate" - ; Ref _sr, "sr", "The destination SR" - ; Map (String, String), "options", "Other parameters" ] - ~result:(Ref _vdi, "The new reference of the migrated VDI.") - ~doc:"Migrate a VDI, which may be attached to a running guest, to a different SR. The destination SR must be visible to the guest." 
- ~allowed_roles:_R_VM_POWER_ADMIN - () + ~name:"pool_migrate" + ~in_oss_since:None + ~in_product_since:rel_tampa + ~params:[ Ref _vdi, "vdi", "The VDI to migrate" + ; Ref _sr, "sr", "The destination SR" + ; Map (String, String), "options", "Other parameters" ] + ~result:(Ref _vdi, "The new reference of the migrated VDI.") + ~doc:"Migrate a VDI, which may be attached to a running guest, to a different SR. The destination SR must be visible to the guest." + ~allowed_roles:_R_VM_POWER_ADMIN + () (* ------------------------------------------------------------------------------------------------------------ VBDs ------------------------------------------------------------------------------------------------------------ *) let vbd_eject = call - ~name:"eject" - ~in_product_since:rel_rio - ~doc:"Remove the media from the device and leave it empty" - ~params:[Ref _vbd, "vbd", "The vbd representing the CDROM-like device"] - ~errs:[Api_errors.vbd_not_removable_media; Api_errors.vbd_is_empty] - ~allowed_roles:_R_VM_OP - () + ~name:"eject" + ~in_product_since:rel_rio + ~doc:"Remove the media from the device and leave it empty" + ~params:[Ref _vbd, "vbd", "The vbd representing the CDROM-like device"] + ~errs:[Api_errors.vbd_not_removable_media; Api_errors.vbd_is_empty] + ~allowed_roles:_R_VM_OP + () let vbd_insert = call - ~name:"insert" - ~in_product_since:rel_rio - ~doc:"Insert new media into the device" - ~params:[Ref _vbd, "vbd", "The vbd representing the CDROM-like device"; - Ref _vdi, "vdi", "The new VDI to 'insert'"] - ~errs:[Api_errors.vbd_not_removable_media; Api_errors.vbd_not_empty] - ~allowed_roles:_R_VM_OP - () + ~name:"insert" + ~in_product_since:rel_rio + ~doc:"Insert new media into the device" + ~params:[Ref _vbd, "vbd", "The vbd representing the CDROM-like device"; + Ref _vdi, "vdi", "The new VDI to 'insert'"] + ~errs:[Api_errors.vbd_not_removable_media; Api_errors.vbd_not_empty] + ~allowed_roles:_R_VM_OP + () let vbd_plug = call - ~name:"plug" - 
~in_product_since:rel_rio - ~doc:"Hotplug the specified VBD, dynamically attaching it to the running VM" - ~params:[Ref _vbd, "self", "The VBD to hotplug"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"plug" + ~in_product_since:rel_rio + ~doc:"Hotplug the specified VBD, dynamically attaching it to the running VM" + ~params:[Ref _vbd, "self", "The VBD to hotplug"] + ~allowed_roles:_R_VM_ADMIN + () let vbd_unplug = call - ~name:"unplug" - ~in_product_since:rel_rio - ~doc:"Hot-unplug the specified VBD, dynamically unattaching it from the running VM" - ~params:[Ref _vbd, "self", "The VBD to hot-unplug"] - ~errs:[Api_errors.device_detach_rejected; Api_errors.device_already_detached] - ~allowed_roles:_R_VM_ADMIN - () - + ~name:"unplug" + ~in_product_since:rel_rio + ~doc:"Hot-unplug the specified VBD, dynamically unattaching it from the running VM" + ~params:[Ref _vbd, "self", "The VBD to hot-unplug"] + ~errs:[Api_errors.device_detach_rejected; Api_errors.device_already_detached] + ~allowed_roles:_R_VM_ADMIN + () + let vbd_unplug_force = call - ~name:"unplug_force" - ~in_product_since:rel_rio - ~doc:"Forcibly unplug the specified VBD" - ~params:[Ref _vbd, "self", "The VBD to forcibly unplug"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"unplug_force" + ~in_product_since:rel_rio + ~doc:"Forcibly unplug the specified VBD" + ~params:[Ref _vbd, "self", "The VBD to forcibly unplug"] + ~allowed_roles:_R_VM_ADMIN + () let vbd_unplug_force_no_safety_check = call - ~name:"unplug_force_no_safety_check" - ~doc:"Forcibly unplug the specified VBD without any safety checks. This is an extremely dangerous operation in the general case that can cause guest crashes and data corruption; it should be called with extreme caution." 
- ~params:[Ref _vbd, "self", "The VBD to forcibly unplug (no safety checks are applied to test if the device supports surprise-remove)"] - ~hide_from_docs:true - ~in_product_since:rel_symc - ~allowed_roles:_R_VM_ADMIN - () + ~name:"unplug_force_no_safety_check" + ~doc:"Forcibly unplug the specified VBD without any safety checks. This is an extremely dangerous operation in the general case that can cause guest crashes and data corruption; it should be called with extreme caution." + ~params:[Ref _vbd, "self", "The VBD to forcibly unplug (no safety checks are applied to test if the device supports surprise-remove)"] + ~hide_from_docs:true + ~in_product_since:rel_symc + ~allowed_roles:_R_VM_ADMIN + () let vbd_pause = call - ~name:"pause" - ~doc:"Stop the backend device servicing requests so that an operation can be performed on the disk (eg live resize, snapshot)" - ~params:[Ref _vbd, "self", "The VBD to pause"] - ~hide_from_docs:true - ~in_product_since:rel_symc - ~result:(String, "Token to uniquely identify this pause instance, used to match the corresponding unpause") (* new in MR *) - ~allowed_roles:_R_VM_ADMIN - () + ~name:"pause" + ~doc:"Stop the backend device servicing requests so that an operation can be performed on the disk (eg live resize, snapshot)" + ~params:[Ref _vbd, "self", "The VBD to pause"] + ~hide_from_docs:true + ~in_product_since:rel_symc + ~result:(String, "Token to uniquely identify this pause instance, used to match the corresponding unpause") (* new in MR *) + ~allowed_roles:_R_VM_ADMIN + () let vbd_unpause = call - ~name:"unpause" - ~doc:"Restart the backend device after it was paused while an operation was performed on the disk (eg live resize, snapshot)" - ~versioned_params: - [{param_type=Ref _vbd; param_name="self"; param_doc="The VBD to unpause"; param_release=miami_symc_release; param_default=None}; - {param_type=String; param_name="token"; param_doc="The token from VBD.pause"; param_release=orlando_release; param_default=Some(VString 
"")}] - ~hide_from_docs:true - ~in_product_since:rel_symc - ~allowed_roles:_R_VM_ADMIN - () + ~name:"unpause" + ~doc:"Restart the backend device after it was paused while an operation was performed on the disk (eg live resize, snapshot)" + ~versioned_params: + [{param_type=Ref _vbd; param_name="self"; param_doc="The VBD to unpause"; param_release=miami_symc_release; param_default=None}; + {param_type=String; param_name="token"; param_doc="The token from VBD.pause"; param_release=orlando_release; param_default=Some(VString "")}] + ~hide_from_docs:true + ~in_product_since:rel_symc + ~allowed_roles:_R_VM_ADMIN + () let vbd_assert_attachable = call - ~name:"assert_attachable" - ~in_product_since:rel_rio - ~doc:"Throws an error if this VBD could not be attached to this VM if the VM were running. Intended for debugging." - ~params:[Ref _vbd, "self", "The VBD to query"] - ~in_oss_since:None - ~allowed_roles:_R_VM_ADMIN - () + ~name:"assert_attachable" + ~in_product_since:rel_rio + ~doc:"Throws an error if this VBD could not be attached to this VM if the VM were running. Intended for debugging." + ~params:[Ref _vbd, "self", "The VBD to query"] + ~in_oss_since:None + ~allowed_roles:_R_VM_ADMIN + () (******************************************************************************************************************) (* Now define the objects themselves and their fields *) @@ -3335,143 +3335,143 @@ let vbd_assert_attachable = call (** Make an object field record *) let field ?(in_oss_since = Some "3.0.3") ?in_product_since ?(internal_only = false) - ?internal_deprecated_since ?(ignore_foreign_key = false) ?(writer_roles=None) ?(reader_roles=None) - ?(qualifier = RW) ?(ty = String) ?(effect = false) ?(default_value = None) ?(persist = true) - ?(map_keys_roles=[]) (* list of (key_name,(writer_roles)) for a map field *) - ?lifecycle ?(doc_tags=[]) name desc = - (* in_product_since currently defaults to 'Some rel_rio', for backwards compatibility. 
- * This should eventually become 'None'. *) - let in_product_since = match in_product_since with None -> Some rel_rio | x -> x in - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for field '" ^ name ^ "' not specified"); - let lifecycle = match lifecycle with - | None -> - let published = match in_product_since with - | None -> [] - | Some rel -> [Published, rel, desc] - in - let deprecated = match internal_deprecated_since with - | None -> [] - | Some rel -> [Deprecated, rel, ""] - in - published @ deprecated - | Some l -> l - in - let release = - { - internal = (match get_published lifecycle with - | Some published -> get_product_releases published - | None -> ["closed"]); - opensource = get_oss_releases in_oss_since; - internal_deprecated_since = get_deprecated lifecycle; - } - in - Field { - release = release; - lifecycle=lifecycle; - qualifier=qualifier; ty=ty; internal_only = internal_only; default_value = default_value; - field_name=name; - full_name=[ name ]; - field_description=desc; - field_persist=persist; - field_has_effect = effect; - field_ignore_foreign_key = ignore_foreign_key; - field_setter_roles = writer_roles; - field_getter_roles = reader_roles; - field_map_keys_roles = map_keys_roles; - field_doc_tags = doc_tags; - } + ?internal_deprecated_since ?(ignore_foreign_key = false) ?(writer_roles=None) ?(reader_roles=None) + ?(qualifier = RW) ?(ty = String) ?(effect = false) ?(default_value = None) ?(persist = true) + ?(map_keys_roles=[]) (* list of (key_name,(writer_roles)) for a map field *) + ?lifecycle ?(doc_tags=[]) name desc = + (* in_product_since currently defaults to 'Some rel_rio', for backwards compatibility. + * This should eventually become 'None'. 
*) + let in_product_since = match in_product_since with None -> Some rel_rio | x -> x in + if lifecycle = None && in_product_since = None then + failwith ("Lifecycle for field '" ^ name ^ "' not specified"); + let lifecycle = match lifecycle with + | None -> + let published = match in_product_since with + | None -> [] + | Some rel -> [Published, rel, desc] + in + let deprecated = match internal_deprecated_since with + | None -> [] + | Some rel -> [Deprecated, rel, ""] + in + published @ deprecated + | Some l -> l + in + let release = + { + internal = (match get_published lifecycle with + | Some published -> get_product_releases published + | None -> ["closed"]); + opensource = get_oss_releases in_oss_since; + internal_deprecated_since = get_deprecated lifecycle; + } + in + Field { + release = release; + lifecycle=lifecycle; + qualifier=qualifier; ty=ty; internal_only = internal_only; default_value = default_value; + field_name=name; + full_name=[ name ]; + field_description=desc; + field_persist=persist; + field_has_effect = effect; + field_ignore_foreign_key = ignore_foreign_key; + field_setter_roles = writer_roles; + field_getter_roles = reader_roles; + field_map_keys_roles = map_keys_roles; + field_doc_tags = doc_tags; + } let uid ?(in_oss_since=Some "3.0.3") ?(reader_roles=None) ?lifecycle refname = - field - ~in_oss_since - ?lifecycle - ~qualifier:DynamicRO - ~ty:(String) - ~writer_roles:_R_POOL_ADMIN (* only the system should be able to create/modify uuids *) - ~reader_roles - "uuid" - "Unique identifier/object reference" + field + ~in_oss_since + ?lifecycle + ~qualifier:DynamicRO + ~ty:(String) + ~writer_roles:_R_POOL_ADMIN (* only the system should be able to create/modify uuids *) + ~reader_roles + "uuid" + "Unique identifier/object reference" let allowed_and_current_operations ?(writer_roles=None) ?(reader_roles=None) operations_type = - [ + [ field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set 
operations_type) ~default_value:(Some (VSet [])) "allowed_operations" "list of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client."; field ~writer_roles ~reader_roles ~persist:false ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map(String, operations_type)) ~default_value:(Some (VMap [])) "current_operations" "links each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task."; ] (** Make a Namespace (note effect on enclosing field.full_names) *) -let namespace ?(get_field_writer_roles=fun x->x) ?(get_field_reader_roles=fun x->x) ?(idempotent=false) ~name ~contents () = +let namespace ?(get_field_writer_roles=fun x->x) ?(get_field_reader_roles=fun x->x) ?(idempotent=false) ~name ~contents () = let rec prefix = function | Namespace(x, xs) -> Namespace(x, List.map prefix xs) | Field x -> Field { x with full_name = if idempotent then x.full_name else name :: x.full_name; - field_setter_roles=get_field_writer_roles x.field_setter_roles; - field_getter_roles=get_field_reader_roles x.field_getter_roles - } in + field_setter_roles=get_field_writer_roles x.field_setter_roles; + field_getter_roles=get_field_reader_roles x.field_getter_roles + } in Namespace(name, List.map prefix contents) (** Many of the objects have a set of names of various lengths: *) let names ?(writer_roles=None) ?(reader_roles=None) ?lifecycle in_oss_since qual = - let field x y = - field x y ~in_oss_since ~qualifier:qual ~writer_roles ~reader_roles - ~default_value:(Some (VString "")) ?lifecycle in - [ - field "label" "a human-readable name"; - field "description" "a notes field containing human-readable description" - ] + let field x y = + field x y ~in_oss_since ~qualifier:qual ~writer_roles ~reader_roles + ~default_value:(Some (VString "")) ?lifecycle in + [ + field "label" "a human-readable name"; + field "description" "a notes field 
containing human-readable description" + ] let default_field_reader_roles = _R_ALL (* by default, all can read fields *) let default_field_writer_roles = _R_POOL_ADMIN (* by default, only root can write to them *) (** Create an object and map the object name into the messages *) let create_obj ?lifecycle ~in_oss_since ?in_product_since ?(internal_deprecated_since=None) ~gen_constructor_destructor ~gen_events ~persist ~name ~descr ~doccomments ~contents ~messages ~in_db - ?(contents_default_reader_roles=default_field_reader_roles) ?(contents_default_writer_roles=None) - ?(implicit_messages_allowed_roles=_R_ALL) (* used in implicit obj msgs (get_all, etc) *) - ?force_custom_actions:(force_custom_actions=None) (* None,Some(RW),Some(StaticRO) *) - ~messages_default_allowed_roles ?(doc_tags=[])(* used in constructor, destructor and explicit obj msgs *) - ?(msg_lifecycles = [])(* To specify lifecycle for automatic messages (e.g. constructor) when different to object lifecycle. *) - () = - let contents_default_writer_roles = if contents_default_writer_roles=None then messages_default_allowed_roles else contents_default_writer_roles in - let get_field_reader_roles = function None->contents_default_reader_roles|r->r in - let get_field_writer_roles = function None->contents_default_writer_roles|r->r in - let get_msg_allowed_roles = function None->messages_default_allowed_roles|r->r in - let contents = List.map (function - | Namespace(n,cs)->namespace ~get_field_writer_roles ~get_field_reader_roles ~name:n ~contents:cs ~idempotent:true () - | Field f->Field{f with field_setter_roles=get_field_writer_roles f.field_setter_roles; - field_getter_roles=get_field_reader_roles f.field_getter_roles} - ) contents in - if lifecycle = None && in_product_since = None then - failwith ("Lifecycle for class '" ^ name ^ "' not specified"); - let lifecycle = match lifecycle with - | None -> - let published = match in_product_since with - | None -> [] - | Some rel -> [Published, rel, descr] - 
in - let deprecated = match internal_deprecated_since with - | None -> [] - | Some rel -> [Deprecated, rel, ""] - in - published @ deprecated - | Some l -> l - in - let release = - { - internal = (match get_published lifecycle with - | Some published -> get_product_releases published - | None -> ["closed"]); - opensource = get_oss_releases in_oss_since; - internal_deprecated_since = get_deprecated lifecycle; - } - in - let msgs = List.map (fun m -> {m with msg_obj_name=name;msg_allowed_roles=get_msg_allowed_roles m.msg_allowed_roles}) messages in - { name = name; description = descr; obj_lifecycle = lifecycle; messages = msgs; contents = contents; - doccomments = doccomments; msg_lifecycles = msg_lifecycles; - gen_constructor_destructor = gen_constructor_destructor; force_custom_actions = force_custom_actions; - persist = persist; gen_events = gen_events; obj_release = release; - in_database=in_db; obj_allowed_roles = messages_default_allowed_roles; obj_implicit_msg_allowed_roles = implicit_messages_allowed_roles; - obj_doc_tags = doc_tags; - } + ?(contents_default_reader_roles=default_field_reader_roles) ?(contents_default_writer_roles=None) + ?(implicit_messages_allowed_roles=_R_ALL) (* used in implicit obj msgs (get_all, etc) *) + ?force_custom_actions:(force_custom_actions=None) (* None,Some(RW),Some(StaticRO) *) + ~messages_default_allowed_roles ?(doc_tags=[])(* used in constructor, destructor and explicit obj msgs *) + ?(msg_lifecycles = [])(* To specify lifecycle for automatic messages (e.g. constructor) when different to object lifecycle. 
*) + () = + let contents_default_writer_roles = if contents_default_writer_roles=None then messages_default_allowed_roles else contents_default_writer_roles in + let get_field_reader_roles = function None->contents_default_reader_roles|r->r in + let get_field_writer_roles = function None->contents_default_writer_roles|r->r in + let get_msg_allowed_roles = function None->messages_default_allowed_roles|r->r in + let contents = List.map (function + | Namespace(n,cs)->namespace ~get_field_writer_roles ~get_field_reader_roles ~name:n ~contents:cs ~idempotent:true () + | Field f->Field{f with field_setter_roles=get_field_writer_roles f.field_setter_roles; + field_getter_roles=get_field_reader_roles f.field_getter_roles} + ) contents in + if lifecycle = None && in_product_since = None then + failwith ("Lifecycle for class '" ^ name ^ "' not specified"); + let lifecycle = match lifecycle with + | None -> + let published = match in_product_since with + | None -> [] + | Some rel -> [Published, rel, descr] + in + let deprecated = match internal_deprecated_since with + | None -> [] + | Some rel -> [Deprecated, rel, ""] + in + published @ deprecated + | Some l -> l + in + let release = + { + internal = (match get_published lifecycle with + | Some published -> get_product_releases published + | None -> ["closed"]); + opensource = get_oss_releases in_oss_since; + internal_deprecated_since = get_deprecated lifecycle; + } + in + let msgs = List.map (fun m -> {m with msg_obj_name=name;msg_allowed_roles=get_msg_allowed_roles m.msg_allowed_roles}) messages in + { name = name; description = descr; obj_lifecycle = lifecycle; messages = msgs; contents = contents; + doccomments = doccomments; msg_lifecycles = msg_lifecycles; + gen_constructor_destructor = gen_constructor_destructor; force_custom_actions = force_custom_actions; + persist = persist; gen_events = gen_events; obj_release = release; + in_database=in_db; obj_allowed_roles = messages_default_allowed_roles; 
obj_implicit_msg_allowed_roles = implicit_messages_allowed_roles; + obj_doc_tags = doc_tags; + } (** Additional messages for srs *) let dev_config_param = @@ -3494,238 +3494,238 @@ let sr_create_common = {param_type=String; param_name="content_type"; param_doc="The type of the new SRs content, if required (e.g. ISOs)"; param_release=rio_release; param_default=None}; ] -let sr_sm_config = +let sr_sm_config = {param_type=Map(String,String); param_name="sm_config"; param_doc="Storage backend specific configuration options"; param_release=miami_release; param_default=Some (VMap [])} let sr_create = call - ~name:"create" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params:(sr_host_param::dev_config_param::sr_physical_size_param::(sr_create_common @ [ sr_shared_param; sr_sm_config ] )) - ~doc:"Create a new Storage Repository and introduce it into the managed system, creating both SR record and PBD record to attach it to current host (with specified device_config parameters)" - ~result:(Ref _sr, "The reference of the newly created Storage Repository.") - ~errs:[Api_errors.sr_unknown_driver] - ~allowed_roles:_R_POOL_OP + ~name:"create" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:(sr_host_param::dev_config_param::sr_physical_size_param::(sr_create_common @ [ sr_shared_param; sr_sm_config ] )) + ~doc:"Create a new Storage Repository and introduce it into the managed system, creating both SR record and PBD record to attach it to current host (with specified device_config parameters)" + ~result:(Ref _sr, "The reference of the newly created Storage Repository.") + ~errs:[Api_errors.sr_unknown_driver] + ~allowed_roles:_R_POOL_OP () let destroy_self_param = (Ref _sr, "sr", "The SR to destroy") let sr_destroy = call - ~name:"destroy" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Destroy specified SR, removing SR-record from database and remove SR from disk. 
(In order to affect this operation the appropriate device_config is read from the specified SR's PBD on current host)" - ~errs:[Api_errors.sr_has_pbd] - ~params:[destroy_self_param] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Destroy specified SR, removing SR-record from database and remove SR from disk. (In order to affect this operation the appropriate device_config is read from the specified SR's PBD on current host)" + ~errs:[Api_errors.sr_has_pbd] + ~params:[destroy_self_param] + ~allowed_roles:_R_POOL_OP + () let sr_forget = call - ~name:"forget" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Removing specified SR-record from database, without attempting to remove SR from disk" - ~params:[destroy_self_param] - ~errs:[Api_errors.sr_has_pbd] - ~allowed_roles:_R_POOL_OP - () + ~name:"forget" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Removing specified SR-record from database, without attempting to remove SR from disk" + ~params:[destroy_self_param] + ~errs:[Api_errors.sr_has_pbd] + ~allowed_roles:_R_POOL_OP + () -let sr_introduce = +let sr_introduce = call - ~name:"introduce" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params:({param_type=String; param_name="uuid"; param_doc="The uuid assigned to the introduced SR"; param_release=rio_release; param_default=None}::(sr_create_common @ [sr_shared_param; sr_sm_config])) - ~doc:"Introduce a new Storage Repository into the managed system" - ~result:(Ref _sr, "The reference of the newly introduced Storage Repository.") - ~allowed_roles:_R_POOL_OP + ~name:"introduce" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:({param_type=String; param_name="uuid"; param_doc="The uuid assigned to the introduced SR"; param_release=rio_release; param_default=None}::(sr_create_common @ [sr_shared_param; sr_sm_config])) + ~doc:"Introduce a new Storage Repository into the managed system" + ~result:(Ref _sr, 
"The reference of the newly introduced Storage Repository.") + ~allowed_roles:_R_POOL_OP () let sr_probe = call - ~name:"probe" - ~in_oss_since:None - ~in_product_since:rel_miami - ~versioned_params:[sr_host_param; dev_config_param; {param_type=String; param_name="type"; param_doc="The type of the SR; used to specify the SR backend driver to use"; param_release=miami_release; param_default=None}; sr_sm_config] - ~doc:"Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config." - ~result:(String, "An XML fragment containing the scan results. These are specific to the scan being performed, and the backend.") - ~allowed_roles:_R_POOL_OP + ~name:"probe" + ~in_oss_since:None + ~in_product_since:rel_miami + ~versioned_params:[sr_host_param; dev_config_param; {param_type=String; param_name="type"; param_doc="The type of the SR; used to specify the SR backend driver to use"; param_release=miami_release; param_default=None}; sr_sm_config] + ~doc:"Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config." + ~result:(String, "An XML fragment containing the scan results. 
These are specific to the scan being performed, and the backend.") + ~allowed_roles:_R_POOL_OP () let sr_make = call - ~name:"make" - ~in_oss_since:None - ~in_product_since:rel_rio - ~internal_deprecated_since:rel_miami - ~lifecycle:[ - Published, rel_rio, "Create a new Storage Repository on disk"; - Deprecated, rel_miami, "Use SR.create instead" - ] - ~versioned_params:(sr_host_param::dev_config_param::sr_physical_size_param::(sr_create_common @ [sr_sm_config])) - ~doc:"Create a new Storage Repository on disk. This call is deprecated: use SR.create instead." - ~result:(String, "The uuid of the newly created Storage Repository.") - ~allowed_roles:_R_POOL_OP + ~name:"make" + ~in_oss_since:None + ~in_product_since:rel_rio + ~internal_deprecated_since:rel_miami + ~lifecycle:[ + Published, rel_rio, "Create a new Storage Repository on disk"; + Deprecated, rel_miami, "Use SR.create instead" + ] + ~versioned_params:(sr_host_param::dev_config_param::sr_physical_size_param::(sr_create_common @ [sr_sm_config])) + ~doc:"Create a new Storage Repository on disk. This call is deprecated: use SR.create instead." 
+ ~result:(String, "The uuid of the newly created Storage Repository.") + ~allowed_roles:_R_POOL_OP () let sr_get_supported_types = call - ~name:"get_supported_types" - ~in_product_since:rel_rio - ~flags:[`Session] - ~doc:"Return a set of all the SR types supported by the system" - ~params:[] - ~result:(Set String, "the supported SR types") - ~allowed_roles:_R_READ_ONLY -() + ~name:"get_supported_types" + ~in_product_since:rel_rio + ~flags:[`Session] + ~doc:"Return a set of all the SR types supported by the system" + ~params:[] + ~result:(Set String, "the supported SR types") + ~allowed_roles:_R_READ_ONLY + () let sr_scan = call - ~name:"scan" - ~in_product_since:rel_rio - ~doc:"Refreshes the list of VDIs associated with an SR" - ~params:[Ref _sr, "sr", "The SR to scan" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"scan" + ~in_product_since:rel_rio + ~doc:"Refreshes the list of VDIs associated with an SR" + ~params:[Ref _sr, "sr", "The SR to scan" ] + ~allowed_roles:_R_POOL_OP + () (* Nb, although this is a new explicit call, it's actually been in the API since rio - just autogenerated. So no setting of rel_miami. 
*) let sr_set_shared = call - ~name:"set_shared" - ~in_product_since:rel_rio - ~doc:"Sets the shared flag on the SR" - ~params:[Ref _sr, "sr", "The SR"; - Bool, "value", "True if the SR is shared"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_shared" + ~in_product_since:rel_rio + ~doc:"Sets the shared flag on the SR" + ~params:[Ref _sr, "sr", "The SR"; + Bool, "value", "True if the SR is shared"] + ~allowed_roles:_R_POOL_OP + () let sr_set_name_label = call - ~name:"set_name_label" - ~in_product_since:rel_rio - ~doc:"Set the name label of the SR" - ~params:[Ref _sr, "sr", "The SR"; - String, "value", "The name label for the SR"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_name_label" + ~in_product_since:rel_rio + ~doc:"Set the name label of the SR" + ~params:[Ref _sr, "sr", "The SR"; + String, "value", "The name label for the SR"] + ~allowed_roles:_R_POOL_OP + () let sr_set_name_description = call - ~name:"set_name_description" - ~in_product_since:rel_rio - ~doc:"Set the name description of the SR" - ~params:[Ref _sr, "sr", "The SR"; - String, "value", "The name description for the SR"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_name_description" + ~in_product_since:rel_rio + ~doc:"Set the name description of the SR" + ~params:[Ref _sr, "sr", "The SR"; + String, "value", "The name description for the SR"] + ~allowed_roles:_R_POOL_OP + () let sr_create_new_blob = call - ~name: "create_new_blob" - ~in_product_since:rel_orlando - ~doc:"Create a placeholder for a named binary blob of data that is associated with this SR" - ~versioned_params: - [{param_type=Ref _sr; param_name="sr"; param_doc="The SR"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. 
Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} - ] - ~result:(Ref _blob, "The reference of the blob, needed for populating its data") - ~allowed_roles:_R_POOL_OP - () + ~name: "create_new_blob" + ~in_product_since:rel_orlando + ~doc:"Create a placeholder for a named binary blob of data that is associated with this SR" + ~versioned_params: + [{param_type=Ref _sr; param_name="sr"; param_doc="The SR"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} + ] + ~result:(Ref _blob, "The reference of the blob, needed for populating its data") + ~allowed_roles:_R_POOL_OP + () let sr_get_data_sources = call - ~name:"get_data_sources" - ~in_oss_since:None - ~in_product_since:rel_dundee - ~doc:"" - ~result:(Set (Record _data_source), "A set of data sources") - ~params:[Ref _sr, "sr", "The SR to interrogate"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_data_sources" + ~in_oss_since:None + ~in_product_since:rel_dundee + ~doc:"" + ~result:(Set (Record _data_source), "A set of data sources") + ~params:[Ref _sr, "sr", "The SR to interrogate"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let sr_record_data_source = call - ~name:"record_data_source" - ~in_oss_since:None - ~in_product_since:rel_dundee - ~doc:"Start recording 
the specified data source" - ~params:[Ref _sr, "sr", "The SR"; - String, "data_source", "The data source to record"] - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"record_data_source" + ~in_oss_since:None + ~in_product_since:rel_dundee + ~doc:"Start recording the specified data source" + ~params:[Ref _sr, "sr", "The SR"; + String, "data_source", "The data source to record"] + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () let sr_query_data_source = call - ~name:"query_data_source" - ~in_oss_since:None - ~in_product_since:rel_dundee - ~doc:"Query the latest value of the specified data source" - ~params:[Ref _sr, "sr", "The SR"; - String, "data_source", "The data source to query"] - ~result:(Float,"The latest value, averaged over the last 5 seconds") - ~errs:[] - ~flags:[`Session] - ~allowed_roles:_R_READ_ONLY - () + ~name:"query_data_source" + ~in_oss_since:None + ~in_product_since:rel_dundee + ~doc:"Query the latest value of the specified data source" + ~params:[Ref _sr, "sr", "The SR"; + String, "data_source", "The data source to query"] + ~result:(Float,"The latest value, averaged over the last 5 seconds") + ~errs:[] + ~flags:[`Session] + ~allowed_roles:_R_READ_ONLY + () let sr_forget_data_source_archives = call - ~name:"forget_data_source_archives" - ~in_oss_since:None - ~in_product_since:rel_dundee - ~doc:"Forget the recorded statistics related to the specified data source" - ~params:[Ref _sr, "sr", "The SR"; - String, "data_source", "The data source whose archives are to be forgotten"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"forget_data_source_archives" + ~in_oss_since:None + ~in_product_since:rel_dundee + ~doc:"Forget the recorded statistics related to the specified data source" + ~params:[Ref _sr, "sr", "The SR"; + String, "data_source", "The data source whose archives are to be forgotten"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () let pbd_plug = call - ~name:"plug" - 
~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Activate the specified PBD, causing the referenced SR to be attached and scanned" - ~params:[Ref _pbd, "self", "The PBD to activate"] - ~errs:[Api_errors.sr_unknown_driver] - ~allowed_roles:_R_POOL_OP - () + ~name:"plug" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Activate the specified PBD, causing the referenced SR to be attached and scanned" + ~params:[Ref _pbd, "self", "The PBD to activate"] + ~errs:[Api_errors.sr_unknown_driver] + ~allowed_roles:_R_POOL_OP + () let pbd_unplug = call - ~name:"unplug" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Deactivate the specified PBD, causing the referenced SR to be detached and nolonger scanned" - ~params:[Ref _pbd, "self", "The PBD to deactivate"] - ~allowed_roles:_R_POOL_OP - () + ~name:"unplug" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Deactivate the specified PBD, causing the referenced SR to be detached and nolonger scanned" + ~params:[Ref _pbd, "self", "The PBD to deactivate"] + ~allowed_roles:_R_POOL_OP + () (** Sessions *) -let session = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_session ~descr:"A session" ~gen_events:false +let session = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_session ~descr:"A session" ~gen_events:false ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[session_login; session_logout; session_chpass; - slave_login; - slave_local_login; slave_local_login_with_password; session_create_from_db_file; local_logout; - session_get_all_subject_identifiers; session_logout_subject_identifier; - ] ~contents:[ - uid _session; - field ~qualifier:DynamicRO ~ty:(Ref _host) - "this_host" "Currently connected host"; - field 
~qualifier:DynamicRO ~ty:(Ref _user) - "this_user" "Currently connected user"; - field ~qualifier:DynamicRO ~ty:DateTime - "last_active" "Timestamp for last time session was active"; - field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None - "pool" "True if this session relates to a intra-pool login, false otherwise"; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VBool false)) ~ty:Bool "is_local_superuser" "true iff this session was created using local superuser credentials"; - field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _subject) "subject" "references the subject instance that created the session. If a session instance has is_local_superuser set, then the value of this field is undefined."; - field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some(VDateTime(Date.of_float 0.))) ~ty:DateTime "validation_time" "time when session was last validated"; - field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "auth_user_sid" "the subject identifier of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined."; - field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "auth_user_name" "the subject name of the user that was externally authenticated. 
If a session instance has is_local_superuser set, then the value of this field is undefined."; - field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO ~default_value:(Some(VSet [])) ~ty:(Set(String)) "rbac_permissions" "list with all RBAC permissions for this session"; - field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Set(Ref _task)) "tasks" "list of tasks created using the current session"; - field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _session) "parent" "references the parent session that created this session"; - field ~in_product_since:rel_clearwater ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "originator" "a key string provided by a API user to distinguish itself from other users sharing the same login name"; - ] - () + slave_login; + slave_local_login; slave_local_login_with_password; session_create_from_db_file; local_logout; + session_get_all_subject_identifiers; session_logout_subject_identifier; + ] ~contents:[ + uid _session; + field ~qualifier:DynamicRO ~ty:(Ref _host) + "this_host" "Currently connected host"; + field ~qualifier:DynamicRO ~ty:(Ref _user) + "this_user" "Currently connected user"; + field ~qualifier:DynamicRO ~ty:DateTime + "last_active" "Timestamp for last time session was active"; + field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None + "pool" "True if this session relates to a intra-pool login, false otherwise"; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VBool false)) ~ty:Bool "is_local_superuser" "true iff this session was created using local superuser credentials"; + field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _subject) "subject" "references the subject 
instance that created the session. If a session instance has is_local_superuser set, then the value of this field is undefined."; + field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some(VDateTime(Date.of_float 0.))) ~ty:DateTime "validation_time" "time when session was last validated"; + field ~in_product_since:rel_george ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "auth_user_sid" "the subject identifier of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined."; + field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "auth_user_name" "the subject name of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined."; + field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO ~default_value:(Some(VSet [])) ~ty:(Set(String)) "rbac_permissions" "list with all RBAC permissions for this session"; + field ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Set(Ref _task)) "tasks" "list of tasks created using the current session"; + field ~in_product_since:rel_midnight_ride ~qualifier:StaticRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _session) "parent" "references the parent session that created this session"; + field ~in_product_since:rel_clearwater ~qualifier:DynamicRO ~default_value:(Some(VString(""))) ~ty:String "originator" "a key string provided by a API user to distinguish itself from other users sharing the same login name"; + ] + () (** Tasks *) @@ -3733,77 +3733,77 @@ let session = (* NB: the status 'cancelling' is not being used, nor should it ever be used. It should be purged from here! 
*) let status_type = Enum("task_status_type", [ "pending", "task is in progress"; - "success", "task was completed successfully"; - "failure", "task has failed"; - "cancelling", "task is being cancelled"; - "cancelled", "task has been cancelled"; - ]) + "success", "task was completed successfully"; + "failure", "task has failed"; + "cancelling", "task is being cancelled"; + "cancelled", "task has been cancelled"; + ]) let task_cancel = call - - ~name:"cancel" - ~in_product_since:rel_rio - ~doc:"Request that a task be cancelled. Note that a task may fail to be cancelled and may complete or fail normally and note that, even when a task does cancel, it might take an arbitrary amount of time." - ~params:[Ref _task, "task", "The task"] - ~errs:[Api_errors.operation_not_allowed] - ~allowed_roles:_R_READ_ONLY (* POOL_OP can cancel any tasks, others can cancel only owned tasks *) - () + + ~name:"cancel" + ~in_product_since:rel_rio + ~doc:"Request that a task be cancelled. Note that a task may fail to be cancelled and may complete or fail normally and note that, even when a task does cancel, it might take an arbitrary amount of time." + ~params:[Ref _task, "task", "The task"] + ~errs:[Api_errors.operation_not_allowed] + ~allowed_roles:_R_READ_ONLY (* POOL_OP can cancel any tasks, others can cancel only owned tasks *) + () let task_create = call ~flags:[`Session] - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"create" - ~doc:"Create a new task object which must be manually destroyed." - ~params:[String, "label", "short label for the new task"; - String, "description", "longer description for the new task"] - ~result:(Ref _task, "The reference of the created task object") - ~allowed_roles:_R_READ_ONLY (* any subject can create tasks *) - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"create" + ~doc:"Create a new task object which must be manually destroyed." 
+ ~params:[String, "label", "short label for the new task"; + String, "description", "longer description for the new task"] + ~result:(Ref _task, "The reference of the created task object") + ~allowed_roles:_R_READ_ONLY (* any subject can create tasks *) + () let task_destroy = call ~flags:[`Session] - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"destroy" - ~doc:"Destroy the task object" - ~params:[Ref _task, "self", "Reference to the task object"] - ~allowed_roles:_R_READ_ONLY (* POOL_OP can destroy any tasks, others can destroy only owned tasks *) - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"destroy" + ~doc:"Destroy the task object" + ~params:[Ref _task, "self", "Reference to the task object"] + ~allowed_roles:_R_READ_ONLY (* POOL_OP can destroy any tasks, others can destroy only owned tasks *) + () (* this permission allows to destroy any task, instead of only the owned ones *) let extra_permission_task_destroy_any = "task.destroy/any" let task_allowed_operations = Enum ("task_allowed_operations", List.map operation_enum [ task_cancel; task_destroy ]) -let task = +let task = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_task ~descr:"A long-running asynchronous task" ~gen_events:true - ~doccomments:[] + ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [ task_create; task_destroy; task_cancel ] ~contents: ([ - uid _task; - namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) (); - ] @ (allowed_and_current_operations task_allowed_operations) @ [ - field ~qualifier:DynamicRO ~ty:DateTime "created" "Time task was created"; - field ~qualifier:DynamicRO ~ty:DateTime "finished" "Time task finished (i.e. succeeded or failed). 
If task-status is pending, then the value of this field has no meaning"; - field ~qualifier:DynamicRO ~ty:status_type "status" "current status of the task"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _session) "session" "the session that created the task"; - field ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" "the host on which the task is running"; - field ~qualifier:DynamicRO ~ty:Float "progress" "This field contains the estimated fraction of the task which is complete. This field should not be used to determine whether the task is complete - for this the status field of the task should be used."; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Int "externalpid" "If the task has spawned a program, the field record the PID of the process that the task is waiting on. (-1 if no waiting completion of an external program )"; - field ~in_oss_since:None ~internal_deprecated_since:rel_boston ~internal_only:true ~qualifier:DynamicRO ~ty:Int "stunnelpid" "If the task has been forwarded, this field records the pid of the stunnel process spawned to manage the forwarding connection"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "forwarded" "True if this task has been forwarded to a slave"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) "forwarded_to" "The host to which the task has been forwarded"; - field ~qualifier:DynamicRO ~ty:String "type" "if the task has completed successfully, this field contains the type of the encoded result (i.e. name of the class whose reference is in the result field). Undefined otherwise."; - field ~qualifier:DynamicRO ~ty:String "result" "if the task has completed successfully, this field contains the result value (either Void or an object reference). Undefined otherwise."; - field ~qualifier:DynamicRO ~ty:(Set String) "error_info" "if the task has failed, this field contains the set of associated error strings. 
Undefined otherwise."; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("applies_to",(_R_VM_OP));("XenCenterUUID",(_R_VM_OP));("XenCenterMeddlingActionTitle",(_R_VM_OP))]; - (* field ~ty:(Set(Ref _alert)) ~in_product_since:rel_miami ~qualifier:DynamicRO "alerts" "all alerts related to this task"; *) - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~ty:(Ref _task) "subtask_of" "Ref pointing to the task this is a substask of."; - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set (Ref _task)) "subtasks" "List pointing to all the substasks."; - field ~qualifier:DynamicRO ~in_product_since:rel_dundee ~ty:String ~default_value:(Some (VString (Sexplib.Sexp.to_string (Backtrace.(sexp_of_t empty))))) "backtrace" "Function call trace for debugging."; - ]) + uid _task; + namespace ~name:"name" ~contents:(names oss_since_303 DynamicRO) (); + ] @ (allowed_and_current_operations task_allowed_operations) @ [ + field ~qualifier:DynamicRO ~ty:DateTime "created" "Time task was created"; + field ~qualifier:DynamicRO ~ty:DateTime "finished" "Time task finished (i.e. succeeded or failed). If task-status is pending, then the value of this field has no meaning"; + field ~qualifier:DynamicRO ~ty:status_type "status" "current status of the task"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _session) "session" "the session that created the task"; + field ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" "the host on which the task is running"; + field ~qualifier:DynamicRO ~ty:Float "progress" "This field contains the estimated fraction of the task which is complete. 
This field should not be used to determine whether the task is complete - for this the status field of the task should be used."; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Int "externalpid" "If the task has spawned a program, the field record the PID of the process that the task is waiting on. (-1 if no waiting completion of an external program )"; + field ~in_oss_since:None ~internal_deprecated_since:rel_boston ~internal_only:true ~qualifier:DynamicRO ~ty:Int "stunnelpid" "If the task has been forwarded, this field records the pid of the stunnel process spawned to manage the forwarding connection"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "forwarded" "True if this task has been forwarded to a slave"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) "forwarded_to" "The host to which the task has been forwarded"; + field ~qualifier:DynamicRO ~ty:String "type" "if the task has completed successfully, this field contains the type of the encoded result (i.e. name of the class whose reference is in the result field). Undefined otherwise."; + field ~qualifier:DynamicRO ~ty:String "result" "if the task has completed successfully, this field contains the result value (either Void or an object reference). Undefined otherwise."; + field ~qualifier:DynamicRO ~ty:(Set String) "error_info" "if the task has failed, this field contains the set of associated error strings. 
Undefined otherwise."; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("applies_to",(_R_VM_OP));("XenCenterUUID",(_R_VM_OP));("XenCenterMeddlingActionTitle",(_R_VM_OP))]; + (* field ~ty:(Set(Ref _alert)) ~in_product_since:rel_miami ~qualifier:DynamicRO "alerts" "all alerts related to this task"; *) + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~ty:(Ref _task) "subtask_of" "Ref pointing to the task this is a substask of."; + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set (Ref _task)) "subtasks" "List pointing to all the substasks."; + field ~qualifier:DynamicRO ~in_product_since:rel_dundee ~ty:String ~default_value:(Some (VString (Sexplib.Sexp.to_string (Backtrace.(sexp_of_t empty))))) "backtrace" "Function call trace for debugging."; + ]) () (** Many of the objects need to record IO bandwidth *) @@ -3818,44 +3818,44 @@ let user = (* DEPRECATED in favor of subject *) Published, rel_rio, "A user of the system"; Deprecated, rel_george, "Deprecated in favor of subject"; ] - ~doccomments:[] + ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN ~messages:[] ~contents: - [ uid _user; - field ~qualifier:StaticRO "short_name" "short name (e.g. userid)"; - field "fullname" "full name"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] + [ uid _user; + field ~qualifier:StaticRO "short_name" "short name (e.g. userid)"; + field "fullname" "full name"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] () (** Guest Memory *) let guest_memory = - let field = field ~ty:Int in - [ - field "overhead" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO "Virtualization memory overhead (bytes)." 
~default_value:(Some (VInt 0L)) ~doc_tags:[Memory]; - field "target" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamically-set memory target (bytes). The value of this field indicates the current target for memory available to this VM." ~default_value:(Some (VInt 0L)) ~internal_deprecated_since:rel_midnight_ride ~doc_tags:[Memory]; - field "static_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) maximum (bytes). The value of this field at VM start time acts as a hard limit of the amount of memory a guest can use. New values only take effect on reboot." ~doc_tags:[Memory]; - field "dynamic_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic maximum (bytes)" ~doc_tags:[Memory]; - field "dynamic_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic minimum (bytes)" ~doc_tags:[Memory]; - field "static_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) mininum (bytes). The value of this field indicates the least amount of memory this VM can boot with without crashing." ~doc_tags:[Memory]; - ] + let field = field ~ty:Int in + [ + field "overhead" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO "Virtualization memory overhead (bytes)." ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory]; + field "target" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamically-set memory target (bytes). The value of this field indicates the current target for memory available to this VM." ~default_value:(Some (VInt 0L)) ~internal_deprecated_since:rel_midnight_ride ~doc_tags:[Memory]; + field "static_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) maximum (bytes). The value of this field at VM start time acts as a hard limit of the amount of memory a guest can use. New values only take effect on reboot." 
~doc_tags:[Memory]; + field "dynamic_max" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic maximum (bytes)" ~doc_tags:[Memory]; + field "dynamic_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Dynamic minimum (bytes)" ~doc_tags:[Memory]; + field "static_min" ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO "Statically-set (i.e. absolute) mininum (bytes). The value of this field indicates the least amount of memory this VM can boot with without crashing." ~doc_tags:[Memory]; + ] (** Host Memory *) let host_memory = - let field = field ~ty:Int in - [ - field ~qualifier:DynamicRO "overhead" "Virtualization memory overhead (bytes)." ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory]; - ] + let field = field ~ty:Int in + [ + field ~qualifier:DynamicRO "overhead" "Virtualization memory overhead (bytes)." ~default_value:(Some (VInt 0L)) ~doc_tags:[Memory]; + ] (** Host Metrics Memory *) -let host_metrics_memory = - let field = field ~ty:Int in - [ - field ~qualifier:DynamicRO "total" "Total host memory (bytes)" ~doc_tags:[Memory]; - field ~qualifier:DynamicRO ~internal_deprecated_since:rel_midnight_ride "free" "Free host memory (bytes)" ~doc_tags:[Memory]; - ] - -let api_version = +let host_metrics_memory = + let field = field ~ty:Int in + [ + field ~qualifier:DynamicRO "total" "Total host memory (bytes)" ~doc_tags:[Memory]; + field ~qualifier:DynamicRO ~internal_deprecated_since:rel_midnight_ride "free" "Free host memory (bytes)" ~doc_tags:[Memory]; + ] + +let api_version = let field' = field ~qualifier:DynamicRO in [ field' ~ty:Int "major" "major version number"; @@ -3864,30 +3864,30 @@ let api_version = field' ~ty:(Map(String,String)) "vendor_implementation" "details of vendor implementation"; ] -(* Management of host crash dumps. Note that this would be neater if crashes were stored in +(* Management of host crash dumps. 
Note that this would be neater if crashes were stored in VDIs like VM crashes, however the nature of a host crash dump is that the dom0 has crashed - and has no access to any fancy storage drivers or tools. Plus a host is not guaranteed to + and has no access to any fancy storage drivers or tools. Plus a host is not guaranteed to have any SRs at all. *) let host_crashdump_destroy = call - ~name:"destroy" - ~doc:"Destroy specified host crash dump, removing it from the disk." - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ Ref _host_crashdump, "self", "The host crashdump to destroy" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~doc:"Destroy specified host crash dump, removing it from the disk." + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ Ref _host_crashdump, "self", "The host crashdump to destroy" ] + ~allowed_roles:_R_POOL_OP + () let host_crashdump_upload = call - ~name:"upload" - ~doc:"Upload the specified host crash dump to a specified URL" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ Ref _host_crashdump, "self", "The host crashdump to upload"; - String, "url", "The URL to upload to"; - Map(String, String), "options", "Extra configuration operations" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"upload" + ~doc:"Upload the specified host crash dump to a specified URL" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ Ref _host_crashdump, "self", "The host crashdump to upload"; + String, "url", "The URL to upload to"; + Map(String, String), "options", "Extra configuration operations" ] + ~allowed_roles:_R_POOL_OP + () let host_crashdump = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_crashdump ~gen_events:true @@ -3896,222 +3896,222 @@ let host_crashdump = ~messages_default_allowed_roles:_R_POOL_OP ~messages: [host_crashdump_destroy; host_crashdump_upload] ~contents: - [ uid 
~in_oss_since:None _host_crashdump; - field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" "Host the crashdump relates to"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime "timestamp" "Time the crash happened"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the crashdump"; - field ~qualifier:StaticRO ~ty:String ~in_oss_since:None ~internal_only:true "filename" "filename of crash dir"; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] + [ uid ~in_oss_since:None _host_crashdump; + field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" "Host the crashdump relates to"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime "timestamp" "Time the crash happened"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the crashdump"; + field ~qualifier:StaticRO ~ty:String ~in_oss_since:None ~internal_only:true "filename" "filename of crash dir"; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] () (* New Miami pool patching mechanism *) let pool_patch_after_apply_guidance = - Enum ("after_apply_guidance", + Enum ("after_apply_guidance", [ "restartHVM", "This patch requires HVM guests to be restarted once applied."; "restartPV", "This patch requires PV guests to be restarted once applied."; - "restartHost", "This patch requires the host to be restarted once applied."; + "restartHost", "This patch requires the host to be restarted once applied."; "restartXAPI", "This patch requires XAPI to be restarted once applied."; ]) let pool_patch_apply = call - ~name:"apply" - ~doc:"Apply the selected patch to a host and return its output" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[ Ref _pool_patch, "self", "The patch to apply"; Ref _host, "host", "The host to apply the patch too" ] - 
~result:(String, "the output of the patch application process") - ~allowed_roles:_R_POOL_OP - () + ~name:"apply" + ~doc:"Apply the selected patch to a host and return its output" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[ Ref _pool_patch, "self", "The patch to apply"; Ref _host, "host", "The host to apply the patch too" ] + ~result:(String, "the output of the patch application process") + ~allowed_roles:_R_POOL_OP + () let pool_patch_precheck = call - ~name:"precheck" - ~doc:"Execute the precheck stage of the selected patch on a host and return its output" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[ Ref _pool_patch, "self", "The patch whose prechecks will be run"; Ref _host, "host", "The host to run the prechecks on" ] - ~result:(String, "the output of the patch prechecks") - ~allowed_roles:_R_POOL_OP - () + ~name:"precheck" + ~doc:"Execute the precheck stage of the selected patch on a host and return its output" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[ Ref _pool_patch, "self", "The patch whose prechecks will be run"; Ref _host, "host", "The host to run the prechecks on" ] + ~result:(String, "the output of the patch prechecks") + ~allowed_roles:_R_POOL_OP + () let pool_patch_clean = call - ~name:"clean" - ~doc:"Removes the patch's files from the server" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[ Ref _pool_patch, "self", "The patch to clean up" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"clean" + ~doc:"Removes the patch's files from the server" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[ Ref _pool_patch, "self", "The patch to clean up" ] + ~allowed_roles:_R_POOL_OP + () let pool_patch_clean_on_host = call - ~name:"clean_on_host" - ~doc:"Removes the patch's files from the specified host" - ~in_oss_since:None - ~in_product_since:rel_tampa - ~params:[ Ref _pool_patch, "self", "The patch to clean up"; Ref _host, "host", "The host on which to clean the patch" ] - 
~allowed_roles:_R_POOL_OP - () + ~name:"clean_on_host" + ~doc:"Removes the patch's files from the specified host" + ~in_oss_since:None + ~in_product_since:rel_tampa + ~params:[ Ref _pool_patch, "self", "The patch to clean up"; Ref _host, "host", "The host on which to clean the patch" ] + ~allowed_roles:_R_POOL_OP + () let pool_patch_pool_clean = call - ~name:"pool_clean" - ~doc:"Removes the patch's files from all hosts in the pool, but does not remove the database entries" - ~in_oss_since:None - ~in_product_since:rel_tampa - ~params:[ Ref _pool_patch, "self", "The patch to clean up" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"pool_clean" + ~doc:"Removes the patch's files from all hosts in the pool, but does not remove the database entries" + ~in_oss_since:None + ~in_product_since:rel_tampa + ~params:[ Ref _pool_patch, "self", "The patch to clean up" ] + ~allowed_roles:_R_POOL_OP + () let pool_patch_destroy = call - ~name:"destroy" - ~doc:"Removes the patch's files from all hosts in the pool, and removes the database entries. Only works on unapplied patches." - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[ Ref _pool_patch, "self", "The patch to destroy" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~doc:"Removes the patch's files from all hosts in the pool, and removes the database entries. Only works on unapplied patches." 
+ ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[ Ref _pool_patch, "self", "The patch to destroy" ] + ~allowed_roles:_R_POOL_OP + () let pool_patch_pool_apply = call - ~name:"pool_apply" - ~doc:"Apply the selected patch to all hosts in the pool and return a map of host_ref -> patch output" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[ Ref _pool_patch, "self", "The patch to apply"] - ~allowed_roles:_R_POOL_OP - () + ~name:"pool_apply" + ~doc:"Apply the selected patch to all hosts in the pool and return a map of host_ref -> patch output" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[ Ref _pool_patch, "self", "The patch to apply"] + ~allowed_roles:_R_POOL_OP + () let pool_patch = - create_obj ~in_db:true - ~in_product_since:rel_miami - ~in_oss_since:None + create_obj ~in_db:true + ~in_product_since:rel_miami + ~in_oss_since:None ~internal_deprecated_since:None - ~persist:PersistEverything - ~gen_constructor_destructor:false + ~persist:PersistEverything + ~gen_constructor_destructor:false ~gen_events:true - ~name:_pool_patch + ~name:_pool_patch ~descr:"Pool-wide patches" ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[pool_patch_apply; pool_patch_pool_apply; pool_patch_precheck; pool_patch_clean; pool_patch_pool_clean; pool_patch_destroy; pool_patch_clean_on_host] ~contents: - [ uid ~in_oss_since:None _pool_patch; - namespace ~name:"name" ~contents:(names None StaticRO) (); - field ~in_product_since:rel_miami ~default_value:(Some (VString "")) ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Patch version number"; - field ~in_product_since:rel_miami ~default_value:(Some (VString "")) ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch"; - field ~in_product_since:rel_miami ~default_value:(Some (VInt Int64.zero)) ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch"; - field ~in_product_since:rel_miami 
~default_value:(Some (VBool false)) ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "pool_applied" "This patch should be applied across the entire pool"; - field ~in_product_since:rel_miami ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "host_patches" "This hosts this patch is applied to."; - field ~in_product_since:rel_miami ~default_value:(Some (VSet [])) ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set pool_patch_after_apply_guidance) "after_apply_guidance" "What the client should do after this patch has been applied."; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - () + [ uid ~in_oss_since:None _pool_patch; + namespace ~name:"name" ~contents:(names None StaticRO) (); + field ~in_product_since:rel_miami ~default_value:(Some (VString "")) ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Patch version number"; + field ~in_product_since:rel_miami ~default_value:(Some (VString "")) ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch"; + field ~in_product_since:rel_miami ~default_value:(Some (VInt Int64.zero)) ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch"; + field ~in_product_since:rel_miami ~default_value:(Some (VBool false)) ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "pool_applied" "This patch should be applied across the entire pool"; + field ~in_product_since:rel_miami ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "host_patches" "This hosts this patch is applied to."; + field ~in_product_since:rel_miami ~default_value:(Some (VSet [])) ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set pool_patch_after_apply_guidance) "after_apply_guidance" "What the client should do after this patch has been applied."; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map(String, 
String)) "other_config" "additional configuration"; + ] + () (* Management of host patches. Just like the crash dumps it would be marginally neater if the patches were stored as VDIs. *) let host_patch_destroy = call - ~name:"destroy" - ~doc:"Destroy the specified host patch, removing it from the disk. This does NOT reverse the patch" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ Ref _host_patch, "self", "The patch to destroy" ] - ~internal_deprecated_since: rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~doc:"Destroy the specified host patch, removing it from the disk. This does NOT reverse the patch" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ Ref _host_patch, "self", "The patch to destroy" ] + ~internal_deprecated_since: rel_miami + ~allowed_roles:_R_POOL_OP + () let host_patch_apply = call - ~name:"apply" - ~doc:"Apply the selected patch and return its output" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ Ref _host_patch, "self", "The patch to apply" ] - ~result:(String, "the output of the patch application process") - ~internal_deprecated_since: rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"apply" + ~doc:"Apply the selected patch and return its output" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ Ref _host_patch, "self", "The patch to apply" ] + ~result:(String, "the output of the patch application process") + ~internal_deprecated_since: rel_miami + ~allowed_roles:_R_POOL_OP + () -let host_patch = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_patch ~gen_events:true +let host_patch = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_patch ~gen_events:true ~descr:"Represents a patch stored on a server" ~doccomments:[] 
~messages_default_allowed_roles:_R_POOL_OP ~messages: [host_patch_destroy; host_patch_apply] ~contents: - [ uid ~in_oss_since:None _host_patch; - namespace ~name:"name" ~contents:(names None StaticRO) (); - field ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Patch version number"; - field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" "Host the patch relates to"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "applied" "True if the patch has been applied"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime "timestamp_applied" "Time the patch was applied"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch"; - field ~in_product_since:rel_miami ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _pool_patch) ~default_value:(Some (VRef "")) "pool_patch" "The patch applied"; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] + [ uid ~in_oss_since:None _host_patch; + namespace ~name:"name" ~contents:(names None StaticRO) (); + field ~in_oss_since:None ~qualifier:StaticRO ~ty:String "version" "Patch version number"; + field ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _host) "host" "Host the patch relates to"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:String "filename" "Filename of the patch"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Bool "applied" "True if the patch has been applied"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime "timestamp_applied" "Time the patch was applied"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:Int "size" "Size of the patch"; + field ~in_product_since:rel_miami ~in_oss_since:None ~qualifier:StaticRO ~ty:(Ref _pool_patch) ~default_value:(Some (VRef "")) "pool_patch" "The patch 
applied"; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~in_oss_since:None ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] () let host_bugreport_upload = call - ~name:"bugreport_upload" - ~doc:"Run xen-bugtool --yestoall and upload the output to support" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ Ref _host, "host", "The host on which to run xen-bugtool"; - String, "url", "The URL to upload to"; - Map(String, String), "options", "Extra configuration operations" ] - ~allowed_roles:_R_POOL_OP - () + ~name:"bugreport_upload" + ~doc:"Run xen-bugtool --yestoall and upload the output to support" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ Ref _host, "host", "The host on which to run xen-bugtool"; + String, "url", "The URL to upload to"; + Map(String, String), "options", "Extra configuration operations" ] + ~allowed_roles:_R_POOL_OP + () let host_list_methods = call - ~name:"list_methods" - ~in_product_since:rel_rio - ~flags: [`Session] - ~doc:"List all supported methods" - ~params:[] - ~result:(Set(String), "The name of every supported method.") - ~allowed_roles:_R_READ_ONLY - () + ~name:"list_methods" + ~in_product_since:rel_rio + ~flags: [`Session] + ~doc:"List all supported methods" + ~params:[] + ~result:(Set(String), "The name of every supported method.") + ~allowed_roles:_R_READ_ONLY + () let host_license_apply = call - ~name:"license_apply" - ~in_oss_since:None - ~lifecycle:[ - Published, rel_rio, "Apply a new license to a host"; - Removed, rel_clearwater, "Free licenses no longer handled by xapi"; - ] - ~params:[Ref _host, "host", "The host to upload the license to"; - String, "contents", "The contents of the license file, base64 encoded"] - ~doc:"Apply a new license to a host" - ~errs: [Api_errors.license_processing_error] - ~allowed_roles:_R_POOL_OP - () + ~name:"license_apply" + ~in_oss_since:None + ~lifecycle:[ + Published, rel_rio, "Apply a new license to a host"; + Removed, 
rel_clearwater, "Free licenses no longer handled by xapi"; + ] + ~params:[Ref _host, "host", "The host to upload the license to"; + String, "contents", "The contents of the license file, base64 encoded"] + ~doc:"Apply a new license to a host" + ~errs: [Api_errors.license_processing_error] + ~allowed_roles:_R_POOL_OP + () let host_license_add = call - ~name:"license_add" - ~in_oss_since:None - ~lifecycle:[ - Published, rel_indigo, "Functionality for parsing license files re-added"; - ] - ~params:[Ref _host, "host", "The host to upload the license to"; - String, "contents", "The contents of the license file, base64 encoded"] - ~doc:"Apply a new license to a host" - ~errs: [Api_errors.license_processing_error] - ~allowed_roles:_R_POOL_OP - () + ~name:"license_add" + ~in_oss_since:None + ~lifecycle:[ + Published, rel_indigo, "Functionality for parsing license files re-added"; + ] + ~params:[Ref _host, "host", "The host to upload the license to"; + String, "contents", "The contents of the license file, base64 encoded"] + ~doc:"Apply a new license to a host" + ~errs: [Api_errors.license_processing_error] + ~allowed_roles:_R_POOL_OP + () let host_license_remove = call - ~name:"license_remove" - ~in_oss_since:None - ~lifecycle:[ - Published, rel_indigo, ""; - ] - ~params:[ - Ref _host, "host", "The host from which any license will be removed" - ] - ~doc:"Remove any license file from the specified host, and switch that host to the unlicensed edition" - ~allowed_roles:_R_POOL_OP - () + ~name:"license_remove" + ~in_oss_since:None + ~lifecycle:[ + Published, rel_indigo, ""; + ] + ~params:[ + Ref _host, "host", "The host from which any license will be removed" + ] + ~doc:"Remove any license file from the specified host, and switch that host to the unlicensed edition" + ~allowed_roles:_R_POOL_OP + () let host_create_params = [ @@ -4132,671 +4132,671 @@ let host_create_params = ] let host_create = call - ~name:"create" - ~in_oss_since:None - ~in_product_since:rel_rio - 
~versioned_params:host_create_params - ~doc:"Create a new host record" - ~result:(Ref _host, "Reference to the newly created host object.") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP + ~name:"create" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:host_create_params + ~doc:"Create a new host record" + ~result:(Ref _host, "Reference to the newly created host object.") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP () let host_destroy = call - ~name:"destroy" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Destroy specified host record in database" - ~params:[(Ref _host, "self", "The host record to remove")] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Destroy specified host record in database" + ~params:[(Ref _host, "self", "The host record to remove")] + ~allowed_roles:_R_POOL_OP + () let host_get_system_status_capabilities = call ~flags:[`Session] - ~name:"get_system_status_capabilities" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _host, "host", "The host to interrogate"] - ~doc:"" - ~result:(String, "An XML fragment containing the system status capabilities.") - ~allowed_roles:_R_READ_ONLY + ~name:"get_system_status_capabilities" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _host, "host", "The host to interrogate"] + ~doc:"" + ~result:(String, "An XML fragment containing the system status capabilities.") + ~allowed_roles:_R_READ_ONLY () let host_set_hostname_live = call ~flags:[`Session] - ~name:"set_hostname_live" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _host, "host", "The host whose host name to set"; - String, "hostname", "The new host name"] - ~errs:[Api_errors.host_name_invalid] - ~doc:"Sets the host name to the specified string. Both the API and lower-level system hostname are changed immediately." 
- ~allowed_roles:_R_POOL_OP + ~name:"set_hostname_live" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _host, "host", "The host whose host name to set"; + String, "hostname", "The new host name"] + ~errs:[Api_errors.host_name_invalid] + ~doc:"Sets the host name to the specified string. Both the API and lower-level system hostname are changed immediately." + ~allowed_roles:_R_POOL_OP () let host_tickle_heartbeat = call ~flags:[`Session] - ~name:"tickle_heartbeat" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _host, "host", "The host calling the function, and whose heartbeat to tickle"; - Map(String, String), "stuff", "Anything else we want to let the master know"; - ] - ~result:(Map(String, String), "Anything the master wants to tell the slave") - ~doc:"Needs to be called every 30 seconds for the master to believe the host is alive" - ~pool_internal:true - ~hide_from_docs:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"tickle_heartbeat" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _host, "host", "The host calling the function, and whose heartbeat to tickle"; + Map(String, String), "stuff", "Anything else we want to let the master know"; + ] + ~result:(Map(String, String), "Anything the master wants to tell the slave") + ~doc:"Needs to be called every 30 seconds for the master to believe the host is alive" + ~pool_internal:true + ~hide_from_docs:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_sync_data = call ~flags:[`Session] - ~name:"sync_data" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _host, "host", "The host to whom the data should be sent"] - ~doc:"This causes the synchronisation of the non-database data (messages, RRDs and so on) stored on the master to be synchronised with the host" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"sync_data" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _host, "host", "The host to whom the data should be 
sent"] + ~doc:"This causes the synchronisation of the non-database data (messages, RRDs and so on) stored on the master to be synchronised with the host" + ~allowed_roles:_R_POOL_ADMIN + () let host_backup_rrds = call ~flags:[`Session] - ~name:"backup_rrds" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _host, "host", "Schedule a backup of the RRDs of this host"; - Float, "delay", "Delay in seconds from when the call is received to perform the backup"] - ~doc:"This causes the RRDs to be backed up to the master" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"backup_rrds" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _host, "host", "Schedule a backup of the RRDs of this host"; + Float, "delay", "Delay in seconds from when the call is received to perform the backup"] + ~doc:"This causes the RRDs to be backed up to the master" + ~allowed_roles:_R_POOL_ADMIN + () let host_get_servertime = call ~flags:[`Session] - ~name:"get_servertime" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _host, "host", "The host whose clock should be queried"] - ~doc:"This call queries the host's clock for the current time" - ~result:(DateTime, "The current time") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_servertime" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _host, "host", "The host whose clock should be queried"] + ~doc:"This call queries the host's clock for the current time" + ~result:(DateTime, "The current time") + ~allowed_roles:_R_READ_ONLY + () let host_get_server_localtime = call ~flags:[`Session] - ~name:"get_server_localtime" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _host, "host", "The host whose clock should be queried"] - ~doc:"This call queries the host's clock for the current time in the host's local timezone" - ~result:(DateTime, "The current local time") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_server_localtime" + ~in_oss_since:None + 
~in_product_since:rel_cowley + ~params:[Ref _host, "host", "The host whose clock should be queried"] + ~doc:"This call queries the host's clock for the current time in the host's local timezone" + ~result:(DateTime, "The current local time") + ~allowed_roles:_R_READ_ONLY + () let host_emergency_ha_disable = call ~flags:[`Session] - ~name:"emergency_ha_disable" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[] - ~doc:"This call disables HA on the local host. This should only be used with extreme care." - ~allowed_roles:_R_POOL_OP - () + ~name:"emergency_ha_disable" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[] + ~doc:"This call disables HA on the local host. This should only be used with extreme care." + ~allowed_roles:_R_POOL_OP + () let host_certificate_install = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"certificate_install" - ~doc:"Install an SSL certificate to this host." - ~params:[Ref _host, "host", "The host"; - String, "name", "A name to give the certificate"; - String, "cert", "The certificate"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"certificate_install" + ~doc:"Install an SSL certificate to this host." + ~params:[Ref _host, "host", "The host"; + String, "name", "A name to give the certificate"; + String, "cert", "The certificate"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_certificate_uninstall = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"certificate_uninstall" - ~doc:"Remove an SSL certificate from this host." 
- ~params:[Ref _host, "host", "The host"; - String, "name", "The certificate name"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"certificate_uninstall" + ~doc:"Remove an SSL certificate from this host." + ~params:[Ref _host, "host", "The host"; + String, "name", "The certificate name"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_certificate_list = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"certificate_list" - ~doc:"List all installed SSL certificates." - ~params:[Ref _host, "host", "The host"] - ~result:(Set(String),"All installed certificates") - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"certificate_list" + ~doc:"List all installed SSL certificates." + ~params:[Ref _host, "host", "The host"] + ~result:(Set(String),"All installed certificates") + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_crl_install = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"crl_install" - ~doc:"Install an SSL certificate revocation list to this host." - ~params:[Ref _host, "host", "The host"; - String, "name", "A name to give the CRL"; - String, "crl", "The CRL"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"crl_install" + ~doc:"Install an SSL certificate revocation list to this host." 
+ ~params:[Ref _host, "host", "The host"; + String, "name", "A name to give the CRL"; + String, "crl", "The CRL"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_crl_uninstall = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"crl_uninstall" - ~doc:"Remove an SSL certificate revocation list from this host." - ~params:[Ref _host, "host", "The host"; - String, "name", "The CRL name"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"crl_uninstall" + ~doc:"Remove an SSL certificate revocation list from this host." + ~params:[Ref _host, "host", "The host"; + String, "name", "The CRL name"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_crl_list = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"crl_list" - ~doc:"List all installed SSL certificate revocation lists." - ~params:[Ref _host, "host", "The host"] - ~result:(Set(String),"All installed CRLs") - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"crl_list" + ~doc:"List all installed SSL certificate revocation lists." + ~params:[Ref _host, "host", "The host"] + ~result:(Set(String),"All installed CRLs") + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_certificate_sync = call - ~in_oss_since:None - ~in_product_since:rel_george - ~pool_internal:true - ~hide_from_docs:true - ~name:"certificate_sync" - ~doc:"Resync installed SSL certificates and CRLs." - ~params:[Ref _host, "host", "The host"] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~in_oss_since:None + ~in_product_since:rel_george + ~pool_internal:true + ~hide_from_docs:true + ~name:"certificate_sync" + ~doc:"Resync installed SSL certificates and CRLs." 
+ ~params:[Ref _host, "host", "The host"] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_get_server_certificate = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"get_server_certificate" - ~doc:"Get the installed server SSL certificate." - ~params:[Ref _host, "host", "The host"] - ~result:(String,"The installed server SSL certificate, in PEM form.") - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"get_server_certificate" + ~doc:"Get the installed server SSL certificate." + ~params:[Ref _host, "host", "The host"] + ~result:(String,"The installed server SSL certificate, in PEM form.") + ~allowed_roles:_R_POOL_OP + () let host_display = - Enum ("host_display", [ - "enabled", "This host is outputting its console to a physical display device"; - "disable_on_reboot", "The host will stop outputting its console to a physical display device on next boot"; - "disabled", "This host is not outputting its console to a physical display device"; - "enable_on_reboot", "The host will start outputting its console to a physical display device on next boot"; - ]) + Enum ("host_display", [ + "enabled", "This host is outputting its console to a physical display device"; + "disable_on_reboot", "The host will stop outputting its console to a physical display device on next boot"; + "disabled", "This host is not outputting its console to a physical display device"; + "enable_on_reboot", "The host will start outputting its console to a physical display device on next boot"; + ]) let host_operations = - Enum ("host_allowed_operations", - [ "provision", "Indicates this host is able to provision another VM"; - "evacuate", "Indicates this host is evacuating"; - "shutdown", "Indicates this host is in the process of shutting itself down"; - "reboot", "Indicates this host is in the process of rebooting"; - "power_on", "Indicates this host is in the process of being powered on"; - "vm_start", "This host is starting a VM"; - 
"vm_resume", "This host is resuming a VM"; - "vm_migrate", "This host is the migration target of a VM"; - ]) + Enum ("host_allowed_operations", + [ "provision", "Indicates this host is able to provision another VM"; + "evacuate", "Indicates this host is evacuating"; + "shutdown", "Indicates this host is in the process of shutting itself down"; + "reboot", "Indicates this host is in the process of rebooting"; + "power_on", "Indicates this host is in the process of being powered on"; + "vm_start", "This host is starting a VM"; + "vm_resume", "This host is resuming a VM"; + "vm_migrate", "This host is the migration target of a VM"; + ]) let host_enable_external_auth = call ~flags:[`Session] - ~name:"enable_external_auth" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - Ref _host, "host", "The host whose external authentication should be enabled"; - Map (String,String), "config", "A list of key-values containing the configuration data" ; - String, "service_name", "The name of the service" ; - String, "auth_type", "The type of authentication (e.g. AD for Active Directory)" - ] - ~doc:"This call enables external authentication on a host" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"enable_external_auth" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[ + Ref _host, "host", "The host whose external authentication should be enabled"; + Map (String,String), "config", "A list of key-values containing the configuration data" ; + String, "service_name", "The name of the service" ; + String, "auth_type", "The type of authentication (e.g. 
AD for Active Directory)" + ] + ~doc:"This call enables external authentication on a host" + ~allowed_roles:_R_POOL_ADMIN + () let host_disable_external_auth = call ~flags:[`Session] - ~name:"disable_external_auth" - ~in_oss_since:None - ~in_product_since:rel_george - ~versioned_params:[ - {param_type=Ref _host; param_name="host"; param_doc="The host whose external authentication should be disabled"; param_release=george_release; param_default=None}; - {param_type=Map (String, String); param_name="config"; param_doc="Optional parameters as a list of key-values containing the configuration data"; param_release=george_release; param_default=Some (VMap [])} - ] - ~doc:"This call disables external authentication on the local host" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"disable_external_auth" + ~in_oss_since:None + ~in_product_since:rel_george + ~versioned_params:[ + {param_type=Ref _host; param_name="host"; param_doc="The host whose external authentication should be disabled"; param_release=george_release; param_default=None}; + {param_type=Map (String, String); param_name="config"; param_doc="Optional parameters as a list of key-values containing the configuration data"; param_release=george_release; param_default=Some (VMap [])} + ] + ~doc:"This call disables external authentication on the local host" + ~allowed_roles:_R_POOL_ADMIN + () let host_set_license_params = call - ~name:"set_license_params" - ~in_product_since:rel_orlando (* actually update 3 aka floodgate *) - ~doc:"Set the new license details in the database, trigger a recomputation of the pool SKU" - ~params:[ - Ref _host, "self", "The host"; - Map(String, String), "value", "The license_params" - ] - ~hide_from_docs:true - ~pool_internal:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"set_license_params" + ~in_product_since:rel_orlando (* actually update 3 aka floodgate *) + ~doc:"Set the new license details in the database, trigger a recomputation of the pool SKU" + ~params:[ + Ref _host, 
"self", "The host"; + Map(String, String), "value", "The license_params" + ] + ~hide_from_docs:true + ~pool_internal:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let host_apply_edition = call ~flags:[`Session] - ~name:"apply_edition" - ~in_product_since:rel_midnight_ride - ~doc:"Change to another edition, or reactivate the current edition after a license has expired. This may be subject to the successful checkout of an appropriate license." - ~versioned_params:[ - {param_type=Ref _host; param_name="host"; param_doc="The host"; param_release=midnight_ride_release; param_default=None}; - {param_type=String; param_name="edition"; param_doc="The requested edition"; param_release=midnight_ride_release; param_default=None}; - {param_type=Bool; param_name="force"; param_doc="Update the license params even if the apply call fails"; param_release=clearwater_release; param_default=Some (VBool false)}; - ] - ~allowed_roles:_R_POOL_OP - () - + ~name:"apply_edition" + ~in_product_since:rel_midnight_ride + ~doc:"Change to another edition, or reactivate the current edition after a license has expired. This may be subject to the successful checkout of an appropriate license." 
+ ~versioned_params:[ + {param_type=Ref _host; param_name="host"; param_doc="The host"; param_release=midnight_ride_release; param_default=None}; + {param_type=String; param_name="edition"; param_doc="The requested edition"; param_release=midnight_ride_release; param_default=None}; + {param_type=Bool; param_name="force"; param_doc="Update the license params even if the apply call fails"; param_release=clearwater_release; param_default=Some (VBool false)}; + ] + ~allowed_roles:_R_POOL_OP + () + let host_set_power_on_mode = call - ~name:"set_power_on_mode" - ~in_product_since:rel_midnight_ride - ~doc:"Set the power-on-mode, host, user and password " - ~params:[ - Ref _host, "self", "The host"; - String, "power_on_mode", "power-on-mode can be empty,iLO,wake-on-lan, DRAC or other"; - Map(String, String), "power_on_config", "Power on config"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_power_on_mode" + ~in_product_since:rel_midnight_ride + ~doc:"Set the power-on-mode, host, user and password " + ~params:[ + Ref _host, "self", "The host"; + String, "power_on_mode", "power-on-mode can be empty,iLO,wake-on-lan, DRAC or other"; + Map(String, String), "power_on_config", "Power on config"; + ] + ~allowed_roles:_R_POOL_OP + () let host_set_ssl_legacy = call - ~name:"set_ssl_legacy" - ~lifecycle:[Published, rel_dundee, ""] - ~doc:"Enable/disable SSLv3 for interoperability with older versions of XenServer. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. XenAPI login sessions will remain valid." - ~params:[ - Ref _host, "self", "The host"; - Bool, "value", "True to allow SSLv3 and ciphersuites as used in old XenServer versions"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_ssl_legacy" + ~lifecycle:[Published, rel_dundee, ""] + ~doc:"Enable/disable SSLv3 for interoperability with older versions of XenServer. 
When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. XenAPI login sessions will remain valid." + ~params:[ + Ref _host, "self", "The host"; + Bool, "value", "True to allow SSLv3 and ciphersuites as used in old XenServer versions"; + ] + ~allowed_roles:_R_POOL_OP + () let host_set_cpu_features = call ~flags:[`Session] - ~name:"set_cpu_features" - ~in_product_since:rel_midnight_ride - ~doc:"Set the CPU features to be used after a reboot, if the given features string is valid." - ~params:[ - Ref _host, "host", "The host"; - String, "features", "The features string (32 hexadecimal digits)" - ] - ~allowed_roles:_R_POOL_OP - ~lifecycle:[Published, rel_midnight_ride, ""; Removed, rel_dundee, "Manual CPU feature setting was removed"] - () - + ~name:"set_cpu_features" + ~in_product_since:rel_midnight_ride + ~doc:"Set the CPU features to be used after a reboot, if the given features string is valid." + ~params:[ + Ref _host, "host", "The host"; + String, "features", "The features string (32 hexadecimal digits)" + ] + ~allowed_roles:_R_POOL_OP + ~lifecycle:[Published, rel_midnight_ride, ""; Removed, rel_dundee, "Manual CPU feature setting was removed"] + () + let host_reset_cpu_features = call ~flags:[`Session] - ~name:"reset_cpu_features" - ~in_product_since:rel_midnight_ride - ~doc:"Remove the feature mask, such that after a reboot all features of the CPU are enabled." - ~params:[ - Ref _host, "host", "The host" - ] - ~allowed_roles:_R_POOL_OP - ~lifecycle:[Published, rel_midnight_ride, ""; Removed, rel_dundee, "Manual CPU feature setting was removed"] - () + ~name:"reset_cpu_features" + ~in_product_since:rel_midnight_ride + ~doc:"Remove the feature mask, such that after a reboot all features of the CPU are enabled." 
+ ~params:[ + Ref _host, "host", "The host" + ] + ~allowed_roles:_R_POOL_OP + ~lifecycle:[Published, rel_midnight_ride, ""; Removed, rel_dundee, "Manual CPU feature setting was removed"] + () let host_reset_networking = call - ~name:"reset_networking" - ~lifecycle:[] - ~doc:"Purge all network-related metadata associated with the given host." - ~params:[Ref _host, "host", "The Host to modify"] - ~allowed_roles:_R_POOL_OP - ~hide_from_docs:true - () - + ~name:"reset_networking" + ~lifecycle:[] + ~doc:"Purge all network-related metadata associated with the given host." + ~params:[Ref _host, "host", "The Host to modify"] + ~allowed_roles:_R_POOL_OP + ~hide_from_docs:true + () + let host_enable_local_storage_caching = call ~flags:[`Session] - ~name:"enable_local_storage_caching" - ~in_product_since:rel_cowley - ~doc:"Enable the use of a local SR for caching purposes" - ~params:[ - Ref _host, "host", "The host"; - Ref _sr, "sr", "The SR to use as a local cache" - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"enable_local_storage_caching" + ~in_product_since:rel_cowley + ~doc:"Enable the use of a local SR for caching purposes" + ~params:[ + Ref _host, "host", "The host"; + Ref _sr, "sr", "The SR to use as a local cache" + ] + ~allowed_roles:_R_POOL_OP + () let host_disable_local_storage_caching = call ~flags:[`Session] - ~name:"disable_local_storage_caching" - ~in_product_since:rel_cowley - ~doc:"Disable the use of a local SR for caching purposes" - ~params:[ - Ref _host, "host", "The host" - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"disable_local_storage_caching" + ~in_product_since:rel_cowley + ~doc:"Disable the use of a local SR for caching purposes" + ~params:[ + Ref _host, "host", "The host" + ] + ~allowed_roles:_R_POOL_OP + () let host_get_sm_diagnostics = call ~flags:[`Session] - ~name:"get_sm_diagnostics" - ~in_product_since:rel_boston - ~doc:"Return live SM diagnostics" - ~params:[ - Ref _host, "host", "The host" - ] - ~result:(String, "Printable diagnostic 
data") - ~allowed_roles:_R_POOL_OP - ~hide_from_docs:true - () + ~name:"get_sm_diagnostics" + ~in_product_since:rel_boston + ~doc:"Return live SM diagnostics" + ~params:[ + Ref _host, "host", "The host" + ] + ~result:(String, "Printable diagnostic data") + ~allowed_roles:_R_POOL_OP + ~hide_from_docs:true + () let host_get_thread_diagnostics = call ~flags:[`Session] - ~name:"get_thread_diagnostics" - ~in_product_since:rel_boston - ~doc:"Return live thread diagnostics" - ~params:[ - Ref _host, "host", "The host" - ] - ~result:(String, "Printable diagnostic data") - ~allowed_roles:_R_POOL_OP - ~hide_from_docs:true - () + ~name:"get_thread_diagnostics" + ~in_product_since:rel_boston + ~doc:"Return live thread diagnostics" + ~params:[ + Ref _host, "host", "The host" + ] + ~result:(String, "Printable diagnostic data") + ~allowed_roles:_R_POOL_OP + ~hide_from_docs:true + () let host_sm_dp_destroy = call ~flags:[`Session] - ~name:"sm_dp_destroy" - ~in_product_since:rel_boston - ~doc:"Attempt to cleanup and destroy a named SM datapath" - ~params:[ - Ref _host, "host", "The host"; - String, "dp", "The datapath"; - Bool, "allow_leak", "If true, all records of the datapath will be removed even if the datapath could not be destroyed cleanly."; - ] - ~allowed_roles:_R_POOL_OP - ~hide_from_docs:true - () + ~name:"sm_dp_destroy" + ~in_product_since:rel_boston + ~doc:"Attempt to cleanup and destroy a named SM datapath" + ~params:[ + Ref _host, "host", "The host"; + String, "dp", "The datapath"; + Bool, "allow_leak", "If true, all records of the datapath will be removed even if the datapath could not be destroyed cleanly."; + ] + ~allowed_roles:_R_POOL_OP + ~hide_from_docs:true + () let host_sync_vlans = call ~flags:[`Session] - ~name:"sync_vlans" - ~lifecycle:[] - ~doc:"Synchronise VLANs on given host with the master's VLANs" - ~params:[ - Ref _host, "host", "The host"; - ] - ~hide_from_docs:true - ~pool_internal:true - ~allowed_roles:_R_POOL_OP - () + ~name:"sync_vlans" + 
~lifecycle:[] + ~doc:"Synchronise VLANs on given host with the master's VLANs" + ~params:[ + Ref _host, "host", "The host"; + ] + ~hide_from_docs:true + ~pool_internal:true + ~allowed_roles:_R_POOL_OP + () let host_sync_tunnels = call ~flags:[`Session] - ~name:"sync_tunnels" - ~lifecycle:[] - ~doc:"Synchronise tunnels on given host with the master's tunnels" - ~params:[ - Ref _host, "host", "The host"; - ] - ~hide_from_docs:true - ~pool_internal:true - ~allowed_roles:_R_POOL_OP - () + ~name:"sync_tunnels" + ~lifecycle:[] + ~doc:"Synchronise tunnels on given host with the master's tunnels" + ~params:[ + Ref _host, "host", "The host"; + ] + ~hide_from_docs:true + ~pool_internal:true + ~allowed_roles:_R_POOL_OP + () let host_sync_pif_currently_attached = call ~flags:[`Session] - ~name:"sync_pif_currently_attached" - ~lifecycle:[] - ~doc:"Synchronise tunnels on given host with the master's tunnels" - ~params:[ - Ref _host, "host", "The host"; - Set String, "bridges", "A list of bridges that are currently up"; - ] - ~hide_from_docs:true - ~pool_internal:true - ~allowed_roles:_R_POOL_OP - () + ~name:"sync_pif_currently_attached" + ~lifecycle:[] + ~doc:"Synchronise tunnels on given host with the master's tunnels" + ~params:[ + Ref _host, "host", "The host"; + Set String, "bridges", "A list of bridges that are currently up"; + ] + ~hide_from_docs:true + ~pool_internal:true + ~allowed_roles:_R_POOL_OP + () let host_enable_display = call - ~name:"enable_display" - ~lifecycle:[Published, rel_cream, ""] - ~doc:"Enable console output to the physical display device next time this host boots" - ~params:[ - Ref _host, "host", "The host"; - ] - ~result:(host_display, "This host's physical display usage") - ~allowed_roles:_R_POOL_OP - () + ~name:"enable_display" + ~lifecycle:[Published, rel_cream, ""] + ~doc:"Enable console output to the physical display device next time this host boots" + ~params:[ + Ref _host, "host", "The host"; + ] + ~result:(host_display, "This host's physical 
display usage") + ~allowed_roles:_R_POOL_OP + () let host_disable_display = call - ~name:"disable_display" - ~lifecycle:[Published, rel_cream, ""] - ~doc:"Disable console output to the physical display device next time this host boots" - ~params:[ - Ref _host, "host", "The host"; - ] - ~result:(host_display, "This host's physical display usage") - ~allowed_roles:_R_POOL_OP - () + ~name:"disable_display" + ~lifecycle:[Published, rel_cream, ""] + ~doc:"Disable console output to the physical display device next time this host boots" + ~params:[ + Ref _host, "host", "The host"; + ] + ~result:(host_display, "This host's physical display usage") + ~allowed_roles:_R_POOL_OP + () let host_apply_guest_agent_config = call - ~name:"apply_guest_agent_config" - ~lifecycle:[Published, rel_dundee, ""] - ~doc:"Signal to the host that the pool-wide guest agent config has changed" - ~params:[ - Ref _host, "host", "The host"; - ] - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"apply_guest_agent_config" + ~lifecycle:[Published, rel_dundee, ""] + ~doc:"Signal to the host that the pool-wide guest agent config has changed" + ~params:[ + Ref _host, "host", "The host"; + ] + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN + () (** Hosts *) let host = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host ~descr:"A physical host" ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages: [host_disable; host_enable; host_shutdown; host_reboot; host_dmesg; host_dmesg_clear; host_get_log; host_send_debug_keys; host_bugreport_upload; host_list_methods; host_license_apply; host_license_add; host_license_remove; host_create; host_destroy; - host_power_on; - host_set_license_params; - host_emergency_ha_disable; - host_ha_disarm_fencing; host_preconfigure_ha; host_ha_join_liveset; - 
host_ha_disable_failover_decisions; - host_ha_wait_for_shutdown_via_statefile; - host_ha_stop_daemon; - host_ha_release_resources; - host_ha_xapi_healthcheck; - host_local_assert_healthy; - host_request_backup; - host_request_config_file_sync; - host_propose_new_master; host_commit_new_master; host_abort_new_master; - host_get_data_sources; - host_record_data_source; - host_query_data_source; - host_forget_data_source_archives; - host_assert_can_evacuate; - host_get_vms_which_prevent_evacuation; - host_get_uncooperative_resident_VMs; - host_get_uncooperative_domains; - host_evacuate; - host_signal_networking_change; - host_notify; - host_syslog_reconfigure; - host_management_reconfigure; - host_local_management_reconfigure; - host_management_disable; - host_get_management_interface; - host_get_system_status_capabilities; - host_get_diagnostic_timing_stats; - host_restart_agent; - host_shutdown_agent; - host_set_hostname_live; - host_is_in_emergency_mode; - host_compute_free_memory; - host_compute_memory_overhead; - host_tickle_heartbeat; - host_sync_data; - host_backup_rrds; - host_create_new_blob; - host_call_plugin; - host_has_extension; - host_call_extension; - host_get_servertime; - host_get_server_localtime; - host_enable_binary_storage; - host_disable_binary_storage; - host_enable_external_auth; - host_disable_external_auth; - host_retrieve_wlb_evacuate_recommendations; - host_certificate_install; - host_certificate_uninstall; - host_certificate_list; - host_crl_install; - host_crl_uninstall; - host_crl_list; - host_certificate_sync; - host_get_server_certificate; - host_update_pool_secret; - host_update_master; - host_attach_static_vdis; - host_detach_static_vdis; - host_set_localdb_key; - host_apply_edition; - host_refresh_pack_info; - host_set_power_on_mode; - host_set_cpu_features; - host_reset_cpu_features; - host_reset_networking; - host_enable_local_storage_caching; - host_disable_local_storage_caching; - host_get_sm_diagnostics; - 
host_get_thread_diagnostics; - host_sm_dp_destroy; - host_sync_vlans; - host_sync_tunnels; - host_sync_pif_currently_attached; - host_migrate_receive; - host_declare_dead; - host_enable_display; - host_disable_display; - host_set_ssl_legacy; - host_apply_guest_agent_config; - ] - ~contents: - ([ uid _host; - namespace ~name:"name" ~contents:(names None RW) (); - namespace ~name:"memory" ~contents:host_memory (); - ] @ (allowed_and_current_operations host_operations) @ [ - namespace ~name:"API_version" ~contents:api_version (); - field ~qualifier:DynamicRO ~ty:Bool "enabled" "True if the host is currently enabled"; - field ~qualifier:StaticRO ~ty:(Map(String, String)) "software_version" "version strings"; - field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; - field ~qualifier:StaticRO ~ty:(Set(String)) "capabilities" "Xen capabilities"; - field ~qualifier:DynamicRO ~ty:(Map(String, String)) "cpu_configuration" "The CPU configuration on this host. 
May contain keys such as \"nr_nodes\", \"sockets_per_node\", \"cores_per_socket\", or \"threads_per_core\""; - field ~qualifier:DynamicRO ~ty:String "sched_policy" "Scheduler policy currently in force on this host"; - field ~qualifier:DynamicRO ~ty:(Set String) "supported_bootloaders" "a list of the bootloaders installed on the machine"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "resident_VMs" "list of VMs currently resident on host"; - field ~qualifier:RW ~ty:(Map(String, String)) "logging" "logging configuration"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) ~doc_tags:[Networking] "PIFs" "physical network interfaces"; - field ~qualifier:RW ~ty:(Ref _sr) "suspend_image_sr" "The SR in which VDIs for suspend images are created"; - field ~qualifier:RW ~ty:(Ref _sr) "crash_dump_sr" "The SR in which VDIs for crash dumps are created"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_crashdump)) "crashdumps" "Set of host crash dumps"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "patches" "Set of host patches"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" "physical blockdevices"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _hostcpu)) "host_CPUs" "The physical CPUs on this host"; - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "cpu_info" "Details about the physical CPUs on this host"; - field ~in_oss_since:None ~qualifier:RW ~ty:String ~doc_tags:[Networking] "hostname" "The hostname of this host"; - field ~in_oss_since:None ~qualifier:RW ~ty:String ~doc_tags:[Networking] "address" "The address by which this host can be contacted from any other host in the pool"; - field ~qualifier:DynamicRO ~ty:(Ref _host_metrics) "metrics" "metrics associated with this host"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map (String,String)) "license_params" "State of the current license"; - field ~in_oss_since:None ~internal_only:true 
~qualifier:DynamicRO ~ty:Int "boot_free_mem" "Free memory on host at boot time"; - field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "The set of statefiles accessible from this host"; - field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_network_peers" "The set of hosts visible via the network from this host"; - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String,Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this host"; - field ~writer_roles:_R_VM_OP ~qualifier:RW ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; - field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VString "")) ~ty:String "external_auth_type" "type of external authentication service configured; empty if none configured."; - field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VString "")) ~ty:String "external_auth_service_name" "name of external authentication service configured; empty if none configured."; - field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "external_auth_configuration" "configuration specific to external authentication service"; - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "edition" "Product edition"; - field ~qualifier:RW ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [VString "address", VString "localhost"; VString "port", VString "27000"])) ~ty:(Map (String, String)) "license_server" "Contact information of the license server"; - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "bios_strings" "BIOS 
strings"; - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "power_on_mode" "The power on mode"; - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "power_on_config" "The power on config"; - field ~qualifier:StaticRO ~in_product_since:rel_cowley ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _sr) "local_cache_sr" "The SR that is used as a local cache"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) - "chipset_info" "Information about chipset features"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _pci)) "PCIs" "List of PCI devices in the host"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _pgpu)) "PGPUs" "List of physical GPUs in the host"; - field ~qualifier:StaticRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool true)) "ssl_legacy" "Allow SSLv3 protocol and ciphersuites as used by older XenServers. This controls both incoming and outgoing connections. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. 
XenAPI login sessions will remain valid."; - field ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "guest_VCPUs_params" "VCPUs params to apply to all resident guests"; - field ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VEnum "enabled")) ~ty:host_display "display" "indicates whether the host is configured to output its console to a physical display device"; - field ~qualifier:DynamicRO ~in_product_since:rel_cream ~default_value:(Some (VSet [VInt 0L])) ~ty:(Set (Int)) "virtual_hardware_platform_versions" "The set of versions of the virtual hardware platform that the host can offer to its guests"; - field ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~in_product_since:rel_dundee_plus ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_ely, ""] ~ty:(Set (Ref _pool_patch)) ~ignore_foreign_key:true "patches_requiring_reboot" "List of patches which require reboot"; - ]) - () - -let host_metrics = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_metrics ~descr:"The metrics associated with a host" ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[] ~contents: - [ uid _host_metrics; - namespace ~name:"memory" ~contents:host_metrics_memory (); - field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None "live" "Pool master thinks this host is live"; - field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - () + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything 
~gen_constructor_destructor:false ~name:_host ~descr:"A physical host" ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages: [host_disable; host_enable; host_shutdown; host_reboot; host_dmesg; host_dmesg_clear; host_get_log; host_send_debug_keys; host_bugreport_upload; host_list_methods; host_license_apply; host_license_add; host_license_remove; host_create; host_destroy; + host_power_on; + host_set_license_params; + host_emergency_ha_disable; + host_ha_disarm_fencing; host_preconfigure_ha; host_ha_join_liveset; + host_ha_disable_failover_decisions; + host_ha_wait_for_shutdown_via_statefile; + host_ha_stop_daemon; + host_ha_release_resources; + host_ha_xapi_healthcheck; + host_local_assert_healthy; + host_request_backup; + host_request_config_file_sync; + host_propose_new_master; host_commit_new_master; host_abort_new_master; + host_get_data_sources; + host_record_data_source; + host_query_data_source; + host_forget_data_source_archives; + host_assert_can_evacuate; + host_get_vms_which_prevent_evacuation; + host_get_uncooperative_resident_VMs; + host_get_uncooperative_domains; + host_evacuate; + host_signal_networking_change; + host_notify; + host_syslog_reconfigure; + host_management_reconfigure; + host_local_management_reconfigure; + host_management_disable; + host_get_management_interface; + host_get_system_status_capabilities; + host_get_diagnostic_timing_stats; + host_restart_agent; + host_shutdown_agent; + host_set_hostname_live; + host_is_in_emergency_mode; + host_compute_free_memory; + host_compute_memory_overhead; + host_tickle_heartbeat; + host_sync_data; + host_backup_rrds; + host_create_new_blob; + host_call_plugin; + host_has_extension; + host_call_extension; + host_get_servertime; + host_get_server_localtime; + host_enable_binary_storage; + host_disable_binary_storage; + host_enable_external_auth; + host_disable_external_auth; + host_retrieve_wlb_evacuate_recommendations; + host_certificate_install; + 
host_certificate_uninstall; + host_certificate_list; + host_crl_install; + host_crl_uninstall; + host_crl_list; + host_certificate_sync; + host_get_server_certificate; + host_update_pool_secret; + host_update_master; + host_attach_static_vdis; + host_detach_static_vdis; + host_set_localdb_key; + host_apply_edition; + host_refresh_pack_info; + host_set_power_on_mode; + host_set_cpu_features; + host_reset_cpu_features; + host_reset_networking; + host_enable_local_storage_caching; + host_disable_local_storage_caching; + host_get_sm_diagnostics; + host_get_thread_diagnostics; + host_sm_dp_destroy; + host_sync_vlans; + host_sync_tunnels; + host_sync_pif_currently_attached; + host_migrate_receive; + host_declare_dead; + host_enable_display; + host_disable_display; + host_set_ssl_legacy; + host_apply_guest_agent_config; + ] + ~contents: + ([ uid _host; + namespace ~name:"name" ~contents:(names None RW) (); + namespace ~name:"memory" ~contents:host_memory (); + ] @ (allowed_and_current_operations host_operations) @ [ + namespace ~name:"API_version" ~contents:api_version (); + field ~qualifier:DynamicRO ~ty:Bool "enabled" "True if the host is currently enabled"; + field ~qualifier:StaticRO ~ty:(Map(String, String)) "software_version" "version strings"; + field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; + field ~qualifier:StaticRO ~ty:(Set(String)) "capabilities" "Xen capabilities"; + field ~qualifier:DynamicRO ~ty:(Map(String, String)) "cpu_configuration" "The CPU configuration on this host. 
May contain keys such as \"nr_nodes\", \"sockets_per_node\", \"cores_per_socket\", or \"threads_per_core\""; + field ~qualifier:DynamicRO ~ty:String "sched_policy" "Scheduler policy currently in force on this host"; + field ~qualifier:DynamicRO ~ty:(Set String) "supported_bootloaders" "a list of the bootloaders installed on the machine"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "resident_VMs" "list of VMs currently resident on host"; + field ~qualifier:RW ~ty:(Map(String, String)) "logging" "logging configuration"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) ~doc_tags:[Networking] "PIFs" "physical network interfaces"; + field ~qualifier:RW ~ty:(Ref _sr) "suspend_image_sr" "The SR in which VDIs for suspend images are created"; + field ~qualifier:RW ~ty:(Ref _sr) "crash_dump_sr" "The SR in which VDIs for crash dumps are created"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_crashdump)) "crashdumps" "Set of host crash dumps"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Set (Ref _host_patch)) "patches" "Set of host patches"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" "physical blockdevices"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _hostcpu)) "host_CPUs" "The physical CPUs on this host"; + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "cpu_info" "Details about the physical CPUs on this host"; + field ~in_oss_since:None ~qualifier:RW ~ty:String ~doc_tags:[Networking] "hostname" "The hostname of this host"; + field ~in_oss_since:None ~qualifier:RW ~ty:String ~doc_tags:[Networking] "address" "The address by which this host can be contacted from any other host in the pool"; + field ~qualifier:DynamicRO ~ty:(Ref _host_metrics) "metrics" "metrics associated with this host"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map (String,String)) "license_params" "State of the current license"; + field ~in_oss_since:None ~internal_only:true 
~qualifier:DynamicRO ~ty:Int "boot_free_mem" "Free memory on host at boot time"; + field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "The set of statefiles accessible from this host"; + field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_network_peers" "The set of hosts visible via the network from this host"; + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String,Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this host"; + field ~writer_roles:_R_VM_OP ~qualifier:RW ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; + field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VString "")) ~ty:String "external_auth_type" "type of external authentication service configured; empty if none configured."; + field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VString "")) ~ty:String "external_auth_service_name" "name of external authentication service configured; empty if none configured."; + field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "external_auth_configuration" "configuration specific to external authentication service"; + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "edition" "Product edition"; + field ~qualifier:RW ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [VString "address", VString "localhost"; VString "port", VString "27000"])) ~ty:(Map (String, String)) "license_server" "Contact information of the license server"; + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "bios_strings" "BIOS 
strings"; + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "power_on_mode" "The power on mode"; + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "power_on_config" "The power on config"; + field ~qualifier:StaticRO ~in_product_since:rel_cowley ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _sr) "local_cache_sr" "The SR that is used as a local cache"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Map (String, String)) ~default_value:(Some (VMap [])) + "chipset_info" "Information about chipset features"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _pci)) "PCIs" "List of PCI devices in the host"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _pgpu)) "PGPUs" "List of physical GPUs in the host"; + field ~qualifier:StaticRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool true)) "ssl_legacy" "Allow SSLv3 protocol and ciphersuites as used by older XenServers. This controls both incoming and outgoing connections. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. 
XenAPI login sessions will remain valid."; + field ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "guest_VCPUs_params" "VCPUs params to apply to all resident guests"; + field ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VEnum "enabled")) ~ty:host_display "display" "indicates whether the host is configured to output its console to a physical display device"; + field ~qualifier:DynamicRO ~in_product_since:rel_cream ~default_value:(Some (VSet [VInt 0L])) ~ty:(Set (Int)) "virtual_hardware_platform_versions" "The set of versions of the virtual hardware platform that the host can offer to its guests"; + field ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~in_product_since:rel_dundee_plus ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_ely, ""] ~ty:(Set (Ref _pool_patch)) ~ignore_foreign_key:true "patches_requiring_reboot" "List of patches which require reboot"; + ]) + () + +let host_metrics = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_host_metrics ~descr:"The metrics associated with a host" ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages:[] ~contents: + [ uid _host_metrics; + namespace ~name:"memory" ~contents:host_metrics_memory (); + field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None "live" "Pool master thinks this host is live"; + field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + () (** HostCPU *) let hostcpu = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None 
~persist:PersistEverything ~gen_constructor_destructor:false ~name:_hostcpu ~descr:"A physical CPU" ~gen_events:true - ~lifecycle:[ - Published, rel_rio, "A physical CPU"; - Deprecated, rel_midnight_ride, "Deprecated in favour of the Host.cpu_info field"; - ] - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[] ~contents: - [ uid _hostcpu; - field ~qualifier:DynamicRO ~ty:(Ref _host) "host" "the host the CPU is in"; - field ~qualifier:DynamicRO ~ty:Int "number" "the number of the physical CPU within the host"; - field ~qualifier:DynamicRO ~ty:String "vendor" "the vendor of the physical CPU"; - field ~qualifier:DynamicRO ~ty:Int "speed" "the speed of the physical CPU"; - field ~qualifier:DynamicRO ~ty:String "modelname" "the model name of the physical CPU"; - field ~qualifier:DynamicRO ~ty:Int "family" "the family (number) of the physical CPU"; - field ~qualifier:DynamicRO ~ty:Int "model" "the model number of the physical CPU"; - field ~qualifier:DynamicRO ~ty:String "stepping" "the stepping of the physical CPU"; - field ~qualifier:DynamicRO ~ty:String "flags" "the flags of the physical CPU (a decoded version of the features field)"; - field ~qualifier:DynamicRO ~ty:String "features" "the physical CPU feature bitmap"; - field ~qualifier:DynamicRO ~persist:false ~ty:Float "utilisation" "the current CPU utilisation"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; -] - () + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_hostcpu ~descr:"A physical CPU" ~gen_events:true + ~lifecycle:[ + Published, rel_rio, "A physical CPU"; + Deprecated, rel_midnight_ride, "Deprecated in favour of the Host.cpu_info field"; + ] + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages:[] ~contents: + [ uid _hostcpu; + field 
~qualifier:DynamicRO ~ty:(Ref _host) "host" "the host the CPU is in"; + field ~qualifier:DynamicRO ~ty:Int "number" "the number of the physical CPU within the host"; + field ~qualifier:DynamicRO ~ty:String "vendor" "the vendor of the physical CPU"; + field ~qualifier:DynamicRO ~ty:Int "speed" "the speed of the physical CPU"; + field ~qualifier:DynamicRO ~ty:String "modelname" "the model name of the physical CPU"; + field ~qualifier:DynamicRO ~ty:Int "family" "the family (number) of the physical CPU"; + field ~qualifier:DynamicRO ~ty:Int "model" "the model number of the physical CPU"; + field ~qualifier:DynamicRO ~ty:String "stepping" "the stepping of the physical CPU"; + field ~qualifier:DynamicRO ~ty:String "flags" "the flags of the physical CPU (a decoded version of the features field)"; + field ~qualifier:DynamicRO ~ty:String "features" "the physical CPU feature bitmap"; + field ~qualifier:DynamicRO ~persist:false ~ty:Float "utilisation" "the current CPU utilisation"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + () (** Disk and network interfaces are associated with QoS parameters: *) let qos devtype = [ field "algorithm_type" "QoS algorithm to use"; - field ~ty:(Map(String,String)) "algorithm_params" + field ~ty:(Map(String,String)) "algorithm_params" "parameters for chosen QoS algorithm"; field ~qualifier:DynamicRO ~ty:(Set String) "supported_algorithms" ("supported QoS algorithms for this " ^ devtype); ] let network_operations = - Enum ("network_operations", - [ "attaching", "Indicates this network is attaching to a VIF or PIF" ]) + Enum ("network_operations", + [ "attaching", "Indicates this network is attaching to a VIF or PIF" ]) let network_default_locking_mode = - Enum ("network_default_locking_mode", [ - "unlocked", "Treat all VIFs on this network with locking_mode = 'default' as if they have locking_mode = 'unlocked'"; - "disabled", "Treat all VIFs on 
this network with locking_mode = 'default' as if they have locking_mode = 'disabled'"; - ]) + Enum ("network_default_locking_mode", [ + "unlocked", "Treat all VIFs on this network with locking_mode = 'default' as if they have locking_mode = 'unlocked'"; + "disabled", "Treat all VIFs on this network with locking_mode = 'default' as if they have locking_mode = 'disabled'"; + ]) let network_attach = call - ~name:"attach" - ~doc:"Makes the network immediately available on a particular host" - ~params:[Ref _network, "network", "network to which this interface should be connected"; - Ref _host, "host", "physical machine to which this PIF is connected"] - ~in_product_since:rel_miami - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP - () + ~name:"attach" + ~doc:"Makes the network immediately available on a particular host" + ~params:[Ref _network, "network", "network to which this interface should be connected"; + Ref _host, "host", "physical machine to which this PIF is connected"] + ~in_product_since:rel_miami + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP + () let network_introduce_params first_rel = [ @@ -4809,224 +4809,224 @@ let network_introduce_params first_rel = (* network pool introduce is used to copy network records on pool join -- it's the network analogue of VDI/PIF.pool_introduce *) let network_pool_introduce = call - ~name:"pool_introduce" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params:(network_introduce_params miami_release) - ~doc:"Create a new network record in the database only" - ~result:(Ref _network, "The ref of the newly created network record.") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP + ~name:"pool_introduce" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:(network_introduce_params miami_release) + ~doc:"Create a new network record in the database only" + ~result:(Ref _network, "The ref of the newly created network record.") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP () let 
network_create_new_blob = call - ~name: "create_new_blob" - ~in_product_since:rel_orlando - ~doc:"Create a placeholder for a named binary blob of data that is associated with this pool" - ~versioned_params: - [{param_type=Ref _network; param_name="network"; param_doc="The network"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} - ] - ~result:(Ref _blob, "The reference of the blob, needed for populating its data") - ~allowed_roles:_R_POOL_OP - () + ~name: "create_new_blob" + ~in_product_since:rel_orlando + ~doc:"Create a placeholder for a named binary blob of data that is associated with this pool" + ~versioned_params: + [{param_type=Ref _network; param_name="network"; param_doc="The network"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. 
Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} + ] + ~result:(Ref _blob, "The reference of the blob, needed for populating its data") + ~allowed_roles:_R_POOL_OP + () let network_set_default_locking_mode = call - ~name:"set_default_locking_mode" - ~in_product_since:rel_tampa - ~doc:"Set the default locking mode for VIFs attached to this network" - ~params:[ - Ref _network, "network", "The network"; - network_default_locking_mode, "value", "The default locking mode for VIFs attached to this network."; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_default_locking_mode" + ~in_product_since:rel_tampa + ~doc:"Set the default locking mode for VIFs attached to this network" + ~params:[ + Ref _network, "network", "The network"; + network_default_locking_mode, "value", "The default locking mode for VIFs attached to this network."; + ] + ~allowed_roles:_R_POOL_OP + () let network_attach_for_vm = call - ~name:"attach_for_vm" - ~doc:"Attaches all networks needed by a given VM on a particular host" - ~params:[ - Ref _host, "host", "Physical machine to which the networks are to be attached"; - Ref _vm, "vm", "The virtual machine" - ] - ~in_product_since:rel_tampa - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP - () + ~name:"attach_for_vm" + ~doc:"Attaches all networks needed by a given VM on a particular host" + ~params:[ + Ref _host, "host", "Physical machine to which the networks are to be attached"; + Ref _vm, "vm", "The virtual machine" + ] + ~in_product_since:rel_tampa + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP + () let network_detach_for_vm = call - ~name:"detach_for_vm" - ~doc:"Detaches all networks of a given VM from a particular host" - ~params:[ - Ref _host, "host", "Physical machine from which the networks are to be attached"; - Ref 
_vm, "vm", "The virtual machine" - ] - ~in_product_since:rel_tampa - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP - () + ~name:"detach_for_vm" + ~doc:"Detaches all networks of a given VM from a particular host" + ~params:[ + Ref _host, "host", "Physical machine from which the networks are to be attached"; + Ref _vm, "vm", "The virtual machine" + ] + ~in_product_since:rel_tampa + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP + () (** A virtual network *) let network = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_network ~descr:"A virtual network" ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN (* vm admins can create/destroy networks without PIFs *) - ~doc_tags:[Networking] - ~messages:[network_attach; network_pool_introduce; network_create_new_blob; network_set_default_locking_mode; - network_attach_for_vm; network_detach_for_vm] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_network ~descr:"A virtual network" ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN (* vm admins can create/destroy networks without PIFs *) + ~doc_tags:[Networking] + ~messages:[network_attach; network_pool_introduce; network_create_new_blob; network_set_default_locking_mode; + network_attach_for_vm; network_detach_for_vm] + ~contents: ([ - uid _network; - namespace ~name:"name" ~contents:(names ~writer_roles:_R_POOL_OP oss_since_303 RW) (); - ] @ (allowed_and_current_operations ~writer_roles:_R_POOL_OP network_operations) @ [ - field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) "VIFs" "list of connected vifs"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) "PIFs" "list of connected pifs"; - field ~qualifier:RW ~ty:Int ~default_value:(Some (VInt 
1500L)) ~in_product_since:rel_midnight_ride "MTU" "MTU in octets"; - field ~writer_roles:_R_POOL_OP ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP));("XenCenterCreateInProgress",(_R_VM_OP))]; - field ~in_oss_since:None ~qualifier:DynamicRO "bridge" "name of the bridge corresponding to this network on the local host"; - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this network"; - field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; - field ~qualifier:DynamicRO ~in_product_since:rel_tampa ~default_value:(Some (VEnum "unlocked")) ~ty:network_default_locking_mode "default_locking_mode" "The network will use this value to determine the behaviour of all VIFs where locking_mode = default"; - field ~qualifier:DynamicRO ~in_product_since:rel_creedence ~default_value:(Some (VMap [])) ~ty:(Map (Ref _vif, String)) "assigned_ips" "The IP addresses assigned to VIFs on networks that have active xapi-managed DHCP" - ]) - () + uid _network; + namespace ~name:"name" ~contents:(names ~writer_roles:_R_POOL_OP oss_since_303 RW) (); + ] @ (allowed_and_current_operations ~writer_roles:_R_POOL_OP network_operations) @ [ + field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) "VIFs" "list of connected vifs"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pif)) "PIFs" "list of connected pifs"; + field ~qualifier:RW ~ty:Int ~default_value:(Some (VInt 1500L)) ~in_product_since:rel_midnight_ride "MTU" "MTU in octets"; + field ~writer_roles:_R_POOL_OP ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP));("XenCenterCreateInProgress",(_R_VM_OP))]; + field ~in_oss_since:None ~qualifier:DynamicRO 
"bridge" "name of the bridge corresponding to this network on the local host"; + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this network"; + field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; + field ~qualifier:DynamicRO ~in_product_since:rel_tampa ~default_value:(Some (VEnum "unlocked")) ~ty:network_default_locking_mode "default_locking_mode" "The network will use this value to determine the behaviour of all VIFs where locking_mode = default"; + field ~qualifier:DynamicRO ~in_product_since:rel_creedence ~default_value:(Some (VMap [])) ~ty:(Map (Ref _vif, String)) "assigned_ips" "The IP addresses assigned to VIFs on networks that have active xapi-managed DHCP" + ]) + () let pif_create_VLAN = call - ~name:"create_VLAN" - ~in_product_since:rel_rio - ~doc:"Create a VLAN interface from an existing physical interface. This call is deprecated: use VLAN.create instead" - ~lifecycle:[ - Published, rel_rio, "Create a VLAN interface from an existing physical interface"; - Deprecated, rel_miami, "Replaced by VLAN.create"; - ] - ~params:[String, "device", "physical interface on which to create the VLAN interface"; - Ref _network, "network", "network to which this interface should be connected"; - Ref _host, "host", "physical machine to which this PIF is connected"; - Int, "VLAN", "VLAN tag for the new interface"] - ~result:(Ref _pif, "The reference of the created PIF object") - ~errs:[Api_errors.vlan_tag_invalid] - ~internal_deprecated_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"create_VLAN" + ~in_product_since:rel_rio + ~doc:"Create a VLAN interface from an existing physical interface. 
This call is deprecated: use VLAN.create instead" + ~lifecycle:[ + Published, rel_rio, "Create a VLAN interface from an existing physical interface"; + Deprecated, rel_miami, "Replaced by VLAN.create"; + ] + ~params:[String, "device", "physical interface on which to create the VLAN interface"; + Ref _network, "network", "network to which this interface should be connected"; + Ref _host, "host", "physical machine to which this PIF is connected"; + Int, "VLAN", "VLAN tag for the new interface"] + ~result:(Ref _pif, "The reference of the created PIF object") + ~errs:[Api_errors.vlan_tag_invalid] + ~internal_deprecated_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let pif_destroy = call - ~name:"destroy" - ~in_product_since:rel_rio - ~doc:"Destroy the PIF object (provided it is a VLAN interface). This call is deprecated: use VLAN.destroy or Bond.destroy instead" - ~lifecycle:[ - Published, rel_rio, "Destroy the PIF object (provided it is a VLAN interface)"; - Deprecated, rel_miami, "Replaced by VLAN.destroy and Bond.destroy"; - ] - ~params:[Ref _pif, "self", "the PIF object to destroy"] - ~errs:[Api_errors.pif_is_physical] - ~internal_deprecated_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~in_product_since:rel_rio + ~doc:"Destroy the PIF object (provided it is a VLAN interface). 
This call is deprecated: use VLAN.destroy or Bond.destroy instead" + ~lifecycle:[ + Published, rel_rio, "Destroy the PIF object (provided it is a VLAN interface)"; + Deprecated, rel_miami, "Replaced by VLAN.destroy and Bond.destroy"; + ] + ~params:[Ref _pif, "self", "the PIF object to destroy"] + ~errs:[Api_errors.pif_is_physical] + ~internal_deprecated_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let pif_plug = call - ~name:"plug" - ~doc:"Attempt to bring up a physical interface" - ~params:[Ref _pif, "self", "the PIF object to plug"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - ~errs:[Api_errors.transport_pif_not_configured] - () + ~name:"plug" + ~doc:"Attempt to bring up a physical interface" + ~params:[Ref _pif, "self", "the PIF object to plug"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + ~errs:[Api_errors.transport_pif_not_configured] + () let pif_unplug = call - ~name:"unplug" - ~doc:"Attempt to bring down a physical interface" - ~params:[Ref _pif, "self", "the PIF object to unplug"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"unplug" + ~doc:"Attempt to bring down a physical interface" + ~params:[Ref _pif, "self", "the PIF object to unplug"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let pif_ip_configuration_mode = Enum ("ip_configuration_mode", - [ "None", "Do not acquire an IP address"; - "DHCP", "Acquire an IP address by DHCP"; - "Static", "Static IP address configuration" ]) + [ "None", "Do not acquire an IP address"; + "DHCP", "Acquire an IP address by DHCP"; + "Static", "Static IP address configuration" ]) let pif_reconfigure_ip = call - ~name:"reconfigure_ip" - ~doc:"Reconfigure the IP address settings for this interface" - ~params:[Ref _pif, "self", "the PIF object to reconfigure"; - pif_ip_configuration_mode, "mode", "whether to use dynamic/static/no-assignment"; - String, "IP", "the new IP address"; - String, "netmask", "the new netmask"; - String, "gateway", "the new 
gateway"; - String, "DNS", "the new DNS settings"; - ] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"reconfigure_ip" + ~doc:"Reconfigure the IP address settings for this interface" + ~params:[Ref _pif, "self", "the PIF object to reconfigure"; + pif_ip_configuration_mode, "mode", "whether to use dynamic/static/no-assignment"; + String, "IP", "the new IP address"; + String, "netmask", "the new netmask"; + String, "gateway", "the new gateway"; + String, "DNS", "the new DNS settings"; + ] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let pif_ipv6_configuration_mode = Enum ("ipv6_configuration_mode", - [ "None", "Do not acquire an IPv6 address"; - "DHCP", "Acquire an IPv6 address by DHCP"; - "Static", "Static IPv6 address configuration"; - "Autoconf", "Router assigned prefix delegation IPv6 allocation" ]) + [ "None", "Do not acquire an IPv6 address"; + "DHCP", "Acquire an IPv6 address by DHCP"; + "Static", "Static IPv6 address configuration"; + "Autoconf", "Router assigned prefix delegation IPv6 allocation" ]) let pif_reconfigure_ipv6 = call - ~name:"reconfigure_ipv6" - ~doc:"Reconfigure the IPv6 address settings for this interface" - ~params:[Ref _pif, "self", "the PIF object to reconfigure"; - pif_ipv6_configuration_mode, "mode", "whether to use dynamic/static/no-assignment"; - String, "IPv6", "the new IPv6 address (in / format)"; - String, "gateway", "the new gateway"; - String, "DNS", "the new DNS settings"; - ] - ~lifecycle:[Prototyped, rel_tampa, ""] - ~allowed_roles:_R_POOL_OP - () + ~name:"reconfigure_ipv6" + ~doc:"Reconfigure the IPv6 address settings for this interface" + ~params:[Ref _pif, "self", "the PIF object to reconfigure"; + pif_ipv6_configuration_mode, "mode", "whether to use dynamic/static/no-assignment"; + String, "IPv6", "the new IPv6 address (in / format)"; + String, "gateway", "the new gateway"; + String, "DNS", "the new DNS settings"; + ] + ~lifecycle:[Prototyped, rel_tampa, ""] + 
~allowed_roles:_R_POOL_OP + () let pif_primary_address_type = Enum ("primary_address_type", - [ "IPv4", "Primary address is the IPv4 address"; - "IPv6", "Primary address is the IPv6 address" ]) + [ "IPv4", "Primary address is the IPv4 address"; + "IPv6", "Primary address is the IPv6 address" ]) let pif_set_primary_address_type = call - ~name:"set_primary_address_type" - ~doc:"Change the primary address type used by this PIF" - ~params:[Ref _pif, "self", "the PIF object to reconfigure"; - pif_primary_address_type, "primary_address_type", "Whether to prefer IPv4 or IPv6 connections"; - ] - ~lifecycle:[Prototyped, rel_tampa, ""] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_primary_address_type" + ~doc:"Change the primary address type used by this PIF" + ~params:[Ref _pif, "self", "the PIF object to reconfigure"; + pif_primary_address_type, "primary_address_type", "Whether to prefer IPv4 or IPv6 connections"; + ] + ~lifecycle:[Prototyped, rel_tampa, ""] + ~allowed_roles:_R_POOL_OP + () let pif_scan = call - ~name:"scan" - ~doc:"Scan for physical interfaces on a host and create PIF objects to represent them" - ~params:[Ref _host, "host", "The host on which to scan"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"scan" + ~doc:"Scan for physical interfaces on a host and create PIF objects to represent them" + ~params:[Ref _host, "host", "The host on which to scan"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let pif_introduce_params = - [ - {param_type=Ref _host; param_name="host"; param_doc="The host on which the interface exists"; param_release=miami_release; param_default=None}; - {param_type=String; param_name="MAC"; param_doc="The MAC address of the interface"; param_release=miami_release; param_default=None}; - {param_type=String; param_name="device"; param_doc="The device name to use for the interface"; param_release=miami_release; param_default=None}; - {param_type=Bool; param_name="managed"; param_doc="Indicates 
whether the interface is managed by xapi (defaults to \"true\")"; param_release=vgpu_productisation_release; param_default=Some (VBool true)}; - ] + [ + {param_type=Ref _host; param_name="host"; param_doc="The host on which the interface exists"; param_release=miami_release; param_default=None}; + {param_type=String; param_name="MAC"; param_doc="The MAC address of the interface"; param_release=miami_release; param_default=None}; + {param_type=String; param_name="device"; param_doc="The device name to use for the interface"; param_release=miami_release; param_default=None}; + {param_type=Bool; param_name="managed"; param_doc="Indicates whether the interface is managed by xapi (defaults to \"true\")"; param_release=vgpu_productisation_release; param_default=Some (VBool true)}; + ] let pif_introduce = call - ~name:"introduce" - ~doc:"Create a PIF object matching a particular network interface" - ~versioned_params:pif_introduce_params - ~in_product_since:rel_miami - ~result:(Ref _pif, "The reference of the created PIF object") - ~allowed_roles:_R_POOL_OP - () + ~name:"introduce" + ~doc:"Create a PIF object matching a particular network interface" + ~versioned_params:pif_introduce_params + ~in_product_since:rel_miami + ~result:(Ref _pif, "The reference of the created PIF object") + ~allowed_roles:_R_POOL_OP + () let pif_forget = call - ~name:"forget" - ~doc:"Destroy the PIF object matching a particular network interface" - ~params:[Ref _pif, "self", "The PIF object to destroy"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - ~errs:[Api_errors.pif_tunnel_still_exists] - () + ~name:"forget" + ~doc:"Destroy the PIF object matching a particular network interface" + ~params:[Ref _pif, "self", "The PIF object to destroy"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + ~errs:[Api_errors.pif_tunnel_still_exists] + () let pif_pool_introduce_params first_rel = [ @@ -5057,280 +5057,280 @@ let pif_pool_introduce_params first_rel = (* PIF pool introduce is 
used to copy PIF records on pool join -- it's the PIF analogue of VDI.pool_introduce *) let pif_pool_introduce = call - ~name:"pool_introduce" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params:(pif_pool_introduce_params miami_release) - ~doc:"Create a new PIF record in the database only" - ~result:(Ref _pif, "The ref of the newly created PIF record.") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP + ~name:"pool_introduce" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:(pif_pool_introduce_params miami_release) + ~doc:"Create a new PIF record in the database only" + ~result:(Ref _pif, "The ref of the newly created PIF record.") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP () let pif_db_introduce = call - ~name:"db_introduce" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~versioned_params:(pif_pool_introduce_params orlando_release) - ~doc:"Create a new PIF record in the database only" - ~result:(Ref _pif, "The ref of the newly created PIF record.") - ~hide_from_docs:false - ~allowed_roles:_R_POOL_OP - () + ~name:"db_introduce" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~versioned_params:(pif_pool_introduce_params orlando_release) + ~doc:"Create a new PIF record in the database only" + ~result:(Ref _pif, "The ref of the newly created PIF record.") + ~hide_from_docs:false + ~allowed_roles:_R_POOL_OP + () let pif_db_forget = call - ~name:"db_forget" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[ Ref _pif, "self", "The ref of the PIF whose database record should be destroyed" ] - ~doc:"Destroy a PIF database record." - ~hide_from_docs:false - ~allowed_roles:_R_POOL_OP - () + ~name:"db_forget" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[ Ref _pif, "self", "The ref of the PIF whose database record should be destroyed" ] + ~doc:"Destroy a PIF database record." 
+ ~hide_from_docs:false + ~allowed_roles:_R_POOL_OP + () let pif_set_property = call - ~name:"set_property" - ~doc:"Set the value of a property of the PIF" - ~params:[ - Ref _pif, "self", "The PIF"; - String, "name", "The property name"; - String, "value", "The property value"; - ] - ~lifecycle:[Published, rel_creedence, ""] - ~allowed_roles:_R_POOL_OP - () - -let pif = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pif ~descr:"A physical network interface (note separate VLANs are represented as several PIFs)" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - ~messages:[pif_create_VLAN; pif_destroy; pif_reconfigure_ip; pif_reconfigure_ipv6; pif_set_primary_address_type; pif_scan; pif_introduce; pif_forget; - pif_unplug; pif_plug; pif_pool_introduce; - pif_db_introduce; pif_db_forget; pif_set_property - ] ~contents: - [ uid _pif; - (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO "device" "machine-readable name of the interface (e.g. 
eth0)"; - field ~qualifier:StaticRO ~ty:(Ref _network) "network" "virtual network to which this pif is connected"; - field ~qualifier:StaticRO ~ty:(Ref _host) "host" "physical machine to which this pif is connected"; - (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO "MAC" "ethernet MAC address of physical interface"; - (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets"; - (* qualifier changed RW -> StaticRO in Miami *) - field ~qualifier:StaticRO ~ty:Int "VLAN" "VLAN tag for all traffic passing through this interface"; - field ~in_oss_since:None ~internal_only:true "device_name" "actual dom0 device name"; - field ~qualifier:DynamicRO ~ty:(Ref _pif_metrics) "metrics" "metrics associated with this PIF"; - field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "physical" "true if this represents a physical network interface" ~default_value:(Some (VBool false)); - field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "currently_attached" "true if this interface is online" ~default_value:(Some (VBool true)); - field ~in_oss_since:None ~ty:pif_ip_configuration_mode ~in_product_since:rel_miami ~qualifier:DynamicRO "ip_configuration_mode" "Sets if and how this interface gets an IP address" ~default_value:(Some (VEnum "None")); - field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "IP" "IP address" ~default_value:(Some (VString "")); - field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "netmask" "IP netmask" ~default_value:(Some (VString "")); - field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "gateway" "IP gateway" ~default_value:(Some (VString "")); - field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "DNS" "IP address of DNS servers to use" ~default_value:(Some (VString "")); - field 
~in_oss_since:None ~ty:(Ref _bond) ~in_product_since:rel_miami ~qualifier:DynamicRO "bond_slave_of" "Indicates which bond this interface is part of" ~default_value:(Some (VRef "")); - field ~in_oss_since:None ~ty:(Set(Ref _bond)) ~in_product_since:rel_miami ~qualifier:DynamicRO "bond_master_of" "Indicates this PIF represents the results of a bond"; - field ~in_oss_since:None ~ty:(Ref _vlan) ~in_product_since:rel_miami ~qualifier:DynamicRO "VLAN_master_of" "Indicates wich VLAN this interface receives untagged traffic from" ~default_value:(Some (VRef "")); - field ~in_oss_since:None ~ty:(Set(Ref _vlan)) ~in_product_since:rel_miami ~qualifier:DynamicRO "VLAN_slave_of" "Indicates which VLANs this interface transmits tagged traffic to"; - field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "management" "Indicates whether the control software is listening for connections on this interface" ~default_value:(Some (VBool false)); - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "Additional configuration"; - field ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~ty:Bool "disallow_unplug" "Prevent this PIF from being unplugged; set this to notify the management tool-stack that the PIF has a special use and should not be unplugged under any circumstances (e.g. 
because you're running storage traffic over it)"; - field ~in_oss_since:None ~ty:(Set(Ref _tunnel)) ~lifecycle:[Published, rel_cowley, "Indicates to which tunnel this PIF gives access"] ~qualifier:DynamicRO "tunnel_access_PIF_of" "Indicates to which tunnel this PIF gives access"; - field ~in_oss_since:None ~ty:(Set(Ref _tunnel)) ~lifecycle:[Published, rel_cowley, "Indicates to which tunnel this PIF provides transport"] ~qualifier:DynamicRO "tunnel_transport_PIF_of" "Indicates to which tunnel this PIF provides transport"; - field ~in_oss_since:None ~ty:pif_ipv6_configuration_mode ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "ipv6_configuration_mode" "Sets if and how this interface gets an IPv6 address" ~default_value:(Some (VEnum "None")); - field ~in_oss_since:None ~ty:(Set(String)) ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "IPv6" "IPv6 address" ~default_value:(Some (VSet [])); - field ~in_oss_since:None ~ty:String ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "ipv6_gateway" "IPv6 gateway" ~default_value:(Some (VString "")); - field ~in_oss_since:None ~ty:pif_primary_address_type ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "primary_address_type" "Which protocol should define the primary address of this interface" ~default_value:(Some (VEnum "IPv4")); - field ~in_oss_since:None ~ty:Bool ~lifecycle:[Published, rel_vgpu_productisation, ""] ~qualifier:StaticRO "managed" "Indicates whether the interface \ - is managed by xapi. If it is not, then xapi will not configure the interface, the commands PIF.plug/unplug/reconfigure_ip(v6) \ - can not be used, nor can the interface be bonded or have VLANs based on top through xapi." 
~default_value:(Some (VBool true)); - field ~lifecycle:[Published, rel_creedence, ""] ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "properties" "Additional configuration properties for the interface."; - field ~lifecycle:[Published, rel_dundee, ""] ~qualifier:DynamicRO ~ty:(Set(String)) ~default_value:(Some (VSet [])) "capabilities" "Additional capabilities on the interface."; - ] - () - -let pif_metrics = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pif_metrics ~descr:"The metrics associated with a physical network interface" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - ~messages:[] ~contents: - [ uid _pif_metrics; - namespace ~name:"io" ~contents:iobandwidth (); - field ~qualifier:DynamicRO ~ty:Bool "carrier" "Report if the PIF got a carrier or not"; - field ~qualifier:DynamicRO ~ty:String "vendor_id" "Report vendor ID"; - field ~qualifier:DynamicRO ~ty:String "vendor_name" "Report vendor name"; - field ~qualifier:DynamicRO ~ty:String "device_id" "Report device ID"; - field ~qualifier:DynamicRO ~ty:String "device_name" "Report device name"; - field ~qualifier:DynamicRO ~ty:Int "speed" "Speed of the link (if available)"; - field ~qualifier:DynamicRO ~ty:Bool "duplex" "Full duplex capability of the link (if available)"; - field ~qualifier:DynamicRO ~ty:String "pci_bus_path" "PCI bus path of the pif (if available)"; - field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - () + ~name:"set_property" + ~doc:"Set the value of a property of the PIF" + ~params:[ + Ref _pif, "self", "The PIF"; + String, "name", "The property name"; + String, "value", 
"The property value"; + ] + ~lifecycle:[Published, rel_creedence, ""] + ~allowed_roles:_R_POOL_OP + () + +let pif = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pif ~descr:"A physical network interface (note separate VLANs are represented as several PIFs)" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + ~messages:[pif_create_VLAN; pif_destroy; pif_reconfigure_ip; pif_reconfigure_ipv6; pif_set_primary_address_type; pif_scan; pif_introduce; pif_forget; + pif_unplug; pif_plug; pif_pool_introduce; + pif_db_introduce; pif_db_forget; pif_set_property + ] ~contents: + [ uid _pif; + (* qualifier changed RW -> StaticRO in Miami *) + field ~qualifier:StaticRO "device" "machine-readable name of the interface (e.g. eth0)"; + field ~qualifier:StaticRO ~ty:(Ref _network) "network" "virtual network to which this pif is connected"; + field ~qualifier:StaticRO ~ty:(Ref _host) "host" "physical machine to which this pif is connected"; + (* qualifier changed RW -> StaticRO in Miami *) + field ~qualifier:StaticRO "MAC" "ethernet MAC address of physical interface"; + (* qualifier changed RW -> StaticRO in Miami *) + field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets"; + (* qualifier changed RW -> StaticRO in Miami *) + field ~qualifier:StaticRO ~ty:Int "VLAN" "VLAN tag for all traffic passing through this interface"; + field ~in_oss_since:None ~internal_only:true "device_name" "actual dom0 device name"; + field ~qualifier:DynamicRO ~ty:(Ref _pif_metrics) "metrics" "metrics associated with this PIF"; + field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "physical" "true if this represents a physical network interface" ~default_value:(Some (VBool false)); + field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "currently_attached" 
"true if this interface is online" ~default_value:(Some (VBool true)); + field ~in_oss_since:None ~ty:pif_ip_configuration_mode ~in_product_since:rel_miami ~qualifier:DynamicRO "ip_configuration_mode" "Sets if and how this interface gets an IP address" ~default_value:(Some (VEnum "None")); + field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "IP" "IP address" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "netmask" "IP netmask" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "gateway" "IP gateway" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:String ~in_product_since:rel_miami ~qualifier:DynamicRO "DNS" "IP address of DNS servers to use" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:(Ref _bond) ~in_product_since:rel_miami ~qualifier:DynamicRO "bond_slave_of" "Indicates which bond this interface is part of" ~default_value:(Some (VRef "")); + field ~in_oss_since:None ~ty:(Set(Ref _bond)) ~in_product_since:rel_miami ~qualifier:DynamicRO "bond_master_of" "Indicates this PIF represents the results of a bond"; + field ~in_oss_since:None ~ty:(Ref _vlan) ~in_product_since:rel_miami ~qualifier:DynamicRO "VLAN_master_of" "Indicates wich VLAN this interface receives untagged traffic from" ~default_value:(Some (VRef "")); + field ~in_oss_since:None ~ty:(Set(Ref _vlan)) ~in_product_since:rel_miami ~qualifier:DynamicRO "VLAN_slave_of" "Indicates which VLANs this interface transmits tagged traffic to"; + field ~in_oss_since:None ~ty:Bool ~in_product_since:rel_miami ~qualifier:DynamicRO "management" "Indicates whether the control software is listening for connections on this interface" ~default_value:(Some (VBool false)); + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "Additional configuration"; + 
field ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~ty:Bool "disallow_unplug" "Prevent this PIF from being unplugged; set this to notify the management tool-stack that the PIF has a special use and should not be unplugged under any circumstances (e.g. because you're running storage traffic over it)"; + field ~in_oss_since:None ~ty:(Set(Ref _tunnel)) ~lifecycle:[Published, rel_cowley, "Indicates to which tunnel this PIF gives access"] ~qualifier:DynamicRO "tunnel_access_PIF_of" "Indicates to which tunnel this PIF gives access"; + field ~in_oss_since:None ~ty:(Set(Ref _tunnel)) ~lifecycle:[Published, rel_cowley, "Indicates to which tunnel this PIF provides transport"] ~qualifier:DynamicRO "tunnel_transport_PIF_of" "Indicates to which tunnel this PIF provides transport"; + field ~in_oss_since:None ~ty:pif_ipv6_configuration_mode ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "ipv6_configuration_mode" "Sets if and how this interface gets an IPv6 address" ~default_value:(Some (VEnum "None")); + field ~in_oss_since:None ~ty:(Set(String)) ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "IPv6" "IPv6 address" ~default_value:(Some (VSet [])); + field ~in_oss_since:None ~ty:String ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "ipv6_gateway" "IPv6 gateway" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:pif_primary_address_type ~lifecycle:[Prototyped, rel_tampa, ""] ~qualifier:DynamicRO "primary_address_type" "Which protocol should define the primary address of this interface" ~default_value:(Some (VEnum "IPv4")); + field ~in_oss_since:None ~ty:Bool ~lifecycle:[Published, rel_vgpu_productisation, ""] ~qualifier:StaticRO "managed" "Indicates whether the interface \ + is managed by xapi. If it is not, then xapi will not configure the interface, the commands PIF.plug/unplug/reconfigure_ip(v6) \ + can not be used, nor can the interface be bonded or have VLANs based on top through xapi." 
~default_value:(Some (VBool true)); + field ~lifecycle:[Published, rel_creedence, ""] ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "properties" "Additional configuration properties for the interface."; + field ~lifecycle:[Published, rel_dundee, ""] ~qualifier:DynamicRO ~ty:(Set(String)) ~default_value:(Some (VSet [])) "capabilities" "Additional capabilities on the interface."; + ] + () + +let pif_metrics = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_pif_metrics ~descr:"The metrics associated with a physical network interface" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + ~messages:[] ~contents: + [ uid _pif_metrics; + namespace ~name:"io" ~contents:iobandwidth (); + field ~qualifier:DynamicRO ~ty:Bool "carrier" "Report if the PIF got a carrier or not"; + field ~qualifier:DynamicRO ~ty:String "vendor_id" "Report vendor ID"; + field ~qualifier:DynamicRO ~ty:String "vendor_name" "Report vendor name"; + field ~qualifier:DynamicRO ~ty:String "device_id" "Report device ID"; + field ~qualifier:DynamicRO ~ty:String "device_name" "Report device name"; + field ~qualifier:DynamicRO ~ty:Int "speed" "Speed of the link (if available)"; + field ~qualifier:DynamicRO ~ty:Bool "duplex" "Full duplex capability of the link (if available)"; + field ~qualifier:DynamicRO ~ty:String "pci_bus_path" "PCI bus path of the pif (if available)"; + field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + () let bond_mode = - Enum ("bond_mode", [ - "balance-slb", "Source-level balancing"; - "active-backup", "Active/passive bonding: only one NIC is carrying traffic"; - "lacp", 
"Link aggregation control protocol"; - ]) + Enum ("bond_mode", [ + "balance-slb", "Source-level balancing"; + "active-backup", "Active/passive bonding: only one NIC is carrying traffic"; + "lacp", "Link aggregation control protocol"; + ]) let bond_create = call - ~name:"create" - ~doc:"Create an interface bond" - ~versioned_params:[ - {param_type=Ref _network; param_name="network"; param_doc="Network to add the bonded PIF to"; param_release=miami_release; param_default=None}; - {param_type=Set (Ref _pif); param_name="members"; param_doc="PIFs to add to this bond"; param_release=miami_release; param_default=None}; - {param_type=String; param_name="MAC"; param_doc="The MAC address to use on the bond itself. If this parameter is the empty string then the bond will inherit its MAC address from the primary slave."; param_release=miami_release; param_default=None}; - {param_type=bond_mode; param_name="mode"; param_doc="Bonding mode to use for the new bond"; param_release=boston_release; param_default=Some (VEnum "balance-slb")}; - {param_type=Map (String, String); param_name="properties"; param_doc="Additional configuration parameters specific to the bond mode"; param_release=tampa_release; param_default=Some (VMap [])}; - ] - ~result:(Ref _bond, "The reference of the created Bond object") - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"create" + ~doc:"Create an interface bond" + ~versioned_params:[ + {param_type=Ref _network; param_name="network"; param_doc="Network to add the bonded PIF to"; param_release=miami_release; param_default=None}; + {param_type=Set (Ref _pif); param_name="members"; param_doc="PIFs to add to this bond"; param_release=miami_release; param_default=None}; + {param_type=String; param_name="MAC"; param_doc="The MAC address to use on the bond itself. 
If this parameter is the empty string then the bond will inherit its MAC address from the primary slave."; param_release=miami_release; param_default=None}; + {param_type=bond_mode; param_name="mode"; param_doc="Bonding mode to use for the new bond"; param_release=boston_release; param_default=Some (VEnum "balance-slb")}; + {param_type=Map (String, String); param_name="properties"; param_doc="Additional configuration parameters specific to the bond mode"; param_release=tampa_release; param_default=Some (VMap [])}; + ] + ~result:(Ref _bond, "The reference of the created Bond object") + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let bond_destroy = call - ~name:"destroy" - ~doc:"Destroy an interface bond" - ~params:[Ref _bond, "self", "Bond to destroy"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~doc:"Destroy an interface bond" + ~params:[Ref _bond, "self", "Bond to destroy"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let bond_set_mode = call - ~name:"set_mode" - ~doc:"Change the bond mode" - ~params:[ - Ref _bond, "self", "The bond"; - bond_mode, "value", "The new bond mode"; - ] - ~lifecycle:[Published, rel_boston, ""] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_mode" + ~doc:"Change the bond mode" + ~params:[ + Ref _bond, "self", "The bond"; + bond_mode, "value", "The new bond mode"; + ] + ~lifecycle:[Published, rel_boston, ""] + ~allowed_roles:_R_POOL_OP + () let bond_set_property = call - ~name:"set_property" - ~doc:"Set the value of a property of the bond" - ~params:[ - Ref _bond, "self", "The bond"; - String, "name", "The property name"; - String, "value", "The property value"; - ] - ~in_product_since:rel_tampa - ~allowed_roles:_R_POOL_OP - () - -let bond = + ~name:"set_property" + ~doc:"Set the value of a property of the bond" + ~params:[ + Ref _bond, "self", "The bond"; + String, "name", "The property name"; + String, "value", "The property value"; + ] + ~in_product_since:rel_tampa 
+ ~allowed_roles:_R_POOL_OP + () + +let bond = create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_bond ~descr:"" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~doc_tags:[Networking] ~messages:[ bond_create; bond_destroy; bond_set_mode; bond_set_property ] ~contents: - [ uid _bond; - field ~in_oss_since:None ~in_product_since:rel_miami ~qualifier:StaticRO ~ty:(Ref _pif) "master" "The bonded interface" ~default_value:(Some (VRef "")); - field ~in_oss_since:None ~in_product_since:rel_miami ~qualifier:DynamicRO ~ty:(Set(Ref _pif)) "slaves" "The interfaces which are part of this bond"; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - field ~lifecycle:[Published, rel_boston, ""] ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _pif) "primary_slave" "The PIF of which the IP configuration and MAC were copied to the bond, and which will receive all configuration/VLANs/VIFs on the bond if the bond is destroyed"; - field ~lifecycle:[Published, rel_boston, ""] ~qualifier:DynamicRO ~default_value:(Some (VEnum "balance-slb")) ~ty:bond_mode "mode" "The algorithm used to distribute traffic among the bonded NICs"; - field ~in_oss_since:None ~in_product_since:rel_tampa ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "properties" "Additional configuration properties specific to the bond mode."; - field ~in_oss_since:None ~in_product_since:rel_tampa ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "links_up" "Number of links up in this bond"; - ] + [ uid _bond; + field ~in_oss_since:None ~in_product_since:rel_miami ~qualifier:StaticRO ~ty:(Ref _pif) "master" "The bonded interface" ~default_value:(Some (VRef "")); + field ~in_oss_since:None ~in_product_since:rel_miami 
~qualifier:DynamicRO ~ty:(Set(Ref _pif)) "slaves" "The interfaces which are part of this bond"; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + field ~lifecycle:[Published, rel_boston, ""] ~qualifier:DynamicRO ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _pif) "primary_slave" "The PIF of which the IP configuration and MAC were copied to the bond, and which will receive all configuration/VLANs/VIFs on the bond if the bond is destroyed"; + field ~lifecycle:[Published, rel_boston, ""] ~qualifier:DynamicRO ~default_value:(Some (VEnum "balance-slb")) ~ty:bond_mode "mode" "The algorithm used to distribute traffic among the bonded NICs"; + field ~in_oss_since:None ~in_product_since:rel_tampa ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "properties" "Additional configuration properties specific to the bond mode."; + field ~in_oss_since:None ~in_product_since:rel_tampa ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "links_up" "Number of links up in this bond"; + ] () let vlan_create = call - ~name:"create" - ~doc:"Create a VLAN mux/demuxer" - ~params:[ Ref _pif, "tagged_PIF", "PIF which receives the tagged traffic"; - Int, "tag", "VLAN tag to use"; - Ref _network, "network", "Network to receive the untagged traffic" ] - ~result:(Ref _vlan, "The reference of the created VLAN object") - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"create" + ~doc:"Create a VLAN mux/demuxer" + ~params:[ Ref _pif, "tagged_PIF", "PIF which receives the tagged traffic"; + Int, "tag", "VLAN tag to use"; + Ref _network, "network", "Network to receive the untagged traffic" ] + ~result:(Ref _vlan, "The reference of the created VLAN object") + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () let vlan_destroy = call - ~name:"destroy" - ~doc:"Destroy a VLAN mux/demuxer" - ~params:[Ref _vlan, "self", "VLAN 
mux/demuxer to destroy"] - ~in_product_since:rel_miami - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~doc:"Destroy a VLAN mux/demuxer" + ~params:[Ref _vlan, "self", "VLAN mux/demuxer to destroy"] + ~in_product_since:rel_miami + ~allowed_roles:_R_POOL_OP + () -let vlan = +let vlan = create_obj ~in_db:true ~in_product_since:rel_miami ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_vlan ~descr:"A VLAN mux/demux" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~doc_tags:[Networking] ~messages:[ vlan_create; vlan_destroy ] ~contents: ([ - uid _vlan; - field ~qualifier:StaticRO ~ty:(Ref _pif) ~in_product_since:rel_miami "tagged_PIF" "interface on which traffic is tagged" ~default_value:(Some (VRef "")); - field ~qualifier:DynamicRO ~ty:(Ref _pif) ~in_product_since:rel_miami "untagged_PIF" "interface on which traffic is untagged" ~default_value:(Some (VRef "")); - field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_miami "tag" "VLAN tag in use" ~default_value:(Some (VInt (-1L))); - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ]) - () - + uid _vlan; + field ~qualifier:StaticRO ~ty:(Ref _pif) ~in_product_since:rel_miami "tagged_PIF" "interface on which traffic is tagged" ~default_value:(Some (VRef "")); + field ~qualifier:DynamicRO ~ty:(Ref _pif) ~in_product_since:rel_miami "untagged_PIF" "interface on which traffic is untagged" ~default_value:(Some (VRef "")); + field ~qualifier:StaticRO ~ty:Int ~in_product_since:rel_miami "tag" "VLAN tag in use" ~default_value:(Some (VInt (-1L))); + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ]) + () + let tunnel_create = call - ~name:"create" - ~doc:"Create a tunnel" - ~params:[ Ref _pif, "transport_PIF", "PIF which receives the tagged traffic"; - 
Ref _network, "network", "Network to receive the tunnelled traffic" ] - ~result:(Ref _tunnel, "The reference of the created tunnel object") - ~lifecycle:[Published, rel_cowley, "Create a tunnel"] - ~allowed_roles:_R_POOL_OP - ~errs:[Api_errors.openvswitch_not_active; Api_errors.transport_pif_not_configured; Api_errors.is_tunnel_access_pif] - () + ~name:"create" + ~doc:"Create a tunnel" + ~params:[ Ref _pif, "transport_PIF", "PIF which receives the tagged traffic"; + Ref _network, "network", "Network to receive the tunnelled traffic" ] + ~result:(Ref _tunnel, "The reference of the created tunnel object") + ~lifecycle:[Published, rel_cowley, "Create a tunnel"] + ~allowed_roles:_R_POOL_OP + ~errs:[Api_errors.openvswitch_not_active; Api_errors.transport_pif_not_configured; Api_errors.is_tunnel_access_pif] + () let tunnel_destroy = call - ~name:"destroy" - ~doc:"Destroy a tunnel" - ~params:[Ref _tunnel, "self", "tunnel to destroy"] - ~lifecycle:[Published, rel_cowley, "Destroy a tunnel"] - ~allowed_roles:_R_POOL_OP - () - -let tunnel = - create_obj ~in_db:true ~lifecycle:[Published, rel_cowley, "A tunnel for network traffic"] ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_tunnel ~descr:"A tunnel for network traffic" ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~doc_tags:[Networking] - ~messages:[ tunnel_create; tunnel_destroy ] - ~contents:([ - uid _tunnel ~lifecycle:[Published, rel_cowley, "Unique identifier/object reference"]; - field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle:[Published, rel_cowley, "The interface through which the tunnel is accessed"] "access_PIF" "The interface through which the tunnel is accessed" ~default_value:(Some (VRef "")); - field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle:[Published, rel_cowley, "The interface used by the tunnel"] "transport_PIF" "The interface used by the tunnel" ~default_value:(Some (VRef "")); - field ~ty:(Map(String, String)) 
~lifecycle:[Published, rel_cowley, "Status information about the tunnel"] "status" "Status information about the tunnel" ~default_value:(Some (VMap [VString "active", VString "false"])); - field ~lifecycle:[Published, rel_cowley, "Additional configuration"] ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "Additional configuration"; - ]) - () + ~name:"destroy" + ~doc:"Destroy a tunnel" + ~params:[Ref _tunnel, "self", "tunnel to destroy"] + ~lifecycle:[Published, rel_cowley, "Destroy a tunnel"] + ~allowed_roles:_R_POOL_OP + () + +let tunnel = + create_obj ~in_db:true ~lifecycle:[Published, rel_cowley, "A tunnel for network traffic"] ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_tunnel ~descr:"A tunnel for network traffic" ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~doc_tags:[Networking] + ~messages:[ tunnel_create; tunnel_destroy ] + ~contents:([ + uid _tunnel ~lifecycle:[Published, rel_cowley, "Unique identifier/object reference"]; + field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle:[Published, rel_cowley, "The interface through which the tunnel is accessed"] "access_PIF" "The interface through which the tunnel is accessed" ~default_value:(Some (VRef "")); + field ~qualifier:StaticRO ~ty:(Ref _pif) ~lifecycle:[Published, rel_cowley, "The interface used by the tunnel"] "transport_PIF" "The interface used by the tunnel" ~default_value:(Some (VRef "")); + field ~ty:(Map(String, String)) ~lifecycle:[Published, rel_cowley, "Status information about the tunnel"] "status" "Status information about the tunnel" ~default_value:(Some (VMap [VString "active", VString "false"])); + field ~lifecycle:[Published, rel_cowley, "Additional configuration"] ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "Additional configuration"; + ]) + () let pbd_set_device_config = call - ~name:"set_device_config" - ~in_oss_since:None - ~in_product_since:rel_miami - 
~params:[Ref _pbd, "self", "The PBD to modify"; - Map(String, String), "value", "The new value of the PBD's device_config"] - ~doc:"Sets the PBD's device_config field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_device_config" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _pbd, "self", "The PBD to modify"; + Map(String, String), "value", "The new value of the PBD's device_config"] + ~doc:"Sets the PBD's device_config field" + ~allowed_roles:_R_POOL_OP + () let pbd = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_pbd ~descr:"The physical block devices through which hosts access SRs" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[ + ~messages:[ pbd_plug; pbd_unplug; pbd_set_device_config - ] ~contents: + ] ~contents: [ uid _pbd; field ~qualifier:StaticRO ~ty:(Ref _host) "host" "physical machine on which the pbd is available"; field ~qualifier:StaticRO ~ty:(Ref _sr) "SR" "the storage repository that the pbd realises"; @@ -5352,218 +5352,218 @@ let device_status_fields = (* VIF messages *) let vif_ipv4_configuration_mode = Enum ("vif_ipv4_configuration_mode", [ - "None", "Follow the default IPv4 configuration of the guest (this is guest-dependent)"; - "Static", "Static IPv4 address configuration"; -]) + "None", "Follow the default IPv4 configuration of the guest (this is guest-dependent)"; + "Static", "Static IPv4 address configuration"; + ]) let vif_ipv6_configuration_mode = Enum ("vif_ipv6_configuration_mode", [ - "None", "Follow the default IPv6 configuration of the guest (this is guest-dependent)"; - "Static", "Static IPv6 address configuration"; -]) + "None", "Follow the default IPv6 configuration of the guest (this is guest-dependent)"; + "Static", "Static IPv6 address configuration"; + ]) let vif_plug = call - ~name:"plug" - ~in_product_since:rel_rio - ~doc:"Hotplug the 
specified VIF, dynamically attaching it to the running VM" - ~params:[Ref _vif, "self", "The VIF to hotplug"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"plug" + ~in_product_since:rel_rio + ~doc:"Hotplug the specified VIF, dynamically attaching it to the running VM" + ~params:[Ref _vif, "self", "The VIF to hotplug"] + ~allowed_roles:_R_VM_ADMIN + () let vif_unplug = call - ~name:"unplug" - ~in_product_since:rel_rio - ~doc:"Hot-unplug the specified VIF, dynamically unattaching it from the running VM" - ~params:[Ref _vif, "self", "The VIF to hot-unplug"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"unplug" + ~in_product_since:rel_rio + ~doc:"Hot-unplug the specified VIF, dynamically unattaching it from the running VM" + ~params:[Ref _vif, "self", "The VIF to hot-unplug"] + ~allowed_roles:_R_VM_ADMIN + () let vif_unplug_force = call - ~name:"unplug_force" - ~in_product_since:rel_boston - ~doc:"Forcibly unplug the specified VIF" - ~params:[Ref _vif, "self", "The VIF to forcibly unplug"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"unplug_force" + ~in_product_since:rel_boston + ~doc:"Forcibly unplug the specified VIF" + ~params:[Ref _vif, "self", "The VIF to forcibly unplug"] + ~allowed_roles:_R_VM_ADMIN + () let vif_move = call - ~name:"move" - ~in_product_since:rel_ely - ~doc:"Move the specified VIF to the specified network, even while the VM is running" - ~params:[Ref _vif, "self", "The VIF to move"; - Ref _network, "network", "The network to move it to"] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"move" + ~in_product_since:rel_ely + ~doc:"Move the specified VIF to the specified network, even while the VM is running" + ~params:[Ref _vif, "self", "The VIF to move"; + Ref _network, "network", "The network to move it to"] + ~allowed_roles:_R_VM_ADMIN + () let vif_operations = - Enum ("vif_operations", - [ "attach", "Attempting to attach this VIF to a VM"; - "plug", "Attempting to hotplug this VIF"; - "unplug", "Attempting to hot unplug this VIF"; - ]) + Enum 
("vif_operations", + [ "attach", "Attempting to attach this VIF to a VM"; + "plug", "Attempting to hotplug this VIF"; + "unplug", "Attempting to hot unplug this VIF"; + ]) let vif_locking_mode = - Enum ("vif_locking_mode", [ - "network_default", "No specific configuration set - default network policy applies"; - "locked", "Only traffic to a specific MAC and a list of IPv4 or IPv6 addresses is permitted"; - "unlocked", "All traffic is permitted"; - "disabled", "No traffic is permitted"; - ]) + Enum ("vif_locking_mode", [ + "network_default", "No specific configuration set - default network policy applies"; + "locked", "Only traffic to a specific MAC and a list of IPv4 or IPv6 addresses is permitted"; + "unlocked", "All traffic is permitted"; + "disabled", "No traffic is permitted"; + ]) let vif_set_locking_mode = call - ~name:"set_locking_mode" - ~in_product_since:rel_tampa - ~doc:"Set the locking mode for this VIF" - ~params:[ - Ref _vif, "self", "The VIF whose locking mode will be set"; - vif_locking_mode, "value", "The new locking mode for the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_locking_mode" + ~in_product_since:rel_tampa + ~doc:"Set the locking mode for this VIF" + ~params:[ + Ref _vif, "self", "The VIF whose locking mode will be set"; + vif_locking_mode, "value", "The new locking mode for the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_set_ipv4_allowed = call - ~name:"set_ipv4_allowed" - ~in_product_since:rel_tampa - ~doc:"Set the IPv4 addresses to which traffic on this VIF can be restricted" - ~params:[ - Ref _vif, "self", "The VIF which the IP addresses will be associated with"; - Set String, "value", "The IP addresses which will be associated with the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_ipv4_allowed" + ~in_product_since:rel_tampa + ~doc:"Set the IPv4 addresses to which traffic on this VIF can be restricted" + ~params:[ + Ref _vif, "self", "The VIF which the IP addresses will be associated with"; + Set String, 
"value", "The IP addresses which will be associated with the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_add_ipv4_allowed = call - ~name:"add_ipv4_allowed" - ~in_product_since:rel_tampa - ~doc:"Associates an IPv4 address with this VIF" - ~params:[ - Ref _vif, "self", "The VIF which the IP address will be associated with"; - String, "value", "The IP address which will be associated with the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"add_ipv4_allowed" + ~in_product_since:rel_tampa + ~doc:"Associates an IPv4 address with this VIF" + ~params:[ + Ref _vif, "self", "The VIF which the IP address will be associated with"; + String, "value", "The IP address which will be associated with the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_remove_ipv4_allowed = call - ~name:"remove_ipv4_allowed" - ~in_product_since:rel_tampa - ~doc:"Removes an IPv4 address from this VIF" - ~params:[ - Ref _vif, "self", "The VIF from which the IP address will be removed"; - String, "value", "The IP address which will be removed from the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"remove_ipv4_allowed" + ~in_product_since:rel_tampa + ~doc:"Removes an IPv4 address from this VIF" + ~params:[ + Ref _vif, "self", "The VIF from which the IP address will be removed"; + String, "value", "The IP address which will be removed from the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_set_ipv6_allowed = call - ~name:"set_ipv6_allowed" - ~in_product_since:rel_tampa - ~doc:"Set the IPv6 addresses to which traffic on this VIF can be restricted" - ~params:[ - Ref _vif, "self", "The VIF which the IP addresses will be associated with"; - Set String, "value", "The IP addresses which will be associated with the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_ipv6_allowed" + ~in_product_since:rel_tampa + ~doc:"Set the IPv6 addresses to which traffic on this VIF can be restricted" + ~params:[ + Ref _vif, "self", "The VIF which the IP addresses will be associated with"; + Set 
String, "value", "The IP addresses which will be associated with the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_add_ipv6_allowed = call - ~name:"add_ipv6_allowed" - ~in_product_since:rel_tampa - ~doc:"Associates an IPv6 address with this VIF" - ~params:[ - Ref _vif, "self", "The VIF which the IP address will be associated with"; - String, "value", "The IP address which will be associated with the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"add_ipv6_allowed" + ~in_product_since:rel_tampa + ~doc:"Associates an IPv6 address with this VIF" + ~params:[ + Ref _vif, "self", "The VIF which the IP address will be associated with"; + String, "value", "The IP address which will be associated with the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_remove_ipv6_allowed = call - ~name:"remove_ipv6_allowed" - ~in_product_since:rel_tampa - ~doc:"Removes an IPv6 address from this VIF" - ~params:[ - Ref _vif, "self", "The VIF from which the IP address will be removed"; - String, "value", "The IP address which will be removed from the VIF"; - ] - ~allowed_roles:_R_POOL_OP - () + ~name:"remove_ipv6_allowed" + ~in_product_since:rel_tampa + ~doc:"Removes an IPv6 address from this VIF" + ~params:[ + Ref _vif, "self", "The VIF from which the IP address will be removed"; + String, "value", "The IP address which will be removed from the VIF"; + ] + ~allowed_roles:_R_POOL_OP + () let vif_configure_ipv4 = call - ~name:"configure_ipv4" - ~in_product_since:rel_dundee - ~doc:"Configure IPv4 settings for this virtual interface" - ~versioned_params:[ - {param_type=Ref _vif; param_name="self"; param_doc="The VIF to configure"; param_release=dundee_release; param_default=None}; - {param_type=vif_ipv4_configuration_mode; param_name="mode"; param_doc="Whether to use static or no IPv4 assignment"; param_release=dundee_release; param_default=None}; - {param_type=String; param_name="address"; param_doc="The IPv4 address in / format (for static mode only)"; param_release=dundee_release; 
param_default=Some(VString "")}; - {param_type=String; param_name="gateway"; param_doc="The IPv4 gateway (for static mode only; leave empty to not set a gateway)"; param_release=dundee_release; param_default=Some(VString "")} - ] - ~allowed_roles:_R_VM_OP - () + ~name:"configure_ipv4" + ~in_product_since:rel_dundee + ~doc:"Configure IPv4 settings for this virtual interface" + ~versioned_params:[ + {param_type=Ref _vif; param_name="self"; param_doc="The VIF to configure"; param_release=dundee_release; param_default=None}; + {param_type=vif_ipv4_configuration_mode; param_name="mode"; param_doc="Whether to use static or no IPv4 assignment"; param_release=dundee_release; param_default=None}; + {param_type=String; param_name="address"; param_doc="The IPv4 address in / format (for static mode only)"; param_release=dundee_release; param_default=Some(VString "")}; + {param_type=String; param_name="gateway"; param_doc="The IPv4 gateway (for static mode only; leave empty to not set a gateway)"; param_release=dundee_release; param_default=Some(VString "")} + ] + ~allowed_roles:_R_VM_OP + () let vif_configure_ipv6 = call - ~name:"configure_ipv6" - ~in_product_since:rel_dundee - ~doc:"Configure IPv6 settings for this virtual interface" - ~versioned_params:[ - {param_type=Ref _vif; param_name="self"; param_doc="The VIF to configure"; param_release=dundee_release; param_default=None}; - {param_type=vif_ipv6_configuration_mode; param_name="mode"; param_doc="Whether to use static or no IPv6 assignment"; param_release=dundee_release; param_default=None}; - {param_type=String; param_name="address"; param_doc="The IPv6 address in / format (for static mode only)"; param_release=dundee_release; param_default=Some(VString "")}; - {param_type=String; param_name="gateway"; param_doc="The IPv6 gateway (for static mode only; leave empty to not set a gateway)"; param_release=dundee_release; param_default=Some(VString "")} - ] - ~allowed_roles:_R_VM_OP - () + ~name:"configure_ipv6" + 
~in_product_since:rel_dundee + ~doc:"Configure IPv6 settings for this virtual interface" + ~versioned_params:[ + {param_type=Ref _vif; param_name="self"; param_doc="The VIF to configure"; param_release=dundee_release; param_default=None}; + {param_type=vif_ipv6_configuration_mode; param_name="mode"; param_doc="Whether to use static or no IPv6 assignment"; param_release=dundee_release; param_default=None}; + {param_type=String; param_name="address"; param_doc="The IPv6 address in / format (for static mode only)"; param_release=dundee_release; param_default=Some(VString "")}; + {param_type=String; param_name="gateway"; param_doc="The IPv6 gateway (for static mode only; leave empty to not set a gateway)"; param_release=dundee_release; param_default=Some(VString "")} + ] + ~allowed_roles:_R_VM_OP + () (** A virtual network interface *) let vif = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vif ~descr:"A virtual network interface" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~doc_tags:[Networking] - ~messages:[vif_plug; vif_unplug; vif_unplug_force; vif_move; vif_set_locking_mode; - vif_set_ipv4_allowed; vif_add_ipv4_allowed; vif_remove_ipv4_allowed; vif_set_ipv6_allowed; vif_add_ipv6_allowed; vif_remove_ipv6_allowed; - vif_configure_ipv4; vif_configure_ipv6] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vif ~descr:"A virtual network interface" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~doc_tags:[Networking] + ~messages:[vif_plug; vif_unplug; vif_unplug_force; vif_move; vif_set_locking_mode; + vif_set_ipv4_allowed; vif_add_ipv4_allowed; vif_remove_ipv4_allowed; vif_set_ipv6_allowed; vif_add_ipv6_allowed; 
vif_remove_ipv6_allowed; + vif_configure_ipv4; vif_configure_ipv6] + ~contents: ([ uid _vif; ] @ (allowed_and_current_operations vif_operations) @ [ - field ~qualifier:StaticRO "device" "order in which VIF backends are created by xapi"; - field ~qualifier:StaticRO ~ty:(Ref _network) "network" "virtual network to which this vif is connected"; - field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "virtual machine to which this vif is connected"; - field ~qualifier:StaticRO ~ty:String "MAC" "ethernet MAC address of virtual interface, as exposed to guest"; - field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "reserved" "true if the VIF is reserved pending a reboot/migrate"; - field ~ty:(Map(String, String)) "other_config" "additional configuration"; + field ~qualifier:StaticRO "device" "order in which VIF backends are created by xapi"; + field ~qualifier:StaticRO ~ty:(Ref _network) "network" "virtual network to which this vif is connected"; + field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "virtual machine to which this vif is connected"; + field ~qualifier:StaticRO ~ty:String "MAC" "ethernet MAC address of virtual interface, as exposed to guest"; + field ~qualifier:StaticRO ~ty:Int "MTU" "MTU in octets"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool "reserved" "true if the VIF is reserved pending a reboot/migrate"; + field ~ty:(Map(String, String)) "other_config" "additional configuration"; ] @ device_status_fields @ - [ namespace ~name:"qos" ~contents:(qos "VIF") (); ] @ - [ field ~qualifier:DynamicRO ~ty:(Ref _vif_metrics) "metrics" "metrics associated with this VIF"; - field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VBool false)) ~ty:Bool "MAC_autogenerated" "true if the MAC was autogenerated; false indicates it was set manually"; - field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VEnum "network_default")) 
~ty:vif_locking_mode "locking_mode" "current locking mode of the VIF"; - field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VSet [])) ~ty:(Set (String)) "ipv4_allowed" "A list of IPv4 addresses which can be used to filter traffic passing through this VIF"; - field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VSet [])) ~ty:(Set (String)) "ipv6_allowed" "A list of IPv6 addresses which can be used to filter traffic passing through this VIF"; - field ~ty:vif_ipv4_configuration_mode ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_configuration_mode" "Determines whether IPv4 addresses are configured on the VIF" ~default_value:(Some (VEnum "None")); - field ~ty:(Set (String)) ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_addresses" "IPv4 addresses in CIDR format" ~default_value:(Some (VSet [])); - field ~ty:String ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_gateway" "IPv4 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")); - field ~ty:vif_ipv6_configuration_mode ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_configuration_mode" "Determines whether IPv6 addresses are configured on the VIF" ~default_value:(Some (VEnum "None")); - field ~ty:(Set (String)) ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_addresses" "IPv6 addresses in CIDR format" ~default_value:(Some (VSet [])); - field ~ty:String ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_gateway" "IPv6 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")); - ]) - () - -let vif_metrics = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_vif_metrics ~descr:"The metrics associated with a virtual network device" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~doc_tags:[Networking] - 
~messages:[] ~contents: - [ uid _vif_metrics; - namespace ~name:"io" ~contents:iobandwidth (); - field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - () + [ namespace ~name:"qos" ~contents:(qos "VIF") (); ] @ + [ field ~qualifier:DynamicRO ~ty:(Ref _vif_metrics) "metrics" "metrics associated with this VIF"; + field ~qualifier:DynamicRO ~in_product_since:rel_george ~default_value:(Some (VBool false)) ~ty:Bool "MAC_autogenerated" "true if the MAC was autogenerated; false indicates it was set manually"; + field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VEnum "network_default")) ~ty:vif_locking_mode "locking_mode" "current locking mode of the VIF"; + field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VSet [])) ~ty:(Set (String)) "ipv4_allowed" "A list of IPv4 addresses which can be used to filter traffic passing through this VIF"; + field ~qualifier:StaticRO ~in_product_since:rel_tampa ~default_value:(Some (VSet [])) ~ty:(Set (String)) "ipv6_allowed" "A list of IPv6 addresses which can be used to filter traffic passing through this VIF"; + field ~ty:vif_ipv4_configuration_mode ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_configuration_mode" "Determines whether IPv4 addresses are configured on the VIF" ~default_value:(Some (VEnum "None")); + field ~ty:(Set (String)) ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_addresses" "IPv4 addresses in CIDR format" ~default_value:(Some (VSet [])); + field ~ty:String ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv4_gateway" "IPv4 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")); + field ~ty:vif_ipv6_configuration_mode ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_configuration_mode" "Determines whether IPv6 
addresses are configured on the VIF" ~default_value:(Some (VEnum "None")); + field ~ty:(Set (String)) ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_addresses" "IPv6 addresses in CIDR format" ~default_value:(Some (VSet [])); + field ~ty:String ~in_product_since:rel_dundee ~qualifier:DynamicRO "ipv6_gateway" "IPv6 gateway (the empty string means that no gateway is set)" ~default_value:(Some (VString "")); + ]) + () + +let vif_metrics = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_vif_metrics ~descr:"The metrics associated with a virtual network device" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~doc_tags:[Networking] + ~messages:[] ~contents: + [ uid _vif_metrics; + namespace ~name:"io" ~contents:iobandwidth (); + field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + () let data_source = - create_obj ~in_db:false ~in_product_since:rel_orlando ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_data_source ~descr:"Data sources for logging in RRDs" + create_obj ~in_db:false ~in_product_since:rel_orlando ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_data_source ~descr:"Data sources for logging in RRDs" ~gen_events:false ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN @@ -5575,204 +5575,204 @@ let data_source = field ~qualifier:DynamicRO ~ty:Float "min" "the minimum value of the data source"; field ~qualifier:DynamicRO ~ty:Float "max" "the maximum value of the data source"; field ~qualifier:DynamicRO ~ty:Float "value" "current value of the data source" 
] - + () let storage_operations = - Enum ("storage_operations", - [ "scan", "Scanning backends for new or deleted VDIs"; - "destroy", "Destroying the SR"; - "forget", "Forgetting about SR"; - "plug", "Plugging a PBD into this SR"; - "unplug", "Unplugging a PBD from this SR"; - "update", "Refresh the fields on the SR"; - "vdi_create", "Creating a new VDI"; - "vdi_introduce", "Introducing a new VDI"; - "vdi_destroy", "Destroying a VDI"; - "vdi_resize", "Resizing a VDI"; - "vdi_clone", "Cloneing a VDI"; - "vdi_snapshot", "Snapshotting a VDI"; - "vdi_mirror", "Mirroring a VDI"; - "pbd_create", "Creating a PBD for this SR"; - "pbd_destroy", "Destroying one of this SR's PBDs"; ]) + Enum ("storage_operations", + [ "scan", "Scanning backends for new or deleted VDIs"; + "destroy", "Destroying the SR"; + "forget", "Forgetting about SR"; + "plug", "Plugging a PBD into this SR"; + "unplug", "Unplugging a PBD from this SR"; + "update", "Refresh the fields on the SR"; + "vdi_create", "Creating a new VDI"; + "vdi_introduce", "Introducing a new VDI"; + "vdi_destroy", "Destroying a VDI"; + "vdi_resize", "Resizing a VDI"; + "vdi_clone", "Cloneing a VDI"; + "vdi_snapshot", "Snapshotting a VDI"; + "vdi_mirror", "Mirroring a VDI"; + "pbd_create", "Creating a PBD for this SR"; + "pbd_destroy", "Destroying one of this SR's PBDs"; ]) let sr_set_virtual_allocation = call - ~name:"set_virtual_allocation" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _sr, "self", "The SR to modify"; - Int, "value", "The new value of the SR's virtual_allocation"] - ~flags:[`Session] - ~doc:"Sets the SR's virtual_allocation field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_virtual_allocation" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _sr, "self", "The SR to modify"; + Int, "value", "The new value of the SR's virtual_allocation"] + ~flags:[`Session] + ~doc:"Sets the SR's virtual_allocation field" + ~allowed_roles:_R_POOL_OP + () let sr_set_physical_size = call - 
~name:"set_physical_size" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _sr, "self", "The SR to modify"; - Int, "value", "The new value of the SR's physical_size"] - ~flags:[`Session] - ~doc:"Sets the SR's physical_size field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_physical_size" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _sr, "self", "The SR to modify"; + Int, "value", "The new value of the SR's physical_size"] + ~flags:[`Session] + ~doc:"Sets the SR's physical_size field" + ~allowed_roles:_R_POOL_OP + () let sr_set_physical_utilisation = call - ~name:"set_physical_utilisation" - ~in_oss_since:None - ~in_product_since:rel_miami - ~flags:[`Session] - ~params:[Ref _sr, "self", "The SR to modify"; - Int, "value", "The new value of the SR's physical utilisation"] - ~doc:"Sets the SR's physical_utilisation field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_physical_utilisation" + ~in_oss_since:None + ~in_product_since:rel_miami + ~flags:[`Session] + ~params:[Ref _sr, "self", "The SR to modify"; + Int, "value", "The new value of the SR's physical utilisation"] + ~doc:"Sets the SR's physical_utilisation field" + ~allowed_roles:_R_POOL_OP + () let sr_update = call - ~name:"update" - ~in_oss_since:None - ~in_product_since:rel_symc - ~params:[Ref _sr, "sr", "The SR whose fields should be refreshed" ] - ~doc:"Refresh the fields on the SR object" - ~allowed_roles:_R_POOL_OP - () + ~name:"update" + ~in_oss_since:None + ~in_product_since:rel_symc + ~params:[Ref _sr, "sr", "The SR whose fields should be refreshed" ] + ~doc:"Refresh the fields on the SR object" + ~allowed_roles:_R_POOL_OP + () let sr_assert_can_host_ha_statefile = call - ~name:"assert_can_host_ha_statefile" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _sr, "sr", "The SR to query" ] - ~doc:"Returns successfully if the given SR can host an HA statefile. 
Otherwise returns an error to explain why not" - ~allowed_roles:_R_POOL_OP - () + ~name:"assert_can_host_ha_statefile" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _sr, "sr", "The SR to query" ] + ~doc:"Returns successfully if the given SR can host an HA statefile. Otherwise returns an error to explain why not" + ~allowed_roles:_R_POOL_OP + () let sr_assert_supports_database_replication = call - ~name:"assert_supports_database_replication" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _sr, "sr", "The SR to query"] - ~doc:"Returns successfully if the given SR supports database replication. Otherwise returns an error to explain why not." - ~allowed_roles:_R_POOL_OP - () + ~name:"assert_supports_database_replication" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _sr, "sr", "The SR to query"] + ~doc:"Returns successfully if the given SR supports database replication. Otherwise returns an error to explain why not." + ~allowed_roles:_R_POOL_OP + () let sr_enable_database_replication = call - ~name:"enable_database_replication" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _sr, "sr", "The SR to which metadata should be replicated"] - ~allowed_roles:_R_POOL_OP - () + ~name:"enable_database_replication" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _sr, "sr", "The SR to which metadata should be replicated"] + ~allowed_roles:_R_POOL_OP + () let sr_disable_database_replication = call - ~name:"disable_database_replication" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _sr, "sr", "The SR to which metadata should be no longer replicated"] - ~allowed_roles:_R_POOL_OP - () + ~name:"disable_database_replication" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _sr, "sr", "The SR to which metadata should be no longer replicated"] + ~allowed_roles:_R_POOL_OP + () (** A storage repository. 
Note we overide default create/destroy methods with our own here... *) let storage_repository = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_sr ~descr:"A storage repository" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[ sr_create; sr_introduce; sr_make; sr_destroy; sr_forget; - sr_update; - sr_get_supported_types; sr_scan; sr_probe; sr_set_shared; - sr_set_name_label; sr_set_name_description; - sr_create_new_blob; - sr_set_physical_size; sr_set_virtual_allocation; sr_set_physical_utilisation; - sr_assert_can_host_ha_statefile; - sr_assert_supports_database_replication; - sr_enable_database_replication; - sr_disable_database_replication; - sr_get_data_sources; - sr_record_data_source; - sr_query_data_source; - sr_forget_data_source_archives; - - ] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_sr ~descr:"A storage repository" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages:[ sr_create; sr_introduce; sr_make; sr_destroy; sr_forget; + sr_update; + sr_get_supported_types; sr_scan; sr_probe; sr_set_shared; + sr_set_name_label; sr_set_name_description; + sr_create_new_blob; + sr_set_physical_size; sr_set_virtual_allocation; sr_set_physical_utilisation; + sr_assert_can_host_ha_statefile; + sr_assert_supports_database_replication; + sr_enable_database_replication; + sr_disable_database_replication; + sr_get_data_sources; + sr_record_data_source; + sr_query_data_source; + sr_forget_data_source_archives; + + ] + ~contents: ([ uid _sr; - namespace ~name:"name" ~contents:(names oss_since_303 StaticRO) (); - ] @ (allowed_and_current_operations storage_operations) @ [ - field ~ty:(Set(Ref _vdi)) 
~qualifier:DynamicRO "VDIs" "all virtual disks known to this storage repository"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" "describes how particular hosts can see this storage repository"; - field ~ty:Int ~qualifier:DynamicRO "virtual_allocation" "sum of virtual_sizes of all VDIs in this storage repository (in bytes)"; - field ~ty:Int ~qualifier:DynamicRO "physical_utilisation" "physical space currently utilised on this storage repository (in bytes). Note that for sparse disk formats, physical_utilisation may be less than virtual_allocation"; - field ~ty:Int ~qualifier:StaticRO "physical_size" "total physical size of the repository (in bytes)"; - field ~qualifier:StaticRO "type" "type of the storage repository"; - field ~qualifier:StaticRO "content_type" "the type of the SR's content, if required (e.g. ISOs)"; - field ~qualifier:DynamicRO "shared" ~ty:Bool "true if this SR is (capable of being) shared between multiple hosts"; - field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; - field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; - field ~ty:Bool ~qualifier:DynamicRO ~in_oss_since:None ~internal_only:true "default_vdi_visibility" ""; - field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "sm_config" "SM dependent data" ~default_value:(Some (VMap [])); - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this SR"; - field ~qualifier:DynamicRO ~in_product_since:rel_cowley ~ty:Bool ~default_value:(Some (VBool false)) "local_cache_enabled" "True if this SR is assigned to be the local cache for its host"; - field ~qualifier:DynamicRO ~in_product_since:rel_boston ~ty:(Ref _dr_task) 
~default_value:(Some (VRef (Ref.string_of Ref.null))) "introduced_by" "The disaster recovery task which introduced this SR"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool false)) "clustered" "True if the SR is using aggregated local storage"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool false)) "is_tools_sr" "True if this is the SR that contains the Tools ISO VDIs"; - ]) - () + namespace ~name:"name" ~contents:(names oss_since_303 StaticRO) (); + ] @ (allowed_and_current_operations storage_operations) @ [ + field ~ty:(Set(Ref _vdi)) ~qualifier:DynamicRO "VDIs" "all virtual disks known to this storage repository"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pbd)) "PBDs" "describes how particular hosts can see this storage repository"; + field ~ty:Int ~qualifier:DynamicRO "virtual_allocation" "sum of virtual_sizes of all VDIs in this storage repository (in bytes)"; + field ~ty:Int ~qualifier:DynamicRO "physical_utilisation" "physical space currently utilised on this storage repository (in bytes). Note that for sparse disk formats, physical_utilisation may be less than virtual_allocation"; + field ~ty:Int ~qualifier:StaticRO "physical_size" "total physical size of the repository (in bytes)"; + field ~qualifier:StaticRO "type" "type of the storage repository"; + field ~qualifier:StaticRO "content_type" "the type of the SR's content, if required (e.g. 
ISOs)"; + field ~qualifier:DynamicRO "shared" ~ty:Bool "true if this SR is (capable of being) shared between multiple hosts"; + field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; + field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; + field ~ty:Bool ~qualifier:DynamicRO ~in_oss_since:None ~internal_only:true "default_vdi_visibility" ""; + field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "sm_config" "SM dependent data" ~default_value:(Some (VMap [])); + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this SR"; + field ~qualifier:DynamicRO ~in_product_since:rel_cowley ~ty:Bool ~default_value:(Some (VBool false)) "local_cache_enabled" "True if this SR is assigned to be the local cache for its host"; + field ~qualifier:DynamicRO ~in_product_since:rel_boston ~ty:(Ref _dr_task) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "introduced_by" "The disaster recovery task which introduced this SR"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool false)) "clustered" "True if the SR is using aggregated local storage"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_dundee, ""] ~ty:Bool ~default_value:(Some (VBool false)) "is_tools_sr" "True if this is the SR that contains the Tools ISO VDIs"; + ]) + () (** XXX: just make this a field and be done with it. Cowardly refusing to change the schema for now. 
*) let sm_get_driver_filename = call - ~name:"get_driver_filename" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _sm, "self", "The SM to query" ] - ~result:(String, "The SM's driver_filename field") - ~doc:"Gets the SM's driver_filename field" - () - -let storage_plugin = + ~name:"get_driver_filename" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _sm, "self", "The SM to query" ] + ~result:(String, "The SM's driver_filename field") + ~doc:"Gets the SM's driver_filename field" + () + +let storage_plugin = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_sm ~descr:"A storage manager plugin" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages:[ ] ~contents: - ([ uid _sm; - namespace ~name:"name" ~contents:(names None DynamicRO) (); - field ~in_oss_since:None ~qualifier:DynamicRO "type" "SR.type"; - field ~in_oss_since:None ~qualifier:DynamicRO "vendor" "Vendor who created this plugin"; - field ~in_oss_since:None ~qualifier:DynamicRO "copyright" "Entity which owns the copyright of this plugin"; - field ~in_oss_since:None ~qualifier:DynamicRO "version" "Version of the plugin"; - field ~in_oss_since:None ~qualifier:DynamicRO "required_api_version" "Minimum SM API version required on the server"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map(String,String)) "configuration" "names and descriptions of device config keys"; - field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_miami ~lifecycle:[ Deprecated, rel_clearwater, "Use SM.features instead"; ] ~ty:(Set(String)) "capabilities" "capabilities of the SM plugin" ~default_value:(Some (VSet [])); - field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_clearwater ~ty:(Map(String, Int)) "features" "capabilities of the SM plugin, with capability version numbers" ~default_value:(Some (VMap [])); 
- field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - field ~in_product_since:rel_orlando ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "driver_filename" "filename of the storage driver"; - field ~in_product_since:rel_dundee ~qualifier:DynamicRO ~default_value:(Some (VSet [])) ~ty:(Set String) "required_cluster_stack" "The storage plugin requires that one of these cluster stacks is configured and running."; - ]) + ([ uid _sm; + namespace ~name:"name" ~contents:(names None DynamicRO) (); + field ~in_oss_since:None ~qualifier:DynamicRO "type" "SR.type"; + field ~in_oss_since:None ~qualifier:DynamicRO "vendor" "Vendor who created this plugin"; + field ~in_oss_since:None ~qualifier:DynamicRO "copyright" "Entity which owns the copyright of this plugin"; + field ~in_oss_since:None ~qualifier:DynamicRO "version" "Version of the plugin"; + field ~in_oss_since:None ~qualifier:DynamicRO "required_api_version" "Minimum SM API version required on the server"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map(String,String)) "configuration" "names and descriptions of device config keys"; + field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_miami ~lifecycle:[ Deprecated, rel_clearwater, "Use SM.features instead"; ] ~ty:(Set(String)) "capabilities" "capabilities of the SM plugin" ~default_value:(Some (VSet [])); + field ~in_oss_since:None ~qualifier:DynamicRO ~in_product_since:rel_clearwater ~ty:(Map(String, Int)) "features" "capabilities of the SM plugin, with capability version numbers" ~default_value:(Some (VMap [])); + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + field ~in_product_since:rel_orlando ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "driver_filename" "filename of the storage driver"; + field ~in_product_since:rel_dundee 
~qualifier:DynamicRO ~default_value:(Some (VSet [])) ~ty:(Set String) "required_cluster_stack" "The storage plugin requires that one of these cluster stacks is configured and running."; + ]) () let lvhd_enable_thin_provisioning = call - ~name:"enable_thin_provisioning" - ~in_oss_since:None - ~in_product_since:rel_dundee - ~allowed_roles:_R_POOL_ADMIN - ~params:[ - Ref _host, "host", "The LVHD Host to upgrade to being thin-provisioned."; - Ref _sr, "SR", "The LVHD SR to upgrade to being thin-provisioned."; - Int, "initial_allocation", "The initial amount of space to allocate to a newly-created VDI in bytes"; - Int, "allocation_quantum", "The amount of space to allocate to a VDI when it needs to be enlarged in bytes"; - ] - ~doc:"Upgrades an LVHD SR to enable thin-provisioning. Future VDIs created in this SR will be thinly-provisioned, although existing VDIs will be left alone. Note that the SR must be attached to the SRmaster for upgrade to work." - ~forward_to:(HostExtension "LVHD.enable_thin_provisioning") - ~result:(String, "Message from LVHD.enable_thin_provisioning extension") - () - -let lvhd = + ~name:"enable_thin_provisioning" + ~in_oss_since:None + ~in_product_since:rel_dundee + ~allowed_roles:_R_POOL_ADMIN + ~params:[ + Ref _host, "host", "The LVHD Host to upgrade to being thin-provisioned."; + Ref _sr, "SR", "The LVHD SR to upgrade to being thin-provisioned."; + Int, "initial_allocation", "The initial amount of space to allocate to a newly-created VDI in bytes"; + Int, "allocation_quantum", "The amount of space to allocate to a VDI when it needs to be enlarged in bytes"; + ] + ~doc:"Upgrades an LVHD SR to enable thin-provisioning. Future VDIs created in this SR will be thinly-provisioned, although existing VDIs will be left alone. Note that the SR must be attached to the SRmaster for upgrade to work." 
+ ~forward_to:(HostExtension "LVHD.enable_thin_provisioning") + ~result:(String, "Message from LVHD.enable_thin_provisioning extension") + () + +let lvhd = create_obj ~in_db:true ~in_product_since:rel_dundee ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_lvhd ~descr:"LVHD SR specific operations" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_ADMIN - ~messages:[ + ~messages:[ lvhd_enable_thin_provisioning; ] ~contents: [ @@ -5781,8 +5781,8 @@ let lvhd = () (* --- rws: removed this after talking to Andy and Julian -let filesystem = - { name = _filesystem; description = "An on-disk filesystem"; + let filesystem = + { name = _filesystem; description = "An on-disk filesystem"; messages = []; contents = field "uuid" "globally-unique ID" :: @@ -5797,16 +5797,16 @@ let filesystem = (** Each disk is associated with a vdi_type: (a 'style' of disk?) *) -let vdi_type = Enum ("vdi_type", [ "system", "a disk that may be replaced on upgrade"; - "user", "a disk that is always preserved on upgrade"; - "ephemeral", "a disk that may be reformatted on upgrade"; - "suspend", "a disk that stores a suspend image"; - "crashdump", "a disk that stores VM crashdump information"; - "ha_statefile", "a disk used for HA storage heartbeating"; - "metadata", "a disk used for HA Pool metadata"; - "redo_log", "a disk used for a general metadata redo-log"; - "rrd", "a disk that stores SR-level RRDs"; - ]) +let vdi_type = Enum ("vdi_type", [ "system", "a disk that may be replaced on upgrade"; + "user", "a disk that is always preserved on upgrade"; + "ephemeral", "a disk that may be reformatted on upgrade"; + "suspend", "a disk that stores a suspend image"; + "crashdump", "a disk that stores VM crashdump information"; + "ha_statefile", "a disk used for HA storage heartbeating"; + "metadata", "a disk used for HA Pool metadata"; + "redo_log", "a disk used for a general metadata redo-log"; + "rrd", "a disk 
that stores SR-level RRDs"; + ]) let vdi_introduce_params first_rel = [ @@ -5821,1202 +5821,1202 @@ let vdi_introduce_params first_rel = {param_type=String; param_name="location"; param_doc="location information"; param_release=first_rel; param_default=None}; {param_type=Map(String, String); param_name="xenstore_data"; param_doc="Data to insert into xenstore"; param_release=first_rel; param_default=Some (VMap [])}; {param_type=Map(String, String); param_name="sm_config"; param_doc="Storage-specific config"; param_release=miami_release; param_default=Some (VMap [])}; - {param_type=Bool; param_name = "managed"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VBool true) }; - {param_type=Int; param_name="virtual_size"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VInt 0L) }; - {param_type=Int; param_name="physical_utilisation"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VInt 0L) }; - {param_type=Ref _pool; param_name="metadata_of_pool"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VRef "") }; - {param_type=Bool; param_name="is_a_snapshot"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VBool false) }; - {param_type=DateTime; param_name="snapshot_time"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VDateTime Date.never) }; - {param_type=Ref _vdi; param_name="snapshot_of"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VRef "") }; + {param_type=Bool; param_name = "managed"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VBool true) }; + {param_type=Int; param_name="virtual_size"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VInt 0L) }; + {param_type=Int; 
param_name="physical_utilisation"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VInt 0L) }; + {param_type=Ref _pool; param_name="metadata_of_pool"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VRef "") }; + {param_type=Bool; param_name="is_a_snapshot"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VBool false) }; + {param_type=DateTime; param_name="snapshot_time"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VDateTime Date.never) }; + {param_type=Ref _vdi; param_name="snapshot_of"; param_doc = "Storage-specific config"; param_release=tampa_release; param_default = Some (VRef "") }; ] (* This used to be called VDI.introduce but it was always an internal call *) let vdi_pool_introduce = call - ~name:"pool_introduce" - ~in_oss_since:None - ~in_product_since:rel_rio - ~versioned_params:(vdi_introduce_params miami_release) - ~doc:"Create a new VDI record in the database only" - ~result:(Ref _vdi, "The ref of the newly created VDI record.") - ~hide_from_docs:true - ~allowed_roles:_R_VM_ADMIN + ~name:"pool_introduce" + ~in_oss_since:None + ~in_product_since:rel_rio + ~versioned_params:(vdi_introduce_params miami_release) + ~doc:"Create a new VDI record in the database only" + ~result:(Ref _vdi, "The ref of the newly created VDI record.") + ~hide_from_docs:true + ~allowed_roles:_R_VM_ADMIN () let vdi_db_introduce = { vdi_pool_introduce with msg_name = "db_introduce"; msg_hide_from_docs = false } let vdi_db_forget = call - ~name:"db_forget" - ~in_oss_since:None - ~params:[Ref _vdi, "vdi", "The VDI to forget about"] - ~doc:"Removes a VDI record from the database" - ~in_product_since:rel_miami - ~allowed_roles:_R_VM_ADMIN - () + ~name:"db_forget" + ~in_oss_since:None + ~params:[Ref _vdi, "vdi", "The VDI to forget about"] + ~doc:"Removes a VDI record from the database" + ~in_product_since:rel_miami + 
~allowed_roles:_R_VM_ADMIN + () let vdi_introduce = call - ~name:"introduce" - ~in_oss_since:None - ~versioned_params:(vdi_introduce_params rio_release) - ~doc:"Create a new VDI record in the database only" - ~result:(Ref _vdi, "The ref of the newly created VDI record.") - ~errs:[Api_errors.sr_operation_not_supported] - ~in_product_since:rel_miami - ~allowed_roles:_R_VM_ADMIN - () + ~name:"introduce" + ~in_oss_since:None + ~versioned_params:(vdi_introduce_params rio_release) + ~doc:"Create a new VDI record in the database only" + ~result:(Ref _vdi, "The ref of the newly created VDI record.") + ~errs:[Api_errors.sr_operation_not_supported] + ~in_product_since:rel_miami + ~allowed_roles:_R_VM_ADMIN + () let vdi_forget = call - ~name:"forget" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "vdi", "The VDI to forget about"] - ~doc:"Removes a VDI record from the database" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"forget" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "vdi", "The VDI to forget about"] + ~doc:"Removes a VDI record from the database" + ~allowed_roles:_R_VM_ADMIN + () let vdi_force_unlock = call - ~name:"force_unlock" - ~in_oss_since:None - ~in_product_since:rel_rio - ~internal_deprecated_since:rel_miami - ~params:[Ref _vdi, "vdi", "The VDI to forcibly unlock"] - ~doc:"Steals the lock on this VDI and leaves it unlocked. This function is extremely dangerous. This call is deprecated." - ~hide_from_docs:true - ~allowed_roles:_R_VM_ADMIN - () + ~name:"force_unlock" + ~in_oss_since:None + ~in_product_since:rel_rio + ~internal_deprecated_since:rel_miami + ~params:[Ref _vdi, "vdi", "The VDI to forcibly unlock"] + ~doc:"Steals the lock on this VDI and leaves it unlocked. This function is extremely dangerous. This call is deprecated." 
+ ~hide_from_docs:true + ~allowed_roles:_R_VM_ADMIN + () let vdi_update = call - ~name:"update" - ~in_oss_since:None - ~params:[Ref _vdi, "vdi", "The VDI whose stats (eg size) should be updated" ] - ~doc:"Ask the storage backend to refresh the fields in the VDI object" - ~errs:[Api_errors.sr_operation_not_supported] - ~in_product_since:rel_symc - ~allowed_roles:_R_VM_ADMIN - () + ~name:"update" + ~in_oss_since:None + ~params:[Ref _vdi, "vdi", "The VDI whose stats (eg size) should be updated" ] + ~doc:"Ask the storage backend to refresh the fields in the VDI object" + ~errs:[Api_errors.sr_operation_not_supported] + ~in_product_since:rel_symc + ~allowed_roles:_R_VM_ADMIN + () let vdi_operations = - Enum ("vdi_operations", - [ "scan", "Scanning backends for new or deleted VDIs"; - "clone", "Cloning the VDI"; - "copy", "Copying the VDI"; - "resize", "Resizing the VDI"; - "resize_online", "Resizing the VDI which may or may not be online"; - "snapshot", "Snapshotting the VDI"; - "mirror", "Mirroring the VDI"; - "destroy", "Destroying the VDI"; - "forget", "Forget about the VDI"; - "update", "Refreshing the fields of the VDI"; - "force_unlock", "Forcibly unlocking the VDI"; - "generate_config", "Generating static configuration"; - "blocked", "Operations on this VDI are temporarily blocked"; - ]) + Enum ("vdi_operations", + [ "scan", "Scanning backends for new or deleted VDIs"; + "clone", "Cloning the VDI"; + "copy", "Copying the VDI"; + "resize", "Resizing the VDI"; + "resize_online", "Resizing the VDI which may or may not be online"; + "snapshot", "Snapshotting the VDI"; + "mirror", "Mirroring the VDI"; + "destroy", "Destroying the VDI"; + "forget", "Forget about the VDI"; + "update", "Refreshing the fields of the VDI"; + "force_unlock", "Forcibly unlocking the VDI"; + "generate_config", "Generating static configuration"; + "blocked", "Operations on this VDI are temporarily blocked"; + ]) let vdi_set_missing = call - ~name:"set_missing" - ~in_oss_since:None - 
~in_product_since:rel_miami - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The new value of the VDI's missing field"] - ~doc:"Sets the VDI's missing field" - ~flags:[`Session] - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_missing" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _vdi, "self", "The VDI to modify"; + Bool, "value", "The new value of the VDI's missing field"] + ~doc:"Sets the VDI's missing field" + ~flags:[`Session] + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_read_only = call - ~name:"set_read_only" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The new value of the VDI's read_only field"] - ~flags:[`Session] - ~doc:"Sets the VDI's read_only field" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_read_only" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "self", "The VDI to modify"; + Bool, "value", "The new value of the VDI's read_only field"] + ~flags:[`Session] + ~doc:"Sets the VDI's read_only field" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_sharable = call - ~name:"set_sharable" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The new value of the VDI's sharable field"] - ~flags:[`Session] - ~doc:"Sets the VDI's sharable field" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_sharable" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[Ref _vdi, "self", "The VDI to modify"; + Bool, "value", "The new value of the VDI's sharable field"] + ~flags:[`Session] + ~doc:"Sets the VDI's sharable field" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_managed = call - ~name:"set_managed" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The new value of the VDI's managed field"] - ~flags:[`Session] - ~doc:"Sets the VDI's managed field" - ~allowed_roles:_R_VM_ADMIN - () - + 
~name:"set_managed" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "self", "The VDI to modify"; + Bool, "value", "The new value of the VDI's managed field"] + ~flags:[`Session] + ~doc:"Sets the VDI's managed field" + ~allowed_roles:_R_VM_ADMIN + () + let vdi_set_virtual_size = call - ~name:"set_virtual_size" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _vdi, "self", "The VDI to modify"; - Int, "value", "The new value of the VDI's virtual size"] - ~flags:[`Session] - ~doc:"Sets the VDI's virtual_size field" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_virtual_size" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _vdi, "self", "The VDI to modify"; + Int, "value", "The new value of the VDI's virtual size"] + ~flags:[`Session] + ~doc:"Sets the VDI's virtual_size field" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_physical_utilisation = call - ~name:"set_physical_utilisation" - ~in_oss_since:None - ~in_product_since:rel_miami - ~params:[Ref _vdi, "self", "The VDI to modify"; - Int, "value", "The new value of the VDI's physical utilisation"] - ~flags:[`Session] - ~doc:"Sets the VDI's physical_utilisation field" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_physical_utilisation" + ~in_oss_since:None + ~in_product_since:rel_miami + ~params:[Ref _vdi, "self", "The VDI to modify"; + Int, "value", "The new value of the VDI's physical utilisation"] + ~flags:[`Session] + ~doc:"Sets the VDI's physical_utilisation field" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_is_a_snapshot = call - ~name:"set_is_a_snapshot" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The new value indicating whether this VDI is a snapshot"] - ~flags:[`Session] - ~doc:"Sets whether this VDI is a snapshot" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_is_a_snapshot" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI to modify"; + 
Bool, "value", "The new value indicating whether this VDI is a snapshot"] + ~flags:[`Session] + ~doc:"Sets whether this VDI is a snapshot" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_snapshot_of = call - ~name:"set_snapshot_of" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI to modify"; - Ref _vdi, "value", "The VDI of which this VDI is a snapshot"] - ~flags:[`Session] - ~doc:"Sets the VDI of which this VDI is a snapshot" - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_snapshot_of" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI to modify"; + Ref _vdi, "value", "The VDI of which this VDI is a snapshot"] + ~flags:[`Session] + ~doc:"Sets the VDI of which this VDI is a snapshot" + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_snapshot_time = call - ~name:"set_snapshot_time" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI to modify"; - DateTime, "value", "The snapshot time of this VDI."] - ~flags:[`Session] - ~doc:"Sets the snapshot time of this VDI." - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_snapshot_time" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI to modify"; + DateTime, "value", "The snapshot time of this VDI."] + ~flags:[`Session] + ~doc:"Sets the snapshot time of this VDI." + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_metadata_of_pool = call - ~name:"set_metadata_of_pool" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI to modify"; - Ref _pool, "value", "The pool whose metadata is contained by this VDI"] - ~flags:[`Session] - ~doc:"Records the pool whose metadata is contained by this VDI." 
- ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_metadata_of_pool" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI to modify"; + Ref _pool, "value", "The pool whose metadata is contained by this VDI"] + ~flags:[`Session] + ~doc:"Records the pool whose metadata is contained by this VDI." + ~allowed_roles:_R_VM_ADMIN + () (** An API call for debugging and testing only *) let vdi_generate_config = call - ~name:"generate_config" - ~in_oss_since:None - ~in_product_since:rel_orlando - ~params:[Ref _host, "host", "The host on which to generate the configuration"; - Ref _vdi, "vdi", "The VDI to generate the configuration for" ] - ~result:(String, "The generated static configuration") - ~doc:"Internal function for debugging only" - ~hide_from_docs:true - ~allowed_roles:_R_VM_ADMIN - () + ~name:"generate_config" + ~in_oss_since:None + ~in_product_since:rel_orlando + ~params:[Ref _host, "host", "The host on which to generate the configuration"; + Ref _vdi, "vdi", "The VDI to generate the configuration for" ] + ~result:(String, "The generated static configuration") + ~doc:"Internal function for debugging only" + ~hide_from_docs:true + ~allowed_roles:_R_VM_ADMIN + () let on_boot = Enum ("on_boot", [ - "reset", "When a VM containing this VDI is started, the contents of the VDI are reset to the state they were in when this flag was last set."; - "persist", "Standard behaviour."; - ]) + "reset", "When a VM containing this VDI is started, the contents of the VDI are reset to the state they were in when this flag was last set."; + "persist", "Standard behaviour."; + ]) let vdi_set_on_boot = call - ~name:"set_on_boot" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _vdi, "self", "The VDI to modify"; - on_boot, "value", "The value to set"] - ~doc:"Set the value of the on_boot parameter. This value can only be changed when the VDI is not attached to a running VM." 
- ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_on_boot" + ~in_oss_since:None + ~in_product_since:rel_cowley + ~params:[Ref _vdi, "self", "The VDI to modify"; + on_boot, "value", "The value to set"] + ~doc:"Set the value of the on_boot parameter. This value can only be changed when the VDI is not attached to a running VM." + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_allow_caching = call - ~name:"set_allow_caching" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _vdi, "self", "The VDI to modify"; - Bool, "value", "The value to set"] - ~doc:"Set the value of the allow_caching parameter. This value can only be changed when the VDI is not attached to a running VM. The caching behaviour is only affected by this flag for VHD-based VDIs that have one parent and no child VHDs. Moreover, caching only takes place when the host running the VM containing this VDI has a nominated SR for local caching." - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_allow_caching" + ~in_oss_since:None + ~in_product_since:rel_cowley + ~params:[Ref _vdi, "self", "The VDI to modify"; + Bool, "value", "The value to set"] + ~doc:"Set the value of the allow_caching parameter. This value can only be changed when the VDI is not attached to a running VM. The caching behaviour is only affected by this flag for VHD-based VDIs that have one parent and no child VHDs. Moreover, caching only takes place when the host running the VM containing this VDI has a nominated SR for local caching." + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_name_label = call - ~name:"set_name_label" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "self", "The VDI to modify"; - String, "value", "The name lable for the VDI"] - ~doc:"Set the name label of the VDI. This can only happen when then its SR is currently attached." 
- ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_name_label" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "self", "The VDI to modify"; + String, "value", "The name lable for the VDI"] + ~doc:"Set the name label of the VDI. This can only happen when then its SR is currently attached." + ~allowed_roles:_R_VM_ADMIN + () let vdi_set_name_description = call - ~name:"set_name_description" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _vdi, "self", "The VDI to modify"; - String, "value", "The name description for the VDI"] - ~doc:"Set the name description of the VDI. This can only happen when its SR is currently attached." - ~allowed_roles:_R_VM_ADMIN - () + ~name:"set_name_description" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _vdi, "self", "The VDI to modify"; + String, "value", "The name description for the VDI"] + ~doc:"Set the name description of the VDI. This can only happen when its SR is currently attached." + ~allowed_roles:_R_VM_ADMIN + () let vdi_open_database = call - ~name:"open_database" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI which contains the database to open"] - ~result:(Ref _session, "A session which can be used to query the database") - ~doc:"Load the metadata found on the supplied VDI and return a session reference which can be used in XenAPI calls to query its contents." - ~allowed_roles:_R_POOL_OP - () + ~name:"open_database" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI which contains the database to open"] + ~result:(Ref _session, "A session which can be used to query the database") + ~doc:"Load the metadata found on the supplied VDI and return a session reference which can be used in XenAPI calls to query its contents." 
+ ~allowed_roles:_R_POOL_OP + () let vdi_checksum = call - ~name:"checksum" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The VDI to checksum"] - ~result:(String, "The md5sum of the vdi") - ~doc:"Internal function to calculate VDI checksum and return a string" - ~hide_from_docs:true - ~allowed_roles:_R_VM_ADMIN (* Conceptually, this is not correct. We do it - this way only to follow the previous - convention. It is supposed to fix by future - version of RBAC *) - () + ~name:"checksum" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The VDI to checksum"] + ~result:(String, "The md5sum of the vdi") + ~doc:"Internal function to calculate VDI checksum and return a string" + ~hide_from_docs:true + ~allowed_roles:_R_VM_ADMIN (* Conceptually, this is not correct. We do it + this way only to follow the previous + convention. It is supposed to fix by future + version of RBAC *) + () let vdi_read_database_pool_uuid = call - ~name:"read_database_pool_uuid" - ~in_oss_since:None - ~in_product_since:rel_boston - ~params:[Ref _vdi, "self", "The metadata VDI to look up in the cache."] - ~result:(String, "The cached pool UUID of the database on the VDI.") - ~doc:"Check the VDI cache for the pool UUID of the database on this VDI." - ~allowed_roles:_R_READ_ONLY - () + ~name:"read_database_pool_uuid" + ~in_oss_since:None + ~in_product_since:rel_boston + ~params:[Ref _vdi, "self", "The metadata VDI to look up in the cache."] + ~result:(String, "The cached pool UUID of the database on the VDI.") + ~doc:"Check the VDI cache for the pool UUID of the database on this VDI." 
+ ~allowed_roles:_R_READ_ONLY + () (** A virtual disk *) let vdi = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vdi ~descr:"A virtual disk image" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[vdi_snapshot; vdi_clone; vdi_resize; - vdi_resize_online; - vdi_introduce; vdi_pool_introduce; - vdi_db_introduce; vdi_db_forget; - vdi_update; - vdi_copy; - vdi_force_unlock; vdi_set_managed; - vdi_forget; - vdi_set_sharable; - vdi_set_read_only; - vdi_set_missing; - vdi_set_virtual_size; - vdi_set_physical_utilisation; - vdi_set_is_a_snapshot; - vdi_set_snapshot_of; - vdi_set_snapshot_time; - vdi_set_metadata_of_pool; - vdi_set_name_label; - vdi_set_name_description; - vdi_generate_config; - vdi_set_on_boot; - vdi_set_allow_caching; - vdi_open_database; - vdi_checksum; - vdi_read_database_pool_uuid; - vdi_pool_migrate; - ] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vdi ~descr:"A virtual disk image" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[vdi_snapshot; vdi_clone; vdi_resize; + vdi_resize_online; + vdi_introduce; vdi_pool_introduce; + vdi_db_introduce; vdi_db_forget; + vdi_update; + vdi_copy; + vdi_force_unlock; vdi_set_managed; + vdi_forget; + vdi_set_sharable; + vdi_set_read_only; + vdi_set_missing; + vdi_set_virtual_size; + vdi_set_physical_utilisation; + vdi_set_is_a_snapshot; + vdi_set_snapshot_of; + vdi_set_snapshot_time; + vdi_set_metadata_of_pool; + vdi_set_name_label; + vdi_set_name_description; + vdi_generate_config; + vdi_set_on_boot; + vdi_set_allow_caching; + vdi_open_database; + vdi_checksum; + vdi_read_database_pool_uuid; + vdi_pool_migrate; + ] + ~contents: ([ uid _vdi; - namespace 
~name:"name" ~contents:(names oss_since_303 StaticRO) (); - ] @ (allowed_and_current_operations vdi_operations) @ [ - field ~qualifier:StaticRO ~ty:(Ref _sr) "SR" "storage repository in which the VDI resides"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" "list of vbds that refer to this disk"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" "list of crash dumps that refer to this disk"; - field ~qualifier:StaticRO ~ty:Int "virtual_size" "size of disk as presented to the guest (in bytes). Note that, depending on storage backend type, requested size may not be respected exactly"; - field ~qualifier:DynamicRO ~ty:Int "physical_utilisation" "amount of physical space that the disk image is currently taking up on the storage repository (in bytes)"; - field ~qualifier:StaticRO ~ty:vdi_type "type" "type of the VDI"; - field ~qualifier:StaticRO ~ty:Bool "sharable" "true if this disk may be shared"; - field ~qualifier:StaticRO ~ty:Bool "read_only" "true if this disk may ONLY be mounted read-only"; - field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; - field ~qualifier:DynamicRO ~ty:Bool "storage_lock" "true if this disk is locked at the storage level"; - (* XXX: location field was in the database in rio, now API in miami *) - field ~in_oss_since:None ~in_product_since:rel_miami ~ty:String ~qualifier:DynamicRO ~default_value:(Some (VString "")) "location" "location information"; - field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "managed" ""; - field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "missing" "true if SR scan operation reported this VDI as not present on disk"; - field ~in_oss_since:None ~ty:(Ref _vdi) ~qualifier:DynamicRO "parent" "References the parent disk, if this VDI is part of a chain"; - field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" "data to be 
inserted into the xenstore tree (/local/domain/0/backend/vbd///sm-data) after the VDI is attached. This is generally set by the SM backends on vdi_attach." ~default_value:(Some (VMap [])); - field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "sm_config" "SM dependent data" ~default_value:(Some (VMap [])); - - field ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~qualifier:DynamicRO ~ty:Bool ~doc_tags:[Snapshots] "is_a_snapshot" "true if this is a snapshot."; - field ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~qualifier:DynamicRO ~ty:(Ref _vdi) ~doc_tags:[Snapshots] "snapshot_of" "Ref pointing to the VDI this snapshot is of."; - field ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] "snapshots" "List pointing to all the VDIs snapshots."; - field ~in_product_since:rel_orlando ~default_value:(Some (VDateTime Date.never)) ~qualifier:DynamicRO ~ty:DateTime ~doc_tags:[Snapshots] "snapshot_time" "Date/time when this snapshot was created."; - field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; - field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "allow_caching" "true if this VDI is to be cached in the local cache SR"; - field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:on_boot ~default_value:(Some (VEnum "persist")) "on_boot" "The behaviour of this VDI on a VM boot"; - field ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:(Ref _pool) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "metadata_of_pool" "The pool whose metadata is contained in this VDI"; - field ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "metadata_latest" "Whether this VDI contains the latest known accessible metadata for the pool"; - field 
~lifecycle:[Published, rel_dundee, ""] ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "is_tools_iso" "Whether this VDI is a Tools ISO"; - ]) - () + namespace ~name:"name" ~contents:(names oss_since_303 StaticRO) (); + ] @ (allowed_and_current_operations vdi_operations) @ [ + field ~qualifier:StaticRO ~ty:(Ref _sr) "SR" "storage repository in which the VDI resides"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" "list of vbds that refer to this disk"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" "list of crash dumps that refer to this disk"; + field ~qualifier:StaticRO ~ty:Int "virtual_size" "size of disk as presented to the guest (in bytes). Note that, depending on storage backend type, requested size may not be respected exactly"; + field ~qualifier:DynamicRO ~ty:Int "physical_utilisation" "amount of physical space that the disk image is currently taking up on the storage repository (in bytes)"; + field ~qualifier:StaticRO ~ty:vdi_type "type" "type of the VDI"; + field ~qualifier:StaticRO ~ty:Bool "sharable" "true if this disk may be shared"; + field ~qualifier:StaticRO ~ty:Bool "read_only" "true if this disk may ONLY be mounted read-only"; + field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; + field ~qualifier:DynamicRO ~ty:Bool "storage_lock" "true if this disk is locked at the storage level"; + (* XXX: location field was in the database in rio, now API in miami *) + field ~in_oss_since:None ~in_product_since:rel_miami ~ty:String ~qualifier:DynamicRO ~default_value:(Some (VString "")) "location" "location information"; + field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "managed" ""; + field ~in_oss_since:None ~ty:Bool ~qualifier:DynamicRO "missing" "true if SR scan operation reported this VDI as not present on disk"; + field ~in_oss_since:None ~ty:(Ref _vdi) ~qualifier:DynamicRO "parent" "References 
the parent disk, if this VDI is part of a chain"; + field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" "data to be inserted into the xenstore tree (/local/domain/0/backend/vbd///sm-data) after the VDI is attached. This is generally set by the SM backends on vdi_attach." ~default_value:(Some (VMap [])); + field ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "sm_config" "SM dependent data" ~default_value:(Some (VMap [])); + + field ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~qualifier:DynamicRO ~ty:Bool ~doc_tags:[Snapshots] "is_a_snapshot" "true if this is a snapshot."; + field ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~qualifier:DynamicRO ~ty:(Ref _vdi) ~doc_tags:[Snapshots] "snapshot_of" "Ref pointing to the VDI this snapshot is of."; + field ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) ~doc_tags:[Snapshots] "snapshots" "List pointing to all the VDIs snapshots."; + field ~in_product_since:rel_orlando ~default_value:(Some (VDateTime Date.never)) ~qualifier:DynamicRO ~ty:DateTime ~doc_tags:[Snapshots] "snapshot_time" "Date/time when this snapshot was created."; + field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; + field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "allow_caching" "true if this VDI is to be cached in the local cache SR"; + field ~in_product_since:rel_cowley ~qualifier:DynamicRO ~ty:on_boot ~default_value:(Some (VEnum "persist")) "on_boot" "The behaviour of this VDI on a VM boot"; + field ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:(Ref _pool) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "metadata_of_pool" "The pool whose metadata is contained in this VDI"; + field ~in_product_since:rel_boston 
~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "metadata_latest" "Whether this VDI contains the latest known accessible metadata for the pool"; + field ~lifecycle:[Published, rel_dundee, ""] ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "is_tools_iso" "Whether this VDI is a Tools ISO"; + ]) + () (** Virtual disk interfaces have a mode parameter: *) let vbd_mode = Enum ("vbd_mode", [ "RO", "only read-only access will be allowed"; - "RW", "read-write access will be allowed" ]) + "RW", "read-write access will be allowed" ]) let vbd_type = Enum ("vbd_type", - [ "CD", "VBD will appear to guest as CD"; - "Disk", "VBD will appear to guest as disk"; + [ "CD", "VBD will appear to guest as CD"; + "Disk", "VBD will appear to guest as disk"; "Floppy", "VBD will appear as a floppy"]) let vbd_operations = - Enum ("vbd_operations", - [ "attach", "Attempting to attach this VBD to a VM"; - "eject", "Attempting to eject the media from this VBD"; - "insert", "Attempting to insert new media into this VBD"; - "plug", "Attempting to hotplug this VBD"; - "unplug", "Attempting to hot unplug this VBD"; - "unplug_force", "Attempting to forcibly unplug this VBD"; - "pause", "Attempting to pause a block device backend"; - "unpause", "Attempting to unpause a block device backend"; - ]) + Enum ("vbd_operations", + [ "attach", "Attempting to attach this VBD to a VM"; + "eject", "Attempting to eject the media from this VBD"; + "insert", "Attempting to insert new media into this VBD"; + "plug", "Attempting to hotplug this VBD"; + "unplug", "Attempting to hot unplug this VBD"; + "unplug_force", "Attempting to forcibly unplug this VBD"; + "pause", "Attempting to pause a block device backend"; + "unpause", "Attempting to unpause a block device backend"; + ]) (** A virtual disk interface *) let vbd = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything 
~gen_constructor_destructor:true ~name:_vbd ~descr:"A virtual block device" ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages: [ vbd_eject; vbd_insert; vbd_plug; vbd_unplug; vbd_unplug_force; vbd_unplug_force_no_safety_check; vbd_assert_attachable; - vbd_pause; vbd_unpause; - ] - ~contents: - ([ uid _vbd; - ] @ (allowed_and_current_operations vbd_operations) @ [ - field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; - field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual disk"; - - field ~qualifier:DynamicRO "device" "device seen by the guest e.g. hda1"; - field "userdevice" "user-friendly device name e.g. 0,1,2,etc."; - field ~ty:Bool "bootable" "true if this VBD is bootable"; - field ~effect:true ~ty:vbd_mode "mode" "the mode the VBD should be mounted with"; - field ~ty:vbd_type "type" "how the VBD will appear to the guest (e.g. disk or CD)"; - field ~in_oss_since:None ~in_product_since:rel_miami ~ty:Bool ~default_value:(Some (VBool true)) - "unpluggable" "true if this VBD will support hot-unplug"; - field ~qualifier:DynamicRO ~ty:Bool "storage_lock" "true if a storage level lock was acquired"; - field ~qualifier:StaticRO ~ty:Bool "empty" "if true this represents an empty drive"; - field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "reserved" "true if the VBD is reserved pending a reboot/migrate"; - field ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - @ device_status_fields @ + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages: [ vbd_eject; vbd_insert; vbd_plug; vbd_unplug; vbd_unplug_force; vbd_unplug_force_no_safety_check; vbd_assert_attachable; + vbd_pause; vbd_unpause; + ] + ~contents: + ([ uid _vbd; + ] @ (allowed_and_current_operations vbd_operations) @ [ + field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; + field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual 
disk"; + + field ~qualifier:DynamicRO "device" "device seen by the guest e.g. hda1"; + field "userdevice" "user-friendly device name e.g. 0,1,2,etc."; + field ~ty:Bool "bootable" "true if this VBD is bootable"; + field ~effect:true ~ty:vbd_mode "mode" "the mode the VBD should be mounted with"; + field ~ty:vbd_type "type" "how the VBD will appear to the guest (e.g. disk or CD)"; + field ~in_oss_since:None ~in_product_since:rel_miami ~ty:Bool ~default_value:(Some (VBool true)) + "unpluggable" "true if this VBD will support hot-unplug"; + field ~qualifier:DynamicRO ~ty:Bool "storage_lock" "true if a storage level lock was acquired"; + field ~qualifier:StaticRO ~ty:Bool "empty" "if true this represents an empty drive"; + field ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "reserved" "true if the VBD is reserved pending a reboot/migrate"; + field ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + @ device_status_fields @ [ namespace ~name:"qos" ~contents:(qos "VBD") (); ] @ [ field ~qualifier:DynamicRO ~ty:(Ref _vbd_metrics) "metrics" "metrics associated with this VBD"; ]) - () - -let vbd_metrics = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_vbd_metrics ~descr:"The metrics associated with a virtual block device" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[] ~contents: - [ uid _vbd_metrics; - namespace ~name:"io" ~contents:iobandwidth (); - field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ] - () + () + +let vbd_metrics = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 
~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_vbd_metrics ~descr:"The metrics associated with a virtual block device" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[] ~contents: + [ uid _vbd_metrics; + namespace ~name:"io" ~contents:iobandwidth (); + field ~qualifier:DynamicRO ~ty:DateTime "last_updated" "Time at which this information was last updated"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; + ] + () let crashdump_destroy = call - ~name:"destroy" - ~in_product_since:rel_rio - ~doc:"Destroy the specified crashdump" - ~params:[Ref _crashdump, "self", "The crashdump to destroy"] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~in_product_since:rel_rio + ~doc:"Destroy the specified crashdump" + ~params:[Ref _crashdump, "self", "The crashdump to destroy"] + ~allowed_roles:_R_POOL_OP + () (** A crashdump for a particular VM, stored in a particular VDI *) let crashdump = create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump ~descr:"A VM crashdump" ~gen_events:true - ~doccomments:[] + ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP ~messages: [crashdump_destroy] ~contents: - ([ uid _crashdump; - field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; - field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual disk"; - field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" "additional configuration"; - ]) - () + ([ uid _crashdump; + field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; + field ~qualifier:StaticRO ~ty:(Ref _vdi) "VDI" "the virtual disk"; + field ~in_product_since:rel_miami ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "other_config" 
"additional configuration"; + ]) + () let pool_operations = - Enum ("pool_allowed_operations", - [ "ha_enable", "Indicates this pool is in the process of enabling HA"; - "ha_disable", "Indicates this pool is in the process of disabling HA"; - ]) + Enum ("pool_allowed_operations", + [ "ha_enable", "Indicates this pool is in the process of enabling HA"; + "ha_disable", "Indicates this pool is in the process of disabling HA"; + ]) let pool_enable_ha = call - ~in_product_since:rel_miami - ~name:"enable_ha" - ~in_oss_since:None - ~versioned_params: - [{param_type=Set(Ref _sr); param_name="heartbeat_srs"; param_doc="Set of SRs to use for storage heartbeating"; param_release=miami_release; param_default=None }; - {param_type=Map(String, String); param_name="configuration"; param_doc="Detailed HA configuration to apply"; param_release=miami_release; param_default=None }; - ] - ~doc:"Turn on High Availability mode" - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"enable_ha" + ~in_oss_since:None + ~versioned_params: + [{param_type=Set(Ref _sr); param_name="heartbeat_srs"; param_doc="Set of SRs to use for storage heartbeating"; param_release=miami_release; param_default=None }; + {param_type=Map(String, String); param_name="configuration"; param_doc="Detailed HA configuration to apply"; param_release=miami_release; param_default=None }; + ] + ~doc:"Turn on High Availability mode" + ~allowed_roles:_R_POOL_OP + () let pool_disable_ha = call - ~in_product_since:rel_miami - ~name:"disable_ha" - ~in_oss_since:None - ~params:[] - ~doc:"Turn off High Availability mode" - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"disable_ha" + ~in_oss_since:None + ~params:[] + ~doc:"Turn off High Availability mode" + ~allowed_roles:_R_POOL_OP + () let pool_sync_database = call - ~name:"sync_database" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[] - ~doc:"Forcibly synchronise the database now" - ~allowed_roles:_R_POOL_OP - () + 
~name:"sync_database" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[] + ~doc:"Forcibly synchronise the database now" + ~allowed_roles:_R_POOL_OP + () let pool_designate_new_master = call - ~in_product_since:rel_miami - ~name:"designate_new_master" - ~in_oss_since:None - ~params:[Ref _host, "host", "The host who should become the new master"] - ~doc:"Perform an orderly handover of the role of master to the referenced host." - ~allowed_roles:_R_POOL_OP - () + ~in_product_since:rel_miami + ~name:"designate_new_master" + ~in_oss_since:None + ~params:[Ref _host, "host", "The host who should become the new master"] + ~doc:"Perform an orderly handover of the role of master to the referenced host." + ~allowed_roles:_R_POOL_OP + () let pool_join = call - ~name:"join" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[String, "master_address", "The hostname of the master of the pool to join"; - String, "master_username", "The username of the master (for initial authentication)"; - String, "master_password", "The password for the master (for initial authentication)"; - ] - ~errs:[Api_errors.pool_joining_host_cannot_contain_shared_SRs] - ~doc:"Instruct host to join a new pool" - ~allowed_roles:_R_POOL_OP + ~name:"join" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[String, "master_address", "The hostname of the master of the pool to join"; + String, "master_username", "The username of the master (for initial authentication)"; + String, "master_password", "The password for the master (for initial authentication)"; + ] + ~errs:[Api_errors.pool_joining_host_cannot_contain_shared_SRs] + ~doc:"Instruct host to join a new pool" + ~allowed_roles:_R_POOL_OP () let pool_join_force = call - ~name:"join_force" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[String, "master_address", "The hostname of the master of the pool to join"; - String, "master_username", "The username of the master (for initial authentication)"; - String, 
"master_password", "The password for the master (for initial authentication)"; - ] - ~doc:"Instruct host to join a new pool" - ~allowed_roles:_R_POOL_OP + ~name:"join_force" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[String, "master_address", "The hostname of the master of the pool to join"; + String, "master_username", "The username of the master (for initial authentication)"; + String, "master_password", "The password for the master (for initial authentication)"; + ] + ~doc:"Instruct host to join a new pool" + ~allowed_roles:_R_POOL_OP () let pool_slave_reset_master = call ~flags:[`Session] - ~name:"emergency_reset_master" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[ - String, "master_address", "The hostname of the master"; - ] - ~doc:"Instruct a slave already in a pool that the master has changed" - ~allowed_roles:_R_POOL_OP + ~name:"emergency_reset_master" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[ + String, "master_address", "The hostname of the master"; + ] + ~doc:"Instruct a slave already in a pool that the master has changed" + ~allowed_roles:_R_POOL_OP () let pool_transition_to_master = call ~flags:[`Session] - ~name:"emergency_transition_to_master" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[] - ~doc:"Instruct host that's currently a slave to transition to being master" - ~allowed_roles:_R_POOL_OP + ~name:"emergency_transition_to_master" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[] + ~doc:"Instruct host that's currently a slave to transition to being master" + ~allowed_roles:_R_POOL_OP () let pool_recover_slaves = call - ~name:"recover_slaves" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[] - ~result:(Set (Ref _host), "list of hosts whose master address were successfully reset") - ~doc:"Instruct a pool master, M, to try and contact its slaves and, if slaves are in emergency mode, reset their master address to M." 
- ~allowed_roles:_R_POOL_OP - () - + ~name:"recover_slaves" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[] + ~result:(Set (Ref _host), "list of hosts whose master address were successfully reset") + ~doc:"Instruct a pool master, M, to try and contact its slaves and, if slaves are in emergency mode, reset their master address to M." + ~allowed_roles:_R_POOL_OP + () + let pool_eject = call - ~name:"eject" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _host, "host", "The host to eject"] - ~doc:"Instruct a pool master to eject a host from the pool" - ~allowed_roles:_R_POOL_OP + ~name:"eject" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _host, "host", "The host to eject"] + ~doc:"Instruct a pool master to eject a host from the pool" + ~allowed_roles:_R_POOL_OP () let pool_initial_auth = call - ~name:"initial_auth" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[] - ~result:(String, "") - ~doc:"Internal use only" - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP + ~name:"initial_auth" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[] + ~result:(String, "") + ~doc:"Internal use only" + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP () let pool_create_VLAN_from_PIF = call - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"create_VLAN_from_PIF" - ~doc:"Create a pool-wide VLAN by taking the PIF." - ~params:[Ref _pif, "pif", "physical interface on any particular host, that identifies the PIF on which to create the (pool-wide) VLAN interface"; - Ref _network, "network", "network to which this interface should be connected"; - Int, "VLAN", "VLAN tag for the new interface"] - ~result:(Set (Ref _pif), "The references of the created PIF objects") - ~errs:[Api_errors.vlan_tag_invalid] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"create_VLAN_from_PIF" + ~doc:"Create a pool-wide VLAN by taking the PIF." 
+ ~params:[Ref _pif, "pif", "physical interface on any particular host, that identifies the PIF on which to create the (pool-wide) VLAN interface"; + Ref _network, "network", "network to which this interface should be connected"; + Int, "VLAN", "VLAN tag for the new interface"] + ~result:(Set (Ref _pif), "The references of the created PIF objects") + ~errs:[Api_errors.vlan_tag_invalid] + ~allowed_roles:_R_POOL_OP + () (* !! THIS IS BROKEN; it takes a device name which in the case of a bond is not homogeneous across all pool hosts. See CA-22613. !! *) let pool_create_VLAN = call - ~in_oss_since:None - ~in_product_since:rel_rio - ~name:"create_VLAN" - ~doc:"Create PIFs, mapping a network to the same physical interface/VLAN on each host. This call is deprecated: use Pool.create_VLAN_from_PIF instead." - ~params:[String, "device", "physical interface on which to create the VLAN interface"; - Ref _network, "network", "network to which this interface should be connected"; - Int, "VLAN", "VLAN tag for the new interface"] - ~result:(Set (Ref _pif), "The references of the created PIF objects") - ~errs:[Api_errors.vlan_tag_invalid] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_rio + ~name:"create_VLAN" + ~doc:"Create PIFs, mapping a network to the same physical interface/VLAN on each host. This call is deprecated: use Pool.create_VLAN_from_PIF instead." 
+ ~params:[String, "device", "physical interface on which to create the VLAN interface"; + Ref _network, "network", "network to which this interface should be connected"; + Int, "VLAN", "VLAN tag for the new interface"] + ~result:(Set (Ref _pif), "The references of the created PIF objects") + ~errs:[Api_errors.vlan_tag_invalid] + ~allowed_roles:_R_POOL_OP + () let hello_return = Enum("hello_return", [ - "ok", ""; - "unknown_host", ""; - "cannot_talk_back", "" - ]) + "ok", ""; + "unknown_host", ""; + "cannot_talk_back", "" + ]) let pool_hello = call - ~name:"hello" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[String, "host_uuid", ""; - String, "host_address", "" - ] - ~result:(hello_return, "") - ~doc:"Internal use only" - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP - () + ~name:"hello" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[String, "host_uuid", ""; + String, "host_address", "" + ] + ~result:(hello_return, "") + ~doc:"Internal use only" + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP + () let pool_slave_network_report = call - ~name:"slave_network_report" - ~in_oss_since:None - ~in_product_since:rel_rio - ~doc:"Internal use only" - ~params:[Map (String, String), "phydevs", "(device,bridge) pairs of physical NICs on slave"; - Map (String, String), "dev_to_mac", "(device,mac) pairs of physical NICs on slave"; - Map (String, Int), "dev_to_mtu", "(device,mtu) pairs of physical NICs on slave"; - Ref _host, "slave_host", "the host that the PIFs will be attached to when created" - ] - ~result:(Set(Ref _pif), "refs for pifs corresponding to device list") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"slave_network_report" + ~in_oss_since:None + ~in_product_since:rel_rio + ~doc:"Internal use only" + ~params:[Map (String, String), "phydevs", "(device,bridge) pairs of physical NICs on slave"; + Map (String, String), "dev_to_mac", "(device,mac) pairs of physical NICs on slave"; + Map (String, Int), "dev_to_mtu", 
"(device,mtu) pairs of physical NICs on slave"; + Ref _host, "slave_host", "the host that the PIFs will be attached to when created" + ] + ~result:(Set(Ref _pif), "refs for pifs corresponding to device list") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN + () let pool_ping_slave = call ~flags:[`Session] - ~name:"is_slave" - ~in_oss_since:None - ~in_product_since:rel_rio - ~params:[Ref _host, "host", ""] - ~doc:"Internal use only" - ~result:(Bool, "returns false if pinged host is master [indicating critical error condition]; true if pinged host is slave") - ~hide_from_docs:true - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"is_slave" + ~in_oss_since:None + ~in_product_since:rel_rio + ~params:[Ref _host, "host", ""] + ~doc:"Internal use only" + ~result:(Bool, "returns false if pinged host is master [indicating critical error condition]; true if pinged host is slave") + ~hide_from_docs:true + ~allowed_roles:_R_POOL_ADMIN + () let pool_ha_prevent_restarts_for = call ~flags:[`Session] - ~name:"ha_prevent_restarts_for" - ~in_product_since:rel_orlando_update_1 - ~doc:"When this call returns the VM restart logic will not run for the requested number of seconds. If the argument is zero then the restart thread is immediately unblocked" - ~params:[Int, "seconds", "The number of seconds to block the restart thread for"] - ~allowed_roles:_R_POOL_OP - () + ~name:"ha_prevent_restarts_for" + ~in_product_since:rel_orlando_update_1 + ~doc:"When this call returns the VM restart logic will not run for the requested number of seconds. 
If the argument is zero then the restart thread is immediately unblocked" + ~params:[Int, "seconds", "The number of seconds to block the restart thread for"] + ~allowed_roles:_R_POOL_OP + () let pool_ha_failover_plan_exists = call ~flags:[`Session] - ~name:"ha_failover_plan_exists" - ~in_product_since:rel_orlando - ~doc:"Returns true if a VM failover plan exists for up to 'n' host failures" - ~params:[Int, "n", "The number of host failures to plan for" ] - ~result:(Bool, "true if a failover plan exists for the supplied number of host failures") - ~allowed_roles:_R_POOL_OP - () + ~name:"ha_failover_plan_exists" + ~in_product_since:rel_orlando + ~doc:"Returns true if a VM failover plan exists for up to 'n' host failures" + ~params:[Int, "n", "The number of host failures to plan for" ] + ~result:(Bool, "true if a failover plan exists for the supplied number of host failures") + ~allowed_roles:_R_POOL_OP + () let pool_ha_compute_max_host_failures_to_tolerate = call ~flags:[`Session] - ~name:"ha_compute_max_host_failures_to_tolerate" - ~in_product_since:rel_orlando - ~doc:"Returns the maximum number of host failures we could tolerate before we would be unable to restart configured VMs" - ~params:[] - ~result:(Int, "maximum value for ha_host_failures_to_tolerate given current configuration") - ~allowed_roles:_R_POOL_OP - () + ~name:"ha_compute_max_host_failures_to_tolerate" + ~in_product_since:rel_orlando + ~doc:"Returns the maximum number of host failures we could tolerate before we would be unable to restart configured VMs" + ~params:[] + ~result:(Int, "maximum value for ha_host_failures_to_tolerate given current configuration") + ~allowed_roles:_R_POOL_OP + () let pool_ha_compute_hypothetical_max_host_failures_to_tolerate = call ~flags:[`Session] - ~name:"ha_compute_hypothetical_max_host_failures_to_tolerate" - ~in_product_since:rel_orlando - ~doc:"Returns the maximum number of host failures we could tolerate before we would be unable to restart the provided VMs" - 
~params:[ Map(Ref _vm, String), "configuration", "Map of protected VM reference to restart priority" ] - ~result:(Int, "maximum value for ha_host_failures_to_tolerate given provided configuration") - ~allowed_roles:_R_READ_ONLY - () + ~name:"ha_compute_hypothetical_max_host_failures_to_tolerate" + ~in_product_since:rel_orlando + ~doc:"Returns the maximum number of host failures we could tolerate before we would be unable to restart the provided VMs" + ~params:[ Map(Ref _vm, String), "configuration", "Map of protected VM reference to restart priority" ] + ~result:(Int, "maximum value for ha_host_failures_to_tolerate given provided configuration") + ~allowed_roles:_R_READ_ONLY + () let pool_ha_compute_vm_failover_plan = call ~flags:[`Session] - ~name:"ha_compute_vm_failover_plan" - ~in_product_since:rel_orlando - ~doc:"Return a VM failover plan assuming a given subset of hosts fail" - ~params:[Set(Ref _host), "failed_hosts", "The set of hosts to assume have failed"; - Set(Ref _vm), "failed_vms", "The set of VMs to restart" ] - ~result:(Map(Ref _vm, Map(String, String)), "VM failover plan: a map of VM to host to restart the host on") - ~allowed_roles:_R_POOL_OP - () + ~name:"ha_compute_vm_failover_plan" + ~in_product_since:rel_orlando + ~doc:"Return a VM failover plan assuming a given subset of hosts fail" + ~params:[Set(Ref _host), "failed_hosts", "The set of hosts to assume have failed"; + Set(Ref _vm), "failed_vms", "The set of VMs to restart" ] + ~result:(Map(Ref _vm, Map(String, String)), "VM failover plan: a map of VM to host to restart the host on") + ~allowed_roles:_R_POOL_OP + () let pool_create_new_blob = call - ~name: "create_new_blob" - ~in_product_since:rel_orlando - ~doc:"Create a placeholder for a named binary blob of data that is associated with this pool" - ~versioned_params: - [{param_type=Ref _pool; param_name="pool"; param_doc="The pool"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="name"; param_doc="The 
name associated with the blob"; param_release=orlando_release; param_default=None}; - {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} - ] - ~result:(Ref _blob, "The reference of the blob, needed for populating its data") - ~allowed_roles:_R_POOL_OP - () + ~name: "create_new_blob" + ~in_product_since:rel_orlando + ~doc:"Create a placeholder for a named binary blob of data that is associated with this pool" + ~versioned_params: + [{param_type=Ref _pool; param_name="pool"; param_doc="The pool"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="name"; param_doc="The name associated with the blob"; param_release=orlando_release; param_default=None}; + {param_type=String; param_name="mime_type"; param_doc="The mime type for the data. 
Empty string translates to application/octet-stream"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)} + ] + ~result:(Ref _blob, "The reference of the blob, needed for populating its data") + ~allowed_roles:_R_POOL_OP + () let pool_set_ha_host_failures_to_tolerate = call - ~name:"set_ha_host_failures_to_tolerate" - ~in_product_since:rel_orlando - ~doc:"Set the maximum number of host failures to consider in the HA VM restart planner" - ~params:[Ref _pool, "self", "The pool"; - Int, "value", "New number of host failures to consider"] - ~allowed_roles:_R_POOL_OP - () + ~name:"set_ha_host_failures_to_tolerate" + ~in_product_since:rel_orlando + ~doc:"Set the maximum number of host failures to consider in the HA VM restart planner" + ~params:[Ref _pool, "self", "The pool"; + Int, "value", "New number of host failures to consider"] + ~allowed_roles:_R_POOL_OP + () let pool_ha_schedule_plan_recomputation = call - ~name:"ha_schedule_plan_recomputation" - ~in_product_since:rel_orlando - ~doc:"Signal that the plan should be recomputed (eg a host has come online)" - ~params:[] - ~hide_from_docs:true - ~pool_internal:true - ~allowed_roles:_R_LOCAL_ROOT_ONLY - () + ~name:"ha_schedule_plan_recomputation" + ~in_product_since:rel_orlando + ~doc:"Signal that the plan should be recomputed (eg a host has come online)" + ~params:[] + ~hide_from_docs:true + ~pool_internal:true + ~allowed_roles:_R_LOCAL_ROOT_ONLY + () let pool_enable_binary_storage = call - ~name:"enable_binary_storage" - ~in_product_since:rel_orlando - ~hide_from_docs:true - ~doc:"Enable the storage of larger objects, such as RRDs, messages and binary blobs across all hosts in the pool" - ~params:[] - ~allowed_roles:_R_POOL_OP - () + ~name:"enable_binary_storage" + ~in_product_since:rel_orlando + ~hide_from_docs:true + ~doc:"Enable the storage of larger 
objects, such as RRDs, messages and binary blobs across all hosts in the pool" + ~params:[] + ~allowed_roles:_R_POOL_OP + () let pool_disable_binary_storage = call - ~name:"disable_binary_storage" - ~in_product_since:rel_orlando - ~hide_from_docs:true - ~doc:"Disable the storage of larger objects, such as RRDs, messages and binary blobs across all hosts in the pool. This will destroy all of these objects where they exist." - ~params:[] - ~allowed_roles:_R_POOL_OP - () + ~name:"disable_binary_storage" + ~in_product_since:rel_orlando + ~hide_from_docs:true + ~doc:"Disable the storage of larger objects, such as RRDs, messages and binary blobs across all hosts in the pool. This will destroy all of these objects where they exist." + ~params:[] + ~allowed_roles:_R_POOL_OP + () let pool_enable_external_auth = call ~flags:[`Session] - ~name:"enable_external_auth" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - Ref _pool, "pool", "The pool whose external authentication should be enabled"; - Map (String,String), "config", "A list of key-values containing the configuration data" ; - String, "service_name", "The name of the service" ; - String, "auth_type", "The type of authentication (e.g. AD for Active Directory)" - ] - ~doc:"This call enables external authentication on all the hosts of the pool" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"enable_external_auth" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[ + Ref _pool, "pool", "The pool whose external authentication should be enabled"; + Map (String,String), "config", "A list of key-values containing the configuration data" ; + String, "service_name", "The name of the service" ; + String, "auth_type", "The type of authentication (e.g. 
AD for Active Directory)" + ] + ~doc:"This call enables external authentication on all the hosts of the pool" + ~allowed_roles:_R_POOL_ADMIN + () let pool_disable_external_auth = call ~flags:[`Session] - ~name:"disable_external_auth" - ~in_oss_since:None - ~in_product_since:rel_george - ~versioned_params:[ - {param_type=Ref _pool; param_name="pool"; param_doc="The pool whose external authentication should be disabled"; param_release=george_release; param_default=None}; - {param_type=Map (String, String); param_name="config"; param_doc="Optional parameters as a list of key-values containing the configuration data"; param_release=george_release; param_default=Some (VMap [])} - ] - ~doc:"This call disables external authentication on all the hosts of the pool" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"disable_external_auth" + ~in_oss_since:None + ~in_product_since:rel_george + ~versioned_params:[ + {param_type=Ref _pool; param_name="pool"; param_doc="The pool whose external authentication should be disabled"; param_release=george_release; param_default=None}; + {param_type=Map (String, String); param_name="config"; param_doc="Optional parameters as a list of key-values containing the configuration data"; param_release=george_release; param_default=Some (VMap [])} + ] + ~doc:"This call disables external authentication on all the hosts of the pool" + ~allowed_roles:_R_POOL_ADMIN + () let pool_detect_nonhomogeneous_external_auth = call ~flags:[`Session] - ~name:"detect_nonhomogeneous_external_auth" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - Ref _pool, "pool", "The pool where to detect non-homogeneous external authentication configuration"; - ] - ~doc:"This call asynchronously detects if the external authentication configuration in any slave is different from that in the master and raises appropriate alerts" - ~allowed_roles:_R_POOL_OP - () + ~name:"detect_nonhomogeneous_external_auth" + ~in_oss_since:None + ~in_product_since:rel_george + 
~params:[ + Ref _pool, "pool", "The pool where to detect non-homogeneous external authentication configuration"; + ] + ~doc:"This call asynchronously detects if the external authentication configuration in any slave is different from that in the master and raises appropriate alerts" + ~allowed_roles:_R_POOL_OP + () let pool_initialize_wlb = call - ~name:"initialize_wlb" - ~in_product_since:rel_george - ~doc:"Initializes workload balancing monitoring on this pool with the specified wlb server" - ~params:[String, "wlb_url", "The ip address and port to use when accessing the wlb server"; - String, "wlb_username", "The username used to authenticate with the wlb server"; - String, "wlb_password", "The password used to authenticate with the wlb server"; - String, "xenserver_username", "The username used by the wlb server to authenticate with the xenserver"; - String, "xenserver_password", "The password used by the wlb server to authenticate with the xenserver"] - ~allowed_roles:_R_POOL_OP - () + ~name:"initialize_wlb" + ~in_product_since:rel_george + ~doc:"Initializes workload balancing monitoring on this pool with the specified wlb server" + ~params:[String, "wlb_url", "The ip address and port to use when accessing the wlb server"; + String, "wlb_username", "The username used to authenticate with the wlb server"; + String, "wlb_password", "The password used to authenticate with the wlb server"; + String, "xenserver_username", "The username used by the wlb server to authenticate with the xenserver"; + String, "xenserver_password", "The password used by the wlb server to authenticate with the xenserver"] + ~allowed_roles:_R_POOL_OP + () let pool_deconfigure_wlb = call - ~name:"deconfigure_wlb" - ~in_product_since:rel_george - ~doc:"Permanently deconfigures workload balancing monitoring on this pool" - ~params:[] - ~allowed_roles:_R_POOL_OP - () + ~name:"deconfigure_wlb" + ~in_product_since:rel_george + ~doc:"Permanently deconfigures workload balancing monitoring on this 
pool" + ~params:[] + ~allowed_roles:_R_POOL_OP + () let pool_send_wlb_configuration = call - ~name:"send_wlb_configuration" - ~in_product_since:rel_george - ~doc:"Sets the pool optimization criteria for the workload balancing server" - ~params:[Map(String, String), "config", "The configuration to use in optimizing this pool"] - ~allowed_roles:_R_POOL_OP - () - + ~name:"send_wlb_configuration" + ~in_product_since:rel_george + ~doc:"Sets the pool optimization criteria for the workload balancing server" + ~params:[Map(String, String), "config", "The configuration to use in optimizing this pool"] + ~allowed_roles:_R_POOL_OP + () + let pool_retrieve_wlb_configuration = call - ~name:"retrieve_wlb_configuration" - ~in_product_since:rel_george - ~doc:"Retrieves the pool optimization criteria from the workload balancing server" - ~params:[] - ~result:(Map(String,String), "The configuration used in optimizing this pool") - ~allowed_roles:_R_READ_ONLY - () - + ~name:"retrieve_wlb_configuration" + ~in_product_since:rel_george + ~doc:"Retrieves the pool optimization criteria from the workload balancing server" + ~params:[] + ~result:(Map(String,String), "The configuration used in optimizing this pool") + ~allowed_roles:_R_READ_ONLY + () + let pool_retrieve_wlb_recommendations = call - ~name:"retrieve_wlb_recommendations" - ~in_product_since:rel_george - ~doc:"Retrieves vm migrate recommendations for the pool from the workload balancing server" - ~params:[] - ~result:(Map(Ref _vm,Set(String)), "The list of vm migration recommendations") - ~allowed_roles:_R_READ_ONLY - () - + ~name:"retrieve_wlb_recommendations" + ~in_product_since:rel_george + ~doc:"Retrieves vm migrate recommendations for the pool from the workload balancing server" + ~params:[] + ~result:(Map(Ref _vm,Set(String)), "The list of vm migration recommendations") + ~allowed_roles:_R_READ_ONLY + () + let pool_send_test_post = call - ~name:"send_test_post" - ~in_product_since:rel_george - ~doc:"Send the given body to 
the given host and port, using HTTPS, and print the response. This is used for debugging the SSL layer." - ~params:[(String, "host", ""); (Int, "port", ""); (String, "body", "")] - ~result:(String, "The response") - ~allowed_roles:_R_POOL_ADMIN - () - + ~name:"send_test_post" + ~in_product_since:rel_george + ~doc:"Send the given body to the given host and port, using HTTPS, and print the response. This is used for debugging the SSL layer." + ~params:[(String, "host", ""); (Int, "port", ""); (String, "body", "")] + ~result:(String, "The response") + ~allowed_roles:_R_POOL_ADMIN + () + let pool_certificate_install = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"certificate_install" - ~doc:"Install an SSL certificate pool-wide." - ~params:[String, "name", "A name to give the certificate"; - String, "cert", "The certificate"] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"certificate_install" + ~doc:"Install an SSL certificate pool-wide." + ~params:[String, "name", "A name to give the certificate"; + String, "cert", "The certificate"] + ~allowed_roles:_R_POOL_OP + () let pool_certificate_uninstall = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"certificate_uninstall" - ~doc:"Remove an SSL certificate." - ~params:[String, "name", "The certificate name"] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"certificate_uninstall" + ~doc:"Remove an SSL certificate." + ~params:[String, "name", "The certificate name"] + ~allowed_roles:_R_POOL_OP + () let pool_certificate_list = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"certificate_list" - ~doc:"List all installed SSL certificates." - ~result:(Set(String),"All installed certificates") - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"certificate_list" + ~doc:"List all installed SSL certificates." 
+ ~result:(Set(String),"All installed certificates") + ~allowed_roles:_R_POOL_OP + () let pool_crl_install = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"crl_install" - ~doc:"Install an SSL certificate revocation list, pool-wide." - ~params:[String, "name", "A name to give the CRL"; - String, "cert", "The CRL"] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"crl_install" + ~doc:"Install an SSL certificate revocation list, pool-wide." + ~params:[String, "name", "A name to give the CRL"; + String, "cert", "The CRL"] + ~allowed_roles:_R_POOL_OP + () let pool_crl_uninstall = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"crl_uninstall" - ~doc:"Remove an SSL certificate revocation list." - ~params:[String, "name", "The CRL name"] - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"crl_uninstall" + ~doc:"Remove an SSL certificate revocation list." + ~params:[String, "name", "The CRL name"] + ~allowed_roles:_R_POOL_OP + () let pool_crl_list = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"crl_list" - ~doc:"List all installed SSL certificate revocation lists." - ~result:(Set(String), "All installed CRLs") - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"crl_list" + ~doc:"List all installed SSL certificate revocation lists." + ~result:(Set(String), "All installed CRLs") + ~allowed_roles:_R_POOL_OP + () let pool_certificate_sync = call - ~in_oss_since:None - ~in_product_since:rel_george - ~name:"certificate_sync" - ~doc:"Sync SSL certificates from master to slaves." - ~allowed_roles:_R_POOL_OP - () - + ~in_oss_since:None + ~in_product_since:rel_george + ~name:"certificate_sync" + ~doc:"Sync SSL certificates from master to slaves." 
+ ~allowed_roles:_R_POOL_OP + () + let pool_enable_redo_log = call - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~name:"enable_redo_log" - ~params:[Ref _sr, "sr", "SR to hold the redo log."] - ~doc:"Enable the redo log on the given SR and start using it, unless HA is enabled." - ~allowed_roles:_R_POOL_OP - () - + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~name:"enable_redo_log" + ~params:[Ref _sr, "sr", "SR to hold the redo log."] + ~doc:"Enable the redo log on the given SR and start using it, unless HA is enabled." + ~allowed_roles:_R_POOL_OP + () + let pool_disable_redo_log = call - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~name:"disable_redo_log" - ~doc:"Disable the redo log if in use, unless HA is enabled." - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~name:"disable_redo_log" + ~doc:"Disable the redo log if in use, unless HA is enabled." + ~allowed_roles:_R_POOL_OP + () let pool_audit_log_append = call - ~in_oss_since:None - ~pool_internal:true - ~hide_from_docs:true - ~in_product_since:rel_midnight_ride - ~name:"audit_log_append" - ~params:[String, "line", "line to be appended to the audit log"] - ~doc:"Append a line to the audit log on the master." - ~allowed_roles:_R_POOL_ADMIN - () + ~in_oss_since:None + ~pool_internal:true + ~hide_from_docs:true + ~in_product_since:rel_midnight_ride + ~name:"audit_log_append" + ~params:[String, "line", "line to be appended to the audit log"] + ~doc:"Append a line to the audit log on the master." 
+ ~allowed_roles:_R_POOL_ADMIN + () let pool_set_vswitch_controller = call - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~lifecycle:[ - Published, rel_midnight_ride, "Set the IP address of the vswitch controller."; - Extended, rel_cowley, "Allow to be set to the empty string (no controller is used)."] - ~name:"set_vswitch_controller" - ~params:[String, "address", "IP address of the vswitch controller."] - ~doc:"Set the IP address of the vswitch controller." - ~allowed_roles:_R_POOL_OP - () + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~lifecycle:[ + Published, rel_midnight_ride, "Set the IP address of the vswitch controller."; + Extended, rel_cowley, "Allow to be set to the empty string (no controller is used)."] + ~name:"set_vswitch_controller" + ~params:[String, "address", "IP address of the vswitch controller."] + ~doc:"Set the IP address of the vswitch controller." + ~allowed_roles:_R_POOL_OP + () let pool_test_archive_target = call ~flags:[`Session] - ~name:"test_archive_target" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _pool, "self", "Reference to the pool"; - Map(String,String), "config", "Location config settings to test"; - ] - ~doc:"This call tests if a location is valid" - ~allowed_roles:_R_POOL_OP - ~result:(String, "An XMLRPC result") - () + ~name:"test_archive_target" + ~in_oss_since:None + ~in_product_since:rel_cowley + ~params:[Ref _pool, "self", "Reference to the pool"; + Map(String,String), "config", "Location config settings to test"; + ] + ~doc:"This call tests if a location is valid" + ~allowed_roles:_R_POOL_OP + ~result:(String, "An XMLRPC result") + () + +let pool_enable_local_storage_caching = call + ~name:"enable_local_storage_caching" + ~in_oss_since:None + ~in_product_since:rel_cowley + ~params:[Ref _pool, "self", "Reference to the pool"] + ~doc:"This call attempts to enable pool-wide local storage caching" + ~allowed_roles:_R_POOL_OP + () -let pool_enable_local_storage_caching = call 
- ~name:"enable_local_storage_caching" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _pool, "self", "Reference to the pool"] - ~doc:"This call attempts to enable pool-wide local storage caching" - ~allowed_roles:_R_POOL_OP - () - -let pool_disable_local_storage_caching = call - ~name:"disable_local_storage_caching" - ~in_oss_since:None - ~in_product_since:rel_cowley - ~params:[Ref _pool, "self", "Reference to the pool"] - ~doc:"This call disables pool-wide local storage caching" - ~allowed_roles:_R_POOL_OP - () +let pool_disable_local_storage_caching = call + ~name:"disable_local_storage_caching" + ~in_oss_since:None + ~in_product_since:rel_cowley + ~params:[Ref _pool, "self", "Reference to the pool"] + ~doc:"This call disables pool-wide local storage caching" + ~allowed_roles:_R_POOL_OP + () let pool_get_license_state = call - ~name:"get_license_state" - ~in_oss_since:None - ~in_product_since:rel_clearwater - ~params:[Ref _pool, "self", "Reference to the pool"] - ~doc:"This call returns the license state for the pool" - ~allowed_roles:_R_READ_ONLY - ~result:(Map(String,String), "The pool's license state") - () + ~name:"get_license_state" + ~in_oss_since:None + ~in_product_since:rel_clearwater + ~params:[Ref _pool, "self", "Reference to the pool"] + ~doc:"This call returns the license state for the pool" + ~allowed_roles:_R_READ_ONLY + ~result:(Map(String,String), "The pool's license state") + () let pool_apply_edition = call - ~name:"apply_edition" - ~in_oss_since:None - ~in_product_since:rel_clearwater - ~params:[ - Ref _pool, "self", "Reference to the pool"; - String, "edition", "The requested edition"; - ] - ~doc:"Apply an edition to all hosts in the pool" - ~allowed_roles:_R_POOL_OP - () + ~name:"apply_edition" + ~in_oss_since:None + ~in_product_since:rel_clearwater + ~params:[ + Ref _pool, "self", "Reference to the pool"; + String, "edition", "The requested edition"; + ] + ~doc:"Apply an edition to all hosts in the pool" + 
~allowed_roles:_R_POOL_OP + () let pool_enable_ssl_legacy = call - ~name:"enable_ssl_legacy" - ~in_oss_since:None - ~lifecycle:[ - Published, rel_dundee, ""; - ] - ~params:[Ref _pool, "self", "(ignored)";] - ~doc:"Sets ssl_legacy true on each host, pool-master last. See Host.ssl_legacy and Host.set_ssl_legacy." - ~allowed_roles:_R_POOL_OP - () + ~name:"enable_ssl_legacy" + ~in_oss_since:None + ~lifecycle:[ + Published, rel_dundee, ""; + ] + ~params:[Ref _pool, "self", "(ignored)";] + ~doc:"Sets ssl_legacy true on each host, pool-master last. See Host.ssl_legacy and Host.set_ssl_legacy." + ~allowed_roles:_R_POOL_OP + () let pool_disable_ssl_legacy = call - ~name:"disable_ssl_legacy" - ~in_oss_since:None - ~lifecycle:[ - Published, rel_dundee, ""; - ] - ~params:[Ref _pool, "self", "(ignored)";] - ~doc:"Sets ssl_legacy true on each host, pool-master last. See Host.ssl_legacy and Host.set_ssl_legacy." - ~allowed_roles:_R_POOL_OP - () + ~name:"disable_ssl_legacy" + ~in_oss_since:None + ~lifecycle:[ + Published, rel_dundee, ""; + ] + ~params:[Ref _pool, "self", "(ignored)";] + ~doc:"Sets ssl_legacy true on each host, pool-master last. See Host.ssl_legacy and Host.set_ssl_legacy." 
+ ~allowed_roles:_R_POOL_OP + () let pool_has_extension = call - ~name:"has_extension" - ~in_product_since:rel_dundee - ~doc:"Return true if the extension is available on the pool" - ~params:[ - Ref _pool, "self", "The pool"; - String, "name", "The name of the API call" - ] - ~result:(Bool, "True if the extension exists, false otherwise") - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"has_extension" + ~in_product_since:rel_dundee + ~doc:"Return true if the extension is available on the pool" + ~params:[ + Ref _pool, "self", "The pool"; + String, "name", "The name of the API call" + ] + ~result:(Bool, "True if the extension exists, false otherwise") + ~allowed_roles:_R_POOL_ADMIN + () let pool_add_to_guest_agent_config = call - ~name:"add_to_guest_agent_config" - ~in_product_since:rel_dundee - ~doc:"Add a key-value pair to the pool-wide guest agent configuration" - ~params:[ - Ref _pool, "self", "The pool"; - String, "key", "The key to add"; - String, "value", "The value to add"; - ] - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"add_to_guest_agent_config" + ~in_product_since:rel_dundee + ~doc:"Add a key-value pair to the pool-wide guest agent configuration" + ~params:[ + Ref _pool, "self", "The pool"; + String, "key", "The key to add"; + String, "value", "The value to add"; + ] + ~allowed_roles:_R_POOL_ADMIN + () let pool_remove_from_guest_agent_config = call - ~name:"remove_from_guest_agent_config" - ~in_product_since:rel_dundee - ~doc:"Remove a key-value pair from the pool-wide guest agent configuration" - ~params:[ - Ref _pool, "self", "The pool"; - String, "key", "The key to remove"; - ] - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"remove_from_guest_agent_config" + ~in_product_since:rel_dundee + ~doc:"Remove a key-value pair from the pool-wide guest agent configuration" + ~params:[ + Ref _pool, "self", "The pool"; + String, "key", "The key to remove"; + ] + ~allowed_roles:_R_POOL_ADMIN + () (** A pool class *) let pool = - create_obj - ~in_db:true - 
~in_product_since:rel_rio - ~in_oss_since:None - ~internal_deprecated_since:None - ~persist:PersistEverything - ~gen_constructor_destructor:false - ~name:_pool - ~descr:"Pool-wide information" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages: - [ pool_join - ; pool_join_force - ; pool_eject - ; pool_initial_auth - ; pool_transition_to_master - ; pool_slave_reset_master - ; pool_recover_slaves - ; pool_hello - ; pool_ping_slave - ; pool_create_VLAN - ; pool_create_VLAN_from_PIF - ; pool_slave_network_report - ; pool_enable_ha - ; pool_disable_ha - ; pool_sync_database - ; pool_designate_new_master - ; pool_ha_prevent_restarts_for - ; pool_ha_failover_plan_exists - ; pool_ha_compute_max_host_failures_to_tolerate - ; pool_ha_compute_hypothetical_max_host_failures_to_tolerate - ; pool_ha_compute_vm_failover_plan - ; pool_set_ha_host_failures_to_tolerate - ; pool_create_new_blob - ; pool_ha_schedule_plan_recomputation - ; pool_enable_binary_storage - ; pool_disable_binary_storage - ; pool_enable_external_auth - ; pool_disable_external_auth - ; pool_detect_nonhomogeneous_external_auth - ; pool_initialize_wlb - ; pool_deconfigure_wlb - ; pool_send_wlb_configuration - ; pool_retrieve_wlb_configuration - ; pool_retrieve_wlb_recommendations - ; pool_send_test_post - ; pool_certificate_install - ; pool_certificate_uninstall - ; pool_certificate_list - ; pool_crl_install - ; pool_crl_uninstall - ; pool_crl_list - ; pool_certificate_sync - ; pool_enable_redo_log - ; pool_disable_redo_log - ; pool_audit_log_append - ; pool_set_vswitch_controller - ; pool_test_archive_target - ; pool_enable_local_storage_caching - ; pool_disable_local_storage_caching - ; pool_get_license_state - ; pool_apply_edition - ; pool_enable_ssl_legacy - ; pool_disable_ssl_legacy - ; pool_has_extension - ; pool_add_to_guest_agent_config - ; pool_remove_from_guest_agent_config - ] - ~contents: - ([uid ~in_oss_since:None _pool] @ - [ field ~in_oss_since:None 
~qualifier:RW ~ty:String "name_label" "Short name" - ; field ~in_oss_since:None ~qualifier:RW ~ty:String "name_description" "Description" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Ref _host) "master" "The host that is pool master" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "default_SR" "Default SR for VDIs" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "suspend_image_SR" "The SR in which VDIs for suspend images are created" - ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "crash_dump_SR" "The SR in which VDIs for crash dumps are created" - ; field ~in_oss_since:None ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP));("EMPTY_FOLDERS",(_R_VM_OP))] - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_enabled" "true if HA is enabled on the pool, false otherwise" - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "ha_configuration" "The current HA configuration" - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "HA statefile VDIs in use" - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_host_failures_to_tolerate" "Number of host failures to tolerate before the Pool is declared to be overcommitted" - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_plan_exists_for" "Number of future host failures we have managed to find a plan for. Once this reaches zero any future host failures will cause the failure of protected VMs." 
- ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "ha_allow_overcommit" "If set to false then operations which would cause the Pool to become overcommitted will be blocked." - ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_overcommitted" "True if the Pool is considered to be overcommitted i.e. if there exist insufficient physical resources to tolerate the configured number of host failures" - ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this pool" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" - ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "gui_config" "gui-specific configuration for pool" - ; field ~writer_roles:_R_POOL_OP ~in_product_since:rel_dundee ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "health_check_config" "Configuration for the automatic health check feature" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "wlb_url" "Url for the configured workload balancing host" - ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "wlb_username" "Username for accessing the workload balancing host" - ; field ~in_product_since:rel_george ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _secret) "wlb_password" "Password for accessing the workload balancing host" - ; field ~in_product_since:rel_george ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "wlb_enabled" "true if workload balancing is enabled on the pool, false otherwise" - ; field ~in_product_since:rel_george ~qualifier:RW 
~ty:Bool ~default_value:(Some (VBool false)) "wlb_verify_cert" "true if communication with the WLB server should enforce SSL certificate verification." - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "redo_log_enabled" "true a redo-log is to be used other than when HA is enabled, false otherwise" - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Ref _vdi) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "redo_log_vdi" "indicates the VDI to use for the redo-log other than when HA is enabled" - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "vswitch_controller" "address of the vswitch controller" - ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "restrictions" "Pool-wide restrictions currently in effect" - ; field ~in_oss_since:None ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) "metadata_VDIs" "The set of currently known metadata VDIs for this pool" - ; field ~in_oss_since:None ~in_product_since:rel_dundee ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "ha_cluster_stack" "The HA cluster stack that is currently in use. Only valid when HA is enabled." 
- ] @ (allowed_and_current_operations pool_operations) @ - [ field ~in_oss_since:None ~in_product_since:rel_dundee ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "guest_agent_config" "Pool-wide guest agent configuration information" - ; field ~qualifier:DynamicRO ~in_product_since:rel_dundee ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "cpu_info" "Details about the physical CPUs on the pool" - ; field ~qualifier:RW ~in_product_since:rel_dundee ~default_value:(Some (VBool false)) ~ty:Bool "policy_no_vendor_device" "The pool-wide policy for clients on whether to use the vendor device or not on newly created VMs. This field will also be consulted if the 'has_vendor_device' field is not specified in the VM.create call." - ; field ~qualifier:RW ~in_product_since:rel_ely ~default_value:(Some (VBool false)) ~ty:Bool "live_patching_disabled" "The pool-wide flag to show if the live patching feauture is disabled or not." - ]) - () + create_obj + ~in_db:true + ~in_product_since:rel_rio + ~in_oss_since:None + ~internal_deprecated_since:None + ~persist:PersistEverything + ~gen_constructor_destructor:false + ~name:_pool + ~descr:"Pool-wide information" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages: + [ pool_join + ; pool_join_force + ; pool_eject + ; pool_initial_auth + ; pool_transition_to_master + ; pool_slave_reset_master + ; pool_recover_slaves + ; pool_hello + ; pool_ping_slave + ; pool_create_VLAN + ; pool_create_VLAN_from_PIF + ; pool_slave_network_report + ; pool_enable_ha + ; pool_disable_ha + ; pool_sync_database + ; pool_designate_new_master + ; pool_ha_prevent_restarts_for + ; pool_ha_failover_plan_exists + ; pool_ha_compute_max_host_failures_to_tolerate + ; pool_ha_compute_hypothetical_max_host_failures_to_tolerate + ; pool_ha_compute_vm_failover_plan + ; pool_set_ha_host_failures_to_tolerate + ; pool_create_new_blob + ; pool_ha_schedule_plan_recomputation + ; 
pool_enable_binary_storage + ; pool_disable_binary_storage + ; pool_enable_external_auth + ; pool_disable_external_auth + ; pool_detect_nonhomogeneous_external_auth + ; pool_initialize_wlb + ; pool_deconfigure_wlb + ; pool_send_wlb_configuration + ; pool_retrieve_wlb_configuration + ; pool_retrieve_wlb_recommendations + ; pool_send_test_post + ; pool_certificate_install + ; pool_certificate_uninstall + ; pool_certificate_list + ; pool_crl_install + ; pool_crl_uninstall + ; pool_crl_list + ; pool_certificate_sync + ; pool_enable_redo_log + ; pool_disable_redo_log + ; pool_audit_log_append + ; pool_set_vswitch_controller + ; pool_test_archive_target + ; pool_enable_local_storage_caching + ; pool_disable_local_storage_caching + ; pool_get_license_state + ; pool_apply_edition + ; pool_enable_ssl_legacy + ; pool_disable_ssl_legacy + ; pool_has_extension + ; pool_add_to_guest_agent_config + ; pool_remove_from_guest_agent_config + ] + ~contents: + ([uid ~in_oss_since:None _pool] @ + [ field ~in_oss_since:None ~qualifier:RW ~ty:String "name_label" "Short name" + ; field ~in_oss_since:None ~qualifier:RW ~ty:String "name_description" "Description" + ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Ref _host) "master" "The host that is pool master" + ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "default_SR" "Default SR for VDIs" + ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "suspend_image_SR" "The SR in which VDIs for suspend images are created" + ; field ~in_oss_since:None ~qualifier:RW ~ty:(Ref _sr) "crash_dump_SR" "The SR in which VDIs for crash dumps are created" + ; field ~in_oss_since:None ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:[("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP));("EMPTY_FOLDERS",(_R_VM_OP))] + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_enabled" "true if HA is enabled on the pool, false 
otherwise" + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "ha_configuration" "The current HA configuration" + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:(Set String) ~default_value:(Some (VSet [])) "ha_statefiles" "HA statefile VDIs in use" + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_host_failures_to_tolerate" "Number of host failures to tolerate before the Pool is declared to be overcommitted" + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Int ~default_value:(Some (VInt 0L)) "ha_plan_exists_for" "Number of future host failures we have managed to find a plan for. Once this reaches zero any future host failures will cause the failure of protected VMs." + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "ha_allow_overcommit" "If set to false then operations which would cause the Pool to become overcommitted will be blocked." + ; field ~in_oss_since:None ~in_product_since:rel_orlando ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "ha_overcommitted" "True if the Pool is considered to be overcommitted i.e. 
if there exist insufficient physical resources to tolerate the configured number of host failures" + ; field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this pool" + ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes" + ; field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "gui_config" "gui-specific configuration for pool" + ; field ~writer_roles:_R_POOL_OP ~in_product_since:rel_dundee ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "health_check_config" "Configuration for the automatic health check feature" + ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "wlb_url" "Url for the configured workload balancing host" + ; field ~in_product_since:rel_george ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "wlb_username" "Username for accessing the workload balancing host" + ; field ~in_product_since:rel_george ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _secret) "wlb_password" "Password for accessing the workload balancing host" + ; field ~in_product_since:rel_george ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "wlb_enabled" "true if workload balancing is enabled on the pool, false otherwise" + ; field ~in_product_since:rel_george ~qualifier:RW ~ty:Bool ~default_value:(Some (VBool false)) "wlb_verify_cert" "true if communication with the WLB server should enforce SSL certificate verification." 
+ ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:Bool ~default_value:(Some (VBool false)) "redo_log_enabled" "true if a redo-log is to be used other than when HA is enabled, false otherwise" + ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Ref _vdi) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "redo_log_vdi" "indicates the VDI to use for the redo-log other than when HA is enabled" + ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:String ~default_value:(Some (VString "")) "vswitch_controller" "address of the vswitch controller" + ; field ~in_oss_since:None ~in_product_since:rel_midnight_ride ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "restrictions" "Pool-wide restrictions currently in effect" + ; field ~in_oss_since:None ~in_product_since:rel_boston ~qualifier:DynamicRO ~ty:(Set (Ref _vdi)) "metadata_VDIs" "The set of currently known metadata VDIs for this pool" + ; field ~in_oss_since:None ~in_product_since:rel_dundee ~qualifier:DynamicRO ~default_value:(Some (VString "")) ~ty:String "ha_cluster_stack" "The HA cluster stack that is currently in use. Only valid when HA is enabled." + ] @ (allowed_and_current_operations pool_operations) @ + [ field ~in_oss_since:None ~in_product_since:rel_dundee ~qualifier:DynamicRO ~ty:(Map(String, String)) ~default_value:(Some (VMap [])) "guest_agent_config" "Pool-wide guest agent configuration information" + ; field ~qualifier:DynamicRO ~in_product_since:rel_dundee ~default_value:(Some (VMap [])) ~ty:(Map(String, String)) "cpu_info" "Details about the physical CPUs on the pool" + ; field ~qualifier:RW ~in_product_since:rel_dundee ~default_value:(Some (VBool false)) ~ty:Bool "policy_no_vendor_device" "The pool-wide policy for clients on whether to use the vendor device or not on newly created VMs. 
This field will also be consulted if the 'has_vendor_device' field is not specified in the VM.create call." + ; field ~qualifier:RW ~in_product_since:rel_ely ~default_value:(Some (VBool false)) ~ty:Bool "live_patching_disabled" "The pool-wide flag to show if the live patching feature is disabled or not." + ]) + () (** Auth class *) let auth_get_subject_identifier = call ~flags:[`Session] - ~name:"get_subject_identifier" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - (*Ref _auth, "auth", "???";*) - String, "subject_name", "The human-readable subject_name, such as a username or a groupname" ; - ] - ~result:(String, "the subject_identifier obtained from the external directory service") - ~doc:"This call queries the external directory service to obtain the subject_identifier as a string from the human-readable subject_name" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_subject_identifier" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[ + (*Ref _auth, "auth", "???";*) + String, "subject_name", "The human-readable subject_name, such as a username or a groupname" ; + ] + ~result:(String, "the subject_identifier obtained from the external directory service") + ~doc:"This call queries the external directory service to obtain the subject_identifier as a string from the human-readable subject_name" + ~allowed_roles:_R_READ_ONLY + () let auth_get_subject_information_from_identifier = call ~flags:[`Session] - ~name:"get_subject_information_from_identifier" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - String, "subject_identifier", "A string containing the subject_identifier, unique in the external directory service" - ] - ~result:(Map(String,String), "key-value pairs containing at least a key called subject_name") - ~doc:"This call queries the external directory service to obtain the user information (e.g. 
username, organization etc) from the specified subject_identifier" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_subject_information_from_identifier" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[ + String, "subject_identifier", "A string containing the subject_identifier, unique in the external directory service" + ] + ~result:(Map(String,String), "key-value pairs containing at least a key called subject_name") + ~doc:"This call queries the external directory service to obtain the user information (e.g. username, organization etc) from the specified subject_identifier" + ~allowed_roles:_R_READ_ONLY + () let auth_get_group_membership = call ~flags:[`Session] - ~name:"get_group_membership" - ~in_oss_since:None - ~in_product_since:rel_george - ~params:[ - String, "subject_identifier", "A string containing the subject_identifier, unique in the external directory service" - ] - ~result:(Set(String), "set of subject_identifiers that provides the group membership of subject_identifier passed as argument, it contains, recursively, all groups a subject_identifier is member of.") - ~doc:"This calls queries the external directory service to obtain the transitively-closed set of groups that the the subject_identifier is member of." - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_group_membership" + ~in_oss_since:None + ~in_product_since:rel_george + ~params:[ + String, "subject_identifier", "A string containing the subject_identifier, unique in the external directory service" + ] + ~result:(Set(String), "set of subject_identifiers that provides the group membership of subject_identifier passed as argument, it contains, recursively, all groups a subject_identifier is member of.") + ~doc:"This call queries the external directory service to obtain the transitively-closed set of groups that the subject_identifier is member of." 
+ ~allowed_roles:_R_READ_ONLY + () let auth = create_obj ~in_db:false ~in_product_since:rel_george ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_auth ~descr:"Management of remote authentication services" @@ -7024,45 +7024,45 @@ let auth = ~doccomments:[] ~messages_default_allowed_roles:_R_READ_ONLY ~messages: [auth_get_subject_identifier; - auth_get_subject_information_from_identifier; - auth_get_group_membership;] + auth_get_subject_information_from_identifier; + auth_get_group_membership;] ~contents:[] () (** Subject class *) let subject_add_to_roles = call ~flags:[`Session] - ~name:"add_to_roles" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _subject, "self", "The subject who we want to add the role to"; - Ref _role, "role", "The unique role reference" ; - ] - ~doc:"This call adds a new role to a subject" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"add_to_roles" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _subject, "self", "The subject who we want to add the role to"; + Ref _role, "role", "The unique role reference" ; + ] + ~doc:"This call adds a new role to a subject" + ~allowed_roles:_R_POOL_ADMIN + () let subject_remove_from_roles = call ~flags:[`Session] - ~name:"remove_from_roles" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _subject, "self", "The subject from whom we want to remove the role"; - Ref _role, "role", "The unique role reference in the subject's roles field" ; - ] - ~doc:"This call removes a role from a subject" - ~allowed_roles:_R_POOL_ADMIN - () + ~name:"remove_from_roles" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _subject, "self", "The subject from whom we want to remove the role"; + Ref _role, "role", "The unique role reference in the subject's roles field" ; + ] + ~doc:"This call removes a role from a subject" + ~allowed_roles:_R_POOL_ADMIN + 
() let subject_get_permissions_name_label = call ~flags:[`Session] - ~name:"get_permissions_name_label" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _subject, "self", "The subject whose permissions will be retrieved"; - ] - ~result:(Set(String), "a list of permission names") - ~doc:"This call returns a list of permission names given a subject" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_permissions_name_label" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _subject, "self", "The subject whose permissions will be retrieved"; + ] + ~result:(Set(String), "a list of permission names") + ~doc:"This call returns a list of permission names given a subject" + ~allowed_roles:_R_READ_ONLY + () (* a subject is a user/group that can log in xapi *) let subject = create_obj ~in_db:true ~in_product_since:rel_george ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_subject ~descr:"A user or group that can log in xapi" @@ -7073,62 +7073,62 @@ let subject = subject_add_to_roles; subject_remove_from_roles; subject_get_permissions_name_label; - ] + ] ~contents:[uid ~in_oss_since:None _subject; - field ~in_product_since:rel_george ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "subject_identifier" "the subject identifier, unique in the external directory service"; - field ~in_product_since:rel_george ~default_value:(Some (VMap [])) ~qualifier:StaticRO ~ty:(Map(String, String)) "other_config" "additional configuration"; - (* DynamicRO fields do not show up in the constructor, as it should be because a subject must be created without receiving any roles as a parameter *) - field ~in_product_since:rel_midnight_ride ~default_value:(Some (VSet [ - (VRef ("OpaqueRef:"^Constants.rbac_pool_admin_uuid))])) (* pool-admin, according to rbac_static.ml, used during upgrade from pre-rbac xapis *) - ~ignore_foreign_key:true ~qualifier:DynamicRO 
~ty:(Set((Ref _role))) "roles" "the roles associated with this subject"; - ] + field ~in_product_since:rel_george ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "subject_identifier" "the subject identifier, unique in the external directory service"; + field ~in_product_since:rel_george ~default_value:(Some (VMap [])) ~qualifier:StaticRO ~ty:(Map(String, String)) "other_config" "additional configuration"; + (* DynamicRO fields do not show up in the constructor, as it should be because a subject must be created without receiving any roles as a parameter *) + field ~in_product_since:rel_midnight_ride ~default_value:(Some (VSet [ + (VRef ("OpaqueRef:"^Constants.rbac_pool_admin_uuid))])) (* pool-admin, according to rbac_static.ml, used during upgrade from pre-rbac xapis *) + ~ignore_foreign_key:true ~qualifier:DynamicRO ~ty:(Set((Ref _role))) "roles" "the roles associated with this subject"; + ] () (** Role class *) let role_get_permissions = call ~flags:[`Session] - ~name:"get_permissions" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _role, "self", "a reference to a role"; - ] - ~result:(Set(Ref _role), "a list of permissions") - ~doc:"This call returns a list of permissions given a role" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_permissions" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _role, "self", "a reference to a role"; + ] + ~result:(Set(Ref _role), "a list of permissions") + ~doc:"This call returns a list of permissions given a role" + ~allowed_roles:_R_READ_ONLY + () let role_get_permissions_name_label = call ~flags:[`Session] - ~name:"get_permissions_name_label" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _role, "self", "a reference to a role"; - ] - ~result:(Set(String), "a list of permission names") - ~doc:"This call returns a list of permission names given a role" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_permissions_name_label" + 
~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _role, "self", "a reference to a role"; + ] + ~result:(Set(String), "a list of permission names") + ~doc:"This call returns a list of permission names given a role" + ~allowed_roles:_R_READ_ONLY + () let role_get_by_permission = call ~flags:[`Session] - ~name:"get_by_permission" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - Ref _role, "permission", "a reference to a permission" ; - ] - ~result:(Set(Ref _role), "a list of references to roles") - ~doc:"This call returns a list of roles given a permission" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_by_permission" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + Ref _role, "permission", "a reference to a permission" ; + ] + ~result:(Set(Ref _role), "a list of references to roles") + ~doc:"This call returns a list of roles given a permission" + ~allowed_roles:_R_READ_ONLY + () let role_get_by_permission_name_label = call ~flags:[`Session] - ~name:"get_by_permission_name_label" - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~params:[ - String, "label", "The short friendly name of the role" ; - ] - ~result:(Set(Ref _role), "a list of references to roles") - ~doc:"This call returns a list of roles given a permission name" - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_by_permission_name_label" + ~in_oss_since:None + ~in_product_since:rel_midnight_ride + ~params:[ + String, "label", "The short friendly name of the role" ; + ] + ~result:(Set(Ref _role), "a list of references to roles") + ~doc:"This call returns a list of roles given a permission name" + ~allowed_roles:_R_READ_ONLY + () (* A role defines a set of API call privileges associated with a subject *) (* A role is synonymous to permission or privilege *) @@ -7147,53 +7147,53 @@ let role = role_get_permissions_name_label; role_get_by_permission; role_get_by_permission_name_label; - ] + ] ~contents: [uid ~in_oss_since:None 
_role; - namespace ~name:"name" ~contents:( - [ - field ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "label" "a short user-friendly name for the role"; - field ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "description" "what this role is for"; - ]) (); - field ~in_product_since:rel_midnight_ride ~default_value:(Some (VSet [])) ~ignore_foreign_key:true ~qualifier:StaticRO ~ty:(Set(Ref _role)) "subroles" "a list of pointers to other roles or permissions"; - (*RBAC2: field ~in_product_since:rel_midnight_ride ~default_value:(Some (VBool false)) ~qualifier:StaticRO ~ty:Bool "is_complete" "if this is a complete role, meant to be used by the end-user";*) - ] + namespace ~name:"name" ~contents:( + [ + field ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "label" "a short user-friendly name for the role"; + field ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~qualifier:StaticRO ~ty:String "description" "what this role is for"; + ]) (); + field ~in_product_since:rel_midnight_ride ~default_value:(Some (VSet [])) ~ignore_foreign_key:true ~qualifier:StaticRO ~ty:(Set(Ref _role)) "subroles" "a list of pointers to other roles or permissions"; + (*RBAC2: field ~in_product_since:rel_midnight_ride ~default_value:(Some (VBool false)) ~qualifier:StaticRO ~ty:Bool "is_complete" "if this is a complete role, meant to be used by the end-user";*) + ] () (** A virtual disk interface *) let vtpm = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vtpm ~descr:"A virtual TPM device" - ~gen_events:false - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 
~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vtpm ~descr:"A virtual TPM device" + ~gen_events:false + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[] + ~contents: [ uid _vtpm; - field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; - field ~qualifier:StaticRO ~ty:(Ref _vm) "backend" "the domain where the backend is located" ] - () + field ~qualifier:StaticRO ~ty:(Ref _vm) "VM" "the virtual machine"; + field ~qualifier:StaticRO ~ty:(Ref _vm) "backend" "the domain where the backend is located" ] + () (** Console protocols *) let console_protocol = Enum("console_protocol", [ - "vt100", "VT100 terminal"; - "rfb", "Remote FrameBuffer protocol (as used in VNC)"; - "rdp", "Remote Desktop Protocol" - ]) + "vt100", "VT100 terminal"; + "rfb", "Remote FrameBuffer protocol (as used in VNC)"; + "rdp", "Remote Desktop Protocol" + ]) (** A virtual console device *) -let console = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_console ~descr:"A console" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[] ~contents: - [ uid _console; - field ~qualifier:DynamicRO ~ty:console_protocol "protocol" "the protocol used by this console"; - field ~qualifier:DynamicRO ~ty:String "location" "URI for the console service"; - field ~qualifier:DynamicRO ~ty:(Ref _vm) "VM" "VM to which this console is attached"; - field ~ty:(Map(String, String)) "other_config" "additional configuration"; - field ~in_oss_since:None ~internal_only:true ~ty:Int "port" "port in dom0 on which the console server is listening"; - ] - () +let console = + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_console ~descr:"A console" 
+ ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[] ~contents: + [ uid _console; + field ~qualifier:DynamicRO ~ty:console_protocol "protocol" "the protocol used by this console"; + field ~qualifier:DynamicRO ~ty:String "location" "URI for the console service"; + field ~qualifier:DynamicRO ~ty:(Ref _vm) "VM" "VM to which this console is attached"; + field ~ty:(Map(String, String)) "other_config" "additional configuration"; + field ~in_oss_since:None ~internal_only:true ~ty:Int "port" "port in dom0 on which the console server is listening"; + ] + () (* PV domain booting *) let pv = @@ -7217,22 +7217,22 @@ let hvm = (** Action to take on guest reboot/power off/sleep etc *) (* let power_behaviour = - Enum ("power_behaviour", [ "destroy", "destroy the VM state"; - "restart", "automatically restart the VM"; - "preserve", "leave VM running"; + Enum ("power_behaviour", [ "destroy", "destroy the VM state"; + "restart", "automatically restart the VM"; + "preserve", "leave VM running"; "rename_restart", "leave VM running and restart a new one" ]) *) -let on_crash_behaviour = +let on_crash_behaviour = Enum ("on_crash_behaviour", [ "destroy", "destroy the VM state"; - "coredump_and_destroy", "record a coredump and then destroy the VM state"; - "restart", "restart the VM"; - "coredump_and_restart", "record a coredump and then restart the VM"; - "preserve", "leave the crashed VM paused"; - "rename_restart", "rename the crashed VM and start a new copy" ]) + "coredump_and_destroy", "record a coredump and then destroy the VM state"; + "restart", "restart the VM"; + "coredump_and_restart", "record a coredump and then restart the VM"; + "preserve", "leave the crashed VM paused"; + "rename_restart", "rename the crashed VM and start a new copy" ]) -let on_normal_exit_behaviour = +let on_normal_exit_behaviour = Enum ("on_normal_exit", [ "destroy", "destroy the VM state"; - "restart", "restart the VM" ]) + "restart", "restart the VM" ]) (** 
Virtual CPUs *) @@ -7255,205 +7255,205 @@ let actions = let vm_power_state = Enum ("vm_power_state", [ "Halted", "VM is offline and not using any resources"; - "Paused", "All resources have been allocated but the VM itself is paused and its vCPUs are not running"; - "Running", "Running"; - "Suspended", "VM state has been saved to disk and it is nolonger running. Note that disks remain in-use while the VM is suspended."]) + "Paused", "All resources have been allocated but the VM itself is paused and its vCPUs are not running"; + "Running", "Running"; + "Suspended", "VM state has been saved to disk and it is nolonger running. Note that disks remain in-use while the VM is suspended."]) let vm_operations = Enum ("vm_operations", - List.map operation_enum - [ vm_snapshot; vm_clone; vm_copy; vm_create_template; vm_revert; vm_checkpoint; vm_snapshot_with_quiesce; - vm_provision; vm_start; vm_start_on; vm_pause; vm_unpause; vm_cleanShutdown; - vm_cleanReboot; vm_hardShutdown; vm_stateReset; vm_hardReboot; - vm_suspend; csvm; vm_resume; vm_resume_on; - vm_pool_migrate; - vm_migrate_send; - vm_get_boot_record; vm_send_sysrq; vm_send_trigger; - vm_query_services;vm_shutdown; - vm_call_plugin; - ] - @ [ "changing_memory_live", "Changing the memory settings"; - "awaiting_memory_live", "Waiting for the memory settings to change"; - "changing_dynamic_range", "Changing the memory dynamic range"; - "changing_static_range", "Changing the memory static range"; - "changing_memory_limits", "Changing the memory limits"; - "changing_shadow_memory", "Changing the shadow memory for a halted VM."; - "changing_shadow_memory_live", "Changing the shadow memory for a running VM."; - "changing_VCPUs", "Changing VCPU settings for a halted VM."; - "changing_VCPUs_live", "Changing VCPU settings for a running VM."; - "assert_operation_valid", ""; - "data_source_op", "Add, remove, query or list data sources"; - "update_allowed_operations", ""; - "make_into_template", "Turning this VM into a 
template"; - "import", "importing a VM from a network stream"; - "export", "exporting a VM to a network stream"; - "metadata_export", "exporting VM metadata to a network stream"; - "reverting", "Reverting the VM to a previous snapshotted state"; - "destroy", "refers to the act of uninstalling the VM"; - ] - ) + List.map operation_enum + [ vm_snapshot; vm_clone; vm_copy; vm_create_template; vm_revert; vm_checkpoint; vm_snapshot_with_quiesce; + vm_provision; vm_start; vm_start_on; vm_pause; vm_unpause; vm_cleanShutdown; + vm_cleanReboot; vm_hardShutdown; vm_stateReset; vm_hardReboot; + vm_suspend; csvm; vm_resume; vm_resume_on; + vm_pool_migrate; + vm_migrate_send; + vm_get_boot_record; vm_send_sysrq; vm_send_trigger; + vm_query_services;vm_shutdown; + vm_call_plugin; + ] + @ [ "changing_memory_live", "Changing the memory settings"; + "awaiting_memory_live", "Waiting for the memory settings to change"; + "changing_dynamic_range", "Changing the memory dynamic range"; + "changing_static_range", "Changing the memory static range"; + "changing_memory_limits", "Changing the memory limits"; + "changing_shadow_memory", "Changing the shadow memory for a halted VM."; + "changing_shadow_memory_live", "Changing the shadow memory for a running VM."; + "changing_VCPUs", "Changing VCPU settings for a halted VM."; + "changing_VCPUs_live", "Changing VCPU settings for a running VM."; + "assert_operation_valid", ""; + "data_source_op", "Add, remove, query or list data sources"; + "update_allowed_operations", ""; + "make_into_template", "Turning this VM into a template"; + "import", "importing a VM from a network stream"; + "export", "exporting a VM to a network stream"; + "metadata_export", "exporting VM metadata to a network stream"; + "reverting", "Reverting the VM to a previous snapshotted state"; + "destroy", "refers to the act of uninstalling the VM"; + ] + ) (** VM (or 'guest') configuration: *) let vm = - create_obj ~in_db:true ~in_product_since:rel_rio 
~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vm ~descr:"A virtual machine (or 'guest')." - ~gen_events:true - ~doccomments:[ "destroy", "Destroy the specified VM. The VM is completely removed from the system. This function can only be called when the VM is in the Halted State."; - "create", "NOT RECOMMENDED! VM.clone or VM.copy (or VM.import) is a better choice in almost all situations. The standard way to obtain a new VM is to call VM.clone on a template VM, then call VM.provision on the new clone. Caution: if VM.create is used and then the new VM is attached to a virtual disc that has an operating system already installed, then there is no guarantee that the operating system will boot and run. Any software that calls VM.create on a future version of this API may fail or give unexpected results. For example this could happen if an additional parameter were added to VM.create. VM.create is intended only for use in the automatic creation of the system VM templates. 
It creates a new VM instance, and returns its handle."; - ] - ~lifecycle:[ - Published, rel_rio, ""; - ] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[ vm_snapshot; vm_snapshot_with_quiesce; vm_clone; vm_copy; vm_revert; vm_checkpoint; - vm_provision; vm_start; vm_start_on; vm_pause; vm_unpause; vm_cleanShutdown;vm_shutdown; - vm_cleanReboot; vm_hardShutdown; vm_stateReset; vm_hardReboot; vm_suspend; csvm; vm_resume; - vm_hardReboot_internal; - vm_resume_on; - vm_pool_migrate; vm_pool_migrate_complete; - set_vcpus_number_live; - vm_add_to_VCPUs_params_live; - vm_set_ha_restart_priority; (* updates the allowed-operations of the VM *) - vm_set_ha_always_run; (* updates the allowed-operations of the VM *) - vm_compute_memory_overhead; - vm_set_memory_dynamic_max; - vm_set_memory_dynamic_min; - vm_set_memory_dynamic_range; - vm_set_memory_static_max; - vm_set_memory_static_min; - vm_set_memory_static_range; - vm_set_memory_limits; - vm_set_memory; - vm_set_memory_target_live; - vm_wait_memory_target_live; - vm_get_cooperative; - vm_set_HVM_shadow_multiplier; - vm_set_shadow_multiplier_live; - vm_set_VCPUs_max; - vm_set_VCPUs_at_startup; - vm_send_sysrq; vm_send_trigger; - vm_maximise_memory; - vm_migrate_send; - vm_assert_can_migrate; - vm_get_boot_record; - vm_get_data_sources; vm_record_data_source; vm_query_data_source; vm_forget_data_source_archives; - assert_operation_valid vm_operations _vm _self; - update_allowed_operations vm_operations _vm _self; - vm_get_allowed_VBD_devices; - vm_get_allowed_VIF_devices; - vm_get_possible_hosts; - vm_assert_can_boot_here; - vm_atomic_set_resident_on; - vm_create_new_blob; - vm_s3_suspend; - vm_s3_resume; - vm_assert_agile; - vm_update_snapshot_metadata; - vm_retrieve_wlb_recommendations; - vm_copy_bios_strings; - vm_set_protection_policy; - vm_set_start_delay; - vm_set_shutdown_delay; - vm_set_order; - vm_set_suspend_VDI; - vm_assert_can_be_recovered; - vm_get_SRs_required_for_recovery; - vm_recover; - 
vm_import_convert; - vm_set_appliance; - vm_query_services; - vm_call_plugin; - vm_set_has_vendor_device; - vm_import; - ] - ~contents: + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:oss_since_303 ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vm ~descr:"A virtual machine (or 'guest')." + ~gen_events:true + ~doccomments:[ "destroy", "Destroy the specified VM. The VM is completely removed from the system. This function can only be called when the VM is in the Halted State."; + "create", "NOT RECOMMENDED! VM.clone or VM.copy (or VM.import) is a better choice in almost all situations. The standard way to obtain a new VM is to call VM.clone on a template VM, then call VM.provision on the new clone. Caution: if VM.create is used and then the new VM is attached to a virtual disc that has an operating system already installed, then there is no guarantee that the operating system will boot and run. Any software that calls VM.create on a future version of this API may fail or give unexpected results. For example this could happen if an additional parameter were added to VM.create. VM.create is intended only for use in the automatic creation of the system VM templates. 
It creates a new VM instance, and returns its handle."; + ] + ~lifecycle:[ + Published, rel_rio, ""; + ] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[ vm_snapshot; vm_snapshot_with_quiesce; vm_clone; vm_copy; vm_revert; vm_checkpoint; + vm_provision; vm_start; vm_start_on; vm_pause; vm_unpause; vm_cleanShutdown;vm_shutdown; + vm_cleanReboot; vm_hardShutdown; vm_stateReset; vm_hardReboot; vm_suspend; csvm; vm_resume; + vm_hardReboot_internal; + vm_resume_on; + vm_pool_migrate; vm_pool_migrate_complete; + set_vcpus_number_live; + vm_add_to_VCPUs_params_live; + vm_set_ha_restart_priority; (* updates the allowed-operations of the VM *) + vm_set_ha_always_run; (* updates the allowed-operations of the VM *) + vm_compute_memory_overhead; + vm_set_memory_dynamic_max; + vm_set_memory_dynamic_min; + vm_set_memory_dynamic_range; + vm_set_memory_static_max; + vm_set_memory_static_min; + vm_set_memory_static_range; + vm_set_memory_limits; + vm_set_memory; + vm_set_memory_target_live; + vm_wait_memory_target_live; + vm_get_cooperative; + vm_set_HVM_shadow_multiplier; + vm_set_shadow_multiplier_live; + vm_set_VCPUs_max; + vm_set_VCPUs_at_startup; + vm_send_sysrq; vm_send_trigger; + vm_maximise_memory; + vm_migrate_send; + vm_assert_can_migrate; + vm_get_boot_record; + vm_get_data_sources; vm_record_data_source; vm_query_data_source; vm_forget_data_source_archives; + assert_operation_valid vm_operations _vm _self; + update_allowed_operations vm_operations _vm _self; + vm_get_allowed_VBD_devices; + vm_get_allowed_VIF_devices; + vm_get_possible_hosts; + vm_assert_can_boot_here; + vm_atomic_set_resident_on; + vm_create_new_blob; + vm_s3_suspend; + vm_s3_resume; + vm_assert_agile; + vm_update_snapshot_metadata; + vm_retrieve_wlb_recommendations; + vm_copy_bios_strings; + vm_set_protection_policy; + vm_set_start_delay; + vm_set_shutdown_delay; + vm_set_order; + vm_set_suspend_VDI; + vm_assert_can_be_recovered; + vm_get_SRs_required_for_recovery; + vm_recover; + 
vm_import_convert; + vm_set_appliance; + vm_query_services; + vm_call_plugin; + vm_set_has_vendor_device; + vm_import; + ] + ~contents: ([ uid _vm; - ] @ (allowed_and_current_operations vm_operations) @ [ - field ~writer_roles:_R_VM_OP ~qualifier:DynamicRO ~ty:vm_power_state "power_state" "Current power state of the machine"; - namespace ~name:"name" ~contents:(names oss_since_303 RW) (); - - field ~ty:Int "user_version" "Creators of VMs and templates may store version information here."; - field ~effect:true ~ty:Bool "is_a_template" "true if this is a template. Template VMs can never be started, they are used only for cloning other VMs"; - field ~qualifier:DynamicRO ~ty:(Ref _vdi) "suspend_VDI" "The VDI that a suspend image is stored on. (Only has meaning if VM is currently suspended)"; - - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" "the host the VM is currently resident on"; - field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) "scheduled_to_be_resident_on" "the host on which the VM is due to be started/resumed/migrated. This acts as a memory reservation indicator"; - field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~ty:(Ref _host) "affinity" "A host which the VM has some affinity for (or NULL). This is used as a hint to the start call when it decides where to run the VM. 
Resource constraints may cause the VM to be started elsewhere."; - - namespace ~name:"memory" ~contents:guest_memory (); - namespace ~name:"VCPUs" ~contents:vcpus (); - namespace ~name:"actions" ~contents:actions (); - - field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO ~ty:(Set (Ref _console)) "consoles" "virtual console devices"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] "VIFs" "virtual network interfaces"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" "virtual block devices"; - field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" "crash dumps associated with this VM"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vtpm)) "VTPMs" "virtual TPMs"; - - namespace ~name:"PV" ~contents:pv (); - namespace ~name:"HVM" ~contents:hvm (); - field ~ty:(Map(String, String)) "platform" "platform-specific configuration"; - - field ~lifecycle:[ - Published, rel_rio, "PCI bus path for pass-through devices"; - Deprecated, rel_boston, "Field was never used"] - "PCI_bus" "PCI bus path for pass-through devices"; - field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:["pci", _R_POOL_ADMIN; ("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; - field ~qualifier:DynamicRO ~ty:Int "domid" "domain ID (if available, -1 otherwise)"; - field ~qualifier:DynamicRO ~in_oss_since:None ~ty:String "domarch" "Domain architecture (if available, null string otherwise)"; - field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map(String, String)) "last_boot_CPU_flags" "describes the CPU flags on which the VM was last booted"; - field ~qualifier:DynamicRO ~ty:Bool "is_control_domain" "true if this is a control domain (domain 0 or a driver domain)"; - field ~qualifier:DynamicRO ~ty:(Ref _vm_metrics) "metrics" "metrics associated with this VM"; - field ~qualifier:DynamicRO ~ty:(Ref _vm_guest_metrics) "guest_metrics" "metrics associated with the running guest"; - (* This was 
an internal field in Rio, Miami beta1, Miami beta2 but is now exposed so that - it will be included automatically in Miami GA exports and can be restored, important if - the VM is in a suspended state *) - field ~in_oss_since:None ~internal_only:false ~in_product_since:rel_miami ~qualifier:DynamicRO ~ty:String "last_booted_record" "marshalled value containing VM record at time of last boot, updated dynamically to reflect the runtime state of the domain" ~default_value:(Some (VString "")); - field ~in_oss_since:None ~ty:String "recommendations" "An XML specification of recommended values and ranges for properties of this VM"; - field ~effect:true ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" "data to be inserted into the xenstore tree (/local/domain//vm-data) after the VM is created." ~default_value:(Some (VMap [])); - field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:Bool ~in_product_since:rel_orlando ~internal_deprecated_since:rel_boston ~qualifier:StaticRO "ha_always_run" "if true then the system will attempt to keep the VM running as much as possible." ~default_value:(Some (VBool false)); - field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:String ~in_product_since:rel_orlando ~qualifier:StaticRO "ha_restart_priority" "has possible values: \"best-effort\" meaning \"try to restart this VM if possible but don't consider the Pool to be overcommitted if this is not possible\"; \"restart\" meaning \"this VM should be restarted\"; \"\" meaning \"do not try to restart this VM\"" ~default_value:(Some (VString "")); - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~ty:Bool "is_a_snapshot" "true if this is a snapshot. 
Snapshotted VMs can never be started, they are used only for cloning other VMs"; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~ty:(Ref _vm) "snapshot_of" "Ref pointing to the VM this snapshot is of."; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set (Ref _vm)) "snapshots" "List pointing to all the VM snapshots."; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VDateTime Date.never)) ~ty:DateTime "snapshot_time" "Date/time when this snapshot was created."; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VString "")) ~ty:String "transportable_snapshot_id" "Transportable ID of the snapshot VM"; - field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this VM"; - field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; - field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~qualifier:RW ~ty:(Map(vm_operations, String)) "blocked_operations" "List of operations which have been explicitly blocked and an error code"; - - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "snapshot_info" "Human-readable information concerning this snapshot"; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "snapshot_metadata" "Encoded information about the VM's metadata this is a snapshot of"; - - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VRef "")) 
~ty:(Ref _vm) "parent" "Ref pointing to the parent of this VM"; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~ty:(Set (Ref _vm)) "children" "List pointing to all the children of this VM"; - - field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "bios_strings" "BIOS strings"; - field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO ~lifecycle:[Published, rel_cowley, ""; Deprecated, rel_clearwater, "The VMPR feature was removed"] ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _vmpp) "protection_policy" "Ref pointing to a protection policy for this VM"; - field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~lifecycle:[Published, rel_cowley, ""; Deprecated, rel_clearwater, "The VMPR feature was removed"] ~default_value:(Some (VBool false)) ~ty:Bool "is_snapshot_from_vmpp" "true if this snapshot was created by the protection policy"; - field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~ty:(Ref _vm_appliance) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "appliance" "the appliance to which this VM belongs"; - field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "start_delay" "The delay to wait before proceeding to the next order in the startup sequence (seconds)"; - field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "shutdown_delay" "The delay to wait before proceeding to the next order in the shutdown sequence (seconds)"; - field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "order" "The point in the startup or shutdown sequence at which this VM will be started"; - field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _vgpu)) "VGPUs" "Virtual GPUs"; - field ~qualifier:DynamicRO ~lifecycle:[Published, 
rel_boston, ""] ~ty:(Set (Ref _pci)) "attached_PCIs" "Currently passed-through PCI devices"; - field ~writer_roles:_R_VM_ADMIN ~qualifier:RW ~in_product_since:rel_boston ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _sr) "suspend_SR" "The SR on which a suspend image is stored"; - field ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "version" "The number of times this VM has been recovered"; - field ~qualifier:StaticRO ~in_product_since:rel_clearwater ~default_value:(Some (VString "0:0")) ~ty:(String) "generation_id" "Generation ID of the VM"; - field ~writer_roles:_R_VM_ADMIN ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VInt 0L)) ~ty:Int "hardware_platform_version" "The host virtual hardware platform version the VM can run on"; - field ~qualifier:StaticRO ~lifecycle:[Published, rel_dundee, ""] ~doc_tags:[Windows] ~default_value:(Some (VCustom (String.concat "\n" [ - "(try Rpc.Bool ("; - "let pool = List.hd (Db_actions.DB_Action.Pool.get_all ~__context) in"; - "let restrictions = Db_actions.DB_Action.Pool.get_restrictions ~__context ~self:pool in "; - "let vendor_device_allowed = try List.assoc \"restrict_pci_device_for_auto_update\" restrictions = \"false\" with _ -> false in"; - "let policy_says_its_ok = not (Db_actions.DB_Action.Pool.get_policy_no_vendor_device ~__context ~self:pool) in"; - "vendor_device_allowed && policy_says_its_ok) with e -> D.error \"Failure when defaulting has_vendor_device field: %s\" (Printexc.to_string e); Rpc.Bool false)"], VBool false))) - ~ty:Bool "has_vendor_device" "When an HVM guest starts, this controls the presence of the emulated C000 PCI device which triggers Windows Update to fetch or update PV drivers."; - field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_ely, ""] ~default_value:(Some (VBool false)) - "requires_reboot" "Indicates whether a VM requires a reboot in order to update its configuration, e.g. its memory allocation." 
- ]) - () - -let vm_memory_metrics = + ] @ (allowed_and_current_operations vm_operations) @ [ + field ~writer_roles:_R_VM_OP ~qualifier:DynamicRO ~ty:vm_power_state "power_state" "Current power state of the machine"; + namespace ~name:"name" ~contents:(names oss_since_303 RW) (); + + field ~ty:Int "user_version" "Creators of VMs and templates may store version information here."; + field ~effect:true ~ty:Bool "is_a_template" "true if this is a template. Template VMs can never be started, they are used only for cloning other VMs"; + field ~qualifier:DynamicRO ~ty:(Ref _vdi) "suspend_VDI" "The VDI that a suspend image is stored on. (Only has meaning if VM is currently suspended)"; + + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~ty:(Ref _host) "resident_on" "the host the VM is currently resident on"; + field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~internal_only:true ~qualifier:DynamicRO ~ty:(Ref _host) "scheduled_to_be_resident_on" "the host on which the VM is due to be started/resumed/migrated. This acts as a memory reservation indicator"; + field ~writer_roles:_R_VM_POWER_ADMIN ~in_oss_since:None ~ty:(Ref _host) "affinity" "A host which the VM has some affinity for (or NULL). This is used as a hint to the start call when it decides where to run the VM. 
Resource constraints may cause the VM to be started elsewhere."; + + namespace ~name:"memory" ~contents:guest_memory (); + namespace ~name:"VCPUs" ~contents:vcpus (); + namespace ~name:"actions" ~contents:actions (); + + field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO ~ty:(Set (Ref _console)) "consoles" "virtual console devices"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vif)) ~doc_tags:[Networking] "VIFs" "virtual network interfaces"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vbd)) "VBDs" "virtual block devices"; + field ~writer_roles:_R_POOL_ADMIN ~qualifier:DynamicRO ~ty:(Set (Ref _crashdump)) "crash_dumps" "crash dumps associated with this VM"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vtpm)) "VTPMs" "virtual TPMs"; + + namespace ~name:"PV" ~contents:pv (); + namespace ~name:"HVM" ~contents:hvm (); + field ~ty:(Map(String, String)) "platform" "platform-specific configuration"; + + field ~lifecycle:[ + Published, rel_rio, "PCI bus path for pass-through devices"; + Deprecated, rel_boston, "Field was never used"] + "PCI_bus" "PCI bus path for pass-through devices"; + field ~ty:(Map(String, String)) "other_config" "additional configuration" ~map_keys_roles:["pci", _R_POOL_ADMIN; ("folder",(_R_VM_OP));("XenCenter.CustomFields.*",(_R_VM_OP))]; + field ~qualifier:DynamicRO ~ty:Int "domid" "domain ID (if available, -1 otherwise)"; + field ~qualifier:DynamicRO ~in_oss_since:None ~ty:String "domarch" "Domain architecture (if available, null string otherwise)"; + field ~in_oss_since:None ~qualifier:DynamicRO ~ty:(Map(String, String)) "last_boot_CPU_flags" "describes the CPU flags on which the VM was last booted"; + field ~qualifier:DynamicRO ~ty:Bool "is_control_domain" "true if this is a control domain (domain 0 or a driver domain)"; + field ~qualifier:DynamicRO ~ty:(Ref _vm_metrics) "metrics" "metrics associated with this VM"; + field ~qualifier:DynamicRO ~ty:(Ref _vm_guest_metrics) "guest_metrics" "metrics associated with the running guest"; + (* This was 
an internal field in Rio, Miami beta1, Miami beta2 but is now exposed so that + it will be included automatically in Miami GA exports and can be restored, important if + the VM is in a suspended state *) + field ~in_oss_since:None ~internal_only:false ~in_product_since:rel_miami ~qualifier:DynamicRO ~ty:String "last_booted_record" "marshalled value containing VM record at time of last boot, updated dynamically to reflect the runtime state of the domain" ~default_value:(Some (VString "")); + field ~in_oss_since:None ~ty:String "recommendations" "An XML specification of recommended values and ranges for properties of this VM"; + field ~effect:true ~in_oss_since:None ~ty:(Map(String, String)) ~in_product_since:rel_miami ~qualifier:RW "xenstore_data" "data to be inserted into the xenstore tree (/local/domain//vm-data) after the VM is created." ~default_value:(Some (VMap [])); + field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:Bool ~in_product_since:rel_orlando ~internal_deprecated_since:rel_boston ~qualifier:StaticRO "ha_always_run" "if true then the system will attempt to keep the VM running as much as possible." ~default_value:(Some (VBool false)); + field ~writer_roles:_R_POOL_OP ~in_oss_since:None ~ty:String ~in_product_since:rel_orlando ~qualifier:StaticRO "ha_restart_priority" "has possible values: \"best-effort\" meaning \"try to restart this VM if possible but don't consider the Pool to be overcommitted if this is not possible\"; \"restart\" meaning \"this VM should be restarted\"; \"\" meaning \"do not try to restart this VM\"" ~default_value:(Some (VString "")); + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VBool false)) ~ty:Bool "is_a_snapshot" "true if this is a snapshot. 
Snapshotted VMs can never be started, they are used only for cloning other VMs"; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VRef "")) ~ty:(Ref _vm) "snapshot_of" "Ref pointing to the VM this snapshot is of."; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Set (Ref _vm)) "snapshots" "List pointing to all the VM snapshots."; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VDateTime Date.never)) ~ty:DateTime "snapshot_time" "Date/time when this snapshot was created."; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_orlando ~default_value:(Some (VString "")) ~ty:String "transportable_snapshot_id" "Transportable ID of the snapshot VM"; + field ~qualifier:DynamicRO ~in_product_since:rel_orlando ~ty:(Map(String, Ref _blob)) ~default_value:(Some (VMap [])) "blobs" "Binary blobs associated with this VM"; + field ~writer_roles:_R_VM_OP ~in_product_since:rel_orlando ~default_value:(Some (VSet [])) ~ty:(Set String) "tags" "user-specified tags for categorization purposes"; + field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) ~qualifier:RW ~ty:(Map(vm_operations, String)) "blocked_operations" "List of operations which have been explicitly blocked and an error code"; + + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "snapshot_info" "Human-readable information concerning this snapshot"; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VString "")) ~ty:String "snapshot_metadata" "Encoded information about the VM's metadata this is a snapshot of"; + + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VRef "")) 
~ty:(Ref _vm) "parent" "Ref pointing to the parent of this VM"; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~ty:(Set (Ref _vm)) "children" "List pointing to all the children of this VM"; + + field ~qualifier:DynamicRO ~in_product_since:rel_midnight_ride ~default_value:(Some (VMap [])) ~ty:(Map (String,String)) "bios_strings" "BIOS strings"; + field ~writer_roles:_R_VM_POWER_ADMIN ~qualifier:StaticRO ~lifecycle:[Published, rel_cowley, ""; Deprecated, rel_clearwater, "The VMPR feature was removed"] ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _vmpp) "protection_policy" "Ref pointing to a protection policy for this VM"; + field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~lifecycle:[Published, rel_cowley, ""; Deprecated, rel_clearwater, "The VMPR feature was removed"] ~default_value:(Some (VBool false)) ~ty:Bool "is_snapshot_from_vmpp" "true if this snapshot was created by the protection policy"; + field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~ty:(Ref _vm_appliance) ~default_value:(Some (VRef (Ref.string_of Ref.null))) "appliance" "the appliance to which this VM belongs"; + field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "start_delay" "The delay to wait before proceeding to the next order in the startup sequence (seconds)"; + field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "shutdown_delay" "The delay to wait before proceeding to the next order in the shutdown sequence (seconds)"; + field ~writer_roles:_R_POOL_OP ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "order" "The point in the startup or shutdown sequence at which this VM will be started"; + field ~qualifier:DynamicRO ~lifecycle:[Published, rel_boston, ""] ~ty:(Set (Ref _vgpu)) "VGPUs" "Virtual GPUs"; + field ~qualifier:DynamicRO ~lifecycle:[Published, 
rel_boston, ""] ~ty:(Set (Ref _pci)) "attached_PCIs" "Currently passed-through PCI devices"; + field ~writer_roles:_R_VM_ADMIN ~qualifier:RW ~in_product_since:rel_boston ~default_value:(Some (VRef (Ref.string_of Ref.null))) ~ty:(Ref _sr) "suspend_SR" "The SR on which a suspend image is stored"; + field ~qualifier:StaticRO ~in_product_since:rel_boston ~default_value:(Some (VInt 0L)) ~ty:Int "version" "The number of times this VM has been recovered"; + field ~qualifier:StaticRO ~in_product_since:rel_clearwater ~default_value:(Some (VString "0:0")) ~ty:(String) "generation_id" "Generation ID of the VM"; + field ~writer_roles:_R_VM_ADMIN ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VInt 0L)) ~ty:Int "hardware_platform_version" "The host virtual hardware platform version the VM can run on"; + field ~qualifier:StaticRO ~lifecycle:[Published, rel_dundee, ""] ~doc_tags:[Windows] ~default_value:(Some (VCustom (String.concat "\n" [ + "(try Rpc.Bool ("; + "let pool = List.hd (Db_actions.DB_Action.Pool.get_all ~__context) in"; + "let restrictions = Db_actions.DB_Action.Pool.get_restrictions ~__context ~self:pool in "; + "let vendor_device_allowed = try List.assoc \"restrict_pci_device_for_auto_update\" restrictions = \"false\" with _ -> false in"; + "let policy_says_its_ok = not (Db_actions.DB_Action.Pool.get_policy_no_vendor_device ~__context ~self:pool) in"; + "vendor_device_allowed && policy_says_its_ok) with e -> D.error \"Failure when defaulting has_vendor_device field: %s\" (Printexc.to_string e); Rpc.Bool false)"], VBool false))) + ~ty:Bool "has_vendor_device" "When an HVM guest starts, this controls the presence of the emulated C000 PCI device which triggers Windows Update to fetch or update PV drivers."; + field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_ely, ""] ~default_value:(Some (VBool false)) + "requires_reboot" "Indicates whether a VM requires a reboot in order to update its configuration, e.g. its memory allocation." 
+ ]) + () + +let vm_memory_metrics = [ field ~qualifier:DynamicRO ~ty:Int "actual" "Guest's actual memory (bytes)" ~persist:false ] @@ -7468,58 +7468,58 @@ let vm_vcpu_metrics = ] let vm_metrics = - create_obj - ~in_db:true - ~in_product_since:rel_rio - ~in_oss_since:oss_since_303 - ~internal_deprecated_since:None - ~persist:PersistEverything - ~gen_constructor_destructor:false - ~name:_vm_metrics - ~descr:"The metrics associated with a VM" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_VM_ADMIN - ~messages:[] - ~contents: - [ uid _vm_metrics - ; namespace ~name:"memory" ~contents:vm_memory_metrics () - ; namespace ~name:"VCPUs" ~contents:vm_vcpu_metrics () - ; field ~qualifier:DynamicRO ~ty:(Set (String)) - "state" "The state of the guest, eg blocked, dying etc" - ~persist:false - ; field ~qualifier:DynamicRO ~ty:DateTime - "start_time" "Time at which this VM was last booted" - ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime - "install_time" "Time at which the VM was installed" - ; field ~qualifier:DynamicRO ~ty:DateTime - "last_updated" "Time at which this information was last updated" - ~persist:false - ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) - ~ty:(Map(String, String)) - "other_config" "additional configuration" - ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO - "hvm" "hardware virtual machine" - ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO - "nested_virt" "VM supports nested virtualisation" - ~persist:false - ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) - ~ty:Bool ~qualifier:DynamicRO - "nomigrate" "VM is immobile and can't migrate between hosts" - ~persist:false - ] - () + create_obj + ~in_db:true + ~in_product_since:rel_rio + ~in_oss_since:oss_since_303 + ~internal_deprecated_since:None + ~persist:PersistEverything + 
~gen_constructor_destructor:false + ~name:_vm_metrics + ~descr:"The metrics associated with a VM" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_VM_ADMIN + ~messages:[] + ~contents: + [ uid _vm_metrics + ; namespace ~name:"memory" ~contents:vm_memory_metrics () + ; namespace ~name:"VCPUs" ~contents:vm_vcpu_metrics () + ; field ~qualifier:DynamicRO ~ty:(Set (String)) + "state" "The state of the guest, eg blocked, dying etc" + ~persist:false + ; field ~qualifier:DynamicRO ~ty:DateTime + "start_time" "Time at which this VM was last booted" + ; field ~in_oss_since:None ~qualifier:DynamicRO ~ty:DateTime + "install_time" "Time at which the VM was installed" + ; field ~qualifier:DynamicRO ~ty:DateTime + "last_updated" "Time at which this information was last updated" + ~persist:false + ; field ~in_product_since:rel_orlando ~default_value:(Some (VMap [])) + ~ty:(Map(String, String)) + "other_config" "additional configuration" + ~persist:false + ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) + ~ty:Bool ~qualifier:DynamicRO + "hvm" "hardware virtual machine" + ~persist:false + ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) + ~ty:Bool ~qualifier:DynamicRO + "nested_virt" "VM supports nested virtualisation" + ~persist:false + ; field ~in_product_since:rel_ely ~default_value:(Some (VBool false)) + ~ty:Bool ~qualifier:DynamicRO + "nomigrate" "VM is immobile and can't migrate between hosts" + ~persist:false + ] + () let tristate_type = Enum ("tristate_type", -[ - "yes", "Known to be true"; - "no", "Known to be false"; - "unspecified", "Unknown or unspecified"; -]) + [ + "yes", "Known to be true"; + "no", "Known to be false"; + "unspecified", "Unknown or unspecified"; + ]) (* Some of this stuff needs to persist (like PV drivers vsns etc.) so we know about what's likely to be in the VM even when it's off. 
Other things don't need to persist, so we specify these on a per-field basis *) @@ -7531,14 +7531,14 @@ let vm_guest_metrics = ~messages:[] ~contents: [ uid _vm_guest_metrics; field ~qualifier:DynamicRO ~ty:(Map(String, String)) "os_version" "version of the OS"; - field ~qualifier:DynamicRO ~ty:(Map(String, String)) "PV_drivers_version" - "version of the PV drivers"; + field ~qualifier:DynamicRO ~ty:(Map(String, String)) "PV_drivers_version" + "version of the PV drivers"; field ~qualifier:DynamicRO ~ty:Bool ~in_oss_since:None ~lifecycle:[ Published, rel_rio, "true if the PV drivers appear to be up to date"; Deprecated, rel_dundee, "Deprecated in favour of PV_drivers_detected, and redefined in terms of it" ] - "PV_drivers_up_to_date" "Logically equivalent to PV_drivers_detected"; + "PV_drivers_up_to_date" "Logically equivalent to PV_drivers_detected"; field ~qualifier:DynamicRO ~ty:(Map(String, String)) ~lifecycle:[ Published, rel_rio, "free/used/total"; @@ -7568,68 +7568,68 @@ let vmpr_removed = [ Removed, rel_clearwater, "The VMPR feature was removed"; ] let vmpp_protect_now = call ~flags:[`Session] - ~name:"protect_now" - ~lifecycle:vmpr_removed - ~params:[Ref _vmpp, "vmpp", "The protection policy to execute";] - ~doc:"This call executes the protection policy immediately" - ~allowed_roles:_R_POOL_OP - ~result:(String, "An XMLRPC result") - () + ~name:"protect_now" + ~lifecycle:vmpr_removed + ~params:[Ref _vmpp, "vmpp", "The protection policy to execute";] + ~doc:"This call executes the protection policy immediately" + ~allowed_roles:_R_POOL_OP + ~result:(String, "An XMLRPC result") + () let vmpp_archive_now = call ~flags:[`Session] - ~name:"archive_now" - ~lifecycle:vmpr_removed - ~params:[Ref _vm, "snapshot", "The snapshot to archive";] - ~doc:"This call archives the snapshot provided as a parameter" - ~allowed_roles:_R_VM_POWER_ADMIN - ~result:(String, "An XMLRPC result") - () + ~name:"archive_now" + ~lifecycle:vmpr_removed + ~params:[Ref _vm, "snapshot", "The 
snapshot to archive";] + ~doc:"This call archives the snapshot provided as a parameter" + ~allowed_roles:_R_VM_POWER_ADMIN + ~result:(String, "An XMLRPC result") + () let vmpp_create_alert = call ~flags:[`Session] - ~name:"create_alert" - ~lifecycle:vmpr_removed - ~params:[Ref _vmpp, "vmpp", "The protection policy where the alert should be created"; - String, "name", "The name of the message"; - Int, "priority", "The priority of the message"; - String, "body", "The body of the email message"; - String, "data", "The data in xml"; - ] - ~doc:"This call creates an alert for some protection policy" - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~hide_from_docs:true - () + ~name:"create_alert" + ~lifecycle:vmpr_removed + ~params:[Ref _vmpp, "vmpp", "The protection policy where the alert should be created"; + String, "name", "The name of the message"; + Int, "priority", "The priority of the message"; + String, "body", "The body of the email message"; + String, "data", "The data in xml"; + ] + ~doc:"This call creates an alert for some protection policy" + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~hide_from_docs:true + () let vmpp_get_alerts = call ~flags:[`Session] - ~name:"get_alerts" - ~lifecycle:vmpr_removed - ~params:[Ref _vmpp, "vmpp", "The protection policy"; - Int, "hours_from_now", "how many hours in the past the oldest record to fetch is"; - ] - ~doc:"This call fetches a history of alerts for a given protection policy" - ~allowed_roles:_R_POOL_OP - ~result:(Set (String), "A list of alerts encoded in xml") - () + ~name:"get_alerts" + ~lifecycle:vmpr_removed + ~params:[Ref _vmpp, "vmpp", "The protection policy"; + Int, "hours_from_now", "how many hours in the past the oldest record to fetch is"; + ] + ~doc:"This call fetches a history of alerts for a given protection policy" + ~allowed_roles:_R_POOL_OP + ~result:(Set (String), "A list of alerts encoded in xml") + () let vmpp_backup_type = Enum ("vmpp_backup_type", - [ - "snapshot", "The backup is a snapshot"; - "checkpoint", 
"The backup is a checkpoint"; - ]) + [ + "snapshot", "The backup is a snapshot"; + "checkpoint", "The backup is a checkpoint"; + ]) let vmpp_backup_frequency = Enum ("vmpp_backup_frequency", - [ - "hourly", "Hourly backups"; - "daily", "Daily backups"; - "weekly", "Weekly backups"; - ]) + [ + "hourly", "Hourly backups"; + "daily", "Daily backups"; + "weekly", "Weekly backups"; + ]) let vmpp_archive_frequency = Enum ("vmpp_archive_frequency", - [ - "never", "Never archive"; - "always_after_backup", "Archive after backup"; - "daily", "Daily archives"; - "weekly", "Weekly backups"; - ]) + [ + "never", "Never archive"; + "always_after_backup", "Archive after backup"; + "daily", "Daily archives"; + "weekly", "Weekly backups"; + ]) let vmpp_archive_target_type = Enum ("vmpp_archive_target_type", - [ - "none", "No target config"; - "cifs", "CIFS target config"; - "nfs", "NFS target config"; - ]) + [ + "none", "No target config"; + "cifs", "CIFS target config"; + "nfs", "NFS target config"; + ]) let vmpp_schedule_min = "min" let vmpp_schedule_hour = "hour" let vmpp_schedule_days = "days" @@ -7637,206 +7637,206 @@ let vmpp_archive_target_config_location = "location" let vmpp_archive_target_config_username = "username" let vmpp_archive_target_config_password = "password" let vmpp_set_backup_retention_value = call ~flags:[`Session] - ~name:"set_backup_retention_value" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Int, "value", "the value to set" - ] - () + ~name:"set_backup_retention_value" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Int, "value", "the value to set" + ] + () let vmpp_set_is_backup_running = call ~flags:[`Session] - ~name:"set_is_backup_running" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Bool, "value", "true to mark this protection policy's backup is running" - ] - ~doc:"Set the 
value of the is_backup_running field" - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~hide_from_docs:true - () + ~name:"set_is_backup_running" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Bool, "value", "true to mark this protection policy's backup is running" + ] + ~doc:"Set the value of the is_backup_running field" + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~hide_from_docs:true + () let vmpp_set_is_archive_running = call ~flags:[`Session] - ~name:"set_is_archive_running" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Bool, "value", "true to mark this protection policy's archive is running" - ] - ~doc:"Set the value of the is_archive_running field" - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~hide_from_docs:true - () + ~name:"set_is_archive_running" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Bool, "value", "true to mark this protection policy's archive is running" + ] + ~doc:"Set the value of the is_archive_running field" + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~hide_from_docs:true + () let vmpp_set_is_alarm_enabled = call ~flags:[`Session] - ~name:"set_is_alarm_enabled" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Bool, "value", "true if alarm is enabled for this policy" - ] - ~doc:"Set the value of the is_alarm_enabled field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_is_alarm_enabled" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Bool, "value", "true if alarm is enabled for this policy" + ] + ~doc:"Set the value of the is_alarm_enabled field" + ~allowed_roles:_R_POOL_OP + () let vmpp_set_archive_frequency = call ~flags:[`Session] - ~name:"set_archive_frequency" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - vmpp_archive_frequency, "value", "the archive frequency" - ] - ~doc:"Set the value of the archive_frequency field" - 
~allowed_roles:_R_POOL_OP - () + ~name:"set_archive_frequency" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + vmpp_archive_frequency, "value", "the archive frequency" + ] + ~doc:"Set the value of the archive_frequency field" + ~allowed_roles:_R_POOL_OP + () let vmpp_set_archive_target_type = call ~flags:[`Session] - ~name:"set_archive_target_type" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - vmpp_archive_target_type, "value", "the archive target config type" - ] - ~doc:"Set the value of the archive_target_config_type field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_archive_target_type" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + vmpp_archive_target_type, "value", "the archive target config type" + ] + ~doc:"Set the value of the archive_target_config_type field" + ~allowed_roles:_R_POOL_OP + () let vmpp_set_backup_frequency = call ~flags:[`Session] - ~name:"set_backup_frequency" - ~lifecycle:vmpr_removed - ~params:[ - Ref _vmpp, "self", "The protection policy"; - vmpp_backup_frequency, "value", "the backup frequency" - ] - ~doc:"Set the value of the backup_frequency field" - ~allowed_roles:_R_POOL_OP - () + ~name:"set_backup_frequency" + ~lifecycle:vmpr_removed + ~params:[ + Ref _vmpp, "self", "The protection policy"; + vmpp_backup_frequency, "value", "the backup frequency" + ] + ~doc:"Set the value of the backup_frequency field" + ~allowed_roles:_R_POOL_OP + () let vmpp_set_backup_schedule = call ~flags:[`Session] - ~name:"set_backup_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Map(String,String), "value", "the value to set" - ] - () + ~name:"set_backup_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Map(String,String), "value", "the value to set" + ] + () let 
vmpp_set_archive_target_config = call ~flags:[`Session] - ~name:"set_archive_target_config" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Map(String,String), "value", "the value to set" - ] - () + ~name:"set_archive_target_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Map(String,String), "value", "the value to set" + ] + () let vmpp_set_archive_schedule = call ~flags:[`Session] - ~name:"set_archive_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Map(String,String), "value", "the value to set" - ] - () + ~name:"set_archive_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Map(String,String), "value", "the value to set" + ] + () let vmpp_set_alarm_config = call ~flags:[`Session] - ~name:"set_alarm_config" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - Map(String,String), "value", "the value to set" - ] - () + ~name:"set_alarm_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + Map(String,String), "value", "the value to set" + ] + () let vmpp_set_backup_last_run_time = call ~flags:[`Session] - ~name:"set_backup_last_run_time" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~params:[ - Ref _vmpp, "self", "The protection policy"; - DateTime, "value", "the value to set" - ] - () + ~name:"set_backup_last_run_time" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~params:[ + Ref _vmpp, "self", "The protection policy"; + DateTime, "value", "the value to set" + ] + () let vmpp_set_archive_last_run_time = call ~flags:[`Session] - ~name:"set_archive_last_run_time" - ~lifecycle:vmpr_removed - 
~allowed_roles:_R_LOCAL_ROOT_ONLY - ~params:[ - Ref _vmpp, "self", "The protection policy"; - DateTime, "value", "the value to set" - ] - () + ~name:"set_archive_last_run_time" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~params:[ + Ref _vmpp, "self", "The protection policy"; + DateTime, "value", "the value to set" + ] + () let vmpp_add_to_backup_schedule = call ~flags:[`Session] - ~name:"add_to_backup_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to add"; - String, "value", "the value to add"; - ] - () + ~name:"add_to_backup_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to add"; + String, "value", "the value to add"; + ] + () let vmpp_add_to_archive_target_config = call ~flags:[`Session] - ~name:"add_to_archive_target_config" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to add"; - String, "value", "the value to add"; - ] - () + ~name:"add_to_archive_target_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to add"; + String, "value", "the value to add"; + ] + () let vmpp_add_to_archive_schedule = call ~flags:[`Session] - ~name:"add_to_archive_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to add"; - String, "value", "the value to add"; - ] - () + ~name:"add_to_archive_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to add"; + String, "value", "the value to add"; + ] + () let vmpp_add_to_alarm_config = call ~flags:[`Session] - ~name:"add_to_alarm_config" - 
~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to add"; - String, "value", "the value to add"; - ] - () + ~name:"add_to_alarm_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to add"; + String, "value", "the value to add"; + ] + () let vmpp_remove_from_backup_schedule = call ~flags:[`Session] - ~name:"remove_from_backup_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to remove"; - ] - () + ~name:"remove_from_backup_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to remove"; + ] + () let vmpp_remove_from_archive_target_config = call ~flags:[`Session] - ~name:"remove_from_archive_target_config" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to remove"; - ] - () + ~name:"remove_from_archive_target_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to remove"; + ] + () let vmpp_remove_from_archive_schedule = call ~flags:[`Session] - ~name:"remove_from_archive_schedule" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The protection policy"; - String, "key", "the key to remove"; - ] - () + ~name:"remove_from_archive_schedule" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to remove"; + ] + () let vmpp_remove_from_alarm_config = call ~flags:[`Session] - ~name:"remove_from_alarm_config" - ~lifecycle:vmpr_removed - ~allowed_roles:_R_POOL_OP - ~params:[ - Ref _vmpp, "self", "The 
protection policy"; - String, "key", "the key to remove"; - ] - () + ~name:"remove_from_alarm_config" + ~lifecycle:vmpr_removed + ~allowed_roles:_R_POOL_OP + ~params:[ + Ref _vmpp, "self", "The protection policy"; + String, "key", "the key to remove"; + ] + () let vmpp = create_obj ~in_db:true ~in_oss_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vmpp ~descr:"VM Protection Policy" ~gen_events:true @@ -7895,207 +7895,207 @@ let vmpp = (* VM appliance *) let vm_appliance_operations = Enum ("vm_appliance_operation", - [ - "start", "Start"; - "clean_shutdown", "Clean shutdown"; - "hard_shutdown", "Hard shutdown"; - "shutdown", "Shutdown"; - ]) + [ + "start", "Start"; + "clean_shutdown", "Clean shutdown"; + "hard_shutdown", "Hard shutdown"; + "shutdown", "Shutdown"; + ]) let vm_appliance = - let vm_appliance_start = call - ~name:"start" - ~in_product_since:rel_boston - ~params:[ - Ref _vm_appliance, "self", "The VM appliance"; - Bool, "paused", "Instantiate all VMs belonging to this appliance in paused state if set to true." 
- ] - ~errs:[Api_errors.operation_partially_failed] - ~doc:"Start all VMs in the appliance" - ~allowed_roles:_R_POOL_OP - () in - let vm_appliance_clean_shutdown = call - ~name:"clean_shutdown" - ~in_product_since:rel_boston - ~params:[Ref _vm_appliance, "self", "The VM appliance"] - ~errs:[Api_errors.operation_partially_failed] - ~doc:"Perform a clean shutdown of all the VMs in the appliance" - ~allowed_roles:_R_POOL_OP - () in - let vm_appliance_hard_shutdown = call - ~name:"hard_shutdown" - ~in_product_since:rel_boston - ~params:[Ref _vm_appliance, "self", "The VM appliance"] - ~errs:[Api_errors.operation_partially_failed] - ~doc:"Perform a hard shutdown of all the VMs in the appliance" - ~allowed_roles:_R_POOL_OP - () in - let vm_appliance_shutdown = call - ~name:"shutdown" - ~in_product_since:rel_boston - ~params:[Ref _vm_appliance, "self", "The VM appliance"] - ~errs:[Api_errors.operation_partially_failed] - ~doc:"For each VM in the appliance, try to shut it down cleanly. If this fails, perform a hard shutdown of the VM." - ~allowed_roles:_R_POOL_OP - () in - let vm_appliance_assert_can_be_recovered = call - ~name:"assert_can_be_recovered" - ~in_product_since:rel_boston - ~params:[Ref _vm_appliance, "self", "The VM appliance to recover"; - Ref _session, "session_to", "The session to which the VM appliance is to be recovered."] - ~errs:[Api_errors.vm_requires_sr] - ~doc:"Assert whether all SRs required to recover this VM appliance are available." 
- ~allowed_roles:_R_READ_ONLY - () in - let vm_appliance_get_SRs_required_for_recovery = call - ~name:"get_SRs_required_for_recovery" - ~in_product_since:rel_creedence - ~params:[Ref _vm_appliance , "self" , "The VM appliance for which the required list of SRs has to be recovered."; - Ref _session , "session_to", "The session to which the list of SRs have to be recovered ."] - ~result:(Set(Ref _sr), "refs for SRs required to recover the VM") - ~errs:[] - ~doc:"Get the list of SRs required by the VM appliance to recover." - ~allowed_roles:_R_READ_ONLY - () in - let vm_appliance_recover = call - ~name:"recover" - ~in_product_since:rel_boston - ~params:[Ref _vm_appliance, "self", "The VM appliance to recover"; - Ref _session, "session_to", "The session to which the VM appliance is to be recovered."; - Bool, "force", "Whether the VMs should replace newer versions of themselves."] - ~errs:[Api_errors.vm_requires_sr] - ~doc:"Recover the VM appliance" - ~allowed_roles:_R_READ_ONLY - () in - create_obj ~in_db:true ~in_product_since:rel_boston ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vm_appliance ~descr:"VM appliance" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[ - vm_appliance_start; - vm_appliance_clean_shutdown; - vm_appliance_hard_shutdown; - vm_appliance_shutdown; - vm_appliance_assert_can_be_recovered; - vm_appliance_get_SRs_required_for_recovery; - vm_appliance_recover; - ] - ~contents:([ - uid _vm_appliance; - namespace ~name:"name" ~contents:(names None RW) (); - ] @ (allowed_and_current_operations vm_appliance_operations) @ [ - field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs in this appliance"; - ]) - () + let vm_appliance_start = call + ~name:"start" + ~in_product_since:rel_boston + ~params:[ + Ref _vm_appliance, "self", "The VM appliance"; + Bool, "paused", "Instantiate all VMs belonging to this appliance in paused state if 
set to true." + ] + ~errs:[Api_errors.operation_partially_failed] + ~doc:"Start all VMs in the appliance" + ~allowed_roles:_R_POOL_OP + () in + let vm_appliance_clean_shutdown = call + ~name:"clean_shutdown" + ~in_product_since:rel_boston + ~params:[Ref _vm_appliance, "self", "The VM appliance"] + ~errs:[Api_errors.operation_partially_failed] + ~doc:"Perform a clean shutdown of all the VMs in the appliance" + ~allowed_roles:_R_POOL_OP + () in + let vm_appliance_hard_shutdown = call + ~name:"hard_shutdown" + ~in_product_since:rel_boston + ~params:[Ref _vm_appliance, "self", "The VM appliance"] + ~errs:[Api_errors.operation_partially_failed] + ~doc:"Perform a hard shutdown of all the VMs in the appliance" + ~allowed_roles:_R_POOL_OP + () in + let vm_appliance_shutdown = call + ~name:"shutdown" + ~in_product_since:rel_boston + ~params:[Ref _vm_appliance, "self", "The VM appliance"] + ~errs:[Api_errors.operation_partially_failed] + ~doc:"For each VM in the appliance, try to shut it down cleanly. If this fails, perform a hard shutdown of the VM." + ~allowed_roles:_R_POOL_OP + () in + let vm_appliance_assert_can_be_recovered = call + ~name:"assert_can_be_recovered" + ~in_product_since:rel_boston + ~params:[Ref _vm_appliance, "self", "The VM appliance to recover"; + Ref _session, "session_to", "The session to which the VM appliance is to be recovered."] + ~errs:[Api_errors.vm_requires_sr] + ~doc:"Assert whether all SRs required to recover this VM appliance are available." 
+ ~allowed_roles:_R_READ_ONLY + () in + let vm_appliance_get_SRs_required_for_recovery = call + ~name:"get_SRs_required_for_recovery" + ~in_product_since:rel_creedence + ~params:[Ref _vm_appliance , "self" , "The VM appliance for which the required list of SRs has to be recovered."; + Ref _session , "session_to", "The session to which the list of SRs have to be recovered ."] + ~result:(Set(Ref _sr), "refs for SRs required to recover the VM") + ~errs:[] + ~doc:"Get the list of SRs required by the VM appliance to recover." + ~allowed_roles:_R_READ_ONLY + () in + let vm_appliance_recover = call + ~name:"recover" + ~in_product_since:rel_boston + ~params:[Ref _vm_appliance, "self", "The VM appliance to recover"; + Ref _session, "session_to", "The session to which the VM appliance is to be recovered."; + Bool, "force", "Whether the VMs should replace newer versions of themselves."] + ~errs:[Api_errors.vm_requires_sr] + ~doc:"Recover the VM appliance" + ~allowed_roles:_R_READ_ONLY + () in + create_obj ~in_db:true ~in_product_since:rel_boston ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:true ~name:_vm_appliance ~descr:"VM appliance" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages:[ + vm_appliance_start; + vm_appliance_clean_shutdown; + vm_appliance_hard_shutdown; + vm_appliance_shutdown; + vm_appliance_assert_can_be_recovered; + vm_appliance_get_SRs_required_for_recovery; + vm_appliance_recover; + ] + ~contents:([ + uid _vm_appliance; + namespace ~name:"name" ~contents:(names None RW) (); + ] @ (allowed_and_current_operations vm_appliance_operations) @ [ + field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) "VMs" "all VMs in this appliance"; + ]) + () (* DR_task *) let dr_task = - let create = call - ~name:"create" - ~in_product_since:rel_boston - ~params:[ - String, "type", "The SR driver type of the SRs to introduce"; - Map(String, String), "device_config", "The device 
configuration of the SRs to introduce"; - Set(String), "whitelist", "The devices to use for disaster recovery" - ] - ~result:(Ref _dr_task, "The reference to the created task") - ~doc:"Create a disaster recovery task which will query the supplied list of devices" - ~allowed_roles:_R_POOL_OP - () in - let destroy = call - ~name:"destroy" - ~in_product_since:rel_boston - ~params:[ - Ref _dr_task, "self", "The disaster recovery task to destroy" - ] - ~doc:"Destroy the disaster recovery task, detaching and forgetting any SRs introduced which are no longer required" - ~allowed_roles:_R_POOL_OP - () in - create_obj - ~in_db:true - ~in_product_since:rel_boston - ~in_oss_since:None - ~internal_deprecated_since:None - ~persist:PersistEverything - ~gen_constructor_destructor:false - ~name:_dr_task - ~descr:"DR task" - ~gen_events:true - ~doccomments:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~messages:[create; destroy] - ~contents:[ - uid _dr_task; - field ~qualifier:DynamicRO ~ty:(Set (Ref _sr)) "introduced_SRs" "All SRs introduced by this appliance"; - ] - () + let create = call + ~name:"create" + ~in_product_since:rel_boston + ~params:[ + String, "type", "The SR driver type of the SRs to introduce"; + Map(String, String), "device_config", "The device configuration of the SRs to introduce"; + Set(String), "whitelist", "The devices to use for disaster recovery" + ] + ~result:(Ref _dr_task, "The reference to the created task") + ~doc:"Create a disaster recovery task which will query the supplied list of devices" + ~allowed_roles:_R_POOL_OP + () in + let destroy = call + ~name:"destroy" + ~in_product_since:rel_boston + ~params:[ + Ref _dr_task, "self", "The disaster recovery task to destroy" + ] + ~doc:"Destroy the disaster recovery task, detaching and forgetting any SRs introduced which are no longer required" + ~allowed_roles:_R_POOL_OP + () in + create_obj + ~in_db:true + ~in_product_since:rel_boston + ~in_oss_since:None + ~internal_deprecated_since:None + 
~persist:PersistEverything + ~gen_constructor_destructor:false + ~name:_dr_task + ~descr:"DR task" + ~gen_events:true + ~doccomments:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~messages:[create; destroy] + ~contents:[ + uid _dr_task; + field ~qualifier:DynamicRO ~ty:(Set (Ref _sr)) "introduced_SRs" "All SRs introduced by this appliance"; + ] + () (** events handling: *) let event_operation = Enum ("event_operation", - [ "add", "An object has been created"; - "del", "An object has been deleted"; + [ "add", "An object has been created"; + "del", "An object has been deleted"; "mod", "An object has been modified"]) let event = let register = call - ~name:"register" - ~in_product_since:rel_rio - ~params:[Set String, "classes", "register for events for the indicated classes"] - ~doc:"Registers this session with the event system. Specifying * as the desired class will register for all classes." - ~allowed_roles:_R_ALL - () in + ~name:"register" + ~in_product_since:rel_rio + ~params:[Set String, "classes", "register for events for the indicated classes"] + ~doc:"Registers this session with the event system. Specifying * as the desired class will register for all classes." + ~allowed_roles:_R_ALL + () in let unregister = call - ~name:"unregister" - ~in_product_since:rel_rio - ~params:[Set String, "classes", "remove this session's registration for the indicated classes"] - ~doc:"Unregisters this session with the event system" - ~allowed_roles:_R_ALL - () in + ~name:"unregister" + ~in_product_since:rel_rio + ~params:[Set String, "classes", "remove this session's registration for the indicated classes"] + ~doc:"Unregisters this session with the event system" + ~allowed_roles:_R_ALL + () in let next = call - ~name:"next" ~params:[] - ~in_product_since:rel_rio - ~doc:"Blocking call which returns a (possibly empty) batch of events. This method is only recommended for legacy use. New development should use event.from which supercedes this method. 
" - ~custom_marshaller:true - ~flags:[`Session] - ~result:(Set (Record _event), "the batch of events") - ~errs:[Api_errors.session_not_registered;Api_errors.events_lost] - ~allowed_roles:_R_ALL + ~name:"next" ~params:[] + ~in_product_since:rel_rio + ~doc:"Blocking call which returns a (possibly empty) batch of events. This method is only recommended for legacy use. New development should use event.from which supercedes this method. " + ~custom_marshaller:true + ~flags:[`Session] + ~result:(Set (Record _event), "the batch of events") + ~errs:[Api_errors.session_not_registered;Api_errors.events_lost] + ~allowed_roles:_R_ALL () in let from = call - ~name:"from" - ~params:[Set String, "classes", "register for events for the indicated classes"; - String, "token", "A token representing the point from which to generate database events. The empty string represents the beginning."; - Float, "timeout", "Return after this many seconds if no events match"; - ] - ~in_product_since:rel_boston - ~doc:"Blocking call which returns a new token and a (possibly empty) batch of events. The returned token can be used in subsequent calls to this function." - ~custom_marshaller:true - ~flags:[`Session] - ~result:(Set (Record _event), "the batch of events") - ~errs:[Api_errors.session_not_registered;Api_errors.events_lost] - ~allowed_roles:_R_ALL + ~name:"from" + ~params:[Set String, "classes", "register for events for the indicated classes"; + String, "token", "A token representing the point from which to generate database events. The empty string represents the beginning."; + Float, "timeout", "Return after this many seconds if no events match"; + ] + ~in_product_since:rel_boston + ~doc:"Blocking call which returns a new token and a (possibly empty) batch of events. The returned token can be used in subsequent calls to this function." 
+ ~custom_marshaller:true + ~flags:[`Session] + ~result:(Set (Record _event), "the batch of events") + ~errs:[Api_errors.session_not_registered;Api_errors.events_lost] + ~allowed_roles:_R_ALL () in let get_current_id = call - ~name:"get_current_id" ~params:[] - ~in_product_since:rel_rio - ~doc:"Return the ID of the next event to be generated by the system" - ~flags:[`Session] - ~result:(Int, "the event ID") - ~allowed_roles:_R_ALL - () in + ~name:"get_current_id" ~params:[] + ~in_product_since:rel_rio + ~doc:"Return the ID of the next event to be generated by the system" + ~flags:[`Session] + ~result:(Int, "the event ID") + ~allowed_roles:_R_ALL + () in let inject = call - ~name:"inject" ~params:[ - String, "class", "class of the object"; - String, "ref", "A reference to the object that will be changed."; - ] - ~in_product_since:rel_tampa - ~doc:"Injects an artificial event on the given object and return the corresponding ID" - ~flags:[`Session] - ~result:(String, "the event ID") - ~allowed_roles:_R_ALL - () in + ~name:"inject" ~params:[ + String, "class", "class of the object"; + String, "ref", "A reference to the object that will be changed."; + ] + ~in_product_since:rel_tampa + ~doc:"Injects an artificial event on the given object and return the corresponding ID" + ~flags:[`Session] + ~result:(String, "the event ID") + ~allowed_roles:_R_ALL + () in (* !!! This should call create_obj ~in_db:true like everything else... !!! *) { obj_lifecycle=[]; @@ -8125,25 +8125,25 @@ let event = (** Blobs - binary blobs of data *) -let blob = +let blob = let create = call - ~name:"create" - ~in_product_since:rel_orlando - ~versioned_params: - [{param_type=String; param_name="mime_type"; param_doc="The mime-type of the blob. 
Defaults to 'application/octet-stream' if the empty string is supplied"; param_release=orlando_release; param_default=None}; - {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)}] - ~doc:"Create a placeholder for a binary blob" - ~flags:[`Session] - ~result:(Ref _blob, "The reference to the created blob") - ~allowed_roles:_R_POOL_OP - () in + ~name:"create" + ~in_product_since:rel_orlando + ~versioned_params: + [{param_type=String; param_name="mime_type"; param_doc="The mime-type of the blob. Defaults to 'application/octet-stream' if the empty string is supplied"; param_release=orlando_release; param_default=None}; + {param_type=Bool; param_name="public"; param_doc="True if the blob should be publicly available"; param_release=tampa_release; param_default=Some (VBool false)}] + ~doc:"Create a placeholder for a binary blob" + ~flags:[`Session] + ~result:(Ref _blob, "The reference to the created blob") + ~allowed_roles:_R_POOL_OP + () in let destroy = call - ~name:"destroy" - ~in_product_since:rel_orlando - ~params:[Ref _blob, "self", "The reference of the blob to destroy"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () in + ~name:"destroy" + ~in_product_since:rel_orlando + ~params:[Ref _blob, "self", "The reference of the blob to destroy"] + ~flags:[`Session] + ~allowed_roles:_R_POOL_OP + () in create_obj ~in_db:true ~in_product_since:rel_orlando ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_blob ~descr:"A placeholder for a binary blob" ~gen_events:true ~doccomments:[] @@ -8152,7 +8152,7 @@ let blob = [ uid _blob; namespace ~name:"name" ~contents:(names oss_since_303 RW) (); field ~qualifier:DynamicRO ~ty:Int "size" "Size of the binary data, in bytes"; - field ~writer_roles:_R_POOL_OP ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VBool false)) ~ty:Bool "public" 
"True if the blob is publicly accessible"; + field ~writer_roles:_R_POOL_OP ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VBool false)) ~ty:Bool "public" "True if the blob is publicly accessible"; field ~qualifier:StaticRO ~ty:DateTime "last_updated" "Time at which the data in the blob was last updated"; field ~qualifier:StaticRO ~ty:String "mime_type" "The mime type associated with this object. Defaults to 'application/octet-stream' if the empty string is supplied"] () @@ -8161,96 +8161,96 @@ let message = let cls = Enum ("cls", [ "VM", "VM"; "Host", "Host"; - "SR", "SR"; - "Pool","Pool"; - "VMPP","VMPP"; - ]) + "SR", "SR"; + "Pool","Pool"; + "VMPP","VMPP"; + ]) in let create = call - ~name:"create" - ~in_product_since:rel_orlando - ~params:[String, "name", "The name of the message"; - Int, "priority", "The priority of the message"; - cls, "cls", "The class of object this message is associated with"; - String, "obj_uuid", "The uuid of the object this message is associated with"; - String, "body", "The body of the message"] - ~flags:[`Session] - ~result:(Ref _message, "The reference of the created message") - ~allowed_roles:_R_POOL_OP - () + ~name:"create" + ~in_product_since:rel_orlando + ~params:[String, "name", "The name of the message"; + Int, "priority", "The priority of the message"; + cls, "cls", "The class of object this message is associated with"; + String, "obj_uuid", "The uuid of the object this message is associated with"; + String, "body", "The body of the message"] + ~flags:[`Session] + ~result:(Ref _message, "The reference of the created message") + ~allowed_roles:_R_POOL_OP + () in let destroy = call - ~name:"destroy" - ~in_product_since:rel_orlando - ~params:[Ref _message, "self", "The reference of the message to destroy"] - ~flags:[`Session] - ~allowed_roles:_R_POOL_OP - () + ~name:"destroy" + ~in_product_since:rel_orlando + ~params:[Ref _message, "self", "The reference of the message to destroy"] + ~flags:[`Session] + 
~allowed_roles:_R_POOL_OP + () in - let get_all = call - ~name:"get_all" - ~in_product_since:rel_orlando - ~params:[] - ~flags:[`Session] - ~result:(Set(Ref _message), "The references to the messages") - ~allowed_roles:_R_READ_ONLY - () + let get_all = call + ~name:"get_all" + ~in_product_since:rel_orlando + ~params:[] + ~flags:[`Session] + ~result:(Set(Ref _message), "The references to the messages") + ~allowed_roles:_R_READ_ONLY + () in let get = call - ~name:"get" - ~in_product_since:rel_orlando - ~params:[cls, "cls", "The class of object"; - String, "obj_uuid", "The uuid of the object"; - DateTime, "since", "The cutoff time"] - ~flags:[`Session] - ~result:(Map(Ref _message, Record _message), "The relevant messages") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get" + ~in_product_since:rel_orlando + ~params:[cls, "cls", "The class of object"; + String, "obj_uuid", "The uuid of the object"; + DateTime, "since", "The cutoff time"] + ~flags:[`Session] + ~result:(Map(Ref _message, Record _message), "The relevant messages") + ~allowed_roles:_R_READ_ONLY + () in let get_since = call - ~name:"get_since" - ~in_product_since:rel_orlando - ~params:[DateTime, "since", "The cutoff time"] - ~flags:[`Session] - ~result:(Map(Ref _message, Record _message), "The relevant messages") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_since" + ~in_product_since:rel_orlando + ~params:[DateTime, "since", "The cutoff time"] + ~flags:[`Session] + ~result:(Map(Ref _message, Record _message), "The relevant messages") + ~allowed_roles:_R_READ_ONLY + () in let get_by_uuid = call - ~name:"get_by_uuid" - ~in_product_since:rel_orlando - ~params:[String, "uuid", "The uuid of the message"] - ~flags:[`Session] - ~result:(Ref _message, "The message reference") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_by_uuid" + ~in_product_since:rel_orlando + ~params:[String, "uuid", "The uuid of the message"] + ~flags:[`Session] + ~result:(Ref _message, "The message reference") + ~allowed_roles:_R_READ_ONLY 
+ () in let get_record = call - ~name:"get_record" - ~in_product_since:rel_orlando - ~params:[Ref _message, "self", "The reference to the message"] - ~flags:[`Session] - ~result:(Record _message, "The message record") - ~allowed_roles:_R_READ_ONLY - () + ~name:"get_record" + ~in_product_since:rel_orlando + ~params:[Ref _message, "self", "The reference to the message"] + ~flags:[`Session] + ~result:(Record _message, "The message record") + ~allowed_roles:_R_READ_ONLY + () in - let get_all_records = call - ~name:"get_all_records" - ~in_product_since:rel_orlando - ~params:[] - ~flags:[`Session] - ~result:(Map(Ref _message, Record _message), "The messages") - ~allowed_roles:_R_READ_ONLY - () + let get_all_records = call + ~name:"get_all_records" + ~in_product_since:rel_orlando + ~params:[] + ~flags:[`Session] + ~result:(Map(Ref _message, Record _message), "The messages") + ~allowed_roles:_R_READ_ONLY + () in - let get_all_records_where = call - ~name:"get_all_records_where" - ~in_product_since:rel_orlando - ~params:[String, "expr", "The expression to match (not currently used)"] - ~flags:[`Session] - ~result:(Map(Ref _message, Record _message), "The messages") - ~allowed_roles:_R_READ_ONLY - () + let get_all_records_where = call + ~name:"get_all_records_where" + ~in_product_since:rel_orlando + ~params:[String, "expr", "The expression to match (not currently used)"] + ~flags:[`Session] + ~result:(Map(Ref _message, Record _message), "The messages") + ~allowed_roles:_R_READ_ONLY + () in create_obj ~in_db:false ~in_product_since:rel_orlando ~in_oss_since:None ~persist:PersistNothing ~gen_constructor_destructor:false ~name:_message ~descr:"An message for the attention of the administrator" ~gen_events:true ~doccomments:[] ~internal_deprecated_since:None @@ -8264,43 +8264,43 @@ let message = field ~qualifier:DynamicRO ~ty:DateTime "timestamp" "The time at which the message was created"; field ~qualifier:DynamicRO ~ty:String "body" "The body of the message"; ] () - + let 
secret = - let introduce = call - ~name:"introduce" - ~in_product_since:rel_midnight_ride - ~versioned_params:[ - {param_type=String; param_name="uuid"; param_doc=""; param_release=midnight_ride_release; param_default=None}; - {param_type=String; param_name="value"; param_doc=""; param_release=midnight_ride_release; param_default=None}; - {param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])} - ] - ~flags:[`Session] - ~result:(Ref _secret, "") - ~secret:true - ~hide_from_docs:true - ~allowed_roles:_R_POOL_OP - () - in - create_obj - ~descr:"A secret" - ~doccomments:[] - ~gen_constructor_destructor:true - ~gen_events:false - ~in_db:true - ~in_oss_since:None - ~in_product_since:rel_midnight_ride - ~internal_deprecated_since:None - ~messages:[introduce] - ~messages_default_allowed_roles:_R_POOL_OP - ~implicit_messages_allowed_roles:_R_POOL_OP - ~name:_secret - ~persist:PersistEverything - ~contents: - [ uid ~reader_roles:_R_POOL_OP _secret - ; field ~reader_roles:_R_POOL_OP ~qualifier:RW ~ty:String "value" "the secret" - ; field ~qualifier:RW ~ty:(Map (String,String)) "other_config" "other_config" ~default_value:(Some (VMap [])); - ] - () + let introduce = call + ~name:"introduce" + ~in_product_since:rel_midnight_ride + ~versioned_params:[ + {param_type=String; param_name="uuid"; param_doc=""; param_release=midnight_ride_release; param_default=None}; + {param_type=String; param_name="value"; param_doc=""; param_release=midnight_ride_release; param_default=None}; + {param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])} + ] + ~flags:[`Session] + ~result:(Ref _secret, "") + ~secret:true + ~hide_from_docs:true + ~allowed_roles:_R_POOL_OP + () + in + create_obj + ~descr:"A secret" + ~doccomments:[] + ~gen_constructor_destructor:true + ~gen_events:false + ~in_db:true + ~in_oss_since:None + 
~in_product_since:rel_midnight_ride + ~internal_deprecated_since:None + ~messages:[introduce] + ~messages_default_allowed_roles:_R_POOL_OP + ~implicit_messages_allowed_roles:_R_POOL_OP + ~name:_secret + ~persist:PersistEverything + ~contents: + [ uid ~reader_roles:_R_POOL_OP _secret + ; field ~reader_roles:_R_POOL_OP ~qualifier:RW ~ty:String "value" "the secret" + ; field ~qualifier:RW ~ty:(Map (String,String)) "other_config" "other_config" ~default_value:(Some (VMap [])); + ] + () (* @@ -8324,475 +8324,475 @@ let alert = (** PCI devices *) let pci = - create_obj - ~name:_pci - ~descr:"A PCI device" - ~doccomments:[] - ~gen_constructor_destructor:false - ~gen_events:true - ~in_db:true - ~lifecycle:[Published, rel_boston, ""] - ~messages:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~persist:PersistEverything - ~in_oss_since:None - ~contents:[ - uid _pci ~lifecycle:[Published, rel_boston, ""]; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "class_id" "PCI class ID" ~default_value:(Some (VString "")) ~internal_only:true; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "class_name" "PCI class name" ~default_value:(Some (VString "")); - field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "vendor_id" "Vendor ID" ~default_value:(Some (VString "")) ~internal_only:true; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "vendor_name" "Vendor name" ~default_value:(Some (VString "")); - field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "device_id" "Device ID" ~default_value:(Some (VString "")) ~internal_only:true; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "device_name" "Device name" ~default_value:(Some (VString "")); - field ~qualifier:StaticRO ~ty:(Ref _host) ~lifecycle:[Published, rel_boston, ""] "host" "Physical machine that owns the PCI device" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, 
rel_boston, ""] "pci_id" "PCI ID of the physical device" ~default_value:(Some (VString "")); - field ~qualifier:DynamicRO ~ty:Int ~lifecycle:[] ~default_value:(Some (VInt 1L)) "functions" "Number of physical + virtual PCI functions" ~internal_only:true; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) ~lifecycle:[] "attached_VMs" - "VMs that currently have a function of this PCI device passed-through to them" ~internal_only:true; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pci)) ~lifecycle:[Published, rel_boston, ""] "dependencies" "List of dependent PCI devices" ~ignore_foreign_key:true; - field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); - field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "subsystem_vendor_id" "Subsystem vendor ID" ~default_value:(Some (VString "")) ~internal_only:true; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_clearwater_whetstone, ""] "subsystem_vendor_name" "Subsystem vendor name" ~default_value:(Some (VString "")); - field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "subsystem_device_id" "Subsystem device ID" ~default_value:(Some (VString "")) ~internal_only:true; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_clearwater_whetstone, ""] "subsystem_device_name" "Subsystem device name" ~default_value:(Some (VString "")); - ] - () + create_obj + ~name:_pci + ~descr:"A PCI device" + ~doccomments:[] + ~gen_constructor_destructor:false + ~gen_events:true + ~in_db:true + ~lifecycle:[Published, rel_boston, ""] + ~messages:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~persist:PersistEverything + ~in_oss_since:None + ~contents:[ + uid _pci ~lifecycle:[Published, rel_boston, ""]; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "class_id" "PCI class ID" ~default_value:(Some (VString "")) ~internal_only:true; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] 
"class_name" "PCI class name" ~default_value:(Some (VString "")); + field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "vendor_id" "Vendor ID" ~default_value:(Some (VString "")) ~internal_only:true; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "vendor_name" "Vendor name" ~default_value:(Some (VString "")); + field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "device_id" "Device ID" ~default_value:(Some (VString "")) ~internal_only:true; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "device_name" "Device name" ~default_value:(Some (VString "")); + field ~qualifier:StaticRO ~ty:(Ref _host) ~lifecycle:[Published, rel_boston, ""] "host" "Physical machine that owns the PCI device" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_boston, ""] "pci_id" "PCI ID of the physical device" ~default_value:(Some (VString "")); + field ~qualifier:DynamicRO ~ty:Int ~lifecycle:[] ~default_value:(Some (VInt 1L)) "functions" "Number of physical + virtual PCI functions" ~internal_only:true; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vm)) ~lifecycle:[] "attached_VMs" + "VMs that currently have a function of this PCI device passed-through to them" ~internal_only:true; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pci)) ~lifecycle:[Published, rel_boston, ""] "dependencies" "List of dependent PCI devices" ~ignore_foreign_key:true; + field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); + field ~qualifier:StaticRO ~ty:String ~lifecycle:[] "subsystem_vendor_id" "Subsystem vendor ID" ~default_value:(Some (VString "")) ~internal_only:true; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_clearwater_whetstone, ""] "subsystem_vendor_name" "Subsystem vendor name" ~default_value:(Some (VString "")); + field ~qualifier:StaticRO ~ty:String 
~lifecycle:[] "subsystem_device_id" "Subsystem device ID" ~default_value:(Some (VString "")) ~internal_only:true; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_clearwater_whetstone, ""] "subsystem_device_name" "Subsystem device name" ~default_value:(Some (VString "")); + ] + () (** Physical GPUs (pGPU) *) let pgpu_dom0_access = - Enum ("pgpu_dom0_access", [ - "enabled", "dom0 can access this device as normal"; - "disable_on_reboot", "On host reboot dom0 will be blocked from accessing this device"; - "disabled", "dom0 cannot access this device"; - "enable_on_reboot", "On host reboot dom0 will be allowed to access this device"; - ]) + Enum ("pgpu_dom0_access", [ + "enabled", "dom0 can access this device as normal"; + "disable_on_reboot", "On host reboot dom0 will be blocked from accessing this device"; + "disabled", "dom0 cannot access this device"; + "enable_on_reboot", "On host reboot dom0 will be allowed to access this device"; + ]) let pgpu = - let add_enabled_VGPU_types = call - ~name:"add_enabled_VGPU_types" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~versioned_params:[ - { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU to which we are adding an enabled VGPU type"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - { - param_type = (Ref _vgpu_type); - param_name = "value"; - param_doc = "The VGPU type to enable"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - ] - ~allowed_roles:_R_POOL_OP - () - in - let remove_enabled_VGPU_types = call - ~name:"remove_enabled_VGPU_types" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~versioned_params:[ - { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU from which we are removing an enabled VGPU type"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - { - param_type = (Ref _vgpu_type); - param_name = "value"; - param_doc = "The VGPU type to disable"; - 
param_release = vgpu_tech_preview_release; - param_default = None; - }; - ] - ~allowed_roles:_R_POOL_OP - () - in - let set_enabled_VGPU_types = call - ~name:"set_enabled_VGPU_types" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~versioned_params:[ - { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU on which we are enabling a set of VGPU types"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - { - param_type = Set (Ref _vgpu_type); - param_name = "value"; - param_doc = "The VGPU types to enable"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - ] - ~allowed_roles:_R_POOL_OP - () - in - let set_GPU_group = call - ~name:"set_GPU_group" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~versioned_params:[ - {param_type=(Ref _pgpu); param_name="self"; param_doc="The PGPU to move to a new group"; param_release=vgpu_tech_preview_release; param_default=None}; - {param_type=(Ref _gpu_group); param_name="value"; param_doc="The group to which the PGPU will be moved"; param_release=vgpu_tech_preview_release; param_default=None}; - ] - ~allowed_roles:_R_POOL_OP - () - in - let get_remaining_capacity = call - ~name:"get_remaining_capacity" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~versioned_params:[ - { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU to query"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - { - param_type = (Ref _vgpu_type); - param_name = "vgpu_type"; - param_doc = "The VGPU type for which we want to find the number of VGPUs which can still be started on this PGPU"; - param_release = vgpu_tech_preview_release; - param_default = None; - }; - ] - ~result:(Int, "The number of VGPUs of the specified type which can still be started on this PGPU") - ~allowed_roles:_R_READ_ONLY - () - in - let enable_dom0_access = call - ~name:"enable_dom0_access" - ~lifecycle:[Published, rel_cream, ""] - ~versioned_params:[ 
- { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU to which dom0 will be granted access"; - param_release = cream_release; - param_default = None; - }; - ] - ~result:(pgpu_dom0_access, "The accessibility of this PGPU from dom0") - ~allowed_roles:_R_POOL_OP - () - in - let disable_dom0_access = call - ~name:"disable_dom0_access" - ~lifecycle:[Published, rel_cream, ""] - ~versioned_params:[ - { - param_type = (Ref _pgpu); - param_name = "self"; - param_doc = "The PGPU to which dom0 will be denied access"; - param_release = cream_release; - param_default = None; - }; - ] - ~result:(pgpu_dom0_access, "The accessibility of this PGPU from dom0") - ~allowed_roles:_R_POOL_OP - () - in - create_obj - ~name:_pgpu - ~descr:"A physical GPU (pGPU)" - ~doccomments:[] - ~gen_constructor_destructor:false - ~gen_events:true - ~in_db:true - ~lifecycle:[Published, rel_boston, ""] - ~messages:[ - add_enabled_VGPU_types; - remove_enabled_VGPU_types; - set_enabled_VGPU_types; - set_GPU_group; - get_remaining_capacity; - enable_dom0_access; - disable_dom0_access; - ] - ~messages_default_allowed_roles:_R_POOL_OP - ~persist:PersistEverything - ~in_oss_since:None - ~contents:[ - uid _pgpu ~lifecycle:[Published, rel_boston, ""]; - field ~qualifier:StaticRO ~ty:(Ref _pci) ~lifecycle:[Published, rel_boston, ""] "PCI" "Link to underlying PCI device" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:StaticRO ~ty:(Ref _gpu_group) ~lifecycle:[Published, rel_boston, ""] "GPU_group" "GPU group the pGPU is contained in" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:DynamicRO ~ty:(Ref _host) ~lifecycle:[Published, rel_boston, ""] "host" "Host that own the GPU" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); - field ~qualifier:DynamicRO ~ty:(Set (Ref 
_vgpu_type)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "supported_VGPU_types" "List of VGPU types supported by the underlying hardware"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "enabled_VGPU_types" "List of VGPU types which have been enabled for this PGPU"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "resident_VGPUs" "List of VGPUs running on this PGPU"; - field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~internal_only:true ~default_value:(Some (VInt Constants.pgpu_default_size)) "size" "Abstract size of this PGPU"; - field ~qualifier:DynamicRO ~ty:(Map (Ref _vgpu_type, Int)) ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VMap [])) "supported_VGPU_max_capacities" "A map relating each VGPU type supported on this GPU to the maximum number of VGPUs of that type which can run simultaneously on this GPU"; - field ~qualifier:DynamicRO ~ty:(pgpu_dom0_access) ~lifecycle:[Published, rel_cream, ""] ~default_value:(Some (VEnum "enabled")) "dom0_access" "The accessibility of this device from dom0"; - field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_cream, ""] ~default_value:(Some (VBool false)) "is_system_display_device" "Is this device the system display device"; - ] - () + let add_enabled_VGPU_types = call + ~name:"add_enabled_VGPU_types" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU to which we are adding an enabled VGPU type"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + { + param_type = (Ref _vgpu_type); + param_name = "value"; + param_doc = "The VGPU type to enable"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + ] + ~allowed_roles:_R_POOL_OP + () + in + let remove_enabled_VGPU_types = call + 
~name:"remove_enabled_VGPU_types" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU from which we are removing an enabled VGPU type"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + { + param_type = (Ref _vgpu_type); + param_name = "value"; + param_doc = "The VGPU type to disable"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + ] + ~allowed_roles:_R_POOL_OP + () + in + let set_enabled_VGPU_types = call + ~name:"set_enabled_VGPU_types" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU on which we are enabling a set of VGPU types"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + { + param_type = Set (Ref _vgpu_type); + param_name = "value"; + param_doc = "The VGPU types to enable"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + ] + ~allowed_roles:_R_POOL_OP + () + in + let set_GPU_group = call + ~name:"set_GPU_group" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~versioned_params:[ + {param_type=(Ref _pgpu); param_name="self"; param_doc="The PGPU to move to a new group"; param_release=vgpu_tech_preview_release; param_default=None}; + {param_type=(Ref _gpu_group); param_name="value"; param_doc="The group to which the PGPU will be moved"; param_release=vgpu_tech_preview_release; param_default=None}; + ] + ~allowed_roles:_R_POOL_OP + () + in + let get_remaining_capacity = call + ~name:"get_remaining_capacity" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU to query"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + { + param_type = (Ref _vgpu_type); + param_name = "vgpu_type"; + param_doc = "The VGPU type for which we 
want to find the number of VGPUs which can still be started on this PGPU"; + param_release = vgpu_tech_preview_release; + param_default = None; + }; + ] + ~result:(Int, "The number of VGPUs of the specified type which can still be started on this PGPU") + ~allowed_roles:_R_READ_ONLY + () + in + let enable_dom0_access = call + ~name:"enable_dom0_access" + ~lifecycle:[Published, rel_cream, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU to which dom0 will be granted access"; + param_release = cream_release; + param_default = None; + }; + ] + ~result:(pgpu_dom0_access, "The accessibility of this PGPU from dom0") + ~allowed_roles:_R_POOL_OP + () + in + let disable_dom0_access = call + ~name:"disable_dom0_access" + ~lifecycle:[Published, rel_cream, ""] + ~versioned_params:[ + { + param_type = (Ref _pgpu); + param_name = "self"; + param_doc = "The PGPU to which dom0 will be denied access"; + param_release = cream_release; + param_default = None; + }; + ] + ~result:(pgpu_dom0_access, "The accessibility of this PGPU from dom0") + ~allowed_roles:_R_POOL_OP + () + in + create_obj + ~name:_pgpu + ~descr:"A physical GPU (pGPU)" + ~doccomments:[] + ~gen_constructor_destructor:false + ~gen_events:true + ~in_db:true + ~lifecycle:[Published, rel_boston, ""] + ~messages:[ + add_enabled_VGPU_types; + remove_enabled_VGPU_types; + set_enabled_VGPU_types; + set_GPU_group; + get_remaining_capacity; + enable_dom0_access; + disable_dom0_access; + ] + ~messages_default_allowed_roles:_R_POOL_OP + ~persist:PersistEverything + ~in_oss_since:None + ~contents:[ + uid _pgpu ~lifecycle:[Published, rel_boston, ""]; + field ~qualifier:StaticRO ~ty:(Ref _pci) ~lifecycle:[Published, rel_boston, ""] "PCI" "Link to underlying PCI device" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + field ~qualifier:StaticRO ~ty:(Ref _gpu_group) ~lifecycle:[Published, rel_boston, ""] "GPU_group" "GPU group the pGPU is contained in" ~default_value:(Some 
(VRef (Ref.string_of Ref.null))); + field ~qualifier:DynamicRO ~ty:(Ref _host) ~lifecycle:[Published, rel_boston, ""] "host" "Host that own the GPU" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "supported_VGPU_types" "List of VGPU types supported by the underlying hardware"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "enabled_VGPU_types" "List of VGPU types which have been enabled for this PGPU"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "resident_VGPUs" "List of VGPUs running on this PGPU"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~internal_only:true ~default_value:(Some (VInt Constants.pgpu_default_size)) "size" "Abstract size of this PGPU"; + field ~qualifier:DynamicRO ~ty:(Map (Ref _vgpu_type, Int)) ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VMap [])) "supported_VGPU_max_capacities" "A map relating each VGPU type supported on this GPU to the maximum number of VGPUs of that type which can run simultaneously on this GPU"; + field ~qualifier:DynamicRO ~ty:(pgpu_dom0_access) ~lifecycle:[Published, rel_cream, ""] ~default_value:(Some (VEnum "enabled")) "dom0_access" "The accessibility of this device from dom0"; + field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_cream, ""] ~default_value:(Some (VBool false)) "is_system_display_device" "Is this device the system display device"; + ] + () (** Groups of GPUs *) let gpu_group = - let create = call - ~name:"create" - ~lifecycle:[Published, rel_boston, ""] - ~versioned_params:[ - {param_type=(String); param_name="name_label"; param_doc=""; 
param_release=boston_release; param_default=Some (VString "")}; - {param_type=(String); param_name="name_description"; param_doc=""; param_release=boston_release; param_default=Some (VString "")}; - {param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])} - ] - ~result:(Ref _gpu_group, "") - ~allowed_roles:_R_POOL_OP - () - in - let destroy = call - ~name:"destroy" - ~lifecycle:[Published, rel_boston, ""] - ~params:[ - Ref _gpu_group, "self", "The vGPU to destroy" - ] - ~allowed_roles:_R_POOL_OP - () - in - let update_enabled_VGPU_types = call - ~name:"update_enabled_VGPU_types" - ~hide_from_docs:true - ~lifecycle:[Published, rel_vgpu_productisation, ""] - ~params:[ - Ref _gpu_group, "self", "The GPU group to update"; - ] - ~allowed_roles:_R_POOL_OP - () - in - let update_supported_VGPU_types = call - ~name:"update_supported_VGPU_types" - ~hide_from_docs:true - ~lifecycle:[Published, rel_vgpu_productisation, ""] - ~params:[ - Ref _gpu_group, "self", "The GPU group to update"; - ] - ~allowed_roles:_R_POOL_OP - () - in - let get_remaining_capacity = call - ~name:"get_remaining_capacity" - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~params:[ - Ref _gpu_group, "self", "The GPU group to query"; - Ref _vgpu_type, "vgpu_type", "The VGPU_type for which the remaining capacity will be calculated"; - ] - ~result:(Int, "The number of VGPUs of the given type which can still be started on the PGPUs in the group") - ~allowed_roles:_R_READ_ONLY - () - in - let allocation_algorithm = - Enum ("allocation_algorithm", - [ "breadth_first", "vGPUs of a given type are allocated evenly across supporting pGPUs."; - "depth_first", "vGPUs of a given type are allocated on supporting pGPUs until they are full."] - ) - in - create_obj - ~name:_gpu_group - ~descr:"A group of compatible GPUs across the resource pool" - ~doccomments:[] - ~gen_constructor_destructor:false - ~gen_events:true - ~in_db:true - 
~lifecycle:[Published, rel_boston, ""] - ~messages:[ - create; - destroy; - update_enabled_VGPU_types; - update_supported_VGPU_types; - get_remaining_capacity; - ] - ~messages_default_allowed_roles:_R_POOL_OP - ~persist:PersistEverything - ~in_oss_since:None - ~contents:[ - uid _gpu_group ~lifecycle:[Published, rel_boston, ""]; - namespace ~name:"name" ~contents:(names None RW ~lifecycle:[Published, rel_boston, ""]) (); - field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_boston, ""] "PGPUs" "List of pGPUs in the group"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_boston, ""] "VGPUs" "List of vGPUs using the group"; - field ~qualifier:DynamicRO ~ty:(Set String) ~lifecycle:[Published, rel_boston, ""] "GPU_types" "List of GPU types (vendor+device ID) that can be in this group" ~default_value:(Some (VSet [])); - field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); - field ~qualifier:RW ~ty:allocation_algorithm ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "allocation_algorithm" "Current allocation of vGPUs to pGPUs for this group" ~default_value:(Some (VEnum "depth_first")); - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "supported_VGPU_types" "vGPU types supported on at least one of the pGPUs in this group"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "enabled_VGPU_types" "vGPU types supported on at least one of the pGPUs in this group"; - ] - () + let create = call + ~name:"create" + ~lifecycle:[Published, rel_boston, ""] + ~versioned_params:[ + {param_type=(String); param_name="name_label"; param_doc=""; param_release=boston_release; param_default=Some (VString "")}; + {param_type=(String); param_name="name_description"; param_doc=""; param_release=boston_release; 
param_default=Some (VString "")}; + {param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])} + ] + ~result:(Ref _gpu_group, "") + ~allowed_roles:_R_POOL_OP + () + in + let destroy = call + ~name:"destroy" + ~lifecycle:[Published, rel_boston, ""] + ~params:[ + Ref _gpu_group, "self", "The vGPU to destroy" + ] + ~allowed_roles:_R_POOL_OP + () + in + let update_enabled_VGPU_types = call + ~name:"update_enabled_VGPU_types" + ~hide_from_docs:true + ~lifecycle:[Published, rel_vgpu_productisation, ""] + ~params:[ + Ref _gpu_group, "self", "The GPU group to update"; + ] + ~allowed_roles:_R_POOL_OP + () + in + let update_supported_VGPU_types = call + ~name:"update_supported_VGPU_types" + ~hide_from_docs:true + ~lifecycle:[Published, rel_vgpu_productisation, ""] + ~params:[ + Ref _gpu_group, "self", "The GPU group to update"; + ] + ~allowed_roles:_R_POOL_OP + () + in + let get_remaining_capacity = call + ~name:"get_remaining_capacity" + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~params:[ + Ref _gpu_group, "self", "The GPU group to query"; + Ref _vgpu_type, "vgpu_type", "The VGPU_type for which the remaining capacity will be calculated"; + ] + ~result:(Int, "The number of VGPUs of the given type which can still be started on the PGPUs in the group") + ~allowed_roles:_R_READ_ONLY + () + in + let allocation_algorithm = + Enum ("allocation_algorithm", + [ "breadth_first", "vGPUs of a given type are allocated evenly across supporting pGPUs."; + "depth_first", "vGPUs of a given type are allocated on supporting pGPUs until they are full."] + ) + in + create_obj + ~name:_gpu_group + ~descr:"A group of compatible GPUs across the resource pool" + ~doccomments:[] + ~gen_constructor_destructor:false + ~gen_events:true + ~in_db:true + ~lifecycle:[Published, rel_boston, ""] + ~messages:[ + create; + destroy; + update_enabled_VGPU_types; + update_supported_VGPU_types; + get_remaining_capacity; + ] + 
~messages_default_allowed_roles:_R_POOL_OP + ~persist:PersistEverything + ~in_oss_since:None + ~contents:[ + uid _gpu_group ~lifecycle:[Published, rel_boston, ""]; + namespace ~name:"name" ~contents:(names None RW ~lifecycle:[Published, rel_boston, ""]) (); + field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_boston, ""] "PGPUs" "List of pGPUs in the group"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_boston, ""] "VGPUs" "List of vGPUs using the group"; + field ~qualifier:DynamicRO ~ty:(Set String) ~lifecycle:[Published, rel_boston, ""] "GPU_types" "List of GPU types (vendor+device ID) that can be in this group" ~default_value:(Some (VSet [])); + field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); + field ~qualifier:RW ~ty:allocation_algorithm ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "allocation_algorithm" "Current allocation of vGPUs to pGPUs for this group" ~default_value:(Some (VEnum "depth_first")); + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "supported_VGPU_types" "vGPU types supported on at least one of the pGPUs in this group"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu_type)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "enabled_VGPU_types" "vGPU types supported on at least one of the pGPUs in this group"; + ] + () (** Virtual GPUs (vGPU) *) let vgpu = - let create = call - ~name:"create" - ~lifecycle:[Published, rel_boston, ""] - ~versioned_params:[ - {param_type=(Ref _vm); param_name="VM"; param_doc=""; param_release=boston_release; param_default=None}; - {param_type=(Ref _gpu_group); param_name="GPU_group"; param_doc=""; param_release=boston_release; param_default=None}; - {param_type=String; param_name="device"; param_doc=""; param_release=boston_release; param_default=Some (VString "0")}; - 
{param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])}; - {param_type=(Ref _vgpu_type); param_name="type"; param_doc=""; param_release=vgpu_tech_preview_release; param_default=(Some (VRef (Ref.string_of Ref.null)))}; - ] - ~result:(Ref _vgpu, "reference to the newly created object") - ~allowed_roles:_R_POOL_OP - () - in - let destroy = call - ~name:"destroy" - ~lifecycle:[Published, rel_boston, ""] - ~params:[ - Ref _vgpu, "self", "The vGPU to destroy" - ] - ~allowed_roles:_R_POOL_OP - () - in - let atomic_set_resident_on = call - ~name:"atomic_set_resident_on" - ~lifecycle:[Published, rel_dundee, ""] - ~params:[ - Ref _vgpu, "self", "The vGPU to modify"; - Ref _pgpu, "value", "The pGPU on which the vGPU is running"; - ] - ~allowed_roles:_R_LOCAL_ROOT_ONLY - ~hide_from_docs:true - ~pool_internal:true - () - in - create_obj - ~name:_vgpu - ~descr:"A virtual GPU (vGPU)" - ~doccomments:[] - ~gen_constructor_destructor:false - ~gen_events:true - ~in_db:true - ~lifecycle:[Published, rel_boston, ""] - ~messages:[create; destroy; atomic_set_resident_on] - ~messages_default_allowed_roles:_R_POOL_OP - ~persist:PersistEverything - ~in_oss_since:None - ~contents:[ - uid _vgpu ~lifecycle:[Published, rel_boston, ""]; - field ~qualifier:DynamicRO ~ty:(Ref _vm) ~lifecycle:[Published, rel_boston, ""] "VM" "VM that owns the vGPU"; - field ~qualifier:DynamicRO ~ty:(Ref _gpu_group) ~lifecycle:[Published, rel_boston, ""] "GPU_group" "GPU group used by the vGPU"; - field ~qualifier:DynamicRO ~ty:String ~lifecycle:[Published, rel_boston, ""] ~default_value:(Some (VString "0")) "device" "Order in which the devices are plugged into the VM"; - field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_boston, ""] ~default_value:(Some (VBool false)) "currently_attached" "Reflects whether the virtual device is currently connected to a physical device"; - field ~qualifier:RW ~ty:(Map (String,String)) 
~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); - field ~qualifier:DynamicRO ~ty:(Ref _vgpu_type) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "type" "Preset type for this VGPU" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:DynamicRO ~ty:(Ref _pgpu) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "resident_on" "The PGPU on which this VGPU is running" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - field ~qualifier:DynamicRO ~ty:(Ref _pgpu) ~lifecycle:[Published, rel_dundee, ""] ~internal_only:true "scheduled_to_be_resident_on" "The PGPU on which this VGPU is scheduled to run" ~default_value:(Some (VRef (Ref.string_of Ref.null))); - ] - () + let create = call + ~name:"create" + ~lifecycle:[Published, rel_boston, ""] + ~versioned_params:[ + {param_type=(Ref _vm); param_name="VM"; param_doc=""; param_release=boston_release; param_default=None}; + {param_type=(Ref _gpu_group); param_name="GPU_group"; param_doc=""; param_release=boston_release; param_default=None}; + {param_type=String; param_name="device"; param_doc=""; param_release=boston_release; param_default=Some (VString "0")}; + {param_type=(Map (String, String)); param_name="other_config"; param_doc=""; param_release=boston_release; param_default=Some (VMap [])}; + {param_type=(Ref _vgpu_type); param_name="type"; param_doc=""; param_release=vgpu_tech_preview_release; param_default=(Some (VRef (Ref.string_of Ref.null)))}; + ] + ~result:(Ref _vgpu, "reference to the newly created object") + ~allowed_roles:_R_POOL_OP + () + in + let destroy = call + ~name:"destroy" + ~lifecycle:[Published, rel_boston, ""] + ~params:[ + Ref _vgpu, "self", "The vGPU to destroy" + ] + ~allowed_roles:_R_POOL_OP + () + in + let atomic_set_resident_on = call + ~name:"atomic_set_resident_on" + ~lifecycle:[Published, rel_dundee, ""] + ~params:[ + Ref _vgpu, "self", "The vGPU to modify"; + Ref _pgpu, "value", "The pGPU on which the 
vGPU is running"; + ] + ~allowed_roles:_R_LOCAL_ROOT_ONLY + ~hide_from_docs:true + ~pool_internal:true + () + in + create_obj + ~name:_vgpu + ~descr:"A virtual GPU (vGPU)" + ~doccomments:[] + ~gen_constructor_destructor:false + ~gen_events:true + ~in_db:true + ~lifecycle:[Published, rel_boston, ""] + ~messages:[create; destroy; atomic_set_resident_on] + ~messages_default_allowed_roles:_R_POOL_OP + ~persist:PersistEverything + ~in_oss_since:None + ~contents:[ + uid _vgpu ~lifecycle:[Published, rel_boston, ""]; + field ~qualifier:DynamicRO ~ty:(Ref _vm) ~lifecycle:[Published, rel_boston, ""] "VM" "VM that owns the vGPU"; + field ~qualifier:DynamicRO ~ty:(Ref _gpu_group) ~lifecycle:[Published, rel_boston, ""] "GPU_group" "GPU group used by the vGPU"; + field ~qualifier:DynamicRO ~ty:String ~lifecycle:[Published, rel_boston, ""] ~default_value:(Some (VString "0")) "device" "Order in which the devices are plugged into the VM"; + field ~qualifier:DynamicRO ~ty:Bool ~lifecycle:[Published, rel_boston, ""] ~default_value:(Some (VBool false)) "currently_attached" "Reflects whether the virtual device is currently connected to a physical device"; + field ~qualifier:RW ~ty:(Map (String,String)) ~lifecycle:[Published, rel_boston, ""] "other_config" "Additional configuration" ~default_value:(Some (VMap [])); + field ~qualifier:DynamicRO ~ty:(Ref _vgpu_type) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "type" "Preset type for this VGPU" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + field ~qualifier:DynamicRO ~ty:(Ref _pgpu) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "resident_on" "The PGPU on which this VGPU is running" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + field ~qualifier:DynamicRO ~ty:(Ref _pgpu) ~lifecycle:[Published, rel_dundee, ""] ~internal_only:true "scheduled_to_be_resident_on" "The PGPU on which this VGPU is scheduled to run" ~default_value:(Some (VRef (Ref.string_of Ref.null))); + ] + () (** Virtual GPU types (i.e. 
preset sizes) *) let vgpu_type_implementation = - Enum ("vgpu_type_implementation", [ - "passthrough", "Pass through an entire physical GPU to a guest"; - "nvidia", "vGPU using NVIDIA hardware"; - "gvt_g", "vGPU using Intel GVT-g"; - ]) + Enum ("vgpu_type_implementation", [ + "passthrough", "Pass through an entire physical GPU to a guest"; + "nvidia", "vGPU using NVIDIA hardware"; + "gvt_g", "vGPU using Intel GVT-g"; + ]) let vgpu_type = - create_obj - ~name:_vgpu_type - ~descr:"A type of virtual GPU" - ~doccomments:[] - ~gen_constructor_destructor:false - ~gen_events:true - ~in_db:true - ~lifecycle:[Published, rel_vgpu_tech_preview, ""] - ~messages:[] - ~messages_default_allowed_roles:_R_POOL_OP - ~persist:PersistEverything - ~in_oss_since:None - ~contents:[ - uid _vgpu_type ~lifecycle:[Published, rel_vgpu_tech_preview, ""]; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VString "")) "vendor_name" "Name of VGPU vendor"; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VString "")) "model_name" "Model name associated with the VGPU type"; - field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VInt 0L)) "framebuffer_size" "Framebuffer size of the VGPU type, in bytes"; - field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VInt 0L)) "max_heads" "Maximum number of displays supported by the VGPU type"; - field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VInt 0L)) "max_resolution_x" "Maximum resolution (width) supported by the VGPU type"; - field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VInt 0L)) "max_resolution_y" "Maximum resolution (height) supported by the VGPU type"; - field ~qualifier:StaticRO ~ty:Int 
~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~internal_only:true ~default_value:(Some (VInt 0L)) "size" "Abstract size for tracking PGPU utilisation"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "supported_on_PGPUs" "List of PGPUs that support this VGPU type"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "enabled_on_PGPUs" "List of PGPUs that have this VGPU type enabled"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "VGPUs" "List of VGPUs of this type"; - field ~qualifier:StaticRO ~ty:(Map (String, String)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VMap [])) ~internal_only:true "internal_config" "Extra configuration information for internal use."; - field ~qualifier:DynamicRO ~ty:(Set (Ref _gpu_group)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "supported_on_GPU_groups" "List of GPU groups in which at least one PGPU supports this VGPU type"; - field ~qualifier:DynamicRO ~ty:(Set (Ref _gpu_group)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "enabled_on_GPU_groups" "List of GPU groups in which at least one have this VGPU type enabled"; - field ~qualifier:StaticRO ~ty:vgpu_type_implementation ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VEnum "passthrough")) "implementation" "The internal implementation of this VGPU type"; - field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VString "")) "identifier" "Key used to identify VGPU types and avoid creating duplicates - this field is used internally and not intended for interpretation by API clients"; - field ~qualifier: StaticRO ~ty:Bool ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VBool false)) "experimental" "Indicates whether VGPUs of this type should be considered experimental"; - ] - () + create_obj + ~name:_vgpu_type + 
~descr:"A type of virtual GPU" + ~doccomments:[] + ~gen_constructor_destructor:false + ~gen_events:true + ~in_db:true + ~lifecycle:[Published, rel_vgpu_tech_preview, ""] + ~messages:[] + ~messages_default_allowed_roles:_R_POOL_OP + ~persist:PersistEverything + ~in_oss_since:None + ~contents:[ + uid _vgpu_type ~lifecycle:[Published, rel_vgpu_tech_preview, ""]; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VString "")) "vendor_name" "Name of VGPU vendor"; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VString "")) "model_name" "Model name associated with the VGPU type"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VInt 0L)) "framebuffer_size" "Framebuffer size of the VGPU type, in bytes"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VInt 0L)) "max_heads" "Maximum number of displays supported by the VGPU type"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VInt 0L)) "max_resolution_x" "Maximum resolution (width) supported by the VGPU type"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_productisation, ""] ~default_value:(Some (VInt 0L)) "max_resolution_y" "Maximum resolution (height) supported by the VGPU type"; + field ~qualifier:StaticRO ~ty:Int ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~internal_only:true ~default_value:(Some (VInt 0L)) "size" "Abstract size for tracking PGPU utilisation"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "supported_on_PGPUs" "List of PGPUs that support this VGPU type"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _pgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "enabled_on_PGPUs" "List of PGPUs that have this VGPU type enabled"; + 
field ~qualifier:DynamicRO ~ty:(Set (Ref _vgpu)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] "VGPUs" "List of VGPUs of this type"; + field ~qualifier:StaticRO ~ty:(Map (String, String)) ~lifecycle:[Published, rel_vgpu_tech_preview, ""] ~default_value:(Some (VMap [])) ~internal_only:true "internal_config" "Extra configuration information for internal use."; + field ~qualifier:DynamicRO ~ty:(Set (Ref _gpu_group)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "supported_on_GPU_groups" "List of GPU groups in which at least one PGPU supports this VGPU type"; + field ~qualifier:DynamicRO ~ty:(Set (Ref _gpu_group)) ~lifecycle:[Published, rel_vgpu_productisation, ""] "enabled_on_GPU_groups" "List of GPU groups in which at least one have this VGPU type enabled"; + field ~qualifier:StaticRO ~ty:vgpu_type_implementation ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VEnum "passthrough")) "implementation" "The internal implementation of this VGPU type"; + field ~qualifier:StaticRO ~ty:String ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VString "")) "identifier" "Key used to identify VGPU types and avoid creating duplicates - this field is used internally and not intended for interpretation by API clients"; + field ~qualifier: StaticRO ~ty:Bool ~lifecycle:[Published, rel_dundee, ""] ~default_value:(Some (VBool false)) "experimental" "Indicates whether VGPUs of this type should be considered experimental"; + ] + () (******************************************************************************************) (** All the objects in the system in order they will appear in documentation: *) let all_system = - [ - session; - auth; - subject; - (role:Datamodel_types.obj); - task; - event; - (* alert; *) - - pool; - pool_patch; - - vm; - vm_metrics; - vm_guest_metrics; - vmpp; - vm_appliance; - dr_task; - host; - host_crashdump; - host_patch; - host_metrics; - hostcpu; - (* network_manager; *) - network; - vif; - vif_metrics; - pif; - 
pif_metrics; - bond; - vlan; - storage_plugin; - storage_repository; - lvhd; - vdi; - vbd; - vbd_metrics; - pbd; - crashdump; - (* misc *) - vtpm; - console; - (* filesystem; *) - user; - data_source; - blob; - message; - secret; - tunnel; - pci; - pgpu; - gpu_group; - vgpu; - vgpu_type; - ] + [ + session; + auth; + subject; + (role:Datamodel_types.obj); + task; + event; + (* alert; *) + + pool; + pool_patch; + + vm; + vm_metrics; + vm_guest_metrics; + vmpp; + vm_appliance; + dr_task; + host; + host_crashdump; + host_patch; + host_metrics; + hostcpu; + (* network_manager; *) + network; + vif; + vif_metrics; + pif; + pif_metrics; + bond; + vlan; + storage_plugin; + storage_repository; + lvhd; + vdi; + vbd; + vbd_metrics; + pbd; + crashdump; + (* misc *) + vtpm; + console; + (* filesystem; *) + user; + data_source; + blob; + message; + secret; + tunnel; + pci; + pgpu; + gpu_group; + vgpu; + vgpu_type; + ] (** These are the pairs of (object, field) which are bound together in the database schema *) (* If the relation is one-to-many, the "many" nodes (one edge each) must come before the "one" node (many edges) *) @@ -8809,7 +8809,7 @@ let all_relations = (* subtasks hierarchy *) (_task, "subtask_of"), (_task, "subtasks"); (_task, "session"), (_session, "tasks"); - + (_pif, "bond_slave_of"), (_bond, "slaves"); (_bond, "master"), (_pif, "bond_master_of"); (_vlan, "tagged_PIF"), (_pif, "VLAN_slave_of"); @@ -8819,9 +8819,9 @@ let all_relations = (_pbd, "host"), (_host, "PBDs"); (_pbd, "SR"), (_sr, "PBDs"); - (_vbd, "VDI"), (_vdi, "VBDs"); + (_vbd, "VDI"), (_vdi, "VBDs"); (_crashdump, "VDI"), (_vdi, "crash_dumps"); -(* (_vdi, "parent"), (_vdi, "children"); *) + (* (_vdi, "parent"), (_vdi, "children"); *) (_vbd, "VM"), (_vm, "VBDs"); (_crashdump, "VM"), (_vm, "crash_dumps"); @@ -8836,7 +8836,7 @@ let all_relations = (_vdi, "SR"), (_sr, "VDIs"); -(* (_alert, "task"), (_task, "alerts"); *) + (* (_alert, "task"), (_task, "alerts"); *) (_vtpm, "VM"), (_vm, "VTPMs"); (_console, 
"VM"), (_vm, "consoles"); @@ -8878,7 +8878,7 @@ let all_api = Dm_api.make (all_system, all_relations) (** These are the "emergency" calls that can be performed when a host is in "emergency mode" *) let emergency_calls = [ (pool,pool_slave_reset_master); - (pool,pool_transition_to_master); + (pool,pool_transition_to_master); (pool,pool_ping_slave); (session,slave_local_login); (session,slave_local_login_with_password); @@ -8898,9 +8898,9 @@ let emergency_calls = ] (** Whitelist of calls that will not get forwarded from the slave to master via the unix domain socket *) -let whitelist = [ (session,session_login); - (session,slave_login); - ] @ emergency_calls +let whitelist = [ (session,session_login); + (session,slave_login); + ] @ emergency_calls (* perform consistency checks on api at initialisation time *) let _ = Dm_api.check all_api (List.map (fun (obj,msg) -> obj.name, msg.msg_name) emergency_calls) @@ -8913,46 +8913,46 @@ let no_async_messages_for = [ _session; _event; (* _alert; *) _task; _data_sourc ** through a VM or SR. *) (* Note on the above: it looks like we _do_ have {VBD,VDI}.get_all! 
*) let expose_get_all_messages_for = [ - _task; - (* _alert; *) - _host; - _host_metrics; - _hostcpu; - _sr; - _vm; - _vm_metrics; - _vm_guest_metrics; - _network; - _vif; - _vif_metrics; - _pif; - _pif_metrics; - _pbd; - _vdi; - _vbd; - _vbd_metrics; - _console; - _crashdump; - _host_crashdump; - _host_patch; - _pool; - _sm; - _pool_patch; - _bond; - _vlan; - _blob; - _subject; - _role; - _secret; - _tunnel; - _vmpp; - _vm_appliance; - _pci; - _pgpu; - _gpu_group; - _vgpu; - _vgpu_type; - _dr_task; + _task; + (* _alert; *) + _host; + _host_metrics; + _hostcpu; + _sr; + _vm; + _vm_metrics; + _vm_guest_metrics; + _network; + _vif; + _vif_metrics; + _pif; + _pif_metrics; + _pbd; + _vdi; + _vbd; + _vbd_metrics; + _console; + _crashdump; + _host_crashdump; + _host_patch; + _pool; + _sm; + _pool_patch; + _bond; + _vlan; + _blob; + _subject; + _role; + _secret; + _tunnel; + _vmpp; + _vm_appliance; + _pci; + _pgpu; + _gpu_group; + _vgpu; + _vgpu_type; + _dr_task; ] let no_task_id_for = [ _task; (* _alert; *) _event ] @@ -8962,10 +8962,10 @@ let current_operations_for = [ _vm; (* _vdi; _host; _sr *) ] (*** HTTP actions ***) type action_arg = (* I'm not using Datamodel_types here because we need varargs *) - String_query_arg of string | - Int64_query_arg of string | - Bool_query_arg of string | - Varargs_query_arg + String_query_arg of string | + Int64_query_arg of string | + Bool_query_arg of string | + Varargs_query_arg type http_meth = Get | Put | Post | Connect | Options let rbac_http_permission_prefix = "http/" @@ -8987,17 +8987,17 @@ let http_actions = [ ("post_services_sm", (Post, Constants.sm_uri, false, [], _R_VM_POWER_ADMIN, [])); ("put_services_sm", (Put, Constants.sm_uri, false, [], _R_VM_POWER_ADMIN, [])); ("put_import", (Put, Constants.import_uri, true, - [Bool_query_arg "restore"; Bool_query_arg "force"; String_query_arg "sr_id"], _R_VM_ADMIN, [])); + [Bool_query_arg "restore"; Bool_query_arg "force"; String_query_arg "sr_id"], _R_VM_ADMIN, [])); 
("put_import_metadata", (Put, Constants.import_metadata_uri, true, - [Bool_query_arg "restore"; Bool_query_arg "force"], _R_VM_ADMIN, [])); + [Bool_query_arg "restore"; Bool_query_arg "force"], _R_VM_ADMIN, [])); ("put_import_raw_vdi", (Put, Constants.import_raw_vdi_uri, true, [String_query_arg "vdi"], _R_VM_ADMIN, [])); ("get_export", (Get, Constants.export_uri, true, [String_query_arg "uuid"], _R_VM_ADMIN, [])); ("get_export_metadata", (Get, Constants.export_metadata_uri, true, [String_query_arg "uuid"], _R_VM_ADMIN, [])); ("get_export_raw_vdi", (Get, Constants.export_raw_vdi_uri, true, [String_query_arg "vdi"], _R_VM_ADMIN, [])); - ("connect_console", (Connect, Constants.console_uri, false, [], _R_VM_OP, - [("host_console", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can access the host/Dom0 console *) - ("connect_console_ws", (Get, Constants.console_uri, false, [], _R_VM_OP, - [("host_console_ws", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can access the host/Dom0 console *) + ("connect_console", (Connect, Constants.console_uri, false, [], _R_VM_OP, + [("host_console", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can access the host/Dom0 console *) + ("connect_console_ws", (Get, Constants.console_uri, false, [], _R_VM_OP, + [("host_console_ws", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can access the host/Dom0 console *) ("get_root", (Get, "/", false, [], _R_READ_ONLY, [])); ("post_cli", (Post, Constants.cli_uri, false, [], _R_READ_ONLY, [])); ("get_host_backup", (Get, Constants.host_backup_uri, true, [], _R_POOL_ADMIN, [])); @@ -9006,21 +9006,21 @@ let http_actions = [ ("put_pool_patch_upload", (Put, Constants.pool_patch_upload_uri, true, [], _R_POOL_OP, [])); ("get_pool_patch_download", (Get, Constants.pool_patch_download_uri, true, [String_query_arg "uuid"], _R_POOL_OP, [])); ("put_oem_patch_stream", (Put, Constants.oem_patch_stream_uri, true, [], _R_POOL_OP, [])); - ("get_vncsnapshot", (Get, Constants.vncsnapshot_uri, true, [String_query_arg "uuid"], _R_VM_OP, - 
[("host_console", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can snapshot host/Dom0 console *) + ("get_vncsnapshot", (Get, Constants.vncsnapshot_uri, true, [String_query_arg "uuid"], _R_VM_OP, + [("host_console", _R_POOL_ADMIN)])); (* only _R_POOL_ADMIN can snapshot host/Dom0 console *) ("get_pool_xml_db_sync", (Get, Constants.pool_xml_db_sync, true, [], _R_POOL_ADMIN, [])); ("put_pool_xml_db_sync", (Put, Constants.pool_xml_db_sync, false, [], _R_POOL_ADMIN, [])); ("get_config_sync", (Get, Constants.config_sync_uri, false, [], _R_POOL_ADMIN, [])); ("get_vm_connect", (Get, Constants.vm_connect_uri, false, [], _R_POOL_ADMIN, [])); ("put_vm_connect", (Put, Constants.vm_connect_uri, false, [], _R_POOL_ADMIN, [])); ("get_system_status", (Get, Constants.system_status_uri, true, - [String_query_arg "entries"; String_query_arg "output"], _R_POOL_OP, [])); - (Constants.get_vm_rrd, (Get, Constants.get_vm_rrd_uri, true, [String_query_arg "uuid"], _R_READ_ONLY, [])); - (Constants.get_host_rrd, (Get, Constants.get_host_rrd_uri, true, [Bool_query_arg "json"], _R_READ_ONLY, [])); - (Constants.get_sr_rrd, (Get, Constants.get_sr_rrd_uri, true, [String_query_arg "uuid"], _R_READ_ONLY, [])); - (Constants.get_rrd_updates, (Get, Constants.get_rrd_updates_uri, true, - [Int64_query_arg "start"; String_query_arg "cf"; Int64_query_arg "interval"; - Bool_query_arg "host"; String_query_arg "uuid"; Bool_query_arg "json"], _R_READ_ONLY, [])); + [String_query_arg "entries"; String_query_arg "output"], _R_POOL_OP, [])); + (Constants.get_vm_rrd, (Get, Constants.get_vm_rrd_uri, true, [String_query_arg "uuid"], _R_READ_ONLY, [])); + (Constants.get_host_rrd, (Get, Constants.get_host_rrd_uri, true, [Bool_query_arg "json"], _R_READ_ONLY, [])); + (Constants.get_sr_rrd, (Get, Constants.get_sr_rrd_uri, true, [String_query_arg "uuid"], _R_READ_ONLY, [])); + (Constants.get_rrd_updates, (Get, Constants.get_rrd_updates_uri, true, + [Int64_query_arg "start"; String_query_arg "cf"; Int64_query_arg "interval"; + 
Bool_query_arg "host"; String_query_arg "uuid"; Bool_query_arg "json"], _R_READ_ONLY, [])); (Constants.put_rrd, (Put, Constants.put_rrd_uri, false, [], _R_POOL_ADMIN, [])); ("get_blob", (Get, Constants.blob_uri, false, [], _R_READ_ONLY, [])); ("put_blob", (Put, Constants.blob_uri, true, [String_query_arg "ref"], _R_VM_POWER_ADMIN, [])); @@ -9029,7 +9029,7 @@ let http_actions = [ ("connect_remotecmd", (Connect, Constants.remotecmd_uri, false, [], _R_POOL_ADMIN, [])); ("post_remote_stats", (Post, Constants.remote_stats_uri, false, [], _R_POOL_ADMIN, [])); (* deprecated *) ("get_wlb_report", (Get, Constants.wlb_report_uri, true, - [String_query_arg "report"; Varargs_query_arg], _R_READ_ONLY, [])); + [String_query_arg "report"; Varargs_query_arg], _R_READ_ONLY, [])); ("get_wlb_diagnostics", (Get, Constants.wlb_diagnostics_uri, true, [], _R_READ_ONLY, [])); ("get_audit_log", (Get, Constants.audit_log_uri, true, [], _R_READ_ONLY, [])); @@ -9047,21 +9047,21 @@ let http_actions = [ (* they are meant to be used in exceptional cases where RBAC is already *) (* checked inside them, such as in the XMLRPC (API) calls *) let public_http_actions_with_no_rbac_check = - [ - "post_root"; (* XMLRPC (API) calls -> checks RBAC internally *) - "post_cli"; (* CLI commands -> calls XMLRPC *) - "post_json"; (* JSON -> calls XMLRPC *) - "get_root"; (* Make sure that downloads, personal web pages etc do not go through RBAC asking for a password or session_id *) - (* also, without this line, quicktest_http.ml fails on non_resource_cmd and bad_resource_cmd with a 401 instead of 404 *) - "get_blob"; (* Public blobs don't need authentication *) - "post_root_options"; (* Preflight-requests are not RBAC checked *) - "post_json_options"; - "post_jsonrpc"; - "post_jsonrpc_options"; - ] + [ + "post_root"; (* XMLRPC (API) calls -> checks RBAC internally *) + "post_cli"; (* CLI commands -> calls XMLRPC *) + "post_json"; (* JSON -> calls XMLRPC *) + "get_root"; (* Make sure that downloads, personal web 
pages etc do not go through RBAC asking for a password or session_id *) + (* also, without this line, quicktest_http.ml fails on non_resource_cmd and bad_resource_cmd with a 401 instead of 404 *) + "get_blob"; (* Public blobs don't need authentication *) + "post_root_options"; (* Preflight-requests are not RBAC checked *) + "post_json_options"; + "post_jsonrpc"; + "post_jsonrpc_options"; + ] (* permissions not associated with any object message or field *) let extra_permissions = [ - (extra_permission_task_destroy_any, _R_POOL_OP); (* only POOL_OP can destroy any tasks *) + (extra_permission_task_destroy_any, _R_POOL_OP); (* only POOL_OP can destroy any tasks *) ] diff --git a/ocaml/idl/datamodel_main.ml b/ocaml/idl/datamodel_main.ml index 3bade0ea657..2b19403d0a1 100644 --- a/ocaml/idl/datamodel_main.ml +++ b/ocaml/idl/datamodel_main.ml @@ -19,66 +19,66 @@ open Dm_api (* right now this file returns all fields if -all set, otherwise it returns fields that have z1 set in release.internal *) -let _ = +let _ = let dot_mode = ref false and latex_mode = ref false and dtd_mode = ref false and closed = ref false (* shows release_closed *) and all = ref false (* shows release_impl as well *) and dirname = ref "" in - - Arg.parse [ "-dot", Arg.Set dot_mode, "output dot graph"; - "-latex", Arg.Set latex_mode, "output latex document"; - "-dtd", Arg.Set dtd_mode, "output XML DTD"; - "-closed", Arg.Set closed, "output all OSS + closed API functions but not including internal ones"; - "-all", Arg.Set all, "output all API functions, including internal ones" - ] - (fun x -> dirname := x) - "compile XenSource API datamodel specification"; - let all_modes = [ !dot_mode; !latex_mode; !dtd_mode ] in - - let num_modes_set = List.length (List.filter (fun x->x) all_modes) in - - if num_modes_set = 0 then failwith "No mode set on the commandline"; - if num_modes_set > 1 then failwith "More than one mode on the commandline"; - let oss_filter api = - filter - (fun _ -> true) (fun field -> 
List.mem "3.0.3" field.release.opensource) - (fun message -> List.mem "3.0.3" message.msg_release.opensource) - api in - let closed_filter api = - filter - (fun _ -> true) (fun field -> List.mem "closed" field.release.internal) - (fun message -> List.mem "closed" message.msg_release.internal) - api in + Arg.parse [ "-dot", Arg.Set dot_mode, "output dot graph"; + "-latex", Arg.Set latex_mode, "output latex document"; + "-dtd", Arg.Set dtd_mode, "output XML DTD"; + "-closed", Arg.Set closed, "output all OSS + closed API functions but not including internal ones"; + "-all", Arg.Set all, "output all API functions, including internal ones" + ] + (fun x -> dirname := x) + "compile XenSource API datamodel specification"; + let all_modes = [ !dot_mode; !latex_mode; !dtd_mode ] in - let api = match !all, !closed with - | true, _ -> all_api - | _, true -> closed_filter all_api - | _, false -> oss_filter all_api in + let num_modes_set = List.length (List.filter (fun x->x) all_modes) in + if num_modes_set = 0 then failwith "No mode set on the commandline"; + if num_modes_set > 1 then failwith "More than one mode on the commandline"; - (* Add all implicit messages to the API directly *) - let api = DU.add_implicit_messages ~document_order:!latex_mode api in - (* Only show those visible to the client *) - let api = filter (fun _ -> true) (fun field -> true) DU.on_client_side api in - (* And only messages marked as not hidden from the docs, and non-internal fields *) - let api = filter (fun _ -> true) (fun f -> not f.internal_only) (fun m -> not m.msg_hide_from_docs) api in + let oss_filter api = + filter + (fun _ -> true) (fun field -> List.mem "3.0.3" field.release.opensource) + (fun message -> List.mem "3.0.3" message.msg_release.opensource) + api in + let closed_filter api = + filter + (fun _ -> true) (fun field -> List.mem "closed" field.release.internal) + (fun message -> List.mem "closed" message.msg_release.internal) + api in - if !dirname <> "" then Unix.chdir !dirname; 
- if (!latex_mode) then begin - Latex_backend.all api (!all) - end; + let api = match !all, !closed with + | true, _ -> all_api + | _, true -> closed_filter all_api + | _, false -> oss_filter all_api in - if !dot_mode then begin - List.iter print_endline (Dot_backend.of_objs api) - end; - if !dtd_mode then begin - let api = filter (fun _ -> true) - (fun field -> field.qualifier <> DynamicRO) - (fun _ -> true) - api in - List.iter print_endline (Dtd_backend.of_objs api); - end + (* Add all implicit messages to the API directly *) + let api = DU.add_implicit_messages ~document_order:!latex_mode api in + (* Only show those visible to the client *) + let api = filter (fun _ -> true) (fun field -> true) DU.on_client_side api in + (* And only messages marked as not hidden from the docs, and non-internal fields *) + let api = filter (fun _ -> true) (fun f -> not f.internal_only) (fun m -> not m.msg_hide_from_docs) api in + + if !dirname <> "" then Unix.chdir !dirname; + if (!latex_mode) then begin + Latex_backend.all api (!all) + end; + + if !dot_mode then begin + List.iter print_endline (Dot_backend.of_objs api) + end; + + if !dtd_mode then begin + let api = filter (fun _ -> true) + (fun field -> field.qualifier <> DynamicRO) + (fun _ -> true) + api in + List.iter print_endline (Dtd_backend.of_objs api); + end diff --git a/ocaml/idl/datamodel_schema.ml b/ocaml/idl/datamodel_schema.ml index 681a600dd6e..bbfc824d2a5 100644 --- a/ocaml/idl/datamodel_schema.ml +++ b/ocaml/idl/datamodel_schema.ml @@ -15,85 +15,85 @@ open Schema (* This code could live higher up the stack *) -let of_datamodel () = - let rec flatten_fields fs acc = - match fs with - [] -> acc - | (Datamodel_types.Field f)::fs -> flatten_fields fs (f::acc) - | (Datamodel_types.Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) in - let column obj f = - let issetref = match f.Datamodel_types.ty with - | Datamodel_types.Set (Datamodel_types.Ref _) -> true - | _ -> false in - let 
is_many_to_many f = - let api = Datamodel.all_api in - let this = obj.Datamodel_types.name, f.Datamodel_types.field_name in - Datamodel_utils.Relations.is_in_relation api this && - (Datamodel_utils.Relations.classify api (this,(Datamodel_utils.Relations.other_end_of api this)) = (`Many, `Many)) in - let ty = match f.Datamodel_types.ty with - | Datamodel_types.Set _ -> Type.Set - | Datamodel_types.Map(_,_) -> Type.Pairs - | _ -> Type.String in - { - Column.name = Escaping.escape_id f.Datamodel_types.full_name; - (* NB we always regenerate one-to-many Set(Ref _) fields *) - persistent = f.Datamodel_types.field_persist && (is_many_to_many f || not issetref || f.Datamodel_types.field_ignore_foreign_key); - empty = Datamodel_values.gen_empty_db_val f.Datamodel_types.ty; - (* NB Set(Ref _) fields aren't allowed to have a default value specified so we hardcode one here *) - default = - if issetref - then Some (Value.Set []) - else Stdext.Opt.map Datamodel_values.to_db f.Datamodel_types.default_value ; - ty = ty; - issetref = issetref; - } in +let of_datamodel () = + let rec flatten_fields fs acc = + match fs with + [] -> acc + | (Datamodel_types.Field f)::fs -> flatten_fields fs (f::acc) + | (Datamodel_types.Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) in + let column obj f = + let issetref = match f.Datamodel_types.ty with + | Datamodel_types.Set (Datamodel_types.Ref _) -> true + | _ -> false in + let is_many_to_many f = + let api = Datamodel.all_api in + let this = obj.Datamodel_types.name, f.Datamodel_types.field_name in + Datamodel_utils.Relations.is_in_relation api this && + (Datamodel_utils.Relations.classify api (this,(Datamodel_utils.Relations.other_end_of api this)) = (`Many, `Many)) in + let ty = match f.Datamodel_types.ty with + | Datamodel_types.Set _ -> Type.Set + | Datamodel_types.Map(_,_) -> Type.Pairs + | _ -> Type.String in + { + Column.name = Escaping.escape_id f.Datamodel_types.full_name; + (* NB we always 
regenerate one-to-many Set(Ref _) fields *) + persistent = f.Datamodel_types.field_persist && (is_many_to_many f || not issetref || f.Datamodel_types.field_ignore_foreign_key); + empty = Datamodel_values.gen_empty_db_val f.Datamodel_types.ty; + (* NB Set(Ref _) fields aren't allowed to have a default value specified so we hardcode one here *) + default = + if issetref + then Some (Value.Set []) + else Stdext.Opt.map Datamodel_values.to_db f.Datamodel_types.default_value ; + ty = ty; + issetref = issetref; + } in - (* We store the reference in two places for no good reason still: *) - let _ref = { - Column.name = Db_names.ref; - persistent = true; - empty = Value.String ""; - default = None; - ty = Type.String; - issetref = false; - } in + (* We store the reference in two places for no good reason still: *) + let _ref = { + Column.name = Db_names.ref; + persistent = true; + empty = Value.String ""; + default = None; + ty = Type.String; + issetref = false; + } in - let table obj = { - Table.name = Escaping.escape_obj obj.Datamodel_types.name; - columns = _ref :: (List.map (column obj) (flatten_fields obj.Datamodel_types.contents [])); - persistent = obj.Datamodel_types.persist = Datamodel_types.PersistEverything; - } in - let is_one_to_many x = - match Datamodel_utils.Relations.classify Datamodel.all_api x with - | `One, `Many | `Many, `One -> true - | _ -> false in - let is_many_to_many x = - match Datamodel_utils.Relations.classify Datamodel.all_api x with - | `Many, `Many -> true - | _ -> false in - let add_relation p t (((one_tbl, one_fld), (many_tbl, many_fld)) as r) = - let l = if ForeignMap.mem one_tbl t then ForeignMap.find one_tbl t else [] in - if p r - then ForeignMap.add one_tbl ((one_fld, many_tbl, many_fld) :: l) t - else t in + let table obj = { + Table.name = Escaping.escape_obj obj.Datamodel_types.name; + columns = _ref :: (List.map (column obj) (flatten_fields obj.Datamodel_types.contents [])); + persistent = obj.Datamodel_types.persist = 
Datamodel_types.PersistEverything; + } in + let is_one_to_many x = + match Datamodel_utils.Relations.classify Datamodel.all_api x with + | `One, `Many | `Many, `One -> true + | _ -> false in + let is_many_to_many x = + match Datamodel_utils.Relations.classify Datamodel.all_api x with + | `Many, `Many -> true + | _ -> false in + let add_relation p t (((one_tbl, one_fld), (many_tbl, many_fld)) as r) = + let l = if ForeignMap.mem one_tbl t then ForeignMap.find one_tbl t else [] in + if p r + then ForeignMap.add one_tbl ((one_fld, many_tbl, many_fld) :: l) t + else t in - let database api = { - Database.tables = List.map table (Dm_api.objects_of_api api) - } in - { - major_vsn = Datamodel.schema_major_vsn; - minor_vsn = Datamodel.schema_minor_vsn; - database = database Datamodel.all_api; - one_to_many = List.fold_left (add_relation is_one_to_many) ForeignMap.empty (Dm_api.relations_of_api Datamodel.all_api); - many_to_many = List.fold_left (add_relation is_many_to_many) ForeignMap.empty (Dm_api.relations_of_api Datamodel.all_api); - } + let database api = { + Database.tables = List.map table (Dm_api.objects_of_api api) + } in + { + major_vsn = Datamodel.schema_major_vsn; + minor_vsn = Datamodel.schema_minor_vsn; + database = database Datamodel.all_api; + one_to_many = List.fold_left (add_relation is_one_to_many) ForeignMap.empty (Dm_api.relations_of_api Datamodel.all_api); + many_to_many = List.fold_left (add_relation is_many_to_many) ForeignMap.empty (Dm_api.relations_of_api Datamodel.all_api); + } (* For now this is a convenience debugging function. Eventually we should separate the datamodel from the database and load the schema from disk. 
*) let write_schema_to_file filename = - let t = of_datamodel () in - let sexp = Schema.sexp_of_t t in - let oc = open_out filename in - let txt = Sexplib.Sexp.to_string_hum sexp in - output_string oc txt; - close_out oc + let t = of_datamodel () in + let sexp = Schema.sexp_of_t t in + let oc = open_out filename in + let txt = Sexplib.Sexp.to_string_hum sexp in + output_string oc txt; + close_out oc diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index dc122debe83..c350d18821d 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -28,9 +28,9 @@ open Stdext module Date = struct - include Date - let iso8601_of_rpc rpc = Date.of_string (Rpc.string_of_rpc rpc) - let rpc_of_iso8601 date = Rpc.rpc_of_string (Date.to_string date) + include Date + let iso8601_of_rpc rpc = Date.of_string (Rpc.string_of_rpc rpc) + let rpc_of_iso8601 date = Rpc.rpc_of_string (Date.to_string date) end (* useful constants for product vsn tracking *) @@ -58,28 +58,28 @@ let rel_dundee_plus = "dundee-plus" let rel_ely = "ely" let release_order = - [ rel_rio - ; rel_miami - ; rel_symc - ; rel_orlando - ; rel_orlando_update_1 - ; rel_george - ; rel_midnight_ride - ; rel_cowley - ; rel_boston - ; rel_tampa - ; rel_clearwater - ; rel_vgpu_tech_preview - ; rel_vgpu_productisation - ; rel_clearwater_felton - ; rel_clearwater_whetstone - ; rel_creedence - ; rel_cream - ; rel_indigo - ; rel_dundee - ; rel_dundee_plus - ; rel_ely - ] + [ rel_rio + ; rel_miami + ; rel_symc + ; rel_orlando + ; rel_orlando_update_1 + ; rel_george + ; rel_midnight_ride + ; rel_cowley + ; rel_boston + ; rel_tampa + ; rel_clearwater + ; rel_vgpu_tech_preview + ; rel_vgpu_productisation + ; rel_clearwater_felton + ; rel_clearwater_whetstone + ; rel_creedence + ; rel_cream + ; rel_indigo + ; rel_dundee + ; rel_dundee_plus + ; rel_ely + ] exception Unknown_release of string (* ordering function on releases *) @@ -92,13 +92,13 @@ let release_leq x y = (** Types of object fields. 
Accessor functions are generated for each field automatically according to its type and qualifiers. *) type ty = - | String | Int | Float | Bool | DateTime - | Enum of string * (string * string) list - | Set of ty - | Map of ty * ty - | Ref of string - | Record of string - with rpc + | String | Int | Float | Bool | DateTime + | Enum of string * (string * string) list + | Set of ty + | Map of ty * ty + | Ref of string + | Record of string +with rpc type api_value = VString of string @@ -111,19 +111,19 @@ type api_value = | VSet of api_value list | VRef of string | VCustom of string * api_value - with rpc - +with rpc + (** Each database field has a qualifier associated with it: * "Static" means the initial value is specified as a parameter in the object constructor. * "Dynamic" means the opposite: its initial value is a predefined default. * "RW" means the value can be updated by XenAPI clients via the autogenerated API. * "RO" means the opposite, but the value can be updated by direct database calls. *) type qualifier = - | RW (** Implicitly static: set in constructor and updatable through API *) - | StaticRO (** Specified in constructor; no autogenerated setter in XenAPI. *) - | DynamicRO (** Initial value is a default; no autogenerated setter in XenAPI. *) - with rpc - + | RW (** Implicitly static: set in constructor and updatable through API *) + | StaticRO (** Specified in constructor; no autogenerated setter in XenAPI. *) + | DynamicRO (** Initial value is a default; no autogenerated setter in XenAPI. 
*) +with rpc + (** Release keeps track of which versions of opensource/internal products fields and messages are included in *) type release = { opensource: string list; @@ -132,12 +132,12 @@ type release = { } with rpc type lifecycle_change = - | Prototyped - | Published - | Extended - | Changed - | Deprecated - | Removed + | Prototyped + | Published + | Extended + | Changed + | Deprecated + | Removed and lifecycle_transition = lifecycle_change * string * string with rpc @@ -146,17 +146,17 @@ with rpc specified explicitly in the datamodel, or is one of the automatically generated ones. If automatically generated, the tag tells you where it came from: this is needed for the server implementation. *) -type tag = - | FromField of field_op * field - | FromObject of obj_op - | Custom +type tag = + | FromField of field_op * field + | FromObject of obj_op + | Custom and field_op = Getter | Setter | Add | Remove and private_op = GetDBRecord | GetDBAll | Copy and obj_op = Make | Delete | GetByUuid | GetByLabel | GetRecord | GetAll | GetAllRecordsWhere | GetAllRecords - | Private of private_op + | Private of private_op and param = {param_type:ty; param_name:string; param_doc:string; param_release: release; param_default: api_value option} @@ -165,101 +165,101 @@ and doc_tag = VM_lifecycle | Snapshots | Networking | Memory | Windows and forward = Extension of string | HostExtension of string (** Types of RPC messages; in addition to those generated for object fields *) -and message = { - msg_name: string; - msg_params: param list; - msg_result: (ty * string) option; - msg_errors: error list; - msg_doc: string; - msg_async: bool; - msg_session: bool; - msg_secret: bool; (* don't put stuff in logs *) - msg_pool_internal: bool; (* only allow on "pool-login" sessions *) - msg_db_only: bool; (* this is a db_* only message; not exposed through api *) - msg_release: release; - msg_lifecycle: lifecycle_transition list; - msg_has_effect: bool; (* if true it appears in the custom 
operations *) - msg_force_custom: qualifier option; (* unlike msg_has_effect, msg_force_custom=Some(RO|RW) always forces msg into custom operations, see gen_empty_custom.ml *) - msg_no_current_operations: bool; (* if true it doesnt appear in the current operations *) - msg_tag: tag; - msg_obj_name: string; - msg_custom_marshaller: bool; - msg_hide_from_docs: bool; (* don't list the function in the documentation and do not include it in the SDK *) - msg_allowed_roles: string list option; - msg_map_keys_roles: (string * (string list option)) list; - msg_doc_tags: doc_tag list; - msg_forward_to: forward option; (* proxy the RPC elsewhere *) -} +and message = { + msg_name: string; + msg_params: param list; + msg_result: (ty * string) option; + msg_errors: error list; + msg_doc: string; + msg_async: bool; + msg_session: bool; + msg_secret: bool; (* don't put stuff in logs *) + msg_pool_internal: bool; (* only allow on "pool-login" sessions *) + msg_db_only: bool; (* this is a db_* only message; not exposed through api *) + msg_release: release; + msg_lifecycle: lifecycle_transition list; + msg_has_effect: bool; (* if true it appears in the custom operations *) + msg_force_custom: qualifier option; (* unlike msg_has_effect, msg_force_custom=Some(RO|RW) always forces msg into custom operations, see gen_empty_custom.ml *) + msg_no_current_operations: bool; (* if true it doesnt appear in the current operations *) + msg_tag: tag; + msg_obj_name: string; + msg_custom_marshaller: bool; + msg_hide_from_docs: bool; (* don't list the function in the documentation and do not include it in the SDK *) + msg_allowed_roles: string list option; + msg_map_keys_roles: (string * (string list option)) list; + msg_doc_tags: doc_tag list; + msg_forward_to: forward option; (* proxy the RPC elsewhere *) +} and field = { - release: release; - lifecycle: lifecycle_transition list; - field_persist: bool; - default_value: api_value option; - internal_only: bool; - qualifier: qualifier; - 
field_name: string; - full_name: string list; - ty: ty; - field_description: string; - field_has_effect: bool; - field_ignore_foreign_key: bool; - field_setter_roles: string list option; - field_getter_roles: string list option; - field_map_keys_roles: (string * (string list option)) list; - field_doc_tags: doc_tag list; -} - -and error = { - err_name: string; - err_params: string list; - err_doc: string; + release: release; + lifecycle: lifecycle_transition list; + field_persist: bool; + default_value: api_value option; + internal_only: bool; + qualifier: qualifier; + field_name: string; + full_name: string list; + ty: ty; + field_description: string; + field_has_effect: bool; + field_ignore_foreign_key: bool; + field_setter_roles: string list option; + field_getter_roles: string list option; + field_map_keys_roles: (string * (string list option)) list; + field_doc_tags: doc_tag list; +} + +and error = { + err_name: string; + err_params: string list; + err_doc: string; } and mess = { - mess_name: string; - mess_doc: string; + mess_name: string; + mess_doc: string; } with rpc let default_message = { - msg_name = ""; - msg_params = []; - msg_result = None; - msg_errors = []; - msg_doc = "This message has no documentation."; - msg_async = true; - msg_session = true; - msg_secret = false; - msg_pool_internal = true; - msg_db_only = false; - msg_release = { - internal=["Never released"]; - opensource=[]; - internal_deprecated_since=None; - }; - msg_lifecycle = []; - msg_has_effect = true; - msg_force_custom = None; - msg_no_current_operations = false; - msg_tag = Custom; - msg_obj_name = ""; - msg_custom_marshaller = false; - msg_hide_from_docs = true; - msg_allowed_roles = None; - msg_map_keys_roles = []; - msg_doc_tags = []; - msg_forward_to = None; + msg_name = ""; + msg_params = []; + msg_result = None; + msg_errors = []; + msg_doc = "This message has no documentation."; + msg_async = true; + msg_session = true; + msg_secret = false; + msg_pool_internal = true; + 
msg_db_only = false; + msg_release = { + internal=["Never released"]; + opensource=[]; + internal_deprecated_since=None; + }; + msg_lifecycle = []; + msg_has_effect = true; + msg_force_custom = None; + msg_no_current_operations = false; + msg_tag = Custom; + msg_obj_name = ""; + msg_custom_marshaller = false; + msg_hide_from_docs = true; + msg_allowed_roles = None; + msg_map_keys_roles = []; + msg_doc_tags = []; + msg_forward_to = None; } -(** Getters and Setters will be generated for each field, depending on the qualifier. +(** Getters and Setters will be generated for each field, depending on the qualifier. Namespaces allow fields to be grouped together (and this can get reflected in the XML document structure) *) type content = - | Field of field (** An individual field *) - | Namespace of string * content list (** A nice namespace for a group of fields *) - with rpc - + | Field of field (** An individual field *) + | Namespace of string * content list (** A nice namespace for a group of fields *) +with rpc + (* Note: there used be more than 2 persist_options -- that's why it isn't a bool. I figured even though there's only 2 now I may as well leave it as an enumeration type.. 
*) @@ -269,29 +269,29 @@ type persist_option = PersistNothing | PersistEverything with rpc (** An object (or entity) is represented by one of these: *) type obj = { - name : string; - description : string; - obj_lifecycle: lifecycle_transition list; - contents : content list; - messages : message list; - doccomments : (string * string) list; - msg_lifecycles: ((string * (lifecycle_transition list)) list); - gen_constructor_destructor: bool; - force_custom_actions: qualifier option; (* None,Some(RW),Some(StaticRO) *) - obj_allowed_roles: string list option; (* for construct, destruct and explicit obj msgs*) - obj_implicit_msg_allowed_roles: string list option; (* for all other implicit obj msgs*) - gen_events: bool; - persist: persist_option; - obj_release: release; - in_database: bool; (* If the object is in the database *) - obj_doc_tags: doc_tag list; + name : string; + description : string; + obj_lifecycle: lifecycle_transition list; + contents : content list; + messages : message list; + doccomments : (string * string) list; + msg_lifecycles: ((string * (lifecycle_transition list)) list); + gen_constructor_destructor: bool; + force_custom_actions: qualifier option; (* None,Some(RW),Some(StaticRO) *) + obj_allowed_roles: string list option; (* for construct, destruct and explicit obj msgs*) + obj_implicit_msg_allowed_roles: string list option; (* for all other implicit obj msgs*) + gen_events: bool; + persist: persist_option; + obj_release: release; + in_database: bool; (* If the object is in the database *) + obj_doc_tags: doc_tag list; } with rpc (* val rpc_of_obj : obj -> Rpc.t *) (* let s = Jsonrpc.to_string (rpc_of_obj o) *) (** A relation binds two fields together *) -type relation = (string * string) * (string * string) +type relation = (string * string) * (string * string) (* Check if a value is of a given type *) let rec type_checks v t = @@ -303,12 +303,12 @@ let rec type_checks v t = | VBool _, Bool -> true | VDateTime _, DateTime -> true | VEnum ev, 
Enum (_,enum_spec) -> - let enum_possibles = List.map fst enum_spec in - List.mem ev enum_possibles + let enum_possibles = List.map fst enum_spec in + List.mem ev enum_possibles | VMap vvl, Map (t1,t2) -> - all_true (List.map (fun (k,v)->type_checks k t1 && type_checks v t2) vvl) + all_true (List.map (fun (k,v)->type_checks k t1 && type_checks v t2) vvl) | VSet vl, Set t -> - all_true (List.map (fun v->type_checks v t) vl) + all_true (List.map (fun v->type_checks v t) vl) | VRef r, Ref _ -> true | VCustom _, _ -> true (* Type checks defered to phase-2 compile time *) | _, _ -> false diff --git a/ocaml/idl/datamodel_utils.ml b/ocaml/idl/datamodel_utils.ml index af836e0f093..ed07f3ad29d 100644 --- a/ocaml/idl/datamodel_utils.ml +++ b/ocaml/idl/datamodel_utils.ml @@ -34,7 +34,7 @@ module Types = struct | Set ty -> fold_left f accu ty | Map(key, value) -> fold_left f (fold_left f accu key) value | _ -> accu - + let rec fold_right f ty accu = let accu = f ty accu in match ty with @@ -55,19 +55,19 @@ module Types = struct | x -> [ x ] (** All types in a list of objects (automatically decomposes) *) - let of_objects system = + let of_objects system = let fields = List.concat (List.map (fun x -> x.contents) system) in let field_types = List.concat (List.map of_content fields) in - + let messages = List.concat (List.map (fun x -> x.messages) system) in let return_types = let aux accu msg = match msg.msg_result with - | None -> accu - | Some(ty, _) -> ty :: accu in + | None -> accu + | Some(ty, _) -> ty :: accu in List.fold_left aux [] messages in let param_types = List.map (fun p -> p.param_type) - (List.concat (List.map (fun x -> x.msg_params) messages)) in + (List.concat (List.map (fun x -> x.msg_params) messages)) in let selves = List.map (fun obj -> Ref(obj.name)) system in let set_self = List.map (fun t -> Set(t)) selves in @@ -84,12 +84,12 @@ module Relations = struct This is only used for computing the class relationships diagram. 
*) let rec of_types a inb = match inb with | Set x -> if of_types a x = `None then `None else `Many - | Map(x, y) -> if of_types a x = `None && of_types a y = `None + | Map(x, y) -> if of_types a x = `None && of_types a y = `None then `None else `Many | x -> if a = x then `One else `None - let classify api ((a, a_field_name), (b, b_field_name)) = - let a_field = get_field_by_name api ~objname:a ~fieldname:a_field_name + let classify api ((a, a_field_name), (b, b_field_name)) = + let a_field = get_field_by_name api ~objname:a ~fieldname:a_field_name and b_field = get_field_by_name api ~objname:b ~fieldname:b_field_name in of_types (Ref b) a_field.ty, of_types (Ref a) b_field.ty @@ -100,48 +100,48 @@ module Relations = struct | `Many, `Many -> "many-to-many" | _, _ -> "unknown type" - let other_end_of api ((a, b) as one_end) = + let other_end_of api ((a, b) as one_end) = let rels = relations_of_api api in - match (List.concat (List.map (function - | (x, other_end) when x = one_end -> [ other_end ] - | (other_end, x) when x = one_end -> [ other_end ] - | _ -> []) rels)) with + match (List.concat (List.map (function + | (x, other_end) when x = one_end -> [ other_end ] + | (other_end, x) when x = one_end -> [ other_end ] + | _ -> []) rels)) with | [ other_end ] -> other_end | [] -> failwith (Printf.sprintf "Couldn't find other end of relation (%s,%s)" a b) | _ -> failwith ("Found multiple other ends of relation?!") - let is_in_relation api x = - let rels = relations_of_api api in - List.mem_assoc x rels || (List.mem_assoc x (List.map (fun (k, v) -> v, k) rels)) - + let is_in_relation api x = + let rels = relations_of_api api in + List.mem_assoc x rels || (List.mem_assoc x (List.map (fun (k, v) -> v, k) rels)) + end (** Compute a flat list of fields from a datamodel object *) -let fields_of_obj (x: obj) : field list = +let fields_of_obj (x: obj) : field list = let rec of_contents = function | Namespace(_, xs) -> List.concat (List.map of_contents xs) | Field x -> [ x ] in 
List.concat (List.map of_contents x.contents) (* True if an object has a label (and therefore should have a get_by_name_label message *) -let obj_has_get_by_name_label x = +let obj_has_get_by_name_label x = let all_fields = fields_of_obj x in - List.filter (fun fld -> fld.full_name = [ "name"; "label" ]) all_fields <> [] + List.filter (fun fld -> fld.full_name = [ "name"; "label" ]) all_fields <> [] (* True if an object has tags (and therefore should have a get_tags message *) -let obj_has_get_tags x = +let obj_has_get_tags x = let all_fields = fields_of_obj x in - List.filter (fun fld -> fld.full_name = [ "tags" ]) all_fields <> [] + List.filter (fun fld -> fld.full_name = [ "tags" ]) all_fields <> [] (** XXX: unfortunately we don't mark which parameters of a message refer to the self; return the first parameter of the correct type *) -let find_self_parameter (msg: message) = +let find_self_parameter (msg: message) = match List.filter (fun p -> p.param_type = Ref msg.msg_obj_name) msg.msg_params with | {param_name=x} :: _ -> x | _ -> failwith (Printf.sprintf "Failed to determine self parameter for message %s" msg.msg_name) let plural name = - if String.endswith "metrics" name then + if String.endswith "metrics" name then name ^ " instances" else name ^ "s" @@ -177,7 +177,7 @@ let default_doccomments = "get_all_records", (fun x -> sprintf "Return a map of %s references to %s records for all %s known to the system." - x.name x.name (plural x.name)); + x.name x.name (plural x.name)); "copy", (fun x -> sprintf "returns a reference to an object which is a shallow-copy of the original. NB all Set(Ref _) fields will be empty in the duplicate."); @@ -200,134 +200,134 @@ let get_lifecycle (x : obj) (meth : string) : lifecycle_transition list = * The C bindings set this to get the self variable named after the class, * as opposed to being called "self". It also means that constructor argument * records are called "record" as oppsed to "args". 
- *) +*) let named_self = ref false -let self_of_obj x = +let self_of_obj x = let self_name = if !named_self then x.name else _self in {param_type=Ref x.name; param_name=self_name; param_doc="reference to the object"; param_release=x.obj_release; param_default=None} (** Compute the list of messages corresponding to a single field *) -let new_messages_of_field x order fld = +let new_messages_of_field x order fld = let self = self_of_obj x in let prefix prefix = prefix ^ (String.concat "_" fld.full_name) in let common = { msg_name = ""; msg_params = []; msg_result = None; msg_errors = []; - msg_doc = "no documentation available"; - msg_async = false; - msg_session = true; - msg_secret = false; - msg_release = fld.release; - msg_lifecycle = fld.lifecycle; - msg_has_effect = fld.field_has_effect; - msg_force_custom = x.force_custom_actions; - msg_no_current_operations = false; - msg_tag = Custom; - msg_obj_name = x.name; - msg_custom_marshaller = false; - msg_hide_from_docs = false; - msg_pool_internal = false; - msg_db_only = fld.internal_only; - msg_allowed_roles = None; - msg_map_keys_roles = []; - msg_doc_tags = []; - msg_forward_to = None - } in + msg_doc = "no documentation available"; + msg_async = false; + msg_session = true; + msg_secret = false; + msg_release = fld.release; + msg_lifecycle = fld.lifecycle; + msg_has_effect = fld.field_has_effect; + msg_force_custom = x.force_custom_actions; + msg_no_current_operations = false; + msg_tag = Custom; + msg_obj_name = x.name; + msg_custom_marshaller = false; + msg_hide_from_docs = false; + msg_pool_internal = false; + msg_db_only = fld.internal_only; + msg_allowed_roles = None; + msg_map_keys_roles = []; + msg_doc_tags = []; + msg_forward_to = None + } in let getter = { common with - msg_name = prefix "get_"; - msg_params = [ self ]; - msg_result = Some (fld.ty, "value of the field"); - msg_errors = []; - msg_doc = (Printf.sprintf - "Get the %s field of the given %s." 
- (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_getter_roles; - msg_tag = FromField(Getter, fld) } in + msg_name = prefix "get_"; + msg_params = [ self ]; + msg_result = Some (fld.ty, "value of the field"); + msg_errors = []; + msg_doc = (Printf.sprintf + "Get the %s field of the given %s." + (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_getter_roles; + msg_tag = FromField(Getter, fld) } in let setter = { common with - msg_name = prefix "set_"; - msg_params = [ self; - {param_type=fld.ty; param_name=(if !named_self then fld.field_name else "value"); param_doc="New value to set"; - param_release=fld.release; param_default=None} - ]; - msg_result = None; - msg_errors = []; - msg_doc = (Printf.sprintf - "Set the %s field of the given %s." - (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_setter_roles; - msg_tag = FromField(Setter, fld) } in + msg_name = prefix "set_"; + msg_params = [ self; + {param_type=fld.ty; param_name=(if !named_self then fld.field_name else "value"); param_doc="New value to set"; + param_release=fld.release; param_default=None} + ]; + msg_result = None; + msg_errors = []; + msg_doc = (Printf.sprintf + "Set the %s field of the given %s." 
+ (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_setter_roles; + msg_tag = FromField(Setter, fld) } in (* Set(Ref _) fields in a many-to-many generate symmetrical add_to, remove_from etc *) let is_many_to_many = - let api = Datamodel.all_api in - let this = x.name, fld.field_name in - Relations.is_in_relation api this && - (Relations.classify api (this,(Relations.other_end_of api this)) = (`Many, `Many)) in + let api = Datamodel.all_api in + let this = x.name, fld.field_name in + Relations.is_in_relation api this && + (Relations.classify api (this,(Relations.other_end_of api this)) = (`Many, `Many)) in match (fld.ty, fld.field_ignore_foreign_key, is_many_to_many) with | Set(Ref _), false, false -> if order = 0 then [getter] else [] - | Set(t), _, _ -> - if order = 0 then [getter] else [ - setter; (* only makes sense to the database *) - { common with - msg_name = prefix "add_"; - msg_params = [ self; - {param_type=t; param_name="value"; param_doc="New value to add"; param_release=fld.release; param_default=None} ]; - msg_result = None; - msg_doc = (sprintf - "Add the given value to the %s field of the given %s. If the value is already in that Set, then do nothing." - (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_setter_roles; - msg_tag = FromField(Add, fld) }; - { common with - msg_name = prefix "remove_"; - msg_params = [ self; - {param_type=t; param_name="value"; param_doc="Value to remove"; param_release=fld.release; param_default=None} ]; - msg_result = None; - msg_doc = (sprintf - "Remove the given value from the %s field of the given %s. If the value is not in that Set, then do nothing." 
- (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_setter_roles; - msg_tag = FromField(Remove, fld) }; - ] - | Map(k, v), _, _ -> - if order = 0 then [getter] else [ - setter; (* only makes sense to the database *) - { common with - msg_name = prefix "add_to_"; - msg_params = [ self; - {param_type=k; param_name="key"; param_doc="Key to add"; param_release=fld.release; param_default=None}; - {param_type=v; param_name="value"; param_doc="Value to add"; param_release=fld.release; param_default=None}]; - msg_result = None; - msg_doc = (sprintf - "Add the given key-value pair to the %s field of the given %s." - (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_setter_roles; - msg_map_keys_roles = List.map (fun (k,(w))->(k,w)) fld.field_map_keys_roles; - msg_tag = FromField(Add, fld) }; - { common with - msg_name = prefix "remove_from_"; - msg_params = [ self; - {param_type=k; param_name="key"; param_doc="Key to remove"; param_release=fld.release; param_default=None} ]; - msg_result = None; - msg_doc = (sprintf - "Remove the given key and its corresponding value from the %s field of the given %s. If the key is not in that Map, then do nothing." - (String.concat "/" fld.full_name) x.name); - msg_allowed_roles = fld.field_setter_roles; - msg_map_keys_roles = List.map (fun (k,(w))->(k,w)) fld.field_map_keys_roles; - msg_tag = FromField(Remove, fld) }; - ] + | Set(t), _, _ -> + if order = 0 then [getter] else [ + setter; (* only makes sense to the database *) + { common with + msg_name = prefix "add_"; + msg_params = [ self; + {param_type=t; param_name="value"; param_doc="New value to add"; param_release=fld.release; param_default=None} ]; + msg_result = None; + msg_doc = (sprintf + "Add the given value to the %s field of the given %s. If the value is already in that Set, then do nothing." 
+ (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_setter_roles; + msg_tag = FromField(Add, fld) }; + { common with + msg_name = prefix "remove_"; + msg_params = [ self; + {param_type=t; param_name="value"; param_doc="Value to remove"; param_release=fld.release; param_default=None} ]; + msg_result = None; + msg_doc = (sprintf + "Remove the given value from the %s field of the given %s. If the value is not in that Set, then do nothing." + (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_setter_roles; + msg_tag = FromField(Remove, fld) }; + ] + | Map(k, v), _, _ -> + if order = 0 then [getter] else [ + setter; (* only makes sense to the database *) + { common with + msg_name = prefix "add_to_"; + msg_params = [ self; + {param_type=k; param_name="key"; param_doc="Key to add"; param_release=fld.release; param_default=None}; + {param_type=v; param_name="value"; param_doc="Value to add"; param_release=fld.release; param_default=None}]; + msg_result = None; + msg_doc = (sprintf + "Add the given key-value pair to the %s field of the given %s." + (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_setter_roles; + msg_map_keys_roles = List.map (fun (k,(w))->(k,w)) fld.field_map_keys_roles; + msg_tag = FromField(Add, fld) }; + { common with + msg_name = prefix "remove_from_"; + msg_params = [ self; + {param_type=k; param_name="key"; param_doc="Key to remove"; param_release=fld.release; param_default=None} ]; + msg_result = None; + msg_doc = (sprintf + "Remove the given key and its corresponding value from the %s field of the given %s. If the key is not in that Map, then do nothing." 
+ (String.concat "/" fld.full_name) x.name); + msg_allowed_roles = fld.field_setter_roles; + msg_map_keys_roles = List.map (fun (k,(w))->(k,w)) fld.field_map_keys_roles; + msg_tag = FromField(Remove, fld) }; + ] | t, _, _ -> [ if order = 0 then getter else setter - ] + ] -let all_new_messages_of_field obj fld = +let all_new_messages_of_field obj fld = new_messages_of_field obj 0 fld @ (new_messages_of_field obj 1 fld) (** Compute a list of all messages associated with an object including the implicit ones. NB this list requires filtering before being used for (eg) a client *) -let messages_of_obj (x: obj) document_order : message list = +let messages_of_obj (x: obj) document_order : message list = let all_fields = fields_of_obj x in let self = self_of_obj x in (* Generate appropriate get/set/add/remove messages for each field. @@ -335,143 +335,143 @@ let messages_of_obj (x: obj) document_order : message list = needs to be filtered before getting to the client *) (* Dummy message *) - let common = { msg_secret=false; msg_name=""; msg_params=[]; msg_result=None; msg_errors = []; msg_doc=""; - msg_async=false; msg_custom_marshaller = false; msg_db_only = false; - msg_no_current_operations = false; - msg_hide_from_docs = false; msg_pool_internal = false; - msg_session=false; msg_release=x.obj_release; msg_lifecycle=x.obj_lifecycle; msg_has_effect=false; msg_tag=Custom; - msg_force_custom = x.force_custom_actions; - msg_allowed_roles = None; - msg_map_keys_roles = []; - msg_obj_name=x.name; - msg_doc_tags = []; - msg_forward_to = None; - } in + let common = { msg_secret=false; msg_name=""; msg_params=[]; msg_result=None; msg_errors = []; msg_doc=""; + msg_async=false; msg_custom_marshaller = false; msg_db_only = false; + msg_no_current_operations = false; + msg_hide_from_docs = false; msg_pool_internal = false; + msg_session=false; msg_release=x.obj_release; msg_lifecycle=x.obj_lifecycle; msg_has_effect=false; msg_tag=Custom; + msg_force_custom = x.force_custom_actions; 
+ msg_allowed_roles = None; + msg_map_keys_roles = []; + msg_obj_name=x.name; + msg_doc_tags = []; + msg_forward_to = None; + } in (* Constructor *) let ctor = let name = "create" in { common with - msg_name = name; - msg_params = [ {param_type=Record x.name; - param_name=(if !named_self then "record" else "args"); - param_doc="All constructor arguments"; - param_release=x.obj_release; param_default = None - }]; - msg_result = Some (Ref x.name, "reference to the newly created object"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = true; - msg_session = true; - msg_has_effect = true; - msg_allowed_roles = x.obj_allowed_roles; - msg_tag = FromObject Make } in + msg_name = name; + msg_params = [ {param_type=Record x.name; + param_name=(if !named_self then "record" else "args"); + param_doc="All constructor arguments"; + param_release=x.obj_release; param_default = None + }]; + msg_result = Some (Ref x.name, "reference to the newly created object"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = true; + msg_session = true; + msg_has_effect = true; + msg_allowed_roles = x.obj_allowed_roles; + msg_tag = FromObject Make } in (* Destructor *) let dtor = let name = "destroy" in { common with - msg_name = name; - msg_params = [ self ]; - msg_result = None; - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = true; - msg_session = true; - msg_has_effect = true; - msg_allowed_roles = x.obj_allowed_roles; - msg_tag = FromObject Delete } in + msg_name = name; + msg_params = [ self ]; + msg_result = None; + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = true; + msg_session = true; + msg_has_effect = true; + msg_allowed_roles = x.obj_allowed_roles; + msg_tag = FromObject Delete } in (* Get by UUID *) let uuid = let name = "get_by_uuid" in { common with - msg_name = name; - msg_params = [ {param_type=String; param_name="uuid"; 
param_doc="UUID of object to return"; param_release=x.obj_release; param_default = None} ]; - msg_result = Some (Ref x.name, "reference to the object"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = false; - msg_session = true; - msg_has_effect = false; - msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - msg_tag = FromObject GetByUuid } in + msg_name = name; + msg_params = [ {param_type=String; param_name="uuid"; param_doc="UUID of object to return"; param_release=x.obj_release; param_default = None} ]; + msg_result = Some (Ref x.name, "reference to the object"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = false; + msg_session = true; + msg_has_effect = false; + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + msg_tag = FromObject GetByUuid } in (* Get by label *) let get_by_name_label = let name = "get_by_name_label" in { common with - msg_name = name; - msg_params = [ {param_type=String; param_name="label"; param_doc="label of object to return"; param_release=x.obj_release; param_default = None} ]; - msg_result = Some (Set(Ref x.name), "references to objects with matching names"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = false; - msg_session = true; - msg_has_effect = false; - msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - msg_tag = FromObject GetByLabel } in + msg_name = name; + msg_params = [ {param_type=String; param_name="label"; param_doc="label of object to return"; param_release=x.obj_release; param_default = None} ]; + msg_result = Some (Set(Ref x.name), "references to objects with matching names"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = false; + msg_session = true; + msg_has_effect = false; + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + msg_tag = FromObject GetByLabel } in (* Get Record *) let get_record = let name = "get_record" in { common with - 
msg_name = name; - msg_params = [ self ]; - msg_result = Some (Record x.name, "all fields from the object"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = false; - msg_session = true; - msg_has_effect = false; - msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - msg_tag = FromObject GetRecord } in + msg_name = name; + msg_params = [ self ]; + msg_result = Some (Record x.name, "all fields from the object"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = false; + msg_session = true; + msg_has_effect = false; + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + msg_tag = FromObject GetRecord } in (* Get Record (private db version) *) let get_record_internal = let name = "get_record_internal" in { common with - msg_name = name; - msg_params = [ self ]; - msg_result = Some (Record x.name, "all fields from the object, including implementation-only ones"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = false; - msg_session = true; - msg_db_only = true; - msg_release = {opensource=[]; internal=[]; internal_deprecated_since=None}; (* internal messages not in an any API releases... *) - msg_has_effect = false; - msg_tag = FromObject (Private GetDBRecord); - msg_hide_from_docs = true; - } in + msg_name = name; + msg_params = [ self ]; + msg_result = Some (Record x.name, "all fields from the object, including implementation-only ones"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = false; + msg_session = true; + msg_db_only = true; + msg_release = {opensource=[]; internal=[]; internal_deprecated_since=None}; (* internal messages not in an any API releases... 
*) + msg_has_effect = false; + msg_tag = FromObject (Private GetDBRecord); + msg_hide_from_docs = true; + } in (* Internal database-only get_all function *) let get_all = let name = "get_all" in { common with - msg_name = name; - msg_params = []; - msg_result = Some(Set(Ref x.name), "references to all objects"); - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name; - msg_async = false; - msg_session = true; (* but irrelevant because currently not exposed *) - msg_release = {opensource=[]; internal=[]; internal_deprecated_since=None}; - msg_db_only = true; - msg_has_effect = false; - msg_tag = FromObject (Private GetDBAll); - msg_hide_from_docs = true } in + msg_name = name; + msg_params = []; + msg_result = Some(Set(Ref x.name), "references to all objects"); + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name; + msg_async = false; + msg_session = true; (* but irrelevant because currently not exposed *) + msg_release = {opensource=[]; internal=[]; internal_deprecated_since=None}; + msg_db_only = true; + msg_has_effect = false; + msg_tag = FromObject (Private GetDBAll); + msg_hide_from_docs = true } in (* Optional public version *) let get_all_public = { get_all with msg_release = x.obj_release; msg_tag = FromObject GetAll; msg_hide_from_docs = false; msg_db_only = false; - msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - } in + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + } in (* And the 'get_all_records_where' semi-public function *) - let get_all_records_where = { get_all_public with - msg_name = "get_all_records_where"; - msg_tag = FromObject GetAllRecordsWhere; - msg_params = [ {param_type=String; param_name="expr"; param_doc="expression representing records to fetch"; - param_release=x.obj_release; param_default = None} - ]; - msg_result = Some(Map(Ref x.name, Record x.name), "records of all matching objects"); - msg_release = {opensource=[]; internal=x.obj_release.internal; internal_deprecated_since=None}; - 
msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - msg_hide_from_docs = true; - } in + let get_all_records_where = { get_all_public with + msg_name = "get_all_records_where"; + msg_tag = FromObject GetAllRecordsWhere; + msg_params = [ {param_type=String; param_name="expr"; param_doc="expression representing records to fetch"; + param_release=x.obj_release; param_default = None} + ]; + msg_result = Some(Map(Ref x.name, Record x.name), "records of all matching objects"); + msg_release = {opensource=[]; internal=x.obj_release.internal; internal_deprecated_since=None}; + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + msg_hide_from_docs = true; + } in (* And the 'get_all_records' public function *) - let get_all_records = let name = "get_all_records" in { get_all_public with - msg_name = name; - msg_tag = FromObject GetAllRecords; - msg_params = [ ]; - msg_result = Some(Map(Ref x.name, Record x.name), "records of all objects"); - msg_release = {opensource=[]; internal=x.obj_release.internal; internal_deprecated_since=None}; - msg_allowed_roles = x.obj_implicit_msg_allowed_roles; - msg_doc = doccomment x name; - msg_lifecycle = get_lifecycle x name - } in + let get_all_records = let name = "get_all_records" in { get_all_public with + msg_name = name; + msg_tag = FromObject GetAllRecords; + msg_params = [ ]; + msg_result = Some(Map(Ref x.name, Record x.name), "records of all objects"); + msg_release = {opensource=[]; internal=x.obj_release.internal; internal_deprecated_since=None}; + msg_allowed_roles = x.obj_implicit_msg_allowed_roles; + msg_doc = doccomment x name; + msg_lifecycle = get_lifecycle x name + } in let name_label = if obj_has_get_by_name_label x then [ get_by_name_label ] else [ ] in let get_all_public = if List.mem x.name expose_get_all_messages_for then [ get_all_public; get_all_records_where; get_all_records ] else [] in @@ -481,50 +481,50 @@ let messages_of_obj (x: obj) document_order : message list = (* Fill in the obj_name field on all 
messages *) let messages = List.map (fun y -> { y with msg_obj_name = x.name }) x.messages in - if not x.in_database then - messages (* @ [ get_all; get_record; get_record_internal ]*) - else if document_order then - messages @ - get_all_public @ - [ get_all ] @ - List.concat (List.map (all_new_messages_of_field x) all_fields) @ - constructor_destructor @ - [ uuid; get_record ] @ - name_label @ - [ get_record_internal ] - else - [ get_record; get_record_internal; get_all; uuid] @ - constructor_destructor @ - name_label @ - List.concat (List.map (new_messages_of_field x 0) all_fields) @ - List.concat (List.map (new_messages_of_field x 1) all_fields) @ - messages @ - get_all_public - -let add_implicit_messages ?(document_order = false) (api: api) = + if not x.in_database then + messages (* @ [ get_all; get_record; get_record_internal ]*) + else if document_order then + messages @ + get_all_public @ + [ get_all ] @ + List.concat (List.map (all_new_messages_of_field x) all_fields) @ + constructor_destructor @ + [ uuid; get_record ] @ + name_label @ + [ get_record_internal ] + else + [ get_record; get_record_internal; get_all; uuid] @ + constructor_destructor @ + name_label @ + List.concat (List.map (new_messages_of_field x 0) all_fields) @ + List.concat (List.map (new_messages_of_field x 1) all_fields) @ + messages @ + get_all_public + +let add_implicit_messages ?(document_order = false) (api: api) = let objs = objects_of_api api and rels = relations_of_api api in - let objs = List.map (fun obj -> - (* list of all messages, existing plus implicit *) - let messages = messages_of_obj obj document_order in - let obj' = { obj with messages = messages } in - obj') objs in + let objs = List.map (fun obj -> + (* list of all messages, existing plus implicit *) + let messages = messages_of_obj obj document_order in + let obj' = { obj with messages = messages } in + obj') objs in Dm_api.make (objs, rels) - + (* Message filter which selects only those message visible to the 
client *) let on_client_side (x: message) : bool = match x with - (* Anything that's msg_db_only is not on client-side *) + (* Anything that's msg_db_only is not on client-side *) | { msg_db_only = true } -> false - (* Client cannot modify (set/add/remove) a non-RW field *) + (* Client cannot modify (set/add/remove) a non-RW field *) | { msg_tag = FromField((Setter|Add|Remove), { qualifier = RW }) } -> true | { msg_tag = FromField((Setter|Add|Remove), _) } -> false - (* If an object is tagged with custom ctor/dtor, omit the default one *) - | { msg_tag = FromObject(Make|Delete) } -> - let obj = Dm_api.get_obj_by_name Datamodel.all_api ~objname:x.msg_obj_name in - obj.gen_constructor_destructor + (* If an object is tagged with custom ctor/dtor, omit the default one *) + | { msg_tag = FromObject(Make|Delete) } -> + let obj = Dm_api.get_obj_by_name Datamodel.all_api ~objname:x.msg_obj_name in + obj.gen_constructor_destructor | { msg_obj_name = "event" } -> - x.msg_name <> "get_record" + x.msg_name <> "get_record" | _ -> true let wire_name_common sep ~sync (obj: obj) (msg: message) = @@ -539,25 +539,25 @@ let alternative_wire_name = wire_name_common "_" let wire_name_of_field (fld: field) = String.concat "_" fld.full_name let string_of_doc_tag = function - | VM_lifecycle -> "vm-lifecycle" - | Snapshots -> "snapshots" - | Networking -> "networking" - | Memory -> "memory" - | Windows -> "windows-vm" + | VM_lifecycle -> "vm-lifecycle" + | Snapshots -> "snapshots" + | Networking -> "networking" + | Memory -> "memory" + | Windows -> "windows-vm" let string_of_lifecycle_transition = function - | Prototyped -> "prototyped" - | Published -> "published" - | Extended -> "extended" - | Changed -> "changed" - | Deprecated -> "deprecated" - | Removed -> "removed" + | Prototyped -> "prototyped" + | Published -> "published" + | Extended -> "extended" + | Changed -> "changed" + | Deprecated -> "deprecated" + | Removed -> "removed" (* Check whether the last transition in an API 
message's lifecycle is Removed. * This allows us to remove API calls and re-add them, and fully list the * corresponding lifecycle changes. *) let rec has_been_removed = function - | [] -> false - | (Removed, _, _) :: [] -> true - | _ :: other_transitions -> has_been_removed other_transitions + | [] -> false + | (Removed, _, _) :: [] -> true + | _ :: other_transitions -> has_been_removed other_transitions diff --git a/ocaml/idl/datamodel_values.ml b/ocaml/idl/datamodel_values.ml index fa03457aa3c..c5679426361 100644 --- a/ocaml/idl/datamodel_values.ml +++ b/ocaml/idl/datamodel_values.ml @@ -21,8 +21,8 @@ let to_string v = | VInt i -> Int64.to_string i | VFloat f -> string_of_float f | VEnum e -> e - | _ -> raise Map_key_that_cannot_be_represented_as_string - + | _ -> raise Map_key_that_cannot_be_represented_as_string + let rec to_rpc v = match v with VString s -> Rpc.String s @@ -46,25 +46,25 @@ let rec to_xml v = | VEnum e -> XMLRPC.To.string e | VMap vvl -> XMLRPC.To.structure (List.map (fun (v1,v2)-> to_string v1, to_xml v2) vvl) | VSet vl -> XMLRPC.To.array (List.map (fun v->to_xml v) vl) - | VRef r -> XMLRPC.To.string r + | VRef r -> XMLRPC.To.string r | VCustom (_,y) -> to_xml y open Printf let to_ocaml_string v = - let rec aux = function - | Rpc.Null -> "Rpc.Null" - | Rpc.String s -> sprintf "Rpc.String \"%s\"" s - | Rpc.Int i -> sprintf "Rpc.Int %LdL" i - | Rpc.Int32 i -> sprintf "Rpc.Int32 %ldl" i - | Rpc.Float f -> sprintf "Rpc.Float %f" f - | Rpc.Bool b -> sprintf "Rpc.Bool %b" b - | Rpc.Dict d -> sprintf "Rpc.Dict [%s]" (String.concat ";" (List.map (fun (n,v) -> sprintf "(\"%s\",%s)" n (aux v)) d)) - | Rpc.Enum l -> sprintf "Rpc.Enum [%s]" (String.concat ";" (List.map aux l)) - | Rpc.DateTime t -> sprintf "Rpc.DateTime %s" t in - match v with - | VCustom (x,_) -> x - | _ -> aux (to_rpc v) + let rec aux = function + | Rpc.Null -> "Rpc.Null" + | Rpc.String s -> sprintf "Rpc.String \"%s\"" s + | Rpc.Int i -> sprintf "Rpc.Int %LdL" i + | Rpc.Int32 i -> 
sprintf "Rpc.Int32 %ldl" i + | Rpc.Float f -> sprintf "Rpc.Float %f" f + | Rpc.Bool b -> sprintf "Rpc.Bool %b" b + | Rpc.Dict d -> sprintf "Rpc.Dict [%s]" (String.concat ";" (List.map (fun (n,v) -> sprintf "(\"%s\",%s)" n (aux v)) d)) + | Rpc.Enum l -> sprintf "Rpc.Enum [%s]" (String.concat ";" (List.map aux l)) + | Rpc.DateTime t -> sprintf "Rpc.DateTime %s" t in + match v with + | VCustom (x,_) -> x + | _ -> aux (to_rpc v) let rec to_db v = let open Schema.Value in diff --git a/ocaml/idl/dm_api.ml b/ocaml/idl/dm_api.ml index 8012b324bda..beadc1bd8b5 100644 --- a/ocaml/idl/dm_api.ml +++ b/ocaml/idl/dm_api.ml @@ -14,7 +14,7 @@ open Datamodel_types (** The api is made up of objects (which contain fields and additional RPCs), and - relationships, which specify which fields are bound together -- i.e. refer to the + relationships, which specify which fields are bound together -- i.e. refer to the same underlying data *) type api = (obj list) * (relation list) @@ -22,20 +22,20 @@ type api = (obj list) * (relation list) let objects_of_api (objs, _) = objs let relations_of_api (_, rels) = rels -let print_api_stats (system, relations) = - Printf.printf "%d objects and %d relations\n" +let print_api_stats (system, relations) = + Printf.printf "%d objects and %d relations\n" (List.length system) (List.length relations); - Printf.printf "objects = [ %s ]\n" + Printf.printf "objects = [ %s ]\n" (String.concat "; " (List.map (fun x -> x.name) system)) -let get_obj_by_name (system, relations) ~objname:name = +let get_obj_by_name (system, relations) ~objname:name = match List.filter (fun obj -> obj.name = name) system with | [ obj ] -> obj - | _::_ -> - failwith (Printf.sprintf "Multiple instances of name [%s] found in system" name) + | _::_ -> + failwith (Printf.sprintf "Multiple instances of name [%s] found in system" name) | [] -> failwith (Printf.sprintf "Object with name [%s] not found in system" name) -let obj_exists api name = +let obj_exists api name = try let (_: obj) = 
get_obj_by_name api ~objname:name in true @@ -45,19 +45,19 @@ let obj_exists api name = let get_field_by_name api ~objname:objname ~fieldname:name = let obj = get_obj_by_name api ~objname in let rec contents = function - | Field field :: rest when field.field_name = name -> Some field - | Namespace(_, sub) :: rest -> + | Field field :: rest when field.field_name = name -> Some field + | Namespace(_, sub) :: rest -> let result = contents sub in if result = None then contents rest else result - | _ :: rest -> contents rest - | [] -> None in + | _ :: rest -> contents rest + | [] -> None in match (contents obj.contents) with | Some x -> x - | _ -> - failwith (Printf.sprintf "field not found (field %s in object %s)" name obj.name) + | _ -> + failwith (Printf.sprintf "field not found (field %s in object %s)" name obj.name) -let field_exists api ~objname ~fieldname = - try +let field_exists api ~objname ~fieldname = + try let (_: field) = get_field_by_name api ~objname ~fieldname in true with e -> false @@ -79,19 +79,19 @@ let filter_field (pred: field -> bool) (system: obj list) = | Namespace(_, []) -> [ ] (* no children so removed *) | Namespace(name, contents) -> [ Namespace(name, concat_map remove_leaf contents) ] in let rec fixpoint f x = let result = f x in if result = x then x else fixpoint f result in - let obj x = { x with contents = - let contents = concat_map content x.contents in - fixpoint (concat_map remove_leaf) contents } in - List.map obj system + let obj x = { x with contents = + let contents = concat_map content x.contents in + fixpoint (concat_map remove_leaf) contents } in + List.map obj system (** Takes a predicate and a list of objects, returning the objects with only the messages for which (predicate message) returned true. 
*) -let filter_messages (pred: message -> bool) (system: obj list) = +let filter_messages (pred: message -> bool) (system: obj list) = let obj x = { x with messages = List.filter pred x.messages } in - List.map obj system + List.map obj system (** Transforms all the fields in an API *) -let map_field (f: field -> field) (system: obj list) = +let map_field (f: field -> field) (system: obj list) = let rec content = function | Field x -> Field (f x) | Namespace(name, contents) -> Namespace(name, List.map content contents) in @@ -100,19 +100,19 @@ let map_field (f: field -> field) (system: obj list) = (** Removes all those relations which refer to non-existent objects or fields *) let filter_relations ((system,relations) as api)= List.filter (function ((a_obj, a_name), (b_obj, b_name)) -> - (obj_exists api a_obj) && - (obj_exists api b_obj) && - (field_exists api ~objname:a_obj ~fieldname:a_name) && - (field_exists api ~objname:b_obj ~fieldname:b_name)) relations + (obj_exists api a_obj) && + (obj_exists api b_obj) && + (field_exists api ~objname:a_obj ~fieldname:a_name) && + (field_exists api ~objname:b_obj ~fieldname:b_name)) relations -let rebuild system relations = +let rebuild system relations = (* remove all relations which refer to non-existent objects or fields *) let relations = filter_relations (system, relations) in let api = system, relations in api -let filter (obj: obj -> bool) (field: field -> bool) (message: message -> bool) - ((system, relations) : api) : api = +let filter (obj: obj -> bool) (field: field -> bool) (message: message -> bool) + ((system, relations) : api) : api = let system = List.filter obj system in let system = filter_field field system in let system = filter_messages message system in @@ -121,11 +121,11 @@ let filter (obj: obj -> bool) (field: field -> bool) (message: message -> bool) rebuild system relations let map (field: field -> field) (message: message -> message) - ((system, relations) : api) : api = + ((system, relations) : 
api) : api = let system = map_field field system in let system = List.map (fun obj -> { obj with messages = List.map message obj.messages }) system in rebuild system relations - + (* @@ -137,8 +137,8 @@ let make api = (api:api) let check api emergency_calls = let truefn _ = true in let api' = filter truefn truefn truefn api in - if api <> api' - then begin + if api <> api' + then begin print_endline "original:"; print_api_stats api; print_endline "filtered:"; @@ -148,79 +148,79 @@ let check api emergency_calls = let system,relations = api' in (* Sanity check 1: all the objects in the relations should exist in the system *) List.iter (fun ((a_obj, _), (b_obj, _)) -> - ignore (get_obj_by_name api ~objname:a_obj); - ignore (get_obj_by_name api ~objname:b_obj)) relations; - + ignore (get_obj_by_name api ~objname:a_obj); + ignore (get_obj_by_name api ~objname:b_obj)) relations; + (* Sanity check 2: all fields mentioned in the relations should exist *) List.iter (fun ((a_obj, a_name), (b_obj, b_name)) -> - ignore (get_field_by_name api ~objname:a_obj ~fieldname:a_name); - ignore (get_field_by_name api ~objname:b_obj ~fieldname:b_name) ) relations; + ignore (get_field_by_name api ~objname:a_obj ~fieldname:a_name); + ignore (get_field_by_name api ~objname:b_obj ~fieldname:b_name) ) relations; (* Sanity check 3: no side-effects for Ref fields *) - + let (_: obj list) = map_field (function { ty = Ref _; field_has_effect = true } -> - failwith "Can't have a Ref field with a side-effect: it makes the destructors too complicated" - | x -> x) system in + failwith "Can't have a Ref field with a side-effect: it makes the destructors too complicated" + | x -> x) system in (* Sanity check: all Set(Ref _) fields should be one of: - 1. one-to-many: the many end should be DynamicRO - 2. many-to-many: the many end should be DynamicRO or RW - 3. something else with field_ignore_foreign_key + 1. one-to-many: the many end should be DynamicRO + 2. 
many-to-many: the many end should be DynamicRO or RW + 3. something else with field_ignore_foreign_key *) let rec flatten_fields fs acc = - match fs with - [] -> acc - | (Field f)::fs -> flatten_fields fs (f::acc) - | (Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) in - let _ = - let field objname = function - { ty = Set(Ref y); qualifier = q; field_ignore_foreign_key = false } as x -> - let relations = relations @ (List.map (fun (x, y) -> y, x) relations) in - if not(List.mem_assoc (objname, x.field_name) relations) - then failwith (Printf.sprintf "Set(Ref _) field is not in relations table: %s.%s" objname x.field_name); - let other_obj, other_fld = List.assoc (objname, x.field_name) relations in - let other_f = get_field_by_name api ~objname:other_obj ~fieldname:other_fld in - begin match other_f.ty with - | Set(Ref _) -> - if q <> DynamicRO && q <> RW - then failwith (Printf.sprintf "many-to-many Set(Ref _) is not RW or DynamicRO: %s.%s" objname x.field_name); - if not x.field_persist - then failwith (Printf.sprintf "many-to-many Set(Ref _) is not persistent: %s.%s" objname x.field_name); - if not other_f.field_persist - then failwith (Printf.sprintf "many-to-many Set(Ref _) is not persistent: %s.%s" other_obj other_fld); - | Ref _ -> - if q <> DynamicRO - then failwith (Printf.sprintf "many-to-many Set(Ref _) is not DynamicRO: %s.%s" objname x.field_name) - | ty -> - failwith (Printf.sprintf "field in relationship has bad type (Ref or Set(Ref) only): %s.%s" other_obj other_fld) - end - | _ -> () in - let obj o = List.iter (field o.name) (flatten_fields o.contents []) in - List.iter obj (objects_of_api api) in - + match fs with + [] -> acc + | (Field f)::fs -> flatten_fields fs (f::acc) + | (Namespace (_,internal_fs))::fs -> flatten_fields fs (flatten_fields internal_fs acc) in + let _ = + let field objname = function + { ty = Set(Ref y); qualifier = q; field_ignore_foreign_key = false } as x -> + let relations = relations @ 
(List.map (fun (x, y) -> y, x) relations) in + if not(List.mem_assoc (objname, x.field_name) relations) + then failwith (Printf.sprintf "Set(Ref _) field is not in relations table: %s.%s" objname x.field_name); + let other_obj, other_fld = List.assoc (objname, x.field_name) relations in + let other_f = get_field_by_name api ~objname:other_obj ~fieldname:other_fld in + begin match other_f.ty with + | Set(Ref _) -> + if q <> DynamicRO && q <> RW + then failwith (Printf.sprintf "many-to-many Set(Ref _) is not RW or DynamicRO: %s.%s" objname x.field_name); + if not x.field_persist + then failwith (Printf.sprintf "many-to-many Set(Ref _) is not persistent: %s.%s" objname x.field_name); + if not other_f.field_persist + then failwith (Printf.sprintf "many-to-many Set(Ref _) is not persistent: %s.%s" other_obj other_fld); + | Ref _ -> + if q <> DynamicRO + then failwith (Printf.sprintf "many-to-many Set(Ref _) is not DynamicRO: %s.%s" objname x.field_name) + | ty -> + failwith (Printf.sprintf "field in relationship has bad type (Ref or Set(Ref) only): %s.%s" other_obj other_fld) + end + | _ -> () in + let obj o = List.iter (field o.name) (flatten_fields o.contents []) in + List.iter obj (objects_of_api api) in + (* Sanity check 4: all fields not in rel_rio and not dynamic_RO must have default values *) let (_: obj list) = map_field (function { qualifier=q; release={internal=ir}; default_value=None } as x -> - if not (List.mem rel_rio ir) && not (q=DynamicRO) then - failwith (Printf.sprintf "Field %s not in release Rio, is not DynamicRO and does not have default value specified" (String.concat "/" x.full_name)) - else x - | x -> x) system in + if not (List.mem rel_rio ir) && not (q=DynamicRO) then + failwith (Printf.sprintf "Field %s not in release Rio, is not DynamicRO and does not have default value specified" (String.concat "/" x.full_name)) + else x + | x -> x) system in (* Sanity check 5: no (Set Ref _) fields can have default values *) let (_: obj list) = map_field 
(function { qualifier=q; release={internal=ir}; default_value=Some _; ty=ty; field_ignore_foreign_key=false } as x -> - begin - match ty with - (Set (Ref _)) -> - failwith (Printf.sprintf "Field %s is a (Set (Ref _)) and has a default value specified. Please remove default value." (String.concat "/" x.full_name)) - | _ -> x - end - | x -> x) system in + begin + match ty with + (Set (Ref _)) -> + failwith (Printf.sprintf "Field %s is a (Set (Ref _)) and has a default value specified. Please remove default value." (String.concat "/" x.full_name)) + | _ -> x + end + | x -> x) system in (* Sanity check 6: all values specfieid in IDL must be of the right type *) let (_: obj list) = map_field (function { default_value=Some v; ty=ty } as x -> - if not (type_checks v ty) then - failwith (Printf.sprintf "Field %s has default value with wrong type." (String.concat "/" x.full_name)); - x - | x -> x) system in + if not (type_checks v ty) then + failwith (Printf.sprintf "Field %s has default value with wrong type." 
(String.concat "/" x.full_name)); + x + | x -> x) system in (* Sanity check 7: message parameters must be in increasing order of in_product_since *) let are_in_vsn_order ps = @@ -229,30 +229,30 @@ let check api emergency_calls = let release_lt x y = release_leq x y && x<>y in let in_since releases = (* been in since the lowest of releases *) let rec find_smallest sofar l = - match l with - [] -> sofar - | "closed"::xs -> find_smallest sofar xs (* closed is not a real release, so skip it *) - | x::xs -> if release_lt x sofar then find_smallest x xs else find_smallest sofar xs in + match l with + [] -> sofar + | "closed"::xs -> find_smallest sofar xs (* closed is not a real release, so skip it *) + | x::xs -> if release_lt x sofar then find_smallest x xs else find_smallest sofar xs in find_smallest (getlast release_order) releases in let rec check_vsns max_release_sofar ps = match ps with - [] -> true + [] -> true | (p::rest) -> - let param_in_product_since = in_since p.param_release.internal in - if release_lt param_in_product_since max_release_sofar then false - else check_vsns param_in_product_since (* <-- new max *) rest in - check_vsns rel_rio ps + let param_in_product_since = in_since p.param_release.internal in + if release_lt param_in_product_since max_release_sofar then false + else check_vsns param_in_product_since (* <-- new max *) rest in + check_vsns rel_rio ps in let _ = List.iter - (fun obj -> - List.iter - (fun msg -> - if msg.msg_tag=Custom && - not (are_in_vsn_order msg.msg_params) then failwith (Printf.sprintf "Msg %s.%s does not have parameters in version order" - obj.name msg.msg_name) - ) - obj.messages - ) system in + (fun obj -> + List.iter + (fun msg -> + if msg.msg_tag=Custom && + not (are_in_vsn_order msg.msg_params) then failwith (Printf.sprintf "Msg %s.%s does not have parameters in version order" + obj.name msg.msg_name) + ) + obj.messages + ) system in (* Sanity check 8: any "emergency calls" must not support async mode of operation -- 
if they do then our server dispatch logic will try and create a task for them; since this requires database access they @@ -260,10 +260,10 @@ let check api emergency_calls = let _ = List.iter (fun obj -> - List.iter - (fun msg -> - if msg.msg_async && (List.mem (obj.name,msg.msg_name) emergency_calls) then - failwith (Printf.sprintf "Msg %s.%s is marked as supports async and also appears in emergency_call list. These are mutually exclusive choices." obj.name msg.msg_name) - ) obj.messages + List.iter + (fun msg -> + if msg.msg_async && (List.mem (obj.name,msg.msg_name) emergency_calls) then + failwith (Printf.sprintf "Msg %s.%s is marked as supports async and also appears in emergency_call list. These are mutually exclusive choices." obj.name msg.msg_name) + ) obj.messages ) system in () diff --git a/ocaml/idl/dm_api.mli b/ocaml/idl/dm_api.mli index d0906275521..3e06aa1b98e 100644 --- a/ocaml/idl/dm_api.mli +++ b/ocaml/idl/dm_api.mli @@ -42,7 +42,7 @@ val map : (field -> field) -> (message -> message) -> api -> api (** Create an API from raw components (implicitly calls check) *) val make : (obj list * relation list) -> api -(** Perform basic sanity-checking on the API to ensure various constraints +(** Perform basic sanity-checking on the API to ensure various constraints are satisfied *) val check : api -> (string*string) list (* list of "emergency calls" *) -> unit - + diff --git a/ocaml/idl/dot_backend.ml b/ocaml/idl/dot_backend.ml index 6d69db7b0e9..f42d2e2ba96 100644 --- a/ocaml/idl/dot_backend.ml +++ b/ocaml/idl/dot_backend.ml @@ -32,60 +32,60 @@ let rec all_field_types = function | Field fr -> [ fr.field_name, fr.ty ] | Namespace(_, xs) -> List.concat (List.map all_field_types xs) -let of_objs api = +let of_objs api = let xs = objects_of_api api and relations = relations_of_api api in let names : string list = List.map (fun x -> x.name) xs in - let edges : string list = List.concat (List.map - (fun (obj : obj) -> - (* First consider the edges defined as 
relational *) - let relational = List.filter (fun ((a, _), (b, _)) -> a = obj.name) relations in - let edges = List.map (fun ((a, a_field_name), (b, b_field_name)) -> - let a_field = get_field_by_name api ~objname:a ~fieldname:a_field_name - and b_field = get_field_by_name api ~objname:b ~fieldname:b_field_name in - let get_arrow which obj ty = match Relations.of_types (Ref obj) ty with - | `None -> failwith (sprintf "bad relational edge between %s.%s and %s.%s; object name [%s] never occurs in [%s]" a a_field_name b b_field_name obj (Types.to_string ty)) - | `One -> [ which ^ "=\"none\"" ] - | `Many -> [ which ^ "=\"crow\"" ] in - let labels = [ (* "label=\"" ^ label ^ "\"";*) "color=\"blue\"" ] @ - (get_arrow "arrowhead" b a_field.ty) @ (get_arrow "arrowtail" a b_field.ty) in - - sprintf "%s -> %s [ %s ]" a b (String.concat ", " labels)) relational in - - (* list of pairs of (field name, type) *) - let name_types : (string * ty) list = List.concat (List.map all_field_types obj.contents) in - (* get rid of all those which are defined as relational *) - let name_types = List.filter - (fun (name, _) -> - List.filter (fun ((a, a_name), (b, b_name)) -> (a = obj.name && a_name = name) || (b = obj.name && b_name = name)) relations - = []) name_types in - - (* decompose each ty into a list of references *) - let name_refs : (string * string * ty) list = - List.concat (List.map (fun (name, ty) -> List.map (fun x -> name, x, ty) (all_refs ty)) name_types) in - let name_names : (string * string) list = List.map - (fun (name, obj, ty) -> - let count = match Relations.of_types (Ref obj) ty with - | `None -> "(0)" | `One -> "(1)" | `Many -> "(*)" in - name ^ count , obj) name_refs in - let edges = List.map - (fun (field, target) -> sprintf "%s -> %s [ label=\"%s\" ]" obj.name target field) name_names @ edges in - - edges - ) xs) in - - [ "digraph g{"; - let node name = Printf.sprintf "%s [ URL=\"%s.html\" ]" name name in - "node [ shape=box ]; " ^ (String.concat " " (List.map 
node names)) ^ ";" ] @ edges @ [ - "}" - ] + let edges : string list = List.concat (List.map + (fun (obj : obj) -> + (* First consider the edges defined as relational *) + let relational = List.filter (fun ((a, _), (b, _)) -> a = obj.name) relations in + let edges = List.map (fun ((a, a_field_name), (b, b_field_name)) -> + let a_field = get_field_by_name api ~objname:a ~fieldname:a_field_name + and b_field = get_field_by_name api ~objname:b ~fieldname:b_field_name in + let get_arrow which obj ty = match Relations.of_types (Ref obj) ty with + | `None -> failwith (sprintf "bad relational edge between %s.%s and %s.%s; object name [%s] never occurs in [%s]" a a_field_name b b_field_name obj (Types.to_string ty)) + | `One -> [ which ^ "=\"none\"" ] + | `Many -> [ which ^ "=\"crow\"" ] in + let labels = [ (* "label=\"" ^ label ^ "\"";*) "color=\"blue\"" ] @ + (get_arrow "arrowhead" b a_field.ty) @ (get_arrow "arrowtail" a b_field.ty) in + + sprintf "%s -> %s [ %s ]" a b (String.concat ", " labels)) relational in + + (* list of pairs of (field name, type) *) + let name_types : (string * ty) list = List.concat (List.map all_field_types obj.contents) in + (* get rid of all those which are defined as relational *) + let name_types = List.filter + (fun (name, _) -> + List.filter (fun ((a, a_name), (b, b_name)) -> (a = obj.name && a_name = name) || (b = obj.name && b_name = name)) relations + = []) name_types in + + (* decompose each ty into a list of references *) + let name_refs : (string * string * ty) list = + List.concat (List.map (fun (name, ty) -> List.map (fun x -> name, x, ty) (all_refs ty)) name_types) in + let name_names : (string * string) list = List.map + (fun (name, obj, ty) -> + let count = match Relations.of_types (Ref obj) ty with + | `None -> "(0)" | `One -> "(1)" | `Many -> "(*)" in + name ^ count , obj) name_refs in + let edges = List.map + (fun (field, target) -> sprintf "%s -> %s [ label=\"%s\" ]" obj.name target field) name_names @ edges in + + edges + 
) xs) in + + [ "digraph g{"; + let node name = Printf.sprintf "%s [ URL=\"%s.html\" ]" name name in + "node [ shape=box ]; " ^ (String.concat " " (List.map node names)) ^ ";" ] @ edges @ [ + "}" + ] (* module Perl = struct (** Output stuff as Perl *) - let rec all system dirname = + let rec all system dirname = List.iter (output_module dirname) system diff --git a/ocaml/idl/dtd_backend.ml b/ocaml/idl/dtd_backend.ml index 0c87c544718..09911a79b95 100644 --- a/ocaml/idl/dtd_backend.ml +++ b/ocaml/idl/dtd_backend.ml @@ -20,154 +20,154 @@ open Datamodel_utils open Datamodel_types open Dm_api - (** Create an XML DTD *) +(** Create an XML DTD *) -let rec split_cols n x = +let rec split_cols n x = if String.length x < n then [ x ] else String.sub x 0 n :: (split_cols n (String.sub x n (String.length x - n - 1))) - + type attribute = Attribute of string * string list * string option - + type dtd_element = PCData - | Element of string * dtd_element list * attribute list - - + | Element of string * dtd_element list * attribute list + + let name_of_dtd_element = function | PCData -> "(#PCDATA)" | Element(name, _, _) -> name - + let is_element = function | Element(_, _, _) -> true | _ -> false - + let string_of_attribute = function Attribute(n, options, default) -> - let opt_string = - if List.length options = 0 then - "CDATA" + let opt_string = + if List.length options = 0 then + "CDATA" + else + "(" ^ String.concat " | " options ^ ")" + in + let def_string = + match default with + Some def -> + if def = "" then + "#REQUIRED" else - "(" ^ String.concat " | " options ^ ")" - in - let def_string = - match default with - Some def -> - if def = "" then - "#REQUIRED" - else - def - | None -> - "#IMPLIED" - in - sprintf "%s %s %s" n opt_string def_string - - + def + | None -> + "#IMPLIED" + in + sprintf "%s %s %s" n opt_string def_string + + let strings_of_attributes parent atts = if List.length atts > 0 then let prefix = sprintf ""] + + prefix :: body @ [">"] else [] - - + + let 
rec strings_of_dtd_element known_els = function | PCData -> ["(#PCDATA)"] | Element(name, els, attributes) -> - if Hashtbl.mem known_els name then - let el_count = List.length els in - let att_count = List.length attributes in - if el_count = 0 && att_count = 0 then - [] - else - let prefix = sprintf " x <> "" && x <> empty) - ((name_of_dtd_element (List.hd els)) :: - (List.map - (fun x -> empty ^ name_of_dtd_element x) - (List.tl els))))) ^ - ")") in - - Hashtbl.remove known_els name; - (sprintf "%s%s>" prefix body) :: - ((strings_of_attributes name attributes) @ - (List.concat (List.map (strings_of_dtd_element known_els) - (List.filter is_element els)))) - else + if Hashtbl.mem known_els name then + let el_count = List.length els in + let att_count = List.length attributes in + if el_count = 0 && att_count = 0 then [] - + else + let prefix = sprintf " x <> "" && x <> empty) + ((name_of_dtd_element (List.hd els)) :: + (List.map + (fun x -> empty ^ name_of_dtd_element x) + (List.tl els))))) ^ + ")") in + + Hashtbl.remove known_els name; + (sprintf "%s%s>" prefix body) :: + ((strings_of_attributes name attributes) @ + (List.concat (List.map (strings_of_dtd_element known_els) + (List.filter is_element els)))) + else + [] + let element known_els name children atts = let existing_children = if Hashtbl.mem known_els name then match Hashtbl.find known_els name with - Element (_, c, att) -> (c, att) - | _ -> assert(false) + Element (_, c, att) -> (c, att) + | _ -> assert(false) else [], [] in - + let el = Element (name, - (List.setify children @ fst existing_children), + (List.setify children @ fst existing_children), (List.setify atts @ snd existing_children)) in - Hashtbl.replace known_els name el; - el - - + Hashtbl.replace known_els name el; + el + + let add_attribute known_els el_name att_name options default = ignore (element known_els el_name [] [Attribute(att_name, options, default)]) let rec dtd_element_of_contents known_els parent_name accu = function | 
Namespace(name, xs) -> - element known_els name - (List.fold_left (dtd_element_of_contents known_els name) [] xs) [] :: - accu + element known_els name + (List.fold_left (dtd_element_of_contents known_els name) [] xs) [] :: + accu | Field{field_name=name; ty=ty} -> match ty with Set (Int | Ref _) | Int | Float | DateTime | Bool -> - add_attribute known_els parent_name name [] None; - accu - + add_attribute known_els parent_name name [] None; + accu + | Set _ -> - element known_els name [] [] :: accu - + element known_els name [] [] :: accu + | Ref n -> - add_attribute known_els parent_name name [] (Some ""); - accu - + add_attribute known_els parent_name name [] (Some ""); + accu + | Enum(_, vals) -> - add_attribute known_els parent_name name - (List.map fst vals) None; - accu - + add_attribute known_els parent_name name + (List.map fst vals) None; + accu + | String -> - element known_els name [PCData] [] :: accu + element known_els name [PCData] [] :: accu | _ -> - failwith (sprintf "unimplemented DTD of field %s" name) + failwith (sprintf "unimplemented DTD of field %s" name) + - -let dtd_element_of_obj known_els x = +let dtd_element_of_obj known_els x = element known_els x.name (List.fold_left (dtd_element_of_contents known_els x.name) [] - x.contents) [] - - -let of_objs api = + x.contents) [] + + +let of_objs api = let xs = objects_of_api api in let known_els = Hashtbl.create 10 in let elements = List.map (dtd_element_of_obj known_els) xs in - + List.concat (List.map (strings_of_dtd_element known_els) elements) diff --git a/ocaml/idl/js_backend/main.ml b/ocaml/idl/js_backend/main.ml index 815ee6f5b84..f932191814d 100644 --- a/ocaml/idl/js_backend/main.ml +++ b/ocaml/idl/js_backend/main.ml @@ -18,44 +18,44 @@ open Dm_api let rec ty_to_js ty = match ty with - | String -> "{ty:\"string\"}" - | Int -> "{ty:\"int\"}" - | Float -> "{ty:\"float\"}" - | Bool -> "{ty:\"bool\"}" - | DateTime -> "{ty:\"datetime\"}" - | Enum (name,values) -> - Printf.sprintf 
"{ty:\"enum\",name:\"%s\",values:[%s]}" name - (String.concat "," (List.map (fun (v,d) -> "\""^v^"\"") values)) - | Set (ty) -> Printf.sprintf "{ty:\"set\",contents:%s}" (ty_to_js ty) - | Map (ty1,ty2) -> Printf.sprintf "{ty:\"map\",keys:%s,values:%s}" - (ty_to_js ty1) (ty_to_js ty2) - | Ref r -> Printf.sprintf "{ty:\"ref\",class:\"%s\"}" r - | Record r -> Printf.sprintf "{ty:\"record\",name:\"%s\"}" r + | String -> "{ty:\"string\"}" + | Int -> "{ty:\"int\"}" + | Float -> "{ty:\"float\"}" + | Bool -> "{ty:\"bool\"}" + | DateTime -> "{ty:\"datetime\"}" + | Enum (name,values) -> + Printf.sprintf "{ty:\"enum\",name:\"%s\",values:[%s]}" name + (String.concat "," (List.map (fun (v,d) -> "\""^v^"\"") values)) + | Set (ty) -> Printf.sprintf "{ty:\"set\",contents:%s}" (ty_to_js ty) + | Map (ty1,ty2) -> Printf.sprintf "{ty:\"map\",keys:%s,values:%s}" + (ty_to_js ty1) (ty_to_js ty2) + | Ref r -> Printf.sprintf "{ty:\"ref\",class:\"%s\"}" r + | Record r -> Printf.sprintf "{ty:\"record\",name:\"%s\"}" r let _ = let api = Datamodel_utils.add_implicit_messages (Datamodel.all_api) in let objs = objects_of_api api in - let msgs = List.flatten (List.map (fun obj -> - let jsstruct = List.map (fun msg -> - ( - (Datamodel_utils.wire_name ~sync:true obj msg), - (let params = List.map (fun param -> (param.param_name,ty_to_js param.param_type,param.param_doc)) msg.msg_params in - if msg.msg_session - then ("session_id","{ty:\"ref\",class:\"session\"}","The session reference")::params - else params), - msg.msg_doc - )) obj.messages - in - let js = List.map (fun (msgname,params,doc) -> - Printf.sprintf "\"%s\":{params:[%s],doc:\"%s\"}" msgname - (String.concat "," - (List.map (fun (name,ty,doc) -> - Printf.sprintf "{name:\"%s\",ty:%s,doc:\"%s\"}" name ty doc) params)) - doc - ) jsstruct in - js - ) objs) in + let msgs = List.flatten (List.map (fun obj -> + let jsstruct = List.map (fun msg -> + ( + (Datamodel_utils.wire_name ~sync:true obj msg), + (let params = List.map (fun param -> 
(param.param_name,ty_to_js param.param_type,param.param_doc)) msg.msg_params in + if msg.msg_session + then ("session_id","{ty:\"ref\",class:\"session\"}","The session reference")::params + else params), + msg.msg_doc + )) obj.messages + in + let js = List.map (fun (msgname,params,doc) -> + Printf.sprintf "\"%s\":{params:[%s],doc:\"%s\"}" msgname + (String.concat "," + (List.map (fun (name,ty,doc) -> + Printf.sprintf "{name:\"%s\",ty:%s,doc:\"%s\"}" name ty doc) params)) + doc + ) jsstruct in + js + ) objs) in Printf.printf "var messages={%s};" (String.concat ",\n" msgs) - - + + diff --git a/ocaml/idl/json_backend/main.ml b/ocaml/idl/json_backend/main.ml index 0508f3f7369..ce70cc4e1fa 100644 --- a/ocaml/idl/json_backend/main.ml +++ b/ocaml/idl/json_backend/main.ml @@ -20,301 +20,301 @@ open Dm_api (* JSON *) let escape_json s = - let len = String.length s in - if len > 0 then begin - let buf = Buffer.create len in - for i = 0 to len - 1 do - match s.[i] with - | '\"' -> Buffer.add_string buf "\\\"" - | '\\' -> Buffer.add_string buf "\\\\" - | '\b' -> Buffer.add_string buf "\\b" - | '\n' -> Buffer.add_string buf "\\n" - | '\r' -> Buffer.add_string buf "\\r" - | '\t' -> Buffer.add_string buf "\\t" - | c -> Buffer.add_char buf c - done; - Buffer.contents buf - end - else "" + let len = String.length s in + if len > 0 then begin + let buf = Buffer.create len in + for i = 0 to len - 1 do + match s.[i] with + | '\"' -> Buffer.add_string buf "\\\"" + | '\\' -> Buffer.add_string buf "\\\\" + | '\b' -> Buffer.add_string buf "\\b" + | '\n' -> Buffer.add_string buf "\\n" + | '\r' -> Buffer.add_string buf "\\r" + | '\t' -> Buffer.add_string buf "\\t" + | c -> Buffer.add_char buf c + done; + Buffer.contents buf + end + else "" type json = - | JObject of (string * json) list - | JArray of json list - | JString of string - | JNumber of float - | JBoolean of bool - | JEmpty + | JObject of (string * json) list + | JArray of json list + | JString of string + | JNumber of float + | 
JBoolean of bool + | JEmpty let endl n = - if n = 0 then "" - else "\n" ^ String.make (2*n - 2) ' ' + if n = 0 then "" + else "\n" ^ String.make (2*n - 2) ' ' let rec string_of_json n = function - | JObject l -> (endl n) ^ "{ " ^ (String.concat ("," ^ (endl (n+1))) (List.map (fun (s, j) -> "\"" ^ s ^ "\": " ^ (string_of_json (n+2) j)) l)) ^ " }" - | JArray l -> "[ " ^ (String.concat ", " (List.map (fun j -> (string_of_json n j)) l)) ^ " ]" - | JString s -> "\"" ^ (escape_json s) ^ "\"" - | JNumber n -> Printf.sprintf "%.4f" n - | JBoolean b -> if b = true then "true" else "false" - | JEmpty -> "\"\"" + | JObject l -> (endl n) ^ "{ " ^ (String.concat ("," ^ (endl (n+1))) (List.map (fun (s, j) -> "\"" ^ s ^ "\": " ^ (string_of_json (n+2) j)) l)) ^ " }" + | JArray l -> "[ " ^ (String.concat ", " (List.map (fun j -> (string_of_json n j)) l)) ^ " ]" + | JString s -> "\"" ^ (escape_json s) ^ "\"" + | JNumber n -> Printf.sprintf "%.4f" n + | JBoolean b -> if b = true then "true" else "false" + | JEmpty -> "\"\"" (* Datamodel *) let rec string_of_ty_with_enums ty = - match ty with - | String -> "string", [] - | Int -> "int", [] - | Float -> "float", [] - | Bool -> "bool", [] - | DateTime -> "datetime", [] - | Enum (name, kv) -> "enum " ^ name, [name, kv] - | Set (ty) -> - let s, e = string_of_ty_with_enums ty in - s ^ " set", e - | Map (ty1, ty2) -> - let s1, e1 = string_of_ty_with_enums ty1 in - let s2, e2 = string_of_ty_with_enums ty2 in - Printf.sprintf "(%s -> %s) map" s1 s2, e1 @ e2 - | Ref r -> r ^ " ref", [] - | Record r -> r ^ " record", [] + match ty with + | String -> "string", [] + | Int -> "int", [] + | Float -> "float", [] + | Bool -> "bool", [] + | DateTime -> "datetime", [] + | Enum (name, kv) -> "enum " ^ name, [name, kv] + | Set (ty) -> + let s, e = string_of_ty_with_enums ty in + s ^ " set", e + | Map (ty1, ty2) -> + let s1, e1 = string_of_ty_with_enums ty1 in + let s2, e2 = string_of_ty_with_enums ty2 in + Printf.sprintf "(%s -> %s) map" s1 s2, e1 @ e2 + 
| Ref r -> r ^ " ref", [] + | Record r -> r ^ " record", [] let string_of_qualifier = function - | RW -> "RW" - | StaticRO -> "RO/constructor" - | DynamicRO -> "RO/runtime" + | RW -> "RW" + | StaticRO -> "RO/constructor" + | DynamicRO -> "RO/runtime" let rec string_of_default = function - | VString x -> "\"" ^ x ^ "\"" - | VInt x -> Int64.to_string x - | VFloat x -> string_of_float x - | VBool x -> string_of_bool x - | VDateTime x -> Date.to_string x - | VEnum x -> x - | VMap x -> Printf.sprintf "{%s}" (String.concat ", " (List.map (fun (a, b) -> Printf.sprintf "%s -> %s" (string_of_default a) (string_of_default b)) x)) - | VSet x -> Printf.sprintf "{%s}" (String.concat ", " (List.map string_of_default x)) - | VRef x -> if x = "" then "Null" else x - | VCustom (_,y) -> string_of_default y + | VString x -> "\"" ^ x ^ "\"" + | VInt x -> Int64.to_string x + | VFloat x -> string_of_float x + | VBool x -> string_of_bool x + | VDateTime x -> Date.to_string x + | VEnum x -> x + | VMap x -> Printf.sprintf "{%s}" (String.concat ", " (List.map (fun (a, b) -> Printf.sprintf "%s -> %s" (string_of_default a) (string_of_default b)) x)) + | VSet x -> Printf.sprintf "{%s}" (String.concat ", " (List.map string_of_default x)) + | VRef x -> if x = "" then "Null" else x + | VCustom (_,y) -> string_of_default y let jarray_of_lifecycle lc = - JArray (List.map (fun (t, r, d) -> - JObject [ - "transition", JString (string_of_lifecycle_transition t); - "release", JString r; - "description", JString d; - ] - ) lc) + JArray (List.map (fun (t, r, d) -> + JObject [ + "transition", JString (string_of_lifecycle_transition t); + "release", JString r; + "description", JString d; + ] + ) lc) let fields_of_obj_with_enums obj = - let rec flatten_contents contents = - List.fold_left (fun l -> function - | Field f -> f :: l - | Namespace (name, contents) -> flatten_contents contents @ l - ) [] contents - in - let fields = flatten_contents obj.contents in - List.fold_left (fun (fields, enums) field -> - 
let ty, e = string_of_ty_with_enums field.ty in - JObject ( - ("name", JString (String.concat "_" field.full_name)) :: - ("description", JString field.field_description) :: - ("type", JString ty) :: - ("qualifier", JString (string_of_qualifier field.qualifier)) :: - ("tag", JString (match field.field_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t)) :: - ("lifecycle", jarray_of_lifecycle field.lifecycle) :: - match field.default_value with Some d -> ["default", JString (string_of_default d)] | None -> [] - ) :: fields, - enums @ e - ) ([], []) fields + let rec flatten_contents contents = + List.fold_left (fun l -> function + | Field f -> f :: l + | Namespace (name, contents) -> flatten_contents contents @ l + ) [] contents + in + let fields = flatten_contents obj.contents in + List.fold_left (fun (fields, enums) field -> + let ty, e = string_of_ty_with_enums field.ty in + JObject ( + ("name", JString (String.concat "_" field.full_name)) :: + ("description", JString field.field_description) :: + ("type", JString ty) :: + ("qualifier", JString (string_of_qualifier field.qualifier)) :: + ("tag", JString (match field.field_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t)) :: + ("lifecycle", jarray_of_lifecycle field.lifecycle) :: + match field.default_value with Some d -> ["default", JString (string_of_default d)] | None -> [] + ) :: fields, + enums @ e + ) ([], []) fields let jarray_of_result_with_enums = function - | None -> JArray [JString "void"], [] - | Some (t, d) -> - let t', enums = string_of_ty_with_enums t in - JArray [JString t'; JString d], enums + | None -> JArray [JString "void"], [] + | Some (t, d) -> + let t', enums = string_of_ty_with_enums t in + JArray [JString t'; JString d], enums let jarray_of_params_with_enums ps = - let params, enums = List.fold_left (fun (params, enums) p -> - let t, e = string_of_ty_with_enums p.param_type in - JObject [ - "type", JString t; - "name", JString p.param_name; - "doc", JString p.param_doc; - ] :: 
params, - enums @ e - ) ([], []) ps in - JArray (List.rev params), enums + let params, enums = List.fold_left (fun (params, enums) p -> + let t, e = string_of_ty_with_enums p.param_type in + JObject [ + "type", JString t; + "name", JString p.param_name; + "doc", JString p.param_doc; + ] :: params, + enums @ e + ) ([], []) ps in + JArray (List.rev params), enums let jarray_of_errors es = - JArray (List.map (fun e -> - JObject [ - "name", JString e.err_name; - "doc", JString e.err_doc; - ] - ) es ) + JArray (List.map (fun e -> + JObject [ + "name", JString e.err_name; + "doc", JString e.err_doc; + ] + ) es ) let jarray_of_roles = function - | None -> JArray [] - | Some rs -> JArray (List.map (fun s -> JString s) rs) + | None -> JArray [] + | Some rs -> JArray (List.map (fun s -> JString s) rs) let session_id = - { - param_type = Ref Datamodel._session; - param_name = "session_id"; - param_doc = "Reference to a valid session"; - param_release = Datamodel.rio_release; - param_default = None; - } + { + param_type = Ref Datamodel._session; + param_name = "session_id"; + param_doc = "Reference to a valid session"; + param_release = Datamodel.rio_release; + param_default = None; + } let messages_of_obj_with_enums obj = - List.fold_left (fun (msgs, enums) msg -> - let params = - if msg.msg_session then - session_id :: msg.msg_params - else - msg.msg_params - in - let ctor = - if msg.msg_tag = FromObject Make then - let ctor_fields = - List.filter (function { qualifier = (StaticRO | RW) } -> true | _ -> false) (fields_of_obj obj) - |> List.map (fun f -> String.concat "_" f.full_name ^ (if f.default_value = None then "*" else "")) - in - Printf.sprintf "\nThe constructor args are: %s (* = non-optional)." 
(String.concat ", " ctor_fields) - else - "" - in - let result, enums1 = jarray_of_result_with_enums msg.msg_result in - let params, enums2 = jarray_of_params_with_enums params in - JObject [ - "name", JString msg.msg_name; - "description", JString (msg.msg_doc ^ ctor); - "result", result; - "params", params; - "errors", jarray_of_errors msg.msg_errors; - "roles", jarray_of_roles msg.msg_allowed_roles; - "tag", JString (match msg.msg_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t); - "lifecycle", jarray_of_lifecycle msg.msg_lifecycle; - "implicit", JBoolean (msg.msg_tag <> Custom); - ] :: msgs, - enums @ enums1 @ enums2 - ) ([], []) obj.messages + List.fold_left (fun (msgs, enums) msg -> + let params = + if msg.msg_session then + session_id :: msg.msg_params + else + msg.msg_params + in + let ctor = + if msg.msg_tag = FromObject Make then + let ctor_fields = + List.filter (function { qualifier = (StaticRO | RW) } -> true | _ -> false) (fields_of_obj obj) + |> List.map (fun f -> String.concat "_" f.full_name ^ (if f.default_value = None then "*" else "")) + in + Printf.sprintf "\nThe constructor args are: %s (* = non-optional)." 
(String.concat ", " ctor_fields) + else + "" + in + let result, enums1 = jarray_of_result_with_enums msg.msg_result in + let params, enums2 = jarray_of_params_with_enums params in + JObject [ + "name", JString msg.msg_name; + "description", JString (msg.msg_doc ^ ctor); + "result", result; + "params", params; + "errors", jarray_of_errors msg.msg_errors; + "roles", jarray_of_roles msg.msg_allowed_roles; + "tag", JString (match msg.msg_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t); + "lifecycle", jarray_of_lifecycle msg.msg_lifecycle; + "implicit", JBoolean (msg.msg_tag <> Custom); + ] :: msgs, + enums @ enums1 @ enums2 + ) ([], []) obj.messages let jarray_of_enums enums = - JArray (List.map (fun (name, vs) -> - JObject [ - "name", JString name; - "values", JArray (List.map (fun (v, d) -> JObject [ - "name", JString v; - "doc", JString d; - ]) vs); - ] - ) enums) + JArray (List.map (fun (name, vs) -> + JObject [ + "name", JString name; + "values", JArray (List.map (fun (v, d) -> JObject [ + "name", JString v; + "doc", JString d; + ]) vs); + ] + ) enums) let json_of_objs objs = - JArray (List.map (fun obj -> - let fields, enums1 = fields_of_obj_with_enums obj in - let messages, enums2 = messages_of_obj_with_enums obj in - let enums = Stdext.Listext.List.setify (enums1 @ enums2) in - JObject [ - "name", JString obj.name; - "description", JString obj.description; - "fields", JArray fields; - "messages", JArray messages; - "enums", jarray_of_enums enums; - "lifecycle", jarray_of_lifecycle obj.obj_lifecycle; - "tag", JString (match obj.obj_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t); - ] - ) objs) + JArray (List.map (fun obj -> + let fields, enums1 = fields_of_obj_with_enums obj in + let messages, enums2 = messages_of_obj_with_enums obj in + let enums = Stdext.Listext.List.setify (enums1 @ enums2) in + JObject [ + "name", JString obj.name; + "description", JString obj.description; + "fields", JArray fields; + "messages", JArray messages; + "enums", 
jarray_of_enums enums; + "lifecycle", jarray_of_lifecycle obj.obj_lifecycle; + "tag", JString (match obj.obj_doc_tags with [] -> "" | t :: _ -> string_of_doc_tag t); + ] + ) objs) let jobject_of_change (t, n, l, s) = - JObject [ - "transition", JString (string_of_lifecycle_transition t ^ " " ^ s); - "name", JString n; - "log", JString l; - ] + JObject [ + "transition", JString (string_of_lifecycle_transition t ^ " " ^ s); + "name", JString n; + "log", JString l; + ] let compare_changes (a_t, a_n, _, a_k) (b_t, b_n, _, b_k) = - let int_of_transition = function - | Published -> 0 - | Extended -> 10 - | Changed -> 20 - | Deprecated -> 30 - | Removed -> 40 - | Prototyped -> 50 - in - let int_of_kind = function - | "class" -> 0 - | "field" -> 1 - | "message" -> 2 - | _ -> 3 - in - let cmp = compare ((int_of_transition a_t) + (int_of_kind a_k)) ((int_of_transition b_t) + (int_of_kind b_k)) in - if cmp = 0 then - compare a_n b_n - else - cmp + let int_of_transition = function + | Published -> 0 + | Extended -> 10 + | Changed -> 20 + | Deprecated -> 30 + | Removed -> 40 + | Prototyped -> 50 + in + let int_of_kind = function + | "class" -> 0 + | "field" -> 1 + | "message" -> 2 + | _ -> 3 + in + let cmp = compare ((int_of_transition a_t) + (int_of_kind a_k)) ((int_of_transition b_t) + (int_of_kind b_k)) in + if cmp = 0 then + compare a_n b_n + else + cmp let releases objs = - let changes_in_release rel = - let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in - let obj_changes = - List.map (fun (transition, release, doc) -> - transition, - obj.name, - (if doc = "" && transition = Published then obj.description else doc), - "class" - ) changes in - - let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in - List.map (fun (transition, release, doc) -> - transition, - obj.name ^ "." 
^ m.msg_name, - (if doc = "" && transition = Published then m.msg_doc else doc), - "message" - ) changes - in - (* Don't include implicit messages *) - let msgs = List.filter (fun m -> m.msg_tag = Custom) obj.messages in - let msg_changes = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in - - let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in - let field_name = String.concat "_" f.full_name in - List.map (fun (transition, release, doc) -> - transition, - obj.name ^ "." ^ field_name, - (if doc = "" && transition = Published then f.field_description else doc), - "field" - ) changes - in - let rec flatten_contents contents = - List.fold_left (fun l -> function - | Field f -> f :: l - | Namespace (name, contents) -> flatten_contents contents @ l - ) [] contents - in - let fields = flatten_contents obj.contents in - let field_changes = List.fold_left (fun l f -> l @ (changes_for_field f)) [] fields in + let changes_in_release rel = + let search_obj obj = + let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in + let obj_changes = + List.map (fun (transition, release, doc) -> + transition, + obj.name, + (if doc = "" && transition = Published then obj.description else doc), + "class" + ) changes in - obj_changes @ field_changes @ msg_changes - in - JArray (List.map search_obj objs |> List.flatten |> List.sort compare_changes |> List.map jobject_of_change) - in - let release_info = JObject (List.map (fun rel -> rel, changes_in_release rel) release_order) in - Stdext.Unixext.write_string_to_file ("release_info.json") (string_of_json 0 release_info) + let changes_for_msg m = + let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in + List.map (fun (transition, release, doc) -> + transition, + obj.name ^ "." 
^ m.msg_name, + (if doc = "" && transition = Published then m.msg_doc else doc), + "message" + ) changes + in + (* Don't include implicit messages *) + let msgs = List.filter (fun m -> m.msg_tag = Custom) obj.messages in + let msg_changes = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in + + let changes_for_field f = + let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in + let field_name = String.concat "_" f.full_name in + List.map (fun (transition, release, doc) -> + transition, + obj.name ^ "." ^ field_name, + (if doc = "" && transition = Published then f.field_description else doc), + "field" + ) changes + in + let rec flatten_contents contents = + List.fold_left (fun l -> function + | Field f -> f :: l + | Namespace (name, contents) -> flatten_contents contents @ l + ) [] contents + in + let fields = flatten_contents obj.contents in + let field_changes = List.fold_left (fun l f -> l @ (changes_for_field f)) [] fields in + + obj_changes @ field_changes @ msg_changes + in + JArray (List.map search_obj objs |> List.flatten |> List.sort compare_changes |> List.map jobject_of_change) + in + let release_info = JObject (List.map (fun rel -> rel, changes_in_release rel) release_order) in + Stdext.Unixext.write_string_to_file ("release_info.json") (string_of_json 0 release_info) let _ = - let api = Datamodel.all_api in - (* Add all implicit messages *) - let api = add_implicit_messages api in - (* Only include messages that are visible to a XenAPI client *) - let api = filter (fun _ -> true) (fun _ -> true) on_client_side api in - (* And only messages marked as not hidden from the docs, and non-internal fields *) - let api = filter (fun _ -> true) (fun f -> not f.internal_only) (fun m -> not m.msg_hide_from_docs) api in + let api = Datamodel.all_api in + (* Add all implicit messages *) + let api = add_implicit_messages api in + (* Only include messages that are visible to a XenAPI client *) + let api = filter (fun _ 
-> true) (fun _ -> true) on_client_side api in + (* And only messages marked as not hidden from the docs, and non-internal fields *) + let api = filter (fun _ -> true) (fun f -> not f.internal_only) (fun m -> not m.msg_hide_from_docs) api in - let objs = objects_of_api api in - Stdext.Unixext.write_string_to_file "xenapi.json" (objs |> json_of_objs |> string_of_json 0); - releases objs + let objs = objects_of_api api in + Stdext.Unixext.write_string_to_file "xenapi.json" (objs |> json_of_objs |> string_of_json 0); + releases objs diff --git a/ocaml/idl/latex_backend.ml b/ocaml/idl/latex_backend.ml index 0c02e7adf4f..46d9095db3c 100644 --- a/ocaml/idl/latex_backend.ml +++ b/ocaml/idl/latex_backend.ml @@ -20,60 +20,60 @@ open Dm_api open Stdext.Xstringext -let rec formatted_wrap formatter s = +let rec formatted_wrap formatter s = let split_in_2 c s = match String.split ~limit:2 c s with - h :: t -> (h, if t = [] then "" else List.hd t) - | [] -> assert false + h :: t -> (h, if t = [] then "" else List.hd t) + | [] -> assert false in let prespace, postspace = split_in_2 ' ' s in let preeol, posteol = split_in_2 '\n' s in - if String.length prespace < String.length preeol then - (Format.fprintf formatter "%s@ " prespace; - if String.length postspace > 0 then - formatted_wrap formatter postspace) - else - (if String.length posteol > 0 then - (Format.fprintf formatter "%s@\n" preeol; - formatted_wrap formatter posteol) - else - Format.fprintf formatter "%s@ " preeol) + if String.length prespace < String.length preeol then + (Format.fprintf formatter "%s@ " prespace; + if String.length postspace > 0 then + formatted_wrap formatter postspace) + else + (if String.length posteol > 0 then + (Format.fprintf formatter "%s@\n" preeol; + formatted_wrap formatter posteol) + else + Format.fprintf formatter "%s@ " preeol) let wrap s = let buf = Buffer.create 16 in let formatter = Format.formatter_of_buffer buf in - Format.pp_open_hvbox formatter 0; - Format.pp_set_margin formatter 
76; - formatted_wrap formatter s; - Format.pp_close_box formatter (); + Format.pp_open_hvbox formatter 0; + Format.pp_set_margin formatter 76; + formatted_wrap formatter s; + Format.pp_close_box formatter (); - Format.fprintf formatter "%!"; + Format.fprintf formatter "%!"; - Buffer.contents buf + Buffer.contents buf let escape s = let sl = String.explode s in let in_quote = ref true in let esc_char = function - '"' -> in_quote := not !in_quote; if !in_quote then "''" else "``" - | '_' -> "\\_" - | '#' -> "\\#" - | c -> String.make 1 c in + '"' -> in_quote := not !in_quote; if !in_quote then "''" else "``" + | '_' -> "\\_" + | '#' -> "\\#" + | c -> String.make 1 c in let escaped_list = List.map esc_char sl in - String.concat "" escaped_list + String.concat "" escaped_list let full_stop s = if String.length s = 0 then s^"." else - if String.get s (String.length s - 1) != '.' - then - s ^ "." - else - s + if String.get s (String.length s - 1) != '.' + then + s ^ "." + else + s let rtrim = String.rtrim @@ -117,13 +117,13 @@ let desc_of_ty_opt = function (** Add namespaces (separated by _) to each field name *) let flatten stuff = let rec f ns = function - | Field fr -> Field { fr with field_name = ns ^ fr.field_name} + | Field fr -> Field { fr with field_name = ns ^ fr.field_name} | Namespace(ns', contents) -> Namespace("", List.map (f (ns ^ ns' ^ "_")) contents) in - f "" stuff + f "" stuff let string_of_qualifier = function - | StaticRO -> "$\\mathit{RO}_\\mathit{ins}$" + | StaticRO -> "$\\mathit{RO}_\\mathit{ins}$" | DynamicRO -> "$\\mathit{RO}_\\mathit{run}$" | RW -> "$\\mathit{RW}$" @@ -134,33 +134,33 @@ let string_of_open_product release = Printf.sprintf "%s %s &" xe dep let of_enum_alias name options = [ - "\\begin{longtable}{|ll|}"; - "\\hline"; - "{\\tt enum " ^ (escape name) ^ "} & \\\\"; - "\\hline" ] @ + "\\begin{longtable}{|ll|}"; + "\\hline"; + "{\\tt enum " ^ (escape name) ^ "} & \\\\"; + "\\hline" ] @ (List.map (fun (option, description) -> - hgap ^ 
"{\\tt " ^ (escape option) ^ "} & " ^ (escape description) ^ " \\\\") options) @ - [ - "\\hline"; - "\\end{longtable}" - ] - - -let of_content x closed = + hgap ^ "{\\tt " ^ (escape option) ^ "} & " ^ (escape description) ^ " \\\\") options) @ + [ + "\\hline"; + "\\end{longtable}" + ] + + +let of_content x closed = let rec f prefix = function - | Field{release=release; qualifier=qualifier; field_name=name; ty=ty; field_description=description} -> - [ sprintf "%s%s & %s {\\tt %s} & %s & %s \\\\" - (if closed then - string_of_open_product release - else - "") - (string_of_qualifier qualifier) - prefix (escape name) (of_ty ty) (escape description) ] + | Field{release=release; qualifier=qualifier; field_name=name; ty=ty; field_description=description} -> + [ sprintf "%s%s & %s {\\tt %s} & %s & %s \\\\" + (if closed then + string_of_open_product release + else + "") + (string_of_qualifier qualifier) + prefix (escape name) (of_ty ty) (escape description) ] | Namespace(_, fields) -> List.concat (List.map (f prefix) fields) in f "" x - + (* - let header = [ "\\documentclass[8pt]{article}"; + let header = [ "\\documentclass[8pt]{article}"; "\\usepackage{geometry}"; "\\usepackage{layout}"; "\\geometry{"; @@ -178,7 +178,7 @@ let of_content x closed = let footer = [ "\\end{document}" ] *) - + (* Output API parameter table entry *) let mk_latex_param p = String.concat " " @@ -190,75 +190,75 @@ let mk_latex_error err = sprintf "{\\tt %s}" (escape err.err_name) let space = "\\vspace{0.3cm}" - + (* Make a latex section for an API-specified message *) let latex_section_of_message closed section_prefix x = String.concat "\n" ([ - String.concat "" ["\\"^section_prefix^"subsection{RPC name:~"; escape x.msg_name; "}\n"]; - "{\\bf Overview:} "; - if x.msg_release.internal_deprecated_since <> None - then "{\\bf This message is deprecated}" - else ""; - wrap (full_stop (escape x.msg_doc)); - " \\noindent {\\bf Signature:} "; - - let section_contents = - (String.concat " " - [if 
is_prim_opt_type x.msg_result then of_ty_opt_verbatim x.msg_result - else "("^(of_ty_opt_verbatim x.msg_result)^")"; - x.msg_name; - String.concat "" - [ - "("; - String.concat ", " - ((if x.msg_session then ["session_id s"] else []) @ - (List.map (fun p -> of_ty_verbatim p.param_type ^ " " ^ p.param_name) x.msg_params)); - ")" - ] - ]) in - String.concat "" - (if closed then + String.concat "" ["\\"^section_prefix^"subsection{RPC name:~"; escape x.msg_name; "}\n"]; + "{\\bf Overview:} "; + if x.msg_release.internal_deprecated_since <> None + then "{\\bf This message is deprecated}" + else ""; + wrap (full_stop (escape x.msg_doc)); + " \\noindent {\\bf Signature:} "; + + let section_contents = + (String.concat " " + [if is_prim_opt_type x.msg_result then of_ty_opt_verbatim x.msg_result + else "("^(of_ty_opt_verbatim x.msg_result)^")"; + x.msg_name; + String.concat "" + [ + "("; + String.concat ", " + ((if x.msg_session then ["session_id s"] else []) @ + (List.map (fun p -> of_ty_verbatim p.param_type ^ " " ^ p.param_name) x.msg_params)); + ")" + ] + ]) in + String.concat "" + (if closed then ["\n\n{\\parbox{ \\columnwidth }{\\tt ~~~~~~~"; escape section_contents; - "}}\n\n"] + "}}\n\n"] else ["\\begin{verbatim} "; section_contents; - "\\end{verbatim}\n\n"]) - ] @ - - (if x.msg_params=[] then [] - else - [ - "\\noindent{\\bf Arguments:}\n\n "; - space; - - "\\begin{tabular}{|c|c|p{7cm}|}\n \\hline"; - "{\\bf type} & {\\bf name} & {\\bf description} \\\\ \\hline"; - String.concat "\n" ((List.map mk_latex_param) x.msg_params); - "\\end{tabular}\n"; - ]) @ - - [ - space; - "\n \\noindent {\\bf Return Type:} "; - "{\\tt "; - of_ty_opt x.msg_result; "}\n\n"; - escape (desc_of_ty_opt x.msg_result); - space - ] @ - - (if x.msg_errors=[] then [space; space] - else - [ - ""; - wrap (sprintf "\\noindent{\\bf Possible Error Codes:} %s" - (String.concat ", " ((List.map mk_latex_error) - x.msg_errors))); - "\\vspace{0.6cm}" - ])) - + "\\end{verbatim}\n\n"]) + ] @ + + (if 
x.msg_params=[] then [] + else + [ + "\\noindent{\\bf Arguments:}\n\n "; + space; + + "\\begin{tabular}{|c|c|p{7cm}|}\n \\hline"; + "{\\bf type} & {\\bf name} & {\\bf description} \\\\ \\hline"; + String.concat "\n" ((List.map mk_latex_param) x.msg_params); + "\\end{tabular}\n"; + ]) @ + + [ + space; + "\n \\noindent {\\bf Return Type:} "; + "{\\tt "; + of_ty_opt x.msg_result; "}\n\n"; + escape (desc_of_ty_opt x.msg_result); + space + ] @ + + (if x.msg_errors=[] then [space; space] + else + [ + ""; + wrap (sprintf "\\noindent{\\bf Possible Error Codes:} %s" + (String.concat ", " ((List.map mk_latex_error) + x.msg_errors))); + "\\vspace{0.6cm}" + ])) + (* Make a load of sections for a list of functions, fb. if section_prefix="" then we make subsections for each function. if section_prefix="sub" then we make subsubsections for each function. *) @@ -270,7 +270,7 @@ let latex_of_funblock closed section_prefix fb = (** * The header for the table containing the fields of the given class. This * table has an additional column if closed is true. 
- *) +*) let class_header x closed = if closed then [ @@ -294,7 +294,7 @@ let class_header x closed = ] -let class_footer = +let class_footer = [ "\\hline"; "\\end{longtable}" @@ -303,36 +303,36 @@ let class_footer = let field_table_of_obj newpage x closed = let field_tex = List.concat (List.map (fun x -> of_content (flatten x) closed) x.contents) in - (if newpage then ["\\newpage"] else []) @ - [ - "\\subsection{Fields for class: "^(escape x.name)^"}"; - ] @ - (if x.contents=[] then - ["{\\bf Class "^(escape x.name)^" has no fields.}"] - else - (class_header x closed) @ field_tex @ class_footer) - + (if newpage then ["\\newpage"] else []) @ + [ + "\\subsection{Fields for class: "^(escape x.name)^"}"; + ] @ + (if x.contents=[] then + ["{\\bf Class "^(escape x.name)^" has no fields.}"] + else + (class_header x closed) @ field_tex @ class_footer) + let of_obj x closed = - [ + [ "\\newpage"; "\\section{Class: "^(escape x.name)^"}" ] @ (field_table_of_obj false x closed) @ - [ - "\\subsection{RPCs associated with class: "^(escape x.name)^"}" - ] - @ - (if x.messages=[] then - ["\n\n"; - "{\\bf Class "^(escape x.name)^" has no additional RPCs associated with it.}"] - else - [ - latex_of_funblock closed "sub" x.messages - ]) + [ + "\\subsection{RPCs associated with class: "^(escape x.name)^"}" + ] + @ + (if x.messages=[] then + ["\n\n"; + "{\\bf Class "^(escape x.name)^" has no additional RPCs associated with it.}"] + else + [ + latex_of_funblock closed "sub" x.messages + ]) let error_signature name params = if params = [] then -" + " \\vspace{0.3cm} No parameters." 
else @@ -350,18 +350,18 @@ let error_doc { err_name=name; err_params=params; err_doc=doc } = \\begin{center}\\rule{10em}{0.1pt}\\end{center} " (escape name) (wrap (escape doc)) (error_signature name params) -let include_file ?(escaped=false) ?(blanklines=false) filename = +let include_file ?(escaped=false) ?(blanklines=false) filename = let ic = open_in filename in - try - while true do - let line = input_line ic in - print_endline (if escaped then escape line else line); - if blanklines then print_endline ""; - done - with End_of_file -> () + try + while true do + let line = input_line ic in + print_endline (if escaped then escape line else line); + if blanklines then print_endline ""; + done + with End_of_file -> () and error_section () = - print_endline "\\newpage + print_endline "\\newpage \\section{Error Handling} When a low-level transport error occurs, or a request is malformed at the HTTP or XML-RPC level, the server may send an XML-RPC Fault response, or the client @@ -431,9 +431,9 @@ Each possible error code is documented in the following section. Hashtbl.fold (fun name err acc -> (name, err) :: acc) Datamodel.errors [] in - List.iter error_doc - (snd (List.split - (List.sort (fun (n1, _) (n2, _)-> compare n1 n2) errs))) + List.iter error_doc + (snd (List.split + (List.sort (fun (n1, _) (n2, _)-> compare n1 n2) errs))) let first_sentence s = List.hd (String.split '.' s) @@ -441,7 +441,7 @@ let first_sentence s = let all api closed = (* Remove private messages that are only used internally (e.g. 
get_record_internal) *) let api = Dm_api.filter (fun _ -> true) (fun _ -> true) - (fun msg -> match msg.msg_tag with (FromObject (Private _)) -> false | _ -> true) api in + (fun msg -> match msg.msg_tag with (FromObject (Private _)) -> false | _ -> true) api in let system = objects_of_api api and relations = relations_of_api api in let graphfilename = @@ -450,7 +450,7 @@ let all api closed = else "xenapi-datamodel-graph" in - print_endline "% + print_endline "% % Copyright (c) 2006-2007 XenSource, Inc. % % All rights reserved. @@ -463,95 +463,95 @@ let all api closed = "; -(* print_endline "This API Reference is autogenerated from datamodel specification and IDL --- do not hand-edit."; -*) - print_endline "\\section{Classes}"; - print_endline "The following classes are defined:"; - print_endline ""; - print_endline "\\begin{center}\\begin{tabular}{|lp{10cm}|}"; - print_endline "\\hline"; - print_endline "Name & Description \\\\"; - print_endline "\\hline"; - - List.iter (fun obj -> printf "{\\tt %s} & %s \\\\\n" (escape obj.name) (escape (first_sentence obj.description))) system; - - print_endline "\\hline"; - print_endline "\\end{tabular}\\end{center}"; - - print_endline "\\section{Relationships Between Classes}"; - print_endline "Fields that are bound together are shown in the following table: "; - - print_endline "\\begin{center}\\begin{tabular}{|ll|l|}"; - print_endline "\\hline"; - print_endline "{\\em object.field} & {\\em object.field} & {\\em relationship} \\\\\n"; - print_endline "\\hline"; - List.iter (function (((a, a_field), (b, b_field)) as rel) -> - let c = Relations.classify api rel in - printf "%s.%s & %s.%s & %s\\\\\n" - (escape a) (escape a_field) - (escape b) (escape b_field) - (Relations.string_of_classification c) - ) relations; - print_endline "\\hline"; - print_endline "\\end{tabular}\\end{center}"; - - print_endline ""; - - print_endline "The following represents bound fields (as specified above) diagramatically, using crows-foot notation to 
specify one-to-one, one-to-many or many-to-many + (* print_endline "This API Reference is autogenerated from datamodel specification and IDL --- do not hand-edit."; + *) + print_endline "\\section{Classes}"; + print_endline "The following classes are defined:"; + print_endline ""; + print_endline "\\begin{center}\\begin{tabular}{|lp{10cm}|}"; + print_endline "\\hline"; + print_endline "Name & Description \\\\"; + print_endline "\\hline"; + + List.iter (fun obj -> printf "{\\tt %s} & %s \\\\\n" (escape obj.name) (escape (first_sentence obj.description))) system; + + print_endline "\\hline"; + print_endline "\\end{tabular}\\end{center}"; + + print_endline "\\section{Relationships Between Classes}"; + print_endline "Fields that are bound together are shown in the following table: "; + + print_endline "\\begin{center}\\begin{tabular}{|ll|l|}"; + print_endline "\\hline"; + print_endline "{\\em object.field} & {\\em object.field} & {\\em relationship} \\\\\n"; + print_endline "\\hline"; + List.iter (function (((a, a_field), (b, b_field)) as rel) -> + let c = Relations.classify api rel in + printf "%s.%s & %s.%s & %s\\\\\n" + (escape a) (escape a_field) + (escape b) (escape b_field) + (Relations.string_of_classification c) + ) relations; + print_endline "\\hline"; + print_endline "\\end{tabular}\\end{center}"; + + print_endline ""; + + print_endline "The following represents bound fields (as specified above) diagramatically, using crows-foot notation to specify one-to-one, one-to-many or many-to-many relationships:"; - print_endline ""; - print_endline "\\begin{center}\\resizebox{0.8\\textwidth}{!}{"; - print_endline (sprintf "\\includegraphics{%s}" graphfilename); - print_endline "}\\end{center}"; - - print_endline "\\section{Types}"; - print_endline "\\subsection{Primitives}"; - print_endline "The following primitive types are used to specify methods and fields in the API Reference:"; - print_endline ""; - print_endline "\\begin{center}\\begin{tabular}{|ll|}"; - 
print_endline "\\hline"; - print_endline "Type & Description \\\\"; - print_endline "\\hline"; - print_endline "string & text strings \\\\"; - print_endline "int & 64-bit integers \\\\"; - print_endline "float & IEEE double-precision floating-point numbers \\\\"; - print_endline "bool & boolean \\\\"; - print_endline "datetime & date and timestamp \\\\"; - print_endline "\\hline"; - print_endline "\\end{tabular}\\end{center}"; - print_endline "\\subsection{Higher-order types}"; - print_endline "The following type constructors are used:"; - print_endline ""; - print_endline "\\begin{center}\\begin{tabular}{|ll|}"; - print_endline "\\hline"; - print_endline "Type & Description \\\\"; - print_endline "\\hline"; - print_endline "$c$ ref & reference to an object of class $c$ \\\\"; - print_endline "$t$ set & a set of elements of type $t$ \\\\"; - print_endline "($a \\rightarrow b$) map & a table mapping values of type $a$ to values of type $b$ \\\\"; - print_endline "\\hline"; - print_endline "\\end{tabular}\\end{center}"; - print_endline "\\subsection{Enumeration types}"; - print_endline "The following enumeration types are used:"; - print_endline ""; - List.iter (function Enum (name, options) -> - List.iter print_endline (of_enum_alias name options); - print_string vgap - | _ -> () ) (Types.of_objects system); - print_endline ""; - if closed then - begin - print_endline "\\section{Class field summary}"; - print_endline ""; - print_endline "This section summarises the fields in each class and their qualifiers. 
This information is replicated in the detailed class reference later in this document and is aggregated here solely for convenience."; - print_endline ""; - List.iter (fun x -> List.iter print_endline (field_table_of_obj true x closed); - print_string vgap) system; - error_section() - end; - List.iter (fun x -> List.iter print_endline (of_obj x closed); - print_string vgap) system; - if not closed then - begin - error_section() - end + print_endline ""; + print_endline "\\begin{center}\\resizebox{0.8\\textwidth}{!}{"; + print_endline (sprintf "\\includegraphics{%s}" graphfilename); + print_endline "}\\end{center}"; + + print_endline "\\section{Types}"; + print_endline "\\subsection{Primitives}"; + print_endline "The following primitive types are used to specify methods and fields in the API Reference:"; + print_endline ""; + print_endline "\\begin{center}\\begin{tabular}{|ll|}"; + print_endline "\\hline"; + print_endline "Type & Description \\\\"; + print_endline "\\hline"; + print_endline "string & text strings \\\\"; + print_endline "int & 64-bit integers \\\\"; + print_endline "float & IEEE double-precision floating-point numbers \\\\"; + print_endline "bool & boolean \\\\"; + print_endline "datetime & date and timestamp \\\\"; + print_endline "\\hline"; + print_endline "\\end{tabular}\\end{center}"; + print_endline "\\subsection{Higher-order types}"; + print_endline "The following type constructors are used:"; + print_endline ""; + print_endline "\\begin{center}\\begin{tabular}{|ll|}"; + print_endline "\\hline"; + print_endline "Type & Description \\\\"; + print_endline "\\hline"; + print_endline "$c$ ref & reference to an object of class $c$ \\\\"; + print_endline "$t$ set & a set of elements of type $t$ \\\\"; + print_endline "($a \\rightarrow b$) map & a table mapping values of type $a$ to values of type $b$ \\\\"; + print_endline "\\hline"; + print_endline "\\end{tabular}\\end{center}"; + print_endline "\\subsection{Enumeration types}"; + print_endline "The 
following enumeration types are used:"; + print_endline ""; + List.iter (function Enum (name, options) -> + List.iter print_endline (of_enum_alias name options); + print_string vgap + | _ -> () ) (Types.of_objects system); + print_endline ""; + if closed then + begin + print_endline "\\section{Class field summary}"; + print_endline ""; + print_endline "This section summarises the fields in each class and their qualifiers. This information is replicated in the detailed class reference later in this document and is aggregated here solely for convenience."; + print_endline ""; + List.iter (fun x -> List.iter print_endline (field_table_of_obj true x closed); + print_string vgap) system; + error_section() + end; + List.iter (fun x -> List.iter print_endline (of_obj x closed); + print_string vgap) system; + if not closed then + begin + error_section() + end diff --git a/ocaml/idl/ocaml_backend/context.ml b/ocaml/idl/ocaml_backend/context.ml index e7cb71304c2..7b2a434096d 100644 --- a/ocaml/idl/ocaml_backend/context.ml +++ b/ocaml/idl/ocaml_backend/context.ml @@ -21,56 +21,56 @@ module Dummy = Debug.Make(struct let name = "dummytaskhelper" end) (** Every operation has an origin: either the HTTP connection it came from or an internal subsystem (eg synchroniser thread / event handler - thread) *) -type origin = - | Http of Http.Request.t * Unix.file_descr - | Internal + thread) *) +type origin = + | Http of Http.Request.t * Unix.file_descr + | Internal let string_of_origin = function - | Http (req, fd) -> - let peer = match Unix.getpeername fd with - | Unix.ADDR_UNIX _ -> "Unix domain socket" - | Unix.ADDR_INET _ -> "Internet" in (* unfortunately all connections come from stunnel on localhost *) - Printf.sprintf "HTTP request from %s with User-Agent: %s" peer (default "unknown" req.Http.Request.user_agent) + | Http (req, fd) -> + let peer = match Unix.getpeername fd with + | Unix.ADDR_UNIX _ -> "Unix domain socket" + | Unix.ADDR_INET _ -> "Internet" in (* unfortunately all 
connections come from stunnel on localhost *) + Printf.sprintf "HTTP request from %s with User-Agent: %s" peer (default "unknown" req.Http.Request.user_agent) | Internal -> "Internal" (** A Context is used to represent every API invocation. It may be extended to include extra data without changing all the autogenerated signatures *) type t = { session_id: API.ref_session option; - task_id: API.ref_task; - task_in_database: bool; - forwarded_task : bool; - origin: origin; - task_name: string; (* Name for dummy task FIXME: used only for dummy task, as real task as their name in the database *) - database: Db_ref.t; - dbg: string; - } + task_id: API.ref_task; + task_in_database: bool; + forwarded_task : bool; + origin: origin; + task_name: string; (* Name for dummy task FIXME: used only for dummy task, as real task as their name in the database *) + database: Db_ref.t; + dbg: string; + } let get_session_id x = match x.session_id with - | None -> failwith "Could not find a session_id" - | Some x -> x + | None -> failwith "Could not find a session_id" + | Some x -> x let forwarded_task ctx = - ctx.forwarded_task - -let get_task_id ctx = + ctx.forwarded_task + +let get_task_id ctx = ctx.task_id let get_task_id_string_name ctx = (Ref.string_of ctx.task_id, ctx.task_name) -let task_in_database ctx = +let task_in_database ctx = ctx.task_in_database let get_task_name ctx = ctx.task_name -let get_origin ctx = +let get_origin ctx = string_of_origin ctx.origin -let string_of x = - let session_id = match x.session_id with +let string_of x = + let session_id = match x.session_id with | None -> "None" | Some x -> Ref.string_of x in Printf.sprintf "Context { session_id: %s; task_id: %s; task_in_database: %b; forwarded_task: %b; origin: %s; task_name: %s }" session_id @@ -85,25 +85,25 @@ let database_of x = x.database (** Calls coming in from the unix socket are pre-authenticated *) let is_unix_socket s = match Unix.getpeername s with - Unix.ADDR_UNIX _ -> true - | Unix.ADDR_INET _ 
-> false + Unix.ADDR_UNIX _ -> true + | Unix.ADDR_INET _ -> false (** Calls coming directly into xapi on port 80 from remote IPs are unencrypted *) -let is_unencrypted s = +let is_unencrypted s = match Unix.getpeername s with - | Unix.ADDR_UNIX _ -> false - | Unix.ADDR_INET (addr, _) when addr = Unix.inet_addr_loopback -> false - | Unix.ADDR_INET _ -> true + | Unix.ADDR_UNIX _ -> false + | Unix.ADDR_INET (addr, _) when addr = Unix.inet_addr_loopback -> false + | Unix.ADDR_INET _ -> true -let default_database () = - if Pool_role.is_master () - then Db_backend.make () - else Db_ref.Remote +let default_database () = + if Pool_role.is_master () + then Db_backend.make () + else Db_ref.Remote let preauth ~__context = match __context.origin with - Internal -> false - | Http (req,s) -> is_unix_socket s + Internal -> false + | Http (req,s) -> is_unix_socket s let get_initial () = { session_id = None; @@ -112,79 +112,79 @@ let get_initial () = forwarded_task = false; origin = Internal; task_name = "initial_task"; - database = default_database (); - dbg = "initial_task"; + database = default_database (); + dbg = "initial_task"; } (* ref fn used to break the cyclic dependency between context, db_actions and taskhelper *) -let __get_task_name : (__context:t -> API.ref_task -> string) ref = +let __get_task_name : (__context:t -> API.ref_task -> string) ref = ref (fun ~__context t -> "__get_task_name not set") -let __make_task = +let __make_task = ref (fun ~__context ~(http_other_config:(string * string) list) ?(description:string option) ?(session_id:API.ref_session option) ?(subtask_of:API.ref_task option) (task_name:string) -> Ref.null, Uuid.null) -let __destroy_task : (__context:t -> API.ref_task -> unit) ref = - ref (fun ~__context:_ _ -> ()) +let __destroy_task : (__context:t -> API.ref_task -> unit) ref = + ref (fun ~__context:_ _ -> ()) let string_of_task __context = __context.dbg let check_for_foreign_database ~__context = - match __context.session_id with - | Some sid 
-> - (match Db_backend.get_registered_database sid with - | Some database -> {__context with database = database} - | None -> __context) - | None -> - __context + match __context.session_id with + | Some sid -> + (match Db_backend.get_registered_database sid with + | Some database -> {__context with database = database} + | None -> __context) + | None -> + __context (** destructors *) let destroy __context = let debug = if __context.task_in_database then Real.debug else Dummy.debug in if __context.forwarded_task then debug "forwarded task destroyed"; - if not __context.forwarded_task + if not __context.forwarded_task then !__destroy_task ~__context __context.task_id (* CP-982: create tracking id in log files to link username to actions *) let trackid_of_session ?(with_brackets=false) ?(prefix="") session_id = match session_id with - | None -> "" - | Some session_id -> (* a hash is used instead of printing the sensitive session_id value *) - let trackid = Printf.sprintf "trackid=%s" (Digest.to_hex (Digest.string (Ref.string_of session_id))) in - if (with_brackets) then Printf.sprintf "%s(%s)" prefix trackid else trackid + | None -> "" + | Some session_id -> (* a hash is used instead of printing the sensitive session_id value *) + let trackid = Printf.sprintf "trackid=%s" (Digest.to_hex (Digest.string (Ref.string_of session_id))) in + if (with_brackets) then Printf.sprintf "%s(%s)" prefix trackid else trackid let trackid ?(with_brackets=false) ?(prefix="") __context = (* CP-982: create tracking id in log files to link username to actions *) - trackid_of_session ~with_brackets ~prefix __context.session_id + trackid_of_session ~with_brackets ~prefix __context.session_id let make_dbg http_other_config task_name task_id = - if List.mem_assoc "dbg" http_other_config - then List.assoc "dbg" http_other_config - else Printf.sprintf "%s%s%s" task_name (if task_name = "" then "" else " ") (Ref.really_pretty_and_small task_id) + if List.mem_assoc "dbg" http_other_config + then 
List.assoc "dbg" http_other_config + else Printf.sprintf "%s%s%s" task_name (if task_name = "" then "" else " ") (Ref.really_pretty_and_small task_id) (** constructors *) let from_forwarded_task ?(__context=get_initial ()) ?(http_other_config=[]) ?session_id ?(origin=Internal) task_id = - let task_name = - if Ref.is_dummy task_id + let task_name = + if Ref.is_dummy task_id then Ref.name_of_dummy task_id - else !__get_task_name ~__context task_id - in - let info = if not (Ref.is_dummy task_id) then Real.info else Dummy.debug in - (* CP-982: promote tracking debug line to info status *) - let dbg = make_dbg http_other_config task_name task_id in - info "task %s forwarded%s" dbg (trackid_of_session ~with_brackets:true ~prefix:" " session_id); - { session_id = session_id; - task_id = task_id; - forwarded_task = true; - task_in_database = not (Ref.is_dummy task_id); - origin = origin; - task_name = task_name; - database = default_database (); - dbg = dbg; - } + else !__get_task_name ~__context task_id + in + let info = if not (Ref.is_dummy task_id) then Real.info else Dummy.debug in + (* CP-982: promote tracking debug line to info status *) + let dbg = make_dbg http_other_config task_name task_id in + info "task %s forwarded%s" dbg (trackid_of_session ~with_brackets:true ~prefix:" " session_id); + { session_id = session_id; + task_id = task_id; + forwarded_task = true; + task_in_database = not (Ref.is_dummy task_id); + origin = origin; + task_name = task_name; + database = default_database (); + dbg = dbg; + } let make ?(__context=get_initial ()) ?(http_other_config=[]) ?(quiet=false) ?subtask_of ?session_id ?(database=default_database ()) ?(task_in_database=false) ?task_description ?(origin=Internal) task_name = let task_id, task_uuid = - if task_in_database + if task_in_database then !__make_task ~__context ~http_other_config ?description:task_description ?session_id ?subtask_of task_name else Ref.make_dummy task_name, Uuid.null in @@ -202,36 +202,36 @@ let make 
?(__context=get_initial ()) ?(http_other_config=[]) ?(quiet=false) ?sub task_uuid (trackid_of_session ~with_brackets:true ~prefix:" " session_id) (* CP-982: link each task to original session created during login *) (match subtask_of with - | None -> "" - | Some subtask_of -> " by task " ^ (make_dbg [] "" subtask_of)) - ; - { session_id = session_id; - database = database; - task_id = task_id; - task_in_database = task_in_database; - origin = origin; - forwarded_task = false; - task_name = task_name; - dbg = dbg; - } + | None -> "" + | Some subtask_of -> " by task " ^ (make_dbg [] "" subtask_of)) + ; + { session_id = session_id; + database = database; + task_id = task_id; + task_in_database = task_in_database; + origin = origin; + forwarded_task = false; + task_name = task_name; + dbg = dbg; + } let get_http_other_config http_req = - let http_other_config_hdr = "x-http-other-config-" in - http_req.Http.Request.additional_headers - |> List.filter (fun (k, v) -> String.startswith http_other_config_hdr k) - |> List.map (fun (k, v) -> String.sub k (String.length http_other_config_hdr) (String.length k - (String.length http_other_config_hdr)), v) + let http_other_config_hdr = "x-http-other-config-" in + http_req.Http.Request.additional_headers + |> List.filter (fun (k, v) -> String.startswith http_other_config_hdr k) + |> List.map (fun (k, v) -> String.sub k (String.length http_other_config_hdr) (String.length k - (String.length http_other_config_hdr)), v) (** Called by autogenerated dispatch code *) let of_http_req ?session_id ~generate_task_for ~supports_async ~label ~http_req ~fd = - let http_other_config = get_http_other_config http_req in - match http_req.Http.Request.task with - | Some task_id -> - from_forwarded_task ?session_id ~http_other_config ~origin:(Http(http_req,fd)) (Ref.of_string task_id) - | None -> - if generate_task_for && supports_async - then - let subtask_of = Pervasiveext.may Ref.of_string http_req.Http.Request.subtask_of in - make ?session_id 
?subtask_of ~http_other_config ~task_in_database:true ~origin:(Http(http_req,fd)) label - else - make ?session_id ~http_other_config ~origin:(Http(http_req,fd)) label + let http_other_config = get_http_other_config http_req in + match http_req.Http.Request.task with + | Some task_id -> + from_forwarded_task ?session_id ~http_other_config ~origin:(Http(http_req,fd)) (Ref.of_string task_id) + | None -> + if generate_task_for && supports_async + then + let subtask_of = Pervasiveext.may Ref.of_string http_req.Http.Request.subtask_of in + make ?session_id ?subtask_of ~http_other_config ~task_in_database:true ~origin:(Http(http_req,fd)) label + else + make ?session_id ~http_other_config ~origin:(Http(http_req,fd)) label diff --git a/ocaml/idl/ocaml_backend/context.mli b/ocaml/idl/ocaml_backend/context.mli index ce7c67cebf9..5709a7e9137 100644 --- a/ocaml/idl/ocaml_backend/context.mli +++ b/ocaml/idl/ocaml_backend/context.mli @@ -13,7 +13,7 @@ *) (** A Context is used to represent every API invocation. It may be extended to include extra data without changing all the autogenerated signatures *) -type t +type t type origin = | Http of Http.Request.t * Unix.file_descr @@ -21,14 +21,14 @@ type origin = (** {6 Constructors} *) -(** [make ~__context ~subtask_of ~database ~session_id ~task_in_database ~task_description ~origin name] creates a new context. +(** [make ~__context ~subtask_of ~database ~session_id ~task_in_database ~task_description ~origin name] creates a new context. 
[__context] is the calling context, [http_other_config] are extra bits of context picked up from HTTP headers, - [quiet] silences "task created" log messages, - [subtask_of] is a reference to the parent task, + [quiet] silences "task created" log messages, + [subtask_of] is a reference to the parent task, [session_id] is the current session id, - [database] is the database to use in future Db.* operations - [task_in_database] indicates if the task needs to be stored the task in the database, + [database] is the database to use in future Db.* operations + [task_in_database] indicates if the task needs to be stored the task in the database, [task_descrpition] is the description of the task, [task_name] is the task name of the created context. *) val make : @@ -54,20 +54,20 @@ val from_forwarded_task : ?http_other_config:(string * string) list -> ?session_id:API.ref_session -> ?origin:origin -> API.ref_task -> t - + (** {6 Accessors} *) (** [session_of_t __context] returns the session id stored in [__context]. In case there is no session id in this context, it fails with [Failure "Could not find a session_id"]. *) val get_session_id : t -> API.ref_session -(** [get_task_id __context] returns the task id stored in [__context]. Such a task can be either a task stored in +(** [get_task_id __context] returns the task id stored in [__context]. Such a task can be either a task stored in database or a tempory task (also called dummy). 
*) val get_task_id : t -> API.ref_task val forwarded_task : t -> bool -val string_of_task : t -> string +val string_of_task : t -> string val get_task_id_string_name : t -> string * string @@ -119,5 +119,5 @@ val __make_task : ?description:string -> ?session_id:API.ref_session -> ?subtask_of:API.ref_task -> string -> API.ref_task * API.ref_task Uuid.t) - ref + ref diff --git a/ocaml/idl/ocaml_backend/event_helper.ml b/ocaml/idl/ocaml_backend/event_helper.ml index db953cdbde8..eece834e9c5 100644 --- a/ocaml/idl/ocaml_backend/event_helper.ml +++ b/ocaml/idl/ocaml_backend/event_helper.ml @@ -12,69 +12,69 @@ * GNU Lesser General Public License for more details. *) -type event_record = - | Session of [`Session ] Ref.t * API.session_t option - | Task of [`task ] Ref.t * API.task_t option - | Event of [`Event] Ref.t * API.event_t option - | VM of [`VM] Ref.t * API.vM_t option - | VM_metrics of [`VM_metrics] Ref.t * API.vM_metrics_t option - | VM_guest_metrics of [`VM_guest_metrics] Ref.t * API.vM_guest_metrics_t option - | Host of [`host] Ref.t * API.host_t option - | Host_metrics of [`host_metrics] Ref.t * API.host_metrics_t option - | Host_cpu of [`host_cpu] Ref.t * API.host_cpu_t option - | Network of [`network] Ref.t * API.network_t option - | VIF of [`VIF] Ref.t * API.vIF_t option - | VIF_metrics of [`VIF_metrics] Ref.t * API.vIF_metrics_t option - | PIF of [`PIF] Ref.t * API.pIF_t option - | PIF_metrics of [`PIF_metrics] Ref.t * API.pIF_metrics_t option - | SR of [`SR] Ref.t * API.sR_t option - | VDI of [`VDI] Ref.t * API.vDI_t option - | VBD of [`VBD] Ref.t * API.vBD_t option - | VBD_metrics of [`VBD_metrics] Ref.t * API.vBD_metrics_t option - | PBD of [`PBD] Ref.t * API.pBD_t option - | Crashdump of [`Crashdump] Ref.t * API.crashdump_t option - | VTPM of [`VTPM] Ref.t * API.vTPM_t option - | Console of [`Console] Ref.t * API.console_t option - | User of [`User] Ref.t * API.user_t option - | Pool of [`pool] Ref.t * API.pool_t option - | Message of [`message] Ref.t 
* API.message_t option - | Secret of [`secret] Ref.t * API.secret_t option - | VMPP of [`VMPP] Ref.t * API.vMPP_t option +type event_record = + | Session of [`Session ] Ref.t * API.session_t option + | Task of [`task ] Ref.t * API.task_t option + | Event of [`Event] Ref.t * API.event_t option + | VM of [`VM] Ref.t * API.vM_t option + | VM_metrics of [`VM_metrics] Ref.t * API.vM_metrics_t option + | VM_guest_metrics of [`VM_guest_metrics] Ref.t * API.vM_guest_metrics_t option + | Host of [`host] Ref.t * API.host_t option + | Host_metrics of [`host_metrics] Ref.t * API.host_metrics_t option + | Host_cpu of [`host_cpu] Ref.t * API.host_cpu_t option + | Network of [`network] Ref.t * API.network_t option + | VIF of [`VIF] Ref.t * API.vIF_t option + | VIF_metrics of [`VIF_metrics] Ref.t * API.vIF_metrics_t option + | PIF of [`PIF] Ref.t * API.pIF_t option + | PIF_metrics of [`PIF_metrics] Ref.t * API.pIF_metrics_t option + | SR of [`SR] Ref.t * API.sR_t option + | VDI of [`VDI] Ref.t * API.vDI_t option + | VBD of [`VBD] Ref.t * API.vBD_t option + | VBD_metrics of [`VBD_metrics] Ref.t * API.vBD_metrics_t option + | PBD of [`PBD] Ref.t * API.pBD_t option + | Crashdump of [`Crashdump] Ref.t * API.crashdump_t option + | VTPM of [`VTPM] Ref.t * API.vTPM_t option + | Console of [`Console] Ref.t * API.console_t option + | User of [`User] Ref.t * API.user_t option + | Pool of [`pool] Ref.t * API.pool_t option + | Message of [`message] Ref.t * API.message_t option + | Secret of [`secret] Ref.t * API.secret_t option + | VMPP of [`VMPP] Ref.t * API.vMPP_t option let maybe f x = - match x with - | Some x -> Some (f x) - | None -> None - + match x with + | Some x -> Some (f x) + | None -> None + let record_of_event ev = - let rpc = ev.Event_types.snapshot in - match ev.Event_types.ty with - | "session" -> Session (Ref.of_string ev.Event_types.reference, maybe (API.session_t_of_rpc) rpc) - | "task" -> Task (Ref.of_string ev.Event_types.reference, maybe (API.task_t_of_rpc) rpc) - | 
"event" -> Event (Ref.of_string ev.Event_types.reference, maybe (API.event_t_of_rpc) rpc) - | "vm" -> VM (Ref.of_string ev.Event_types.reference, maybe (API.vM_t_of_rpc) rpc) - | "vm_metrics" -> VM_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vM_metrics_t_of_rpc) rpc) - | "vm_guest_metrics" -> VM_guest_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vM_guest_metrics_t_of_rpc) rpc) - | "host" -> Host (Ref.of_string ev.Event_types.reference, maybe (API.host_t_of_rpc) rpc) - | "host_metrics" -> Host_metrics (Ref.of_string ev.Event_types.reference, maybe (API.host_metrics_t_of_rpc) rpc) - | "host_cpu" -> Host_cpu (Ref.of_string ev.Event_types.reference, maybe (API.host_cpu_t_of_rpc) rpc) - | "network" -> Network (Ref.of_string ev.Event_types.reference, maybe (API.network_t_of_rpc) rpc) - | "vif" -> VIF (Ref.of_string ev.Event_types.reference, maybe (API.vIF_t_of_rpc) rpc) - | "vif_metrics" -> VIF_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vIF_metrics_t_of_rpc) rpc) - | "pif" -> PIF (Ref.of_string ev.Event_types.reference, maybe (API.pIF_t_of_rpc) rpc) - | "pif_metrics" -> PIF_metrics (Ref.of_string ev.Event_types.reference, maybe (API.pIF_metrics_t_of_rpc) rpc) - | "sr" -> SR (Ref.of_string ev.Event_types.reference, maybe (API.sR_t_of_rpc) rpc) - | "vdi" -> VDI (Ref.of_string ev.Event_types.reference, maybe (API.vDI_t_of_rpc) rpc) - | "vbd" -> VBD (Ref.of_string ev.Event_types.reference, maybe (API.vBD_t_of_rpc) rpc) - | "vbd_metrics" -> VBD_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vBD_metrics_t_of_rpc) rpc) - | "pbd" -> PBD (Ref.of_string ev.Event_types.reference, maybe (API.pBD_t_of_rpc) rpc) - | "crashdump" -> Crashdump (Ref.of_string ev.Event_types.reference, maybe (API.crashdump_t_of_rpc) rpc) - | "vtpm" -> VTPM (Ref.of_string ev.Event_types.reference, maybe (API.vTPM_t_of_rpc) rpc) - | "console" -> Console (Ref.of_string ev.Event_types.reference, maybe (API.console_t_of_rpc) rpc) - | "user" -> User 
(Ref.of_string ev.Event_types.reference, maybe (API.user_t_of_rpc) rpc) - | "pool" -> Pool (Ref.of_string ev.Event_types.reference, maybe (API.pool_t_of_rpc) rpc) - | "message" -> Message (Ref.of_string ev.Event_types.reference, maybe (API.message_t_of_rpc) rpc) - | "secret" -> Secret (Ref.of_string ev.Event_types.reference, maybe (API.secret_t_of_rpc) rpc) - | "vmpp" -> VMPP (Ref.of_string ev.Event_types.reference, maybe (API.vMPP_t_of_rpc) rpc) - | _ -> failwith "unknown event type" - + let rpc = ev.Event_types.snapshot in + match ev.Event_types.ty with + | "session" -> Session (Ref.of_string ev.Event_types.reference, maybe (API.session_t_of_rpc) rpc) + | "task" -> Task (Ref.of_string ev.Event_types.reference, maybe (API.task_t_of_rpc) rpc) + | "event" -> Event (Ref.of_string ev.Event_types.reference, maybe (API.event_t_of_rpc) rpc) + | "vm" -> VM (Ref.of_string ev.Event_types.reference, maybe (API.vM_t_of_rpc) rpc) + | "vm_metrics" -> VM_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vM_metrics_t_of_rpc) rpc) + | "vm_guest_metrics" -> VM_guest_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vM_guest_metrics_t_of_rpc) rpc) + | "host" -> Host (Ref.of_string ev.Event_types.reference, maybe (API.host_t_of_rpc) rpc) + | "host_metrics" -> Host_metrics (Ref.of_string ev.Event_types.reference, maybe (API.host_metrics_t_of_rpc) rpc) + | "host_cpu" -> Host_cpu (Ref.of_string ev.Event_types.reference, maybe (API.host_cpu_t_of_rpc) rpc) + | "network" -> Network (Ref.of_string ev.Event_types.reference, maybe (API.network_t_of_rpc) rpc) + | "vif" -> VIF (Ref.of_string ev.Event_types.reference, maybe (API.vIF_t_of_rpc) rpc) + | "vif_metrics" -> VIF_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vIF_metrics_t_of_rpc) rpc) + | "pif" -> PIF (Ref.of_string ev.Event_types.reference, maybe (API.pIF_t_of_rpc) rpc) + | "pif_metrics" -> PIF_metrics (Ref.of_string ev.Event_types.reference, maybe (API.pIF_metrics_t_of_rpc) rpc) + | "sr" -> SR 
(Ref.of_string ev.Event_types.reference, maybe (API.sR_t_of_rpc) rpc) + | "vdi" -> VDI (Ref.of_string ev.Event_types.reference, maybe (API.vDI_t_of_rpc) rpc) + | "vbd" -> VBD (Ref.of_string ev.Event_types.reference, maybe (API.vBD_t_of_rpc) rpc) + | "vbd_metrics" -> VBD_metrics (Ref.of_string ev.Event_types.reference, maybe (API.vBD_metrics_t_of_rpc) rpc) + | "pbd" -> PBD (Ref.of_string ev.Event_types.reference, maybe (API.pBD_t_of_rpc) rpc) + | "crashdump" -> Crashdump (Ref.of_string ev.Event_types.reference, maybe (API.crashdump_t_of_rpc) rpc) + | "vtpm" -> VTPM (Ref.of_string ev.Event_types.reference, maybe (API.vTPM_t_of_rpc) rpc) + | "console" -> Console (Ref.of_string ev.Event_types.reference, maybe (API.console_t_of_rpc) rpc) + | "user" -> User (Ref.of_string ev.Event_types.reference, maybe (API.user_t_of_rpc) rpc) + | "pool" -> Pool (Ref.of_string ev.Event_types.reference, maybe (API.pool_t_of_rpc) rpc) + | "message" -> Message (Ref.of_string ev.Event_types.reference, maybe (API.message_t_of_rpc) rpc) + | "secret" -> Secret (Ref.of_string ev.Event_types.reference, maybe (API.secret_t_of_rpc) rpc) + | "vmpp" -> VMPP (Ref.of_string ev.Event_types.reference, maybe (API.vMPP_t_of_rpc) rpc) + | _ -> failwith "unknown event type" + diff --git a/ocaml/idl/ocaml_backend/event_types.ml b/ocaml/idl/ocaml_backend/event_types.ml index 7e2b88646d7..3dc89f5537e 100644 --- a/ocaml/idl/ocaml_backend/event_types.ml +++ b/ocaml/idl/ocaml_backend/event_types.ml @@ -14,75 +14,75 @@ open Stdext.Xstringext (** Types used to store events: *****************************************************************) -type op = API.event_operation +type op = API.event_operation let rpc_of_op = API.rpc_of_event_operation let op_of_rpc = API.event_operation_of_rpc type event = { - id: string; - ts: string; - ty: string; - op: op; - reference: string; - snapshot: Rpc.t option; + id: string; + ts: string; + ty: string; + op: op; + reference: string; + snapshot: Rpc.t option; } with rpc let 
ev_struct_remap = [ - "id","id"; - "ts","timestamp"; - "ty","class"; - "op","operation"; - "reference","ref"; - "snapshot","snapshot" + "id","id"; + "ts","timestamp"; + "ty","class"; + "op","operation"; + "reference","ref"; + "snapshot","snapshot" ] let remap map str = - match str with - | Rpc.Dict d -> - Rpc.Dict (List.map (fun (k,v) -> (List.assoc k map, v)) d) - | _ -> str + match str with + | Rpc.Dict d -> + Rpc.Dict (List.map (fun (k,v) -> (List.assoc k map, v)) d) + | _ -> str let rpc_of_event ev = - remap ev_struct_remap (rpc_of_event ev) + remap ev_struct_remap (rpc_of_event ev) let event_of_rpc rpc = - event_of_rpc (remap (List.map (fun (k,v) -> (v,k)) ev_struct_remap) rpc) + event_of_rpc (remap (List.map (fun (k,v) -> (v,k)) ev_struct_remap) rpc) type events = event list with rpc type token = string with rpc type event_from = { - events: event list; - valid_ref_counts: (string * int32) list; - token: token; + events: event list; + valid_ref_counts: (string * int32) list; + token: token; } with rpc let rec rpc_of_event_from e = - Rpc.Dict - [ ("events", - (Rpc.Enum (List.map rpc_of_event e.events))); - ("valid_ref_counts", - (let dict = - List.map - (fun (key, count) -> - (key, (Rpc.Int32 count))) - e.valid_ref_counts - in Rpc.Dict dict)); - ("token", (rpc_of_token e.token)) ] + Rpc.Dict + [ ("events", + (Rpc.Enum (List.map rpc_of_event e.events))); + ("valid_ref_counts", + (let dict = + List.map + (fun (key, count) -> + (key, (Rpc.Int32 count))) + e.valid_ref_counts + in Rpc.Dict dict)); + ("token", (rpc_of_token e.token)) ] (** Return result of an events.from call *) open Printf -let string_of_op = function `add -> "add" | `_mod -> "mod" | `del -> "del" +let string_of_op = function `add -> "add" | `_mod -> "mod" | `del -> "del" let op_of_string x = match String.lowercase x with | "add" -> `add | "mod" -> `_mod | "del" -> `del | x -> failwith (sprintf "Unknown operation type: %s" x) let string_of_event ev = sprintf "%s %s %s %s %s" ev.id ev.ty 
(string_of_op ev.op) ev.reference - (if ev.snapshot = None then "(no snapshot)" else "OK") + (if ev.snapshot = None then "(no snapshot)" else "OK") + - diff --git a/ocaml/idl/ocaml_backend/exnHelper.ml b/ocaml/idl/ocaml_backend/exnHelper.ml index b639961c325..6173bc67d81 100644 --- a/ocaml/idl/ocaml_backend/exnHelper.ml +++ b/ocaml/idl/ocaml_backend/exnHelper.ml @@ -23,45 +23,45 @@ module D = Debug.Make(struct let name="backtrace" end) open D let error_of_exn e = - match e with - | Stunnel.Stunnel_error msg -> - internal_error, [ "Connection failed: " ^ (String.lowercase msg) ^ "." ] - | Db_exn.DBCache_NotFound ("missing reference", tblname, reference) -> - (* whenever a reference has been destroyed *) - handle_invalid, [tblname; reference ] - | Db_exn.Too_many_values(tbl, objref, uuid) -> - (* Very bad: database has duplicate references or UUIDs *) - internal_error, [ sprintf "duplicate objects in database: tbl='%s'; object_ref='%s'; uuid='%s'" tbl objref uuid ] - | Db_exn.DBCache_NotFound (reason,p1,p2) -> - begin - match reason with - "missing row" -> handle_invalid, [p1; p2] - | s -> internal_error, [reason; p1; p2] - end - | Db_exn.Duplicate_key (tbl,fld,uuid,key) -> - map_duplicate_key, [ tbl; fld; uuid; key ] - | Db_exn.Read_missing_uuid (tbl,ref,uuid) -> - uuid_invalid, [ tbl; uuid ] - | Db_actions.DM_to_String.StringEnumTypeError s - | Db_actions.DM_to_String.DateTimeError s - | Db_actions.String_to_DM.StringEnumTypeError s -> - invalid_value, [ s ] + match e with + | Stunnel.Stunnel_error msg -> + internal_error, [ "Connection failed: " ^ (String.lowercase msg) ^ "." 
] + | Db_exn.DBCache_NotFound ("missing reference", tblname, reference) -> + (* whenever a reference has been destroyed *) + handle_invalid, [tblname; reference ] + | Db_exn.Too_many_values(tbl, objref, uuid) -> + (* Very bad: database has duplicate references or UUIDs *) + internal_error, [ sprintf "duplicate objects in database: tbl='%s'; object_ref='%s'; uuid='%s'" tbl objref uuid ] + | Db_exn.DBCache_NotFound (reason,p1,p2) -> + begin + match reason with + "missing row" -> handle_invalid, [p1; p2] + | s -> internal_error, [reason; p1; p2] + end + | Db_exn.Duplicate_key (tbl,fld,uuid,key) -> + map_duplicate_key, [ tbl; fld; uuid; key ] + | Db_exn.Read_missing_uuid (tbl,ref,uuid) -> + uuid_invalid, [ tbl; uuid ] + | Db_actions.DM_to_String.StringEnumTypeError s + | Db_actions.DM_to_String.DateTimeError s + | Db_actions.String_to_DM.StringEnumTypeError s -> + invalid_value, [ s ] - (* These are the two catch-all patterns. If ever an Errors.Server_error exception *) - (* is raised, this is assumed to be an API error, and passed straight on. Any other *) - (* exception at this point is regarded as an 'internal error', and returned as such *) + (* These are the two catch-all patterns. If ever an Errors.Server_error exception *) + (* is raised, this is assumed to be an API error, and passed straight on. 
Any other *) + (* exception at this point is regarded as an 'internal error', and returned as such *) - | Api_errors.Server_error (e,l) -> - e,l - | Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n) -> - internal_error, [ Printf.sprintf "Subprocess exited with unexpected code %d; stdout = [ %s ]; stderr = [ %s ]" n stdout stderr ] - | Invalid_argument x -> - internal_error, [ Printf.sprintf "Invalid argument: %s" x ] - | Db_filter.Expression_error (expr, exc) -> - invalid_value, [ expr; (Printexc.to_string exc)] - | e -> - internal_error, [ Printexc.to_string e ] + | Api_errors.Server_error (e,l) -> + e,l + | Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n) -> + internal_error, [ Printf.sprintf "Subprocess exited with unexpected code %d; stdout = [ %s ]; stderr = [ %s ]" n stdout stderr ] + | Invalid_argument x -> + internal_error, [ Printf.sprintf "Invalid argument: %s" x ] + | Db_filter.Expression_error (expr, exc) -> + invalid_value, [ expr; (Printexc.to_string exc)] + | e -> + internal_error, [ Printexc.to_string e ] let string_of_exn exn = - let e, l = error_of_exn exn in - Printf.sprintf "%s: [ %s ]" e (String.concat "; " l) + let e, l = error_of_exn exn in + Printf.sprintf "%s: [ %s ]" e (String.concat "; " l) diff --git a/ocaml/idl/ocaml_backend/genOCaml.ml b/ocaml/idl/ocaml_backend/genOCaml.ml index aebba7fcc9d..2d46a08144a 100644 --- a/ocaml/idl/ocaml_backend/genOCaml.ml +++ b/ocaml/idl/ocaml_backend/genOCaml.ml @@ -28,8 +28,8 @@ let ( @-- ) a b = a @ List.map (( ^ ) " ") b (** Generate a block with an indented, space-separated middle. 
*) let block head middle tail = (head @- - List.flatten (List.between [""] middle)) @ - tail + List.flatten (List.between [""] middle)) @ + tail let gen_type ty accu = match ty with | String | Int | Float | Bool -> accu @@ -42,21 +42,21 @@ let ty_to_xmlrpc api ty = | Bool -> "To.boolean" | DateTime -> "To.datetime" | Enum(_, cs) -> - let aux (c, _) = constructor_of c^" -> \""^c^"\"" in - " fun v -> To.string(match v with\n "^indent^ - String.concat ("\n"^indent^"| ") (List.map aux cs)^")" + let aux (c, _) = constructor_of c^" -> \""^c^"\"" in + " fun v -> To.string(match v with\n "^indent^ + String.concat ("\n"^indent^"| ") (List.map aux cs)^")" | Float -> "To.double" | Int -> "fun n -> To.string(Int64.to_string n)" | Map(key, value) -> - let kf = begin match key with - | Ref x -> "tostring_reference" - | Enum (name, cs) -> - let aux (c, _) = Printf.sprintf "%s -> \"%s\"" (constructor_of c) (String.lowercase c) in - " function " ^ (String.concat ("\n" ^ indent ^ "| ") (List.map aux cs)) - | key -> "ToString." ^ (alias_of_ty key) - end in - let vf = alias_of_ty value in - "fun m -> map ("^kf^") ("^vf^") m" + let kf = begin match key with + | Ref x -> "tostring_reference" + | Enum (name, cs) -> + let aux (c, _) = Printf.sprintf "%s -> \"%s\"" (constructor_of c) (String.lowercase c) in + " function " ^ (String.concat ("\n" ^ indent ^ "| ") (List.map aux cs)) + | key -> "ToString." ^ (alias_of_ty key) + end in + let vf = alias_of_ty value in + "fun m -> map ("^kf^") ("^vf^") m" | Ref _ -> "fun r -> To.string (Ref.string_of r)" (* | Ref "session" -> "fun uuid -> To.string(Uuid.string_of_cookie uuid)" @@ -65,39 +65,39 @@ let ty_to_xmlrpc api ty = | Set ty -> "fun s -> set "^alias_of_ty ty^" s" | String -> "To.string" | Record x -> - let fields = DU.fields_of_obj (Dm_api.get_obj_by_name api ~objname:x) in - let kvs = List.map - (fun fld -> - alias_of_ty fld.ty ^ " x." 
^ - (OU.ocaml_of_record_field (x::fld.full_name)), - String.concat "_" fld.full_name) fields in - let kvs = List.map (fun (record, v) -> "\"" ^ v ^ "\", " ^ record) kvs in - "fun x -> To.structure [ " ^ (String.concat "; " kvs) ^ " ]" - in + let fields = DU.fields_of_obj (Dm_api.get_obj_by_name api ~objname:x) in + let kvs = List.map + (fun fld -> + alias_of_ty fld.ty ^ " x." ^ + (OU.ocaml_of_record_field (x::fld.full_name)), + String.concat "_" fld.full_name) fields in + let kvs = List.map (fun (record, v) -> "\"" ^ v ^ "\", " ^ record) kvs in + "fun x -> To.structure [ " ^ (String.concat "; " kvs) ^ " ]" + in ["and "^alias_of_ty ty^" : "^alias_of_ty ty^" -> xml ="; " "^f] (** Generate a module of datamodel type to XML-RPC marshalling functions. *) let gen_to_xmlrpc api tys = block - ["module To = struct"] - ([["open Xml"]; + ["module To = struct"] + ([["open Xml"]; - ["let methodCall = To.methodCall"]; - ["let methodResponse f x = To.methodResponse (f x)"; ]; - ["let tostring_reference = Ref.string_of"]; - ["let set f l ="; - " To.array (List.map f l)"]; - ["let map fk fv m ="; - " let elements = List.map (fun (k, v) -> fk k, fv v) m in"; - " XMLRPC.To.structure elements"; + ["let methodCall = To.methodCall"]; + ["let methodResponse f x = To.methodResponse (f x)"; ]; + ["let tostring_reference = Ref.string_of"]; + ["let set f l ="; + " To.array (List.map f l)"]; + ["let map fk fv m ="; + " let elements = List.map (fun (k, v) -> fk k, fv v) m in"; + " XMLRPC.To.structure elements"; (* " set (fun (k, v) -> XMLRPC.To.structure [\"key\", fk k; \"value\", fv v]) m" *) -]; - ["let structure = To.structure"]; - ["let rec unused' = ()"]] @ + ]; + ["let structure = To.structure"]; + ["let rec unused' = ()"]] @ (List.map (ty_to_xmlrpc api) tys)) - ["end"] + ["end"] (** Generate code to marshal from the given datamodel type to XML-RPC. 
*) let ty_of_xmlrpc api ty = @@ -107,26 +107,26 @@ let ty_of_xmlrpc api ty = | Bool -> wrap "xml" "From.boolean xml" | DateTime -> wrap "xml" "From.datetime xml" | Enum(name, cs) -> - let aux (c, _) = "\""^(String.lowercase c)^"\" -> "^constructor_of c in - wrap "xml" - ("\n match String.lowercase (From.string xml) with\n "^ - String.concat "\n | " (List.map aux cs)^ - "\n | _ -> log_backtrace(); raise (RunTimeTypeError(\""^name^"\", xml))") + let aux (c, _) = "\""^(String.lowercase c)^"\" -> "^constructor_of c in + wrap "xml" + ("\n match String.lowercase (From.string xml) with\n "^ + String.concat "\n | " (List.map aux cs)^ + "\n | _ -> log_backtrace(); raise (RunTimeTypeError(\""^name^"\", xml))") | Float -> wrap "xml" "From.double xml" | Int -> wrap "xml" "Int64.of_string(From.string xml)" | Map(key, value) -> - let kf = begin match key with - | Ref x -> "fromstring_reference" - | Enum (name, cs) -> - let aux (c, _) = "\""^(String.lowercase c)^"\" -> "^constructor_of c in - wrap "txt" - ("\n match String.lowercase txt with\n "^ - String.concat "\n | " (List.map aux cs)^ - "\n | _ -> raise (RunTimeTypeError(\""^name^"\", Xml.parse_string txt))") - | key -> "FromString." ^ (alias_of_ty key) - end in - let vf = alias_of_ty_param value in - wrap "xml" ("map ("^kf^") ("^vf^") xml") + let kf = begin match key with + | Ref x -> "fromstring_reference" + | Enum (name, cs) -> + let aux (c, _) = "\""^(String.lowercase c)^"\" -> "^constructor_of c in + wrap "txt" + ("\n match String.lowercase txt with\n "^ + String.concat "\n | " (List.map aux cs)^ + "\n | _ -> raise (RunTimeTypeError(\""^name^"\", Xml.parse_string txt))") + | key -> "FromString." 
^ (alias_of_ty key) + end in + let vf = alias_of_ty_param value in + wrap "xml" ("map ("^kf^") ("^vf^") xml") | Ref _ -> wrap "xml" "Ref.of_string (From.string xml)" (* | Ref "session" -> "fun uuid -> Uuid.cookie_of_string(From.string uuid)" @@ -135,52 +135,52 @@ let ty_of_xmlrpc api ty = | Set ty -> wrap "xml" ("set "^alias_of_ty_param ty^" xml") | String -> wrap "xml" "From.string xml" | Record x -> - let fields = DU.fields_of_obj (Dm_api.get_obj_by_name api ~objname:x) in - let fields = - List.map (fun fld -> - (OU.ocaml_of_record_field (x::fld.full_name)) ^ " = " ^ - (alias_of_ty_param fld.ty) ^ - ( - (* generate code to insert default value if none in xml structure *) - let field_name = String.concat "_" fld.full_name in - let default_value = - match fld.DT.ty with - DT.Set (DT.Ref _) -> Some (DT.VSet []) - | _ -> fld.DT.default_value in - match default_value with - None -> "(my_assoc \"" ^ field_name ^ "\" all)" - | Some default -> - Printf.sprintf "(if (List.mem_assoc \"%s\" all) then (my_assoc \"%s\" all) else %s)" - field_name field_name - ("Xml.parse_string (\""^(Xml.to_string (Datamodel_values.to_xml default))^"\")") - )) - fields in - let fields = if fields = [] then [ "__unused=()" ] else fields in - wrap "xml" ("let all = From.structure xml in { " ^ - (String.concat ";\n " fields) ^ " }") in + let fields = DU.fields_of_obj (Dm_api.get_obj_by_name api ~objname:x) in + let fields = + List.map (fun fld -> + (OU.ocaml_of_record_field (x::fld.full_name)) ^ " = " ^ + (alias_of_ty_param fld.ty) ^ + ( + (* generate code to insert default value if none in xml structure *) + let field_name = String.concat "_" fld.full_name in + let default_value = + match fld.DT.ty with + DT.Set (DT.Ref _) -> Some (DT.VSet []) + | _ -> fld.DT.default_value in + match default_value with + None -> "(my_assoc \"" ^ field_name ^ "\" all)" + | Some default -> + Printf.sprintf "(if (List.mem_assoc \"%s\" all) then (my_assoc \"%s\" all) else %s)" + field_name field_name + 
("Xml.parse_string (\""^(Xml.to_string (Datamodel_values.to_xml default))^"\")") + )) + fields in + let fields = if fields = [] then [ "__unused=()" ] else fields in + wrap "xml" ("let all = From.structure xml in { " ^ + (String.concat ";\n " fields) ^ " }") in let f = "fun param -> ("^f^")" in ["and "^alias_of_ty ty^" : string -> xml -> "^alias_of_ty ty^" ="; " "^f] (** Generate a module of datamodel type to XML-RPC marshalling functions. *) let gen_of_xmlrpc api tys = block - ["module From = struct"] - ([["open Xml"]; - ["exception Dispatcher_FieldNotFound of string"]; - ["let my_assoc fld assoc_list = try List.assoc fld assoc_list with Not_found -> raise (Dispatcher_FieldNotFound fld)"]; - ["let fromstring_reference = Ref.of_string"]; - ["let methodCall = From.methodCall"]; - ["let methodResponse = From.methodResponse"]; - ["let set f (xml: XMLRPC.xmlrpc) ="; - " From.array f xml"]; - ["let map fk fv (xml: XMLRPC.xmlrpc) ="; - " List.map (fun (k, v) -> fk k, fv v) (From.structure xml)" + ["module From = struct"] + ([["open Xml"]; + ["exception Dispatcher_FieldNotFound of string"]; + ["let my_assoc fld assoc_list = try List.assoc fld assoc_list with Not_found -> raise (Dispatcher_FieldNotFound fld)"]; + ["let fromstring_reference = Ref.of_string"]; + ["let methodCall = From.methodCall"]; + ["let methodResponse = From.methodResponse"]; + ["let set f (xml: XMLRPC.xmlrpc) ="; + " From.array f xml"]; + ["let map fk fv (xml: XMLRPC.xmlrpc) ="; + " List.map (fun (k, v) -> fk k, fv v) (From.structure xml)" (* " let f m = fk (List.assoc \"key\" m), fv (List.assoc \"value\" m) in"; " set (fun b -> f (From.structure b)) xml" *) - ]; - ["let structure = From.structure"]; - ["let rec unused' = ()"]] @ + ]; + ["let structure = From.structure"]; + ["let rec unused' = ()"]] @ (List.map (ty_of_xmlrpc api) tys)) - ["end"] + ["end"] diff --git a/ocaml/idl/ocaml_backend/gen_api.ml b/ocaml/idl/ocaml_backend/gen_api.ml index 8b17578caa7..a8aae329a7e 100644 --- 
a/ocaml/idl/ocaml_backend/gen_api.ml +++ b/ocaml/idl/ocaml_backend/gen_api.ml @@ -23,164 +23,164 @@ module O = Ocaml_syntax let print s = output_string stdout (s^"\n") let overrides = [ - "vm_operations_to_string_map",( - "let rpc_of_vm_operations_to_string_map x = Rpc.Dict (List.map (fun (x,y) -> (match rpc_of_vm_operations x with Rpc.String x -> x | _ -> failwith \"Marshalling error\"), Rpc.String y) x)\n" ^ - "let vm_operations_to_string_map_of_rpc x = match x with Rpc.Dict l -> List.map (function (x,y) -> vm_operations_of_rpc (Rpc.String x), string_of_rpc y) l | _ -> failwith \"Unmarshalling error\"\n"); - "bond_mode",( - "let rpc_of_bond_mode x = match x with `balanceslb -> Rpc.String \"balance-slb\" | `activebackup -> Rpc.String \"active-backup\" | `lacp -> Rpc.String \"lacp\"\n"^ - "let bond_mode_of_rpc x = match x with Rpc.String \"balance-slb\" -> `balanceslb | Rpc.String \"active-backup\" -> `activebackup | Rpc.String \"lacp\" -> `lacp | _ -> failwith \"Unmarshalling error in bond-mode\"\n"); - "int64_to_float_map",( - "let rpc_of_int64_to_float_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, Rpc.Float y) x)\n" ^ - "let int64_to_float_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> Int64.of_string x, float_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); - "int64_to_int64_map",( - "let rpc_of_int64_to_int64_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, Rpc.Int y) x)\n" ^ - "let int64_to_int64_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> Int64.of_string x, int64_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); - "int64_to_string_set_map",( - "let rpc_of_int64_to_string_set_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, rpc_of_string_set y) x)\n" ^ - "let int64_to_string_set_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> Int64.of_string x, string_set_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); - "event_operation",( - "let 
rpc_of_event_operation x = match x with | `add -> Rpc.String \"add\" | `del -> Rpc.String \"del\" | `_mod -> Rpc.String \"mod\"\n"^ - "let event_operation_of_rpc x = match x with | Rpc.String \"add\" -> `add | Rpc.String \"del\" -> `del | Rpc.String \"mod\" -> `_mod | _ -> failwith \"Unmarshalling error\""); - + "vm_operations_to_string_map",( + "let rpc_of_vm_operations_to_string_map x = Rpc.Dict (List.map (fun (x,y) -> (match rpc_of_vm_operations x with Rpc.String x -> x | _ -> failwith \"Marshalling error\"), Rpc.String y) x)\n" ^ + "let vm_operations_to_string_map_of_rpc x = match x with Rpc.Dict l -> List.map (function (x,y) -> vm_operations_of_rpc (Rpc.String x), string_of_rpc y) l | _ -> failwith \"Unmarshalling error\"\n"); + "bond_mode",( + "let rpc_of_bond_mode x = match x with `balanceslb -> Rpc.String \"balance-slb\" | `activebackup -> Rpc.String \"active-backup\" | `lacp -> Rpc.String \"lacp\"\n"^ + "let bond_mode_of_rpc x = match x with Rpc.String \"balance-slb\" -> `balanceslb | Rpc.String \"active-backup\" -> `activebackup | Rpc.String \"lacp\" -> `lacp | _ -> failwith \"Unmarshalling error in bond-mode\"\n"); + "int64_to_float_map",( + "let rpc_of_int64_to_float_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, Rpc.Float y) x)\n" ^ + "let int64_to_float_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> Int64.of_string x, float_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); + "int64_to_int64_map",( + "let rpc_of_int64_to_int64_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, Rpc.Int y) x)\n" ^ + "let int64_to_int64_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> Int64.of_string x, int64_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); + "int64_to_string_set_map",( + "let rpc_of_int64_to_string_set_map x = Rpc.Dict (List.map (fun (x,y) -> Int64.to_string x, rpc_of_string_set y) x)\n" ^ + "let int64_to_string_set_map_of_rpc x = match x with Rpc.Dict x -> List.map (fun (x,y) -> 
Int64.of_string x, string_set_of_rpc y) x | _ -> failwith \"Unmarshalling error\""); + "event_operation",( + "let rpc_of_event_operation x = match x with | `add -> Rpc.String \"add\" | `del -> Rpc.String \"del\" | `_mod -> Rpc.String \"mod\"\n"^ + "let event_operation_of_rpc x = match x with | Rpc.String \"add\" -> `add | Rpc.String \"del\" -> `del | Rpc.String \"mod\" -> `_mod | _ -> failwith \"Unmarshalling error\""); + ] - - + + (** Generate a single type declaration for simple types (eg not containing references to record objects) *) let gen_non_record_type highapi tys = - let rec aux accu = function - | [] -> accu - | DT.String :: t - | DT.Int :: t - | DT.Float :: t - | DT.Bool :: t - | DT.Record _ :: t - | DT.Map (_, DT.Record _) :: t - | DT.Set (DT.Record _) :: t -> aux accu t - | DT.Set (DT.Enum (n,_) as e) as ty :: t -> - aux (sprintf "type %s = %s list with rpc" (OU.alias_of_ty ty) (OU.alias_of_ty e) :: accu) t - | ty :: t -> - let alias = OU.alias_of_ty ty in - if List.mem_assoc alias overrides - then aux ((sprintf "type %s = %s\n%s\n" alias (OU.ocaml_of_ty ty) (List.assoc alias overrides))::accu) t - else aux (sprintf "type %s = %s with rpc" (OU.alias_of_ty ty) (OU.ocaml_of_ty ty) :: accu) t in - aux [] tys + let rec aux accu = function + | [] -> accu + | DT.String :: t + | DT.Int :: t + | DT.Float :: t + | DT.Bool :: t + | DT.Record _ :: t + | DT.Map (_, DT.Record _) :: t + | DT.Set (DT.Record _) :: t -> aux accu t + | DT.Set (DT.Enum (n,_) as e) as ty :: t -> + aux (sprintf "type %s = %s list with rpc" (OU.alias_of_ty ty) (OU.alias_of_ty e) :: accu) t + | ty :: t -> + let alias = OU.alias_of_ty ty in + if List.mem_assoc alias overrides + then aux ((sprintf "type %s = %s\n%s\n" alias (OU.ocaml_of_ty ty) (List.assoc alias overrides))::accu) t + else aux (sprintf "type %s = %s with rpc" (OU.alias_of_ty ty) (OU.ocaml_of_ty ty) :: accu) t in + aux [] tys (** Generate a list of modules for each record kind *) let gen_record_type ~with_module highapi tys = - 
let rec aux accu = function - | [] -> accu - | DT.Record record :: t -> - - let obj_name = OU.ocaml_of_record_name record in - let all_fields = DU.fields_of_obj (Dm_api.get_obj_by_name highapi ~objname:record) in - let field fld = OU.ocaml_of_record_field (obj_name :: fld.DT.full_name) in - let map_fields fn = String.concat "; " (List.map (fun field -> fn field) all_fields) in - let regular_def fld = sprintf "%s : %s" (field fld) (OU.alias_of_ty fld.DT.ty) in - - let make_of_field fld = - sprintf "\"%s\",rpc_of_%s x.%s" (String.concat "_" fld.DT.full_name) - (OU.alias_of_ty fld.DT.ty) (OU.ocaml_of_record_field (obj_name :: fld.DT.full_name)) - in - - let make_to_field fld = - sprintf "%s = %s_of_rpc (List.assoc \"%s\" x)" (field fld) (OU.alias_of_ty fld.DT.ty) - (String.concat "_" fld.DT.full_name) - in - - let type_t = sprintf "type %s_t = { %s }" obj_name (map_fields regular_def) in - let others = if not with_module then - [] - else [ - sprintf "let rpc_of_%s_t x = Rpc.Dict [ %s ]" obj_name (map_fields make_of_field); - sprintf "let %s_t_of_rpc x = on_dict (fun x -> { %s }) x" obj_name (map_fields make_to_field); - sprintf "type ref_%s_to_%s_t_map = (ref_%s * %s_t) list with rpc" record obj_name record obj_name; - sprintf "type %s_t_set = %s_t list with rpc" obj_name obj_name; - "" - ] in - aux (type_t :: others @ accu) t - | _ :: t -> aux accu t in - aux [] tys + let rec aux accu = function + | [] -> accu + | DT.Record record :: t -> + + let obj_name = OU.ocaml_of_record_name record in + let all_fields = DU.fields_of_obj (Dm_api.get_obj_by_name highapi ~objname:record) in + let field fld = OU.ocaml_of_record_field (obj_name :: fld.DT.full_name) in + let map_fields fn = String.concat "; " (List.map (fun field -> fn field) all_fields) in + let regular_def fld = sprintf "%s : %s" (field fld) (OU.alias_of_ty fld.DT.ty) in + + let make_of_field fld = + sprintf "\"%s\",rpc_of_%s x.%s" (String.concat "_" fld.DT.full_name) + (OU.alias_of_ty fld.DT.ty) 
(OU.ocaml_of_record_field (obj_name :: fld.DT.full_name)) + in + + let make_to_field fld = + sprintf "%s = %s_of_rpc (List.assoc \"%s\" x)" (field fld) (OU.alias_of_ty fld.DT.ty) + (String.concat "_" fld.DT.full_name) + in + + let type_t = sprintf "type %s_t = { %s }" obj_name (map_fields regular_def) in + let others = if not with_module then + [] + else [ + sprintf "let rpc_of_%s_t x = Rpc.Dict [ %s ]" obj_name (map_fields make_of_field); + sprintf "let %s_t_of_rpc x = on_dict (fun x -> { %s }) x" obj_name (map_fields make_to_field); + sprintf "type ref_%s_to_%s_t_map = (ref_%s * %s_t) list with rpc" record obj_name record obj_name; + sprintf "type %s_t_set = %s_t list with rpc" obj_name obj_name; + "" + ] in + aux (type_t :: others @ accu) t + | _ :: t -> aux accu t in + aux [] tys let gen_client highapi = - List.iter (List.iter print) - (List.between [""] [ - [ - "open API"; - "open Rpc"; - "module type RPC = sig val rpc: Rpc.t -> Rpc.t end"; - "module type IO = sig type 'a t val bind : 'a t -> ('a -> 'b t) -> 'b t val return : 'a -> 'a t end"; - ""; - "let server_failure code args = raise (Api_errors.Server_error (code, args))"; - ]; - O.Module.strings_of (Gen_client.gen_module highapi); - [ "module Id = struct type 'a t = 'a let bind x f = f x let return x = x end"; - "module Client = ClientF(Id)" ] - ]) + List.iter (List.iter print) + (List.between [""] [ + [ + "open API"; + "open Rpc"; + "module type RPC = sig val rpc: Rpc.t -> Rpc.t end"; + "module type IO = sig type 'a t val bind : 'a t -> ('a -> 'b t) -> 'b t val return : 'a -> 'a t end"; + ""; + "let server_failure code args = raise (Api_errors.Server_error (code, args))"; + ]; + O.Module.strings_of (Gen_client.gen_module highapi); + [ "module Id = struct type 'a t = 'a let bind x f = f x let return x = x end"; + "module Client = ClientF(Id)" ] + ]) let add_set_enums types = - List.concat ( - List.map (fun ty -> - match ty with - | DT.Enum _ -> - if List.exists (fun ty2 -> ty2 = DT.Set ty) types then 
[ty] else [DT.Set ty; ty] - | _ -> [ty]) types) + List.concat ( + List.map (fun ty -> + match ty with + | DT.Enum _ -> + if List.exists (fun ty2 -> ty2 = DT.Set ty) types then [ty] else [DT.Set ty; ty] + | _ -> [ty]) types) let gen_client_types highapi = - let all_types = DU.Types.of_objects (Dm_api.objects_of_api highapi) in - let all_types = add_set_enums all_types in - List.iter (List.iter print) - (List.between [""] [ - [ - "type failure = (string list) with rpc"; - "let response_of_failure code params ="; - " Rpc.failure (rpc_of_failure (code::params))"; - "let response_of_fault code ="; - " Rpc.failure (rpc_of_failure ([\"Fault\"; code]))"; - ]; [ - "include Rpc"; - "type string_list = string list with rpc"; - ]; [ - "module Ref = struct"; - " include Ref"; - " let rpc_of_t _ x = rpc_of_string (Ref.string_of x)"; - " let t_of_rpc _ x = of_string (string_of_rpc x);"; - "end"; - ]; [ - "module Date = struct"; - " open Stdext"; - " include Date"; - " let rpc_of_iso8601 x = DateTime (Date.to_string x)"; - " let iso8601_of_rpc = function String x | DateTime x -> Date.of_string x | _ -> failwith \"Date.iso8601_of_rpc\""; - "end"; - ]; [ - "let on_dict f = function | Rpc.Dict x -> f x | _ -> failwith \"Expected Dictionary\"" - ]; - gen_non_record_type highapi all_types; - gen_record_type ~with_module:true highapi all_types; - O.Signature.strings_of (Gen_client.gen_signature highapi); - [ "module Legacy = struct"; - "open XMLRPC"; - "module D=Debug.Make(struct let name=\"legacy_marshallers\" end)"; - "open D" ]; - GenOCaml.gen_of_xmlrpc highapi all_types; - GenOCaml.gen_to_xmlrpc highapi all_types; - ["end"]; - ]) + let all_types = DU.Types.of_objects (Dm_api.objects_of_api highapi) in + let all_types = add_set_enums all_types in + List.iter (List.iter print) + (List.between [""] [ + [ + "type failure = (string list) with rpc"; + "let response_of_failure code params ="; + " Rpc.failure (rpc_of_failure (code::params))"; + "let response_of_fault code ="; + " 
Rpc.failure (rpc_of_failure ([\"Fault\"; code]))"; + ]; [ + "include Rpc"; + "type string_list = string list with rpc"; + ]; [ + "module Ref = struct"; + " include Ref"; + " let rpc_of_t _ x = rpc_of_string (Ref.string_of x)"; + " let t_of_rpc _ x = of_string (string_of_rpc x);"; + "end"; + ]; [ + "module Date = struct"; + " open Stdext"; + " include Date"; + " let rpc_of_iso8601 x = DateTime (Date.to_string x)"; + " let iso8601_of_rpc = function String x | DateTime x -> Date.of_string x | _ -> failwith \"Date.iso8601_of_rpc\""; + "end"; + ]; [ + "let on_dict f = function | Rpc.Dict x -> f x | _ -> failwith \"Expected Dictionary\"" + ]; + gen_non_record_type highapi all_types; + gen_record_type ~with_module:true highapi all_types; + O.Signature.strings_of (Gen_client.gen_signature highapi); + [ "module Legacy = struct"; + "open XMLRPC"; + "module D=Debug.Make(struct let name=\"legacy_marshallers\" end)"; + "open D" ]; + GenOCaml.gen_of_xmlrpc highapi all_types; + GenOCaml.gen_to_xmlrpc highapi all_types; + ["end"]; + ]) let gen_server highapi = - List.iter (List.iter print) - (List.between [""] [ - [ "open API"; "open Server_helpers" ]; - O.Module.strings_of (Gen_server.gen_module highapi); - ]) + List.iter (List.iter print) + (List.between [""] [ + [ "open API"; "open Server_helpers" ]; + O.Module.strings_of (Gen_server.gen_module highapi); + ]) let gen_custom_actions highapi = - List.iter (List.iter print) - (List.between [""] [ - [ "open API" ]; - O.Signature.strings_of (Gen_empty_custom.gen_signature Gen_empty_custom.signature_name None highapi); - O.Module.strings_of (Gen_empty_custom.gen_release_module highapi); - ]) + List.iter (List.iter print) + (List.between [""] [ + [ "open API" ]; + O.Signature.strings_of (Gen_empty_custom.gen_signature Gen_empty_custom.signature_name None highapi); + O.Module.strings_of (Gen_empty_custom.gen_release_module highapi); + ]) open Gen_db_actions @@ -188,22 +188,22 @@ let gen_db_actions highapi = let all_types = 
DU.Types.of_objects (Dm_api.objects_of_api highapi) in let only_records = List.filter (function DT.Record _ -> true | _ -> false) all_types in - List.iter (List.iter print) - (List.between [""] + List.iter (List.iter print) + (List.between [""] [ - [ "open API" ]; + [ "open API" ]; - (* These records have the hidden fields inside *) - gen_record_type ~with_module:false highapi only_records; + (* These records have the hidden fields inside *) + gen_record_type ~with_module:false highapi only_records; - (* NB record types are ignored by dm_to_string and string_to_dm *) - O.Module.strings_of (dm_to_string all_types); - O.Module.strings_of (string_to_dm all_types); - O.Module.strings_of (db_action highapi); ] - @ (List.map O.Module.strings_of (Gen_db_check.all highapi)) @ [ + (* NB record types are ignored by dm_to_string and string_to_dm *) + O.Module.strings_of (dm_to_string all_types); + O.Module.strings_of (string_to_dm all_types); + O.Module.strings_of (db_action highapi); ] + @ (List.map O.Module.strings_of (Gen_db_check.all highapi)) @ [ - ] - ) + ] + ) let gen_rbac highapi = print_endline (Gen_rbac.gen_permissions_of_static_roles highapi) diff --git a/ocaml/idl/ocaml_backend/gen_api_main.ml b/ocaml/idl/ocaml_backend/gen_api_main.ml index a1b0d1e3129..1cfc457e8cc 100644 --- a/ocaml/idl/ocaml_backend/gen_api_main.ml +++ b/ocaml/idl/ocaml_backend/gen_api_main.ml @@ -45,7 +45,7 @@ let filter_api () = | Some "debug" -> filterfn Field.debug Message.debug api_used | Some "nothing" -> filterfn Field.nothing Message.nothing api_used | Some x -> Printf.eprintf "Unknown filter mode: %s\n" x; - api_used + api_used let set_gendebug () = Gen_server.enable_debugging := true @@ -57,17 +57,17 @@ let _ = [ "-mode", Arg.Symbol (["client"; "server"; "api"; "db"; "actions"; "sql"; "rbac"; "test"], - fun x -> mode := Some x), + fun x -> mode := Some x), "Choose which file to output"; "-filter", Arg.Symbol (["opensource"; "closed"; "debug"; "nothing"], - fun x -> filter := Some x), + 
fun x -> filter := Some x), "Apply a filter to the API"; "-filterinternal", Arg.Bool (fun x -> filterinternal := x), "Filter internal fields and messages"; - + "-gendebug", Arg.Unit (fun _ -> set_gendebug ()), "Add debugging code to generated output"; @@ -78,17 +78,17 @@ let _ = match !mode with | None -> Printf.eprintf "Must select an output type with -mode\n" | Some "client" -> - Gen_api.gen_client api + Gen_api.gen_client api | Some "api" -> - Gen_api.gen_client_types api + Gen_api.gen_client_types api | Some "server" -> - Gen_api.gen_server api + Gen_api.gen_server api | Some "db" -> - Gen_api.gen_db_actions api + Gen_api.gen_db_actions api | Some "actions" -> - Gen_api.gen_custom_actions api + Gen_api.gen_custom_actions api | Some "rbac" -> - Gen_api.gen_rbac api + Gen_api.gen_rbac api | Some "test" -> - Gen_test.gen_test api + Gen_test.gen_test api | Some x -> Printf.eprintf "Didn't recognise mode: %s\n" x diff --git a/ocaml/idl/ocaml_backend/gen_client.ml b/ocaml/idl/ocaml_backend/gen_client.ml index 3c834fd1e61..0bb8a0d3e4c 100644 --- a/ocaml/idl/ocaml_backend/gen_client.ml +++ b/ocaml/idl/ocaml_backend/gen_client.ml @@ -51,16 +51,16 @@ let has_async = function (* true if msg is constructor or desctructor and the msg's object specifies not to make constructor/destructor *) let objfilter msg api = let obj_name = msg.DT.msg_obj_name in - if obj_name="" then failwith (Printf.sprintf "message %s has no obj_name" msg.DT.msg_name) - else - let obj = Dm_api.get_obj_by_name api obj_name in - let obj_gen_con_and_des = obj.DT.gen_constructor_destructor in - let msg_is_con_or_des = - (msg.DT.msg_tag = DT.FromObject (DT.Make)) || - (msg.DT.msg_tag = DT.FromObject (DT.Delete)) in - not msg_is_con_or_des || obj_gen_con_and_des - -let client_api ~sync api = + if obj_name="" then failwith (Printf.sprintf "message %s has no obj_name" msg.DT.msg_name) + else + let obj = Dm_api.get_obj_by_name api obj_name in + let obj_gen_con_and_des = obj.DT.gen_constructor_destructor in + 
let msg_is_con_or_des = + (msg.DT.msg_tag = DT.FromObject (DT.Make)) || + (msg.DT.msg_tag = DT.FromObject (DT.Delete)) in + not msg_is_con_or_des || obj_gen_con_and_des + +let client_api ~sync api = let filter f = Dm_api.filter (fun _ -> true) (fun _ -> true) f in let api = filter (fun msg-> (DU.on_client_side msg) && (objfilter msg api)) api in if sync then api else filter has_async api @@ -74,38 +74,38 @@ let ctor_fields (obj: obj) = let args_of_message ?(expand_record=true) (obj: obj) ( { msg_tag = tag } as msg) = let arg_of_param = function | {param_type=Record x; param_name=name; param_doc=doc} -> - begin match tag with - | FromObject(Make) -> - if x <> obj.DT.name then failwith "args_of_message"; - if expand_record - then List.map param_of_field (ctor_fields obj) - else [ custom _value (Record x) ] - | _ -> failwith "arg_of_param: encountered a Record in an unexpected place" - end + begin match tag with + | FromObject(Make) -> + if x <> obj.DT.name then failwith "args_of_message"; + if expand_record + then List.map param_of_field (ctor_fields obj) + else [ custom _value (Record x) ] + | _ -> failwith "arg_of_param: encountered a Record in an unexpected place" + end | p -> [ of_param p ] in let session = if msg.msg_session then [ session ] else [ ] in List.concat (session :: (List.map arg_of_param msg.msg_params)) let gen_module api : O.Module.t = (* Generate any additional helper functions for an operation here *) - let helper_record_constructor ~sync (obj: obj) (x: message) = + let helper_record_constructor ~sync (obj: obj) (x: message) = if x.msg_tag <> FromObject(Make) then [] else [ - let fields = ctor_fields obj in - let binding x = - let arg = OU.ocaml_of_record_field x.DT.full_name in - let fld = OU.ocaml_of_record_field (obj.DT.name :: x.DT.full_name) in - sprintf "~%s:%s.%s" arg _value fld in - let all = List.map binding fields in - let all = if x.msg_session then "~session_id"::all else all in - O.Let.make - ~name:(x.msg_name ^ "_from_record") - 
~params:(_rpc :: (args_of_message ~expand_record:false obj x)) - ~ty:(if sync then (match x.msg_result with Some (x,_) -> - OU.alias_of_ty x | _ -> "unit") - else OU.alias_of_ty (DT.Ref Datamodel._task)) - ~body:(x.msg_name :: "~rpc" :: all) () - ] in + let fields = ctor_fields obj in + let binding x = + let arg = OU.ocaml_of_record_field x.DT.full_name in + let fld = OU.ocaml_of_record_field (obj.DT.name :: x.DT.full_name) in + sprintf "~%s:%s.%s" arg _value fld in + let all = List.map binding fields in + let all = if x.msg_session then "~session_id"::all else all in + O.Let.make + ~name:(x.msg_name ^ "_from_record") + ~params:(_rpc :: (args_of_message ~expand_record:false obj x)) + ~ty:(if sync then (match x.msg_result with Some (x,_) -> + OU.alias_of_ty x | _ -> "unit") + else OU.alias_of_ty (DT.Ref Datamodel._task)) + ~body:(x.msg_name :: "~rpc" :: all) () + ] in (* Convert an operation into a Let-binding *) let operation ~sync (obj: obj) (x: message) = @@ -121,9 +121,9 @@ let gen_module api : O.Module.t = let ctor_record = let fields = ctor_fields obj in let of_field f = Printf.sprintf "\"%s\", %s" - (DU.wire_name_of_field f) - (O.string_of_param (param_of_field f)) in - "let args = Dict [ " ^ (String.concat "; " (List.map of_field fields)) ^ "] in" in + (DU.wire_name_of_field f) + (O.string_of_param (param_of_field f)) in + "let args = Dict [ " ^ (String.concat "; " (List.map of_field fields)) ^ "] in" in let rpc_args = if is_ctor then [ O.string_of_param session; "args" ] @@ -140,14 +140,14 @@ let gen_module api : O.Module.t = let wire_name = DU.wire_name ~sync obj x in - let return_type = - if x.msg_custom_marshaller + let return_type = + if x.msg_custom_marshaller then "Rpc.t" else begin - if sync then (match x.msg_result with Some (x,_) -> - OU.alias_of_ty x | _ -> "unit") - else OU.alias_of_ty task - end in + if sync then (match x.msg_result with Some (x,_) -> + OU.alias_of_ty x | _ -> "unit") + else OU.alias_of_ty task + end in O.Let.make 
~name:x.msg_name @@ -155,12 +155,12 @@ let gen_module api : O.Module.t = ~params:(_rpc :: args) ~ty:return_type ~body:(List.map to_rpc args @ [ - if is_ctor then ctor_record else ""; - Printf.sprintf "rpc_wrapper rpc \"%s\" [ %s ] >>= fun x -> return (%s x)" - wire_name - (String.concat "; " rpc_args) - (from_xmlrpc x.msg_result) - ]) () in + if is_ctor then ctor_record else ""; + Printf.sprintf "rpc_wrapper rpc \"%s\" [ %s ] >>= fun x -> return (%s x)" + wire_name + (String.concat "; " rpc_args) + (from_xmlrpc x.msg_result) + ]) () in (* Convert an object into a Module *) let obj ~sync (obj: obj) = @@ -170,7 +170,7 @@ let gen_module api : O.Module.t = let fields = fields_of (operations @ helpers) in (* let fields = List.map (fun x -> O.Module.Let (operation ~sync obj x)) obj.messages in -*) +*) O.Module.make ~name:(OU.ocaml_of_obj_name obj.DT.name) ~elements:fields () @@ -178,14 +178,14 @@ let gen_module api : O.Module.t = let preamble = [ "let (>>=) = X.bind"; "let return = X.return"; - "let rpc_wrapper rpc name args = "; - " rpc (Rpc.call name args) >>= fun response -> "; - " if response.Rpc.success then"; - " return response.Rpc.contents"; - " else match response.Rpc.contents with"; - " | Rpc.Enum [ Rpc.String \"Fault\"; Rpc.String code ] -> failwith (\"INTERNAL ERROR: \"^code)"; - " | Rpc.Enum ((Rpc.String code) :: args) -> return (server_failure code (List.map Rpc.string_of_rpc args))"; - " | rpc -> failwith (\"Client.rpc: \" ^ Rpc.to_string rpc)"; + "let rpc_wrapper rpc name args = "; + " rpc (Rpc.call name args) >>= fun response -> "; + " if response.Rpc.success then"; + " return response.Rpc.contents"; + " else match response.Rpc.contents with"; + " | Rpc.Enum [ Rpc.String \"Fault\"; Rpc.String code ] -> failwith (\"INTERNAL ERROR: \"^code)"; + " | Rpc.Enum ((Rpc.String code) :: args) -> return (server_failure code (List.map Rpc.string_of_rpc args))"; + " | rpc -> failwith (\"Client.rpc: \" ^ Rpc.to_string rpc)"; ] in let async = @@ -205,7 +205,7 @@ let 
gen_module api : O.Module.t = ~preamble:preamble ~args:["X : IO"] ~elements:(O.Module.Module async :: - List.map (fun x -> O.Module.Module (obj ~sync:true x)) all_objs) () + List.map (fun x -> O.Module.Module (obj ~sync:true x)) all_objs) () let gen_signature api : O.Signature.t = (* Ensure the 'API' signature (the client's PoV matches the client implementation) *) diff --git a/ocaml/idl/ocaml_backend/gen_common.ml b/ocaml/idl/ocaml_backend/gen_common.ml index de3a3c8422a..5d8ad0ba9b9 100644 --- a/ocaml/idl/ocaml_backend/gen_common.ml +++ b/ocaml/idl/ocaml_backend/gen_common.ml @@ -23,5 +23,5 @@ module OU = Ocaml_utils let context = "__context" let context_with_correct_database = "(Context.check_for_foreign_database ~__context)" -let context_arg = O.Named(context, "Context.t") +let context_arg = O.Named(context, "Context.t") diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 33e691d6f2e..8fcfec87a3e 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -31,12 +31,12 @@ let _db_defaults = "DB_DEFAULTS" (** Filter out all the operations which don't make sense to the database *) let make_db_api = Dm_api.filter (fun _ -> true) (fun _ -> true) - (fun ({ msg_tag = tag }) -> match tag with - | FromField(_, _) -> true - | Custom -> false - | FromObject(GetAll) -> false (* rely on the Private(GetDBAll) function for now *) - | FromObject(_) -> true - ) + (fun ({ msg_tag = tag }) -> match tag with + | FromField(_, _) -> true + | Custom -> false + | FromObject(GetAll) -> false (* rely on the Private(GetDBAll) function for now *) + | FromObject(_) -> true + ) (* Only these types are actually marshalled into the database: *) let type_marshalled_in_db = function @@ -54,15 +54,15 @@ let dm_to_string tys : O.Module.t = | DT.Bool -> "string_of_bool" | DT.DateTime -> "fun x -> (try Date.assert_utc x with Invalid_argument s -> raise (DateTimeError s)); Date.to_string x" | 
DT.Enum(name, cs) -> - let aux (c, _) = (OU.constructor_of c)^" -> \""^c^"\"" in - "\n fun v -> match v with\n "^ - String.concat "\n | " (List.map aux cs) - (* ^"\n | _ -> raise (StringEnumTypeError \""^name^"\")" *) + let aux (c, _) = (OU.constructor_of c)^" -> \""^c^"\"" in + "\n fun v -> match v with\n "^ + String.concat "\n | " (List.map aux cs) + (* ^"\n | _ -> raise (StringEnumTypeError \""^name^"\")" *) | DT.Float -> "Printf.sprintf \"%0.18g\"" | DT.Int -> "Int64.to_string" | DT.Map(key, value) -> - let kf = OU.alias_of_ty key and vf = OU.alias_of_ty value in - "fun m -> map "^kf^" "^vf^" m" + let kf = OU.alias_of_ty key and vf = OU.alias_of_ty value in + "fun m -> map "^kf^" "^vf^" m" | DT.Ref s -> "(Ref.string_of : " ^ OU.ocaml_of_ty ty^" -> string)" (* | DT.Ref "session" -> "(Uuid.string_of_cookie : "^OU.ocaml_of_ty ty^" -> string)" @@ -95,15 +95,15 @@ let string_to_dm tys : O.Module.t = | DT.Bool -> "bool_of_string" | DT.DateTime -> "fun x -> Date.of_string x" | DT.Enum(name, cs) -> - let aux (c, _) = "\""^c^"\" -> "^(OU.constructor_of c) in - "\n fun v -> match v with\n "^ - String.concat "\n | " (List.map aux cs)^ - "\n | _ -> raise (StringEnumTypeError \""^name^"\")" + let aux (c, _) = "\""^c^"\" -> "^(OU.constructor_of c) in + "\n fun v -> match v with\n "^ + String.concat "\n | " (List.map aux cs)^ + "\n | _ -> raise (StringEnumTypeError \""^name^"\")" | DT.Float -> "float_of_string" | DT.Int -> "Int64.of_string" | DT.Map(key, value) -> - let kf = OU.alias_of_ty key and vf = OU.alias_of_ty value in - "fun m -> map "^kf^" "^vf^" m" + let kf = OU.alias_of_ty key and vf = OU.alias_of_ty value in + "fun m -> map "^kf^" "^vf^" m" (* | DT.Ref "session" -> "fun x -> (Uuid.cookie_of_string x : "^OU.ocaml_of_ty ty^")" @@ -136,16 +136,16 @@ let field_in_this_table = function let args_of_message (obj: obj) ( { msg_tag = tag } as msg) = let arg_of_param = function | {param_type=DT.Record x; param_name=name; param_doc=doc} -> - begin match tag with - | 
FromObject(Make) -> - if x <> obj.DT.name then failwith "args_of_message"; - (* Client constructor takes all object fields regardless of qualifier - but excluding Set(Ref _) types *) - let fields = DU.fields_of_obj obj in - let fields = List.filter field_in_this_table fields in - List.map Client.param_of_field fields - | _ -> failwith "arg_of_param: encountered a Record in an unexpected place" - end + begin match tag with + | FromObject(Make) -> + if x <> obj.DT.name then failwith "args_of_message"; + (* Client constructor takes all object fields regardless of qualifier + but excluding Set(Ref _) types *) + let fields = DU.fields_of_obj obj in + let fields = List.filter field_in_this_table fields in + List.map Client.param_of_field fields + | _ -> failwith "arg_of_param: encountered a Record in an unexpected place" + end | p -> [ Client.of_param p ] in let ref = if tag = FromObject(Make) then [ O.Named("ref", OU.alias_of_ty (Ref obj.name)) ] else [ ] in let args = List.map arg_of_param msg.msg_params in @@ -160,7 +160,7 @@ let look_up_related_table_and_field obj other full_name = let this_end = obj.DT.name, List.hd (full_name) in (* XXX: relationships should store full names *) let obj', fld' = DU.Relations.other_end_of DM.all_api this_end in - (obj', fld') + (obj', fld') (** For a field of type "other" called "full_name" which is a Set(Ref _), return the set *) @@ -168,45 +168,45 @@ let read_set_ref obj other full_name = (* Set(Ref t) is actually stored in the table t *) let obj', fld' = look_up_related_table_and_field obj other full_name in String.concat "\n" [ - Printf.sprintf "if not(DB.is_valid_ref __t %s)" Client._self; - Printf.sprintf "then raise (Api_errors.Server_error(Api_errors.handle_invalid, [ %s ]))" Client._self; - Printf.sprintf "else List.map %s.%s (DB.read_set_ref __t " _string_to_dm (OU.alias_of_ty (DT.Ref other)); - Printf.sprintf " { table = \"%s\"; return=Db_names.ref; " (Escaping.escape_obj obj'); - Printf.sprintf " where_field = \"%s\"; 
where_value = %s })" fld' Client._self + Printf.sprintf "if not(DB.is_valid_ref __t %s)" Client._self; + Printf.sprintf "then raise (Api_errors.Server_error(Api_errors.handle_invalid, [ %s ]))" Client._self; + Printf.sprintf "else List.map %s.%s (DB.read_set_ref __t " _string_to_dm (OU.alias_of_ty (DT.Ref other)); + Printf.sprintf " { table = \"%s\"; return=Db_names.ref; " (Escaping.escape_obj obj'); + Printf.sprintf " where_field = \"%s\"; where_value = %s })" fld' Client._self ] let get_record (obj: obj) aux_fn_name = let body = [ - Printf.sprintf "let (__regular_fields, __set_refs) = DB.read_record __t \"%s\" %s in" - (Escaping.escape_obj obj.DT.name) Client._self; + Printf.sprintf "let (__regular_fields, __set_refs) = DB.read_record __t \"%s\" %s in" + (Escaping.escape_obj obj.DT.name) Client._self; aux_fn_name^" ~__regular_fields ~__set_refs"; ] in String.concat "\n" body (* Return a thunk which calls get_record on this class, for the event mechanism *) let snapshot obj_name self = - Printf.sprintf "(fun () -> API.%s.rpc_of_t (get_record ~__context ~self:%s))" (OU.ocaml_of_module_name obj_name) self + Printf.sprintf "(fun () -> API.%s.rpc_of_t (get_record ~__context ~self:%s))" (OU.ocaml_of_module_name obj_name) self (* Return a thunk which calls get_record on some other class, for the event mechanism *) -let external_snapshot obj_name self = +let external_snapshot obj_name self = Printf.sprintf "find_get_record \"%s\" ~__context ~self:%s" obj_name self -let ocaml_of_tbl_fields xs = - let of_field (tbl, fld, fn) = +let ocaml_of_tbl_fields xs = + let of_field (tbl, fld, fn) = Printf.sprintf "\"%s\", %s, %s" tbl fld fn in "[" ^ (String.concat "; " (List.map of_field xs)) ^ "]" (* This function is incomplete: -let make_shallow_copy api (obj: obj) (src: string) (dst: string) (all_fields: field list) = - (* NB this copy does not include Set(Ref _) fields, and nor does it call any + let make_shallow_copy api (obj: obj) (src: string) (dst: string) (all_fields: field 
list) = + (* NB this copy does not include Set(Ref _) fields, and nor does it call any custom actions of other (Ref _) fields! *) - let fields = List.filter field_in_this_table all_fields in - let fields = List.filter (fun x -> x.full_name <> [ "uuid" ]) fields in - let sql_fields = List.map (fun f -> Escaping.escape_id f.full_name) fields in - let to_notify = follow_references obj api in - let to_notify' = List.map - (fun (tbl, fld) -> + let fields = List.filter field_in_this_table all_fields in + let fields = List.filter (fun x -> x.full_name <> [ "uuid" ]) fields in + let sql_fields = List.map (fun f -> Escaping.escape_id f.full_name) fields in + let to_notify = follow_references obj api in + let to_notify' = List.map + (fun (tbl, fld) -> tbl, "\"" ^ (Escaping.escape_id fld.full_name) ^ "\"", "(fun () -> failwith \"shallow copy\")") to_notify in Printf.sprintf "sql_copy %s ~new_objref:%s \"%s\" %s [%s]" (ocaml_of_tbl_fields to_notify') @@ -215,9 +215,9 @@ let make_shallow_copy api (obj: obj) (src: string) (dst: string) (all_fields: fi (String.concat "; " (List.map (fun f -> "\"" ^ f ^ "\"") sql_fields)) *) -let open_db_module = - "let __t = Context.database_of __context in\n" ^ - "let module DB = (val (Db_cache.get __t) : Db_interface.DB_ACCESS) in\n" +let open_db_module = + "let __t = Context.database_of __context in\n" ^ + "let module DB = (val (Db_cache.get __t) : Db_interface.DB_ACCESS) in\n" let db_action api : O.Module.t = let api = make_db_api api in @@ -225,69 +225,69 @@ let db_action api : O.Module.t = let expr = "expr" in let expr_arg = O.Named(expr, "Db_filter_types.expr") in - let get_refs_where (obj: obj) = + let get_refs_where (obj: obj) = let tbl = Escaping.escape_obj obj.DT.name in O.Let.make ~name: "get_refs_where" ~params: [ Gen_common.context_arg; expr_arg ] ~ty: ( OU.alias_of_ty (Ref obj.DT.name) ^ " list") ~body: [ open_db_module; "let refs = (DB.find_refs_with_filter __t \"" ^ tbl ^ "\" " ^ expr ^ ") in "; - "List.map Ref.of_string refs " ] 
() in - - let get_record_aux_fn_body ?(m="API.") (obj: obj) (all_fields: field list) = - - let of_field = function - | { DT.ty = DT.Set(DT.Ref other); full_name = full_name; DT.field_ignore_foreign_key = false } -> - Printf.sprintf "List.map %s.%s (List.assoc \"%s\" __set_refs)" - _string_to_dm - (OU.alias_of_ty (DT.Ref other)) - (Escaping.escape_id full_name) - | f -> - _string_to_dm ^ "." ^ (OU.alias_of_ty f.DT.ty) ^ - "(List.assoc \"" ^ (Escaping.escape_id f.full_name) ^ "\" __regular_fields)" in - let make_field f = Printf.sprintf " %s%s = %s;" m (OU.ocaml_of_record_field (obj.DT.name :: f.DT.full_name)) (of_field f) in - let fields = List.map make_field all_fields in - let mk_rec = [ "{" ] @ fields @ [ " }"] in - String.concat "\n" mk_rec in - - let get_record_aux_fn (obj : obj) = - let record_fields = List.filter client_side_field (DU.fields_of_obj obj) in - O.Let.make - ~name: "get_record'" - ~params: [ O.Named("__regular_fields", "(string * string) list"); - O.Named("__set_refs", "(string * (string list)) list") ] - ~ty:"'a" - ~body: [ get_record_aux_fn_body obj record_fields ] () in - - let get_record_internal_aux_fn (obj : obj) = - let record_fields = DU.fields_of_obj obj in - O.Let.make - ~name: "get_record_internal'" - ~params: [ O.Named("__regular_fields", "(string * string) list"); - O.Named("__set_refs", "(string * (string list)) list") ] - ~ty: "'a" - ~body: [ get_record_aux_fn_body ~m:"" obj record_fields ] () in - - let get_records_where (obj: obj) name conversion_fn = - O.Let.make - ~name: name - ~params: [ Gen_common.context_arg; expr_arg ] - ~ty: ("'a") - ~body: [ open_db_module; - Printf.sprintf "let records = DB.read_records_where __t \"%s\" %s in" - (Escaping.escape_obj obj.DT.name) expr; - Printf.sprintf "List.map (fun (ref,(__regular_fields,__set_refs)) -> Ref.of_string ref, %s __regular_fields __set_refs) records" conversion_fn] () in - - let register_get_record obj = O.Let.make + "List.map Ref.of_string refs " ] () in + + let 
get_record_aux_fn_body ?(m="API.") (obj: obj) (all_fields: field list) = + + let of_field = function + | { DT.ty = DT.Set(DT.Ref other); full_name = full_name; DT.field_ignore_foreign_key = false } -> + Printf.sprintf "List.map %s.%s (List.assoc \"%s\" __set_refs)" + _string_to_dm + (OU.alias_of_ty (DT.Ref other)) + (Escaping.escape_id full_name) + | f -> + _string_to_dm ^ "." ^ (OU.alias_of_ty f.DT.ty) ^ + "(List.assoc \"" ^ (Escaping.escape_id f.full_name) ^ "\" __regular_fields)" in + let make_field f = Printf.sprintf " %s%s = %s;" m (OU.ocaml_of_record_field (obj.DT.name :: f.DT.full_name)) (of_field f) in + let fields = List.map make_field all_fields in + let mk_rec = [ "{" ] @ fields @ [ " }"] in + String.concat "\n" mk_rec in + + let get_record_aux_fn (obj : obj) = + let record_fields = List.filter client_side_field (DU.fields_of_obj obj) in + O.Let.make + ~name: "get_record'" + ~params: [ O.Named("__regular_fields", "(string * string) list"); + O.Named("__set_refs", "(string * (string list)) list") ] + ~ty:"'a" + ~body: [ get_record_aux_fn_body obj record_fields ] () in + + let get_record_internal_aux_fn (obj : obj) = + let record_fields = DU.fields_of_obj obj in + O.Let.make + ~name: "get_record_internal'" + ~params: [ O.Named("__regular_fields", "(string * string) list"); + O.Named("__set_refs", "(string * (string list)) list") ] + ~ty: "'a" + ~body: [ get_record_aux_fn_body ~m:"" obj record_fields ] () in + + let get_records_where (obj: obj) name conversion_fn = + O.Let.make + ~name: name + ~params: [ Gen_common.context_arg; expr_arg ] + ~ty: ("'a") + ~body: [ open_db_module; + Printf.sprintf "let records = DB.read_records_where __t \"%s\" %s in" + (Escaping.escape_obj obj.DT.name) expr; + Printf.sprintf "List.map (fun (ref,(__regular_fields,__set_refs)) -> Ref.of_string ref, %s __regular_fields __set_refs) records" conversion_fn] () in + + let register_get_record obj = O.Let.make ~name:"_" ~params:[] ~ty:"unit" ~body:[ - Printf.sprintf "Hashtbl.add 
Eventgen.get_record_table \"%s\"" obj.DT.name; - Printf.sprintf "(fun ~__context ~self -> (fun () -> API.rpc_of_%s_t (%s.get_record ~__context ~self:(Ref.of_string self))))" - (OU.ocaml_of_record_name obj.DT.name) - (OU.ocaml_of_obj_name obj.DT.name) - ] + Printf.sprintf "Hashtbl.add Eventgen.get_record_table \"%s\"" obj.DT.name; + Printf.sprintf "(fun ~__context ~self -> (fun () -> API.rpc_of_%s_t (%s.get_record ~__context ~self:(Ref.of_string self))))" + (OU.ocaml_of_record_name obj.DT.name) + (OU.ocaml_of_obj_name obj.DT.name) + ] () in let operation (obj: obj) ( { msg_tag = tag } as x ) = @@ -301,108 +301,108 @@ let db_action api : O.Module.t = let body = match tag with | FromField(Setter, fld) -> - Printf.sprintf "DB.write_field __t \"%s\" %s \"%s\" value" - (Escaping.escape_obj obj.DT.name) - Client._self - (Escaping.escape_id fld.DT.full_name) + Printf.sprintf "DB.write_field __t \"%s\" %s \"%s\" value" + (Escaping.escape_obj obj.DT.name) + Client._self + (Escaping.escape_id fld.DT.full_name) | FromField(Getter, { DT.ty = ty; full_name = full_name }) -> - Printf.sprintf "%s.%s (DB.read_field __t \"%s\" \"%s\" %s)" - _string_to_dm (OU.alias_of_ty ty) - (Escaping.escape_obj obj.DT.name) - (Escaping.escape_id full_name) - Client._self + Printf.sprintf "%s.%s (DB.read_field __t \"%s\" \"%s\" %s)" + _string_to_dm (OU.alias_of_ty ty) + (Escaping.escape_obj obj.DT.name) + (Escaping.escape_id full_name) + Client._self | FromField(Add, { DT.ty = DT.Map(_, _); full_name = full_name }) -> - Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s AddMap" - Client._key Client._value - (Escaping.escape_obj obj.DT.name) - (Escaping.escape_id full_name) - Client._self + Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s AddMap" + Client._key Client._value + (Escaping.escape_obj obj.DT.name) + (Escaping.escape_id full_name) + Client._self | FromField(Add, { DT.ty = DT.Set(_); full_name = full_name }) -> - Printf.sprintf 
"DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s AddSet" - Client._value - (Escaping.escape_obj obj.DT.name) - (Escaping.escape_id full_name) - Client._self + Printf.sprintf "DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s AddSet" + Client._value + (Escaping.escape_obj obj.DT.name) + (Escaping.escape_id full_name) + Client._self | FromField(Remove, { DT.ty = DT.Map(_, _); full_name = full_name }) -> - Printf.sprintf "DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s RemoveMap" - Client._key - (Escaping.escape_obj obj.DT.name) - (Escaping.escape_id full_name) - Client._self + Printf.sprintf "DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s RemoveMap" + Client._key + (Escaping.escape_obj obj.DT.name) + (Escaping.escape_id full_name) + Client._self | FromField(Remove, { DT.ty = DT.Set(_); full_name = full_name }) -> - Printf.sprintf "DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s RemoveSet" - Client._value - (Escaping.escape_obj obj.DT.name) - (Escaping.escape_id full_name) - Client._self + Printf.sprintf "DB.process_structured_field __t (%s,\"\") \"%s\" \"%s\" %s RemoveSet" + Client._value + (Escaping.escape_obj obj.DT.name) + (Escaping.escape_id full_name) + Client._self | FromField((Add | Remove), _) -> failwith "Cannot generate db add/remove for non sets and maps" | FromObject(Delete) -> - (Printf.sprintf "DB.delete_row __t \"%s\" %s" - (Escaping.escape_obj obj.DT.name) Client._self) + (Printf.sprintf "DB.delete_row __t \"%s\" %s" + (Escaping.escape_obj obj.DT.name) Client._self) | FromObject(Make) -> - let fields = List.filter field_in_this_table (DU.fields_of_obj obj) in -(* let fields = db_fields_of_obj obj in *) - let kvs = List.map (fun fld -> - Escaping.escape_id fld.full_name, - OU.ocaml_of_record_field fld.full_name) fields in - let kvs' = List.map (fun (sql, o) -> - Printf.sprintf "(\"%s\", %s)" sql o) kvs in - Printf.sprintf "DB.create_row __t \"%s\" [ %s ] ref" - (Escaping.escape_obj obj.DT.name) - 
(String.concat "; " kvs') + let fields = List.filter field_in_this_table (DU.fields_of_obj obj) in + (* let fields = db_fields_of_obj obj in *) + let kvs = List.map (fun fld -> + Escaping.escape_id fld.full_name, + OU.ocaml_of_record_field fld.full_name) fields in + let kvs' = List.map (fun (sql, o) -> + Printf.sprintf "(\"%s\", %s)" sql o) kvs in + Printf.sprintf "DB.create_row __t \"%s\" [ %s ] ref" + (Escaping.escape_obj obj.DT.name) + (String.concat "; " kvs') | FromObject(GetByUuid) -> - begin match x.msg_params, x.msg_result with - | [ {param_type=ty; param_name=name} ], Some (result_ty, _) -> - let query = Printf.sprintf "DB.db_get_by_uuid __t \"%s\" %s" - (Escaping.escape_obj obj.DT.name) - (OU.escape name) in - _string_to_dm ^ "." ^ (OU.alias_of_ty result_ty) ^ " (" ^ query ^ ")" - | _ -> failwith "GetByUuid call should have only one parameter and a result!" - end + begin match x.msg_params, x.msg_result with + | [ {param_type=ty; param_name=name} ], Some (result_ty, _) -> + let query = Printf.sprintf "DB.db_get_by_uuid __t \"%s\" %s" + (Escaping.escape_obj obj.DT.name) + (OU.escape name) in + _string_to_dm ^ "." ^ (OU.alias_of_ty result_ty) ^ " (" ^ query ^ ")" + | _ -> failwith "GetByUuid call should have only one parameter and a result!" + end | FromObject(GetByLabel) -> - begin match x.msg_params, x.msg_result with - | [ {param_type=ty; param_name=name} ], Some (Set result_ty, _) -> - let query = Printf.sprintf "DB.db_get_by_name_label __t \"%s\" %s" - (Escaping.escape_obj obj.DT.name) - (OU.escape name) in - if DU.obj_has_get_by_name_label obj - then "List.map " ^ _string_to_dm ^ "." ^ (OU.alias_of_ty result_ty) ^ " (" ^ query ^ ")" - else "failwith \\\"Object has no label field\\\"" - | _ -> failwith "GetByLabel call should have only one parameter and a result!" 
- end + begin match x.msg_params, x.msg_result with + | [ {param_type=ty; param_name=name} ], Some (Set result_ty, _) -> + let query = Printf.sprintf "DB.db_get_by_name_label __t \"%s\" %s" + (Escaping.escape_obj obj.DT.name) + (OU.escape name) in + if DU.obj_has_get_by_name_label obj + then "List.map " ^ _string_to_dm ^ "." ^ (OU.alias_of_ty result_ty) ^ " (" ^ query ^ ")" + else "failwith \\\"Object has no label field\\\"" + | _ -> failwith "GetByLabel call should have only one parameter and a result!" + end | FromObject(GetRecord) -> get_record obj "get_record'" | FromObject(Private(GetDBRecord)) -> get_record obj "get_record_internal'" | FromObject(Private(GetDBAll)) -> - (* | FromObject(GetAll) -> *) - (* Generate the same code for the internal GetDBAll as well as the public GetAll. - Eventually we'll need to provide user filtering for the public version *) - begin match x.msg_result with - | Some (Set result_ty, _) -> - let query = Printf.sprintf "DB.read_refs __t \"%s\"" - (Escaping.escape_obj obj.DT.name) in - "List.map " ^ _string_to_dm ^ "." ^ (OU.alias_of_ty result_ty) ^ "(" ^ query ^ ")" - | _ -> failwith "GetAll call needs a result type" - end + (* | FromObject(GetAll) -> *) + (* Generate the same code for the internal GetDBAll as well as the public GetAll. + Eventually we'll need to provide user filtering for the public version *) + begin match x.msg_result with + | Some (Set result_ty, _) -> + let query = Printf.sprintf "DB.read_refs __t \"%s\"" + (Escaping.escape_obj obj.DT.name) in + "List.map " ^ _string_to_dm ^ "." 
^ (OU.alias_of_ty result_ty) ^ "(" ^ query ^ ")" + | _ -> failwith "GetAll call needs a result type" + end | FromObject(GetAllRecords) -> - String.concat "\n" - [ "let expr' = Db_filter_types.True in"; - "get_records_where ~" ^ Gen_common.context ^ " ~expr:expr'" ] + String.concat "\n" + [ "let expr' = Db_filter_types.True in"; + "get_records_where ~" ^ Gen_common.context ^ " ~expr:expr'" ] | FromObject(GetAllRecordsWhere) -> - String.concat "\n" - [ "let expr' = Db_filter.expr_of_string expr in"; - "get_records_where ~" ^ Gen_common.context ^ " ~expr:expr'" ] + String.concat "\n" + [ "let expr' = Db_filter.expr_of_string expr in"; + "get_records_where ~" ^ Gen_common.context ^ " ~expr:expr'" ] - (* + (* | FromObject(Private(Copy)) -> begin match x.msg_params with | [ _, src_name, _; _, dst_name, _ ] -> make_shallow_copy api obj (OU.escape src_name) (OU.escape dst_name) (DU.fields_of_obj obj) | _ -> failwith "Copy needs a single parameter" end *) - | _ -> assert false - in + | _ -> assert false + in O.Let.make ~name: x.msg_name ~params: (Gen_common.context_arg :: args) @@ -410,12 +410,12 @@ let db_action api : O.Module.t = ~body: (List.map to_string args @ [ open_db_module; body ]) () in let obj (obj: obj) = - let others = + let others = [ get_record_aux_fn obj; - get_record_internal_aux_fn obj; - get_refs_where obj; - get_records_where obj "get_internal_records_where" "get_record_internal'"; - get_records_where obj "get_records_where" "get_record'"; + get_record_internal_aux_fn obj; + get_refs_where obj; + get_records_where obj "get_internal_records_where" "get_record_internal'"; + get_records_where obj "get_records_where" "get_record'"; ] in let bindings = List.map (operation obj) obj.messages @ others in @@ -426,7 +426,7 @@ let db_action api : O.Module.t = ~letrec:true () in - let obj_init (obj: obj) = + let obj_init (obj: obj) = O.Module.make ~name:(OU.ocaml_of_obj_name obj.DT.name ^ "_init") ~elements:(if obj.DT.in_database then [O.Module.Let 
(register_get_record obj)] else []) @@ -437,11 +437,11 @@ let db_action api : O.Module.t = O.Module.make ~name:_db_action - ~preamble:[ - "open Db_cache_types"; - "module D=Debug.Make(struct let name=\"db\" end)"; - "open D"; - ] + ~preamble:[ + "open Db_cache_types"; + "module D=Debug.Make(struct let name=\"db\" end)"; + "open D"; + ] ~elements:(List.map (fun x -> O.Module.Module x) modules) () @@ -450,7 +450,7 @@ let db_action api : O.Module.t = which has no custom action. The signature will be smaller than the db_actions signature but the db_actions module will be compatable with it *) let make_db_defaults_api = Dm_api.filter (fun _ -> true) (fun _ -> true) - (fun x -> not(Gen_empty_custom.operation_requires_side_effect x)) + (fun x -> not(Gen_empty_custom.operation_requires_side_effect x)) let db_defaults api : O.Signature.t = (* Since we intend to defunctorise, don't bother filtering the signature *) @@ -460,8 +460,8 @@ let db_defaults api : O.Signature.t = let args = Gen_common.context_arg :: (args_of_message obj x) in { O.Val.name = x.msg_name; params = args @ - [ O.Anon(None, match x.msg_result with Some (ty,_) -> OU.alias_of_ty ty - | None -> "unit") ] + [ O.Anon(None, match x.msg_result with Some (ty,_) -> OU.alias_of_ty ty + | None -> "unit") ] } in let obj (obj: obj) = diff --git a/ocaml/idl/ocaml_backend/gen_db_check.ml b/ocaml/idl/ocaml_backend/gen_db_check.ml index 3e61d0d7b01..5797bb65830 100644 --- a/ocaml/idl/ocaml_backend/gen_db_check.ml +++ b/ocaml/idl/ocaml_backend/gen_db_check.ml @@ -27,7 +27,7 @@ let _db_action = Gen_db_actions._db_action (** True if a field is actually in this table, false if stored elsewhere (ie Set(Ref _) are stored in foreign tables *) -let field_in_this_table = Gen_db_actions.field_in_this_table +let field_in_this_table = Gen_db_actions.field_in_this_table (* Escaping.escape_id f.full_name *) @@ -35,18 +35,18 @@ let _db_exists = "Db_exists" let self obj = O.Named(Client._self, OU.alias_of_ty (Ref obj.name)) -let 
record_exists api : O.Module.t = - let ref_exists (obj: obj) = - let body = +let record_exists api : O.Module.t = + let ref_exists (obj: obj) = + let body = if obj.DT.in_database then begin - Printf.sprintf - "try ignore(%s.%s.get_record_internal ~%s ~%s); true with _ -> false" - Gen_db_actions._db_action - (OU.ocaml_of_obj_name obj.name) - Gen_common.context - Client._self + Printf.sprintf + "try ignore(%s.%s.get_record_internal ~%s ~%s); true with _ -> false" + Gen_db_actions._db_action + (OU.ocaml_of_obj_name obj.name) + Gen_common.context + Client._self end else begin - "false" + "false" end in O.Let.make @@ -64,30 +64,30 @@ let _db_check = "Db_check" let db_check api : O.Module.t = - let check_refs (obj: obj) = + let check_refs (obj: obj) = (* List all the fields of the object which are references AND stored in this table *) let fields = List.filter field_in_this_table (DU.fields_of_obj obj) in let fields = List.filter (function { DT.ty = Ref _ } -> true | _ -> false ) fields in - let getrecord = + let getrecord = Printf.sprintf "let _r = %s.%s.get_record_internal ~%s ~%s in" - Gen_db_actions._db_action - (OU.ocaml_of_obj_name obj.name) - Gen_common.context - Client._self in + Gen_db_actions._db_action + (OU.ocaml_of_obj_name obj.name) + Gen_common.context + Client._self in let check = function - | { ty = Ref x; full_name = full_name } -> - Printf.sprintf "(fun () -> %s._%s ~%s ~%s:_r.%s)" - _db_exists - (OU.ocaml_of_obj_name x) - Gen_common.context - Client._self - (OU.ocaml_of_record_field (obj.DT.name :: full_name)) - | _ -> assert false - in - let wrapper f = + | { ty = Ref x; full_name = full_name } -> + Printf.sprintf "(fun () -> %s._%s ~%s ~%s:_r.%s)" + _db_exists + (OU.ocaml_of_obj_name x) + Gen_common.context + Client._self + (OU.ocaml_of_record_field (obj.DT.name :: full_name)) + | _ -> assert false + in + let wrapper f = Printf.sprintf "(runcheck \"%s\" %s \"%s\" %s)" - obj.name Client._self (String.concat "/" f.full_name) (check f) in + obj.name 
Client._self (String.concat "/" f.full_name) (check f) in let body = if fields = [] then ["true"] else [getrecord; String.concat " &&\n " (List.map wrapper fields)] in O.Let.make @@ -96,15 +96,15 @@ let db_check api : O.Module.t = ~ty: "'a" ~body () in - let all_records (obj: obj) = + let all_records (obj: obj) = let obj_name = OU.ocaml_of_obj_name obj.name in - let fold = - if obj.DT.in_database then - Printf.sprintf - "List.fold_left (&&) true (List.map (fun self -> _%s ~%s ~self) (%s.%s.get_all ~%s))" - obj_name Gen_common.context _db_action obj_name Gen_common.context - else - "true" + let fold = + if obj.DT.in_database then + Printf.sprintf + "List.fold_left (&&) true (List.map (fun self -> _%s ~%s ~self) (%s.%s.get_all ~%s))" + obj_name Gen_common.context _db_action obj_name Gen_common.context + else + "true" in O.Let.make ~name: ("all_" ^ obj_name) @@ -112,15 +112,15 @@ let db_check api : O.Module.t = ~ty: "bool" ~body: [ fold ] () in - let all (objs: obj list) = - let one obj = + let all (objs: obj list) = + let one obj = let obj_name = OU.ocaml_of_obj_name obj.name in Printf.sprintf "(all_%s ~%s)" obj_name Gen_common.context in - O.Module.Let (O.Let.make - ~name:"all" - ~params: [ Gen_common.context_arg ] - ~ty: "bool" - ~body: [ String.concat "&&\n" (List.map one objs) ] ()) in + O.Module.Let (O.Let.make + ~name:"all" + ~params: [ Gen_common.context_arg ] + ~ty: "bool" + ~body: [ String.concat "&&\n" (List.map one objs) ] ()) in let objects = Dm_api.objects_of_api api in let lets_of f = List.map (fun x -> O.Module.Let (f x)) objects in @@ -130,7 +130,7 @@ let db_check api : O.Module.t = ~preamble:[ "let runcheck cls row col fn = "; " Printf.printf \"Checking %s %s.%s: \" (Ref.string_of row) cls col;"; " if fn ()"; - " then (print_endline \"OK\"; true)"; + " then (print_endline \"OK\"; true)"; " else (print_endline \"FAILED\"; false)" ] ~elements:( lets_of check_refs @ (lets_of all_records) @ [ all objects ] ) () diff --git 
a/ocaml/idl/ocaml_backend/gen_empty_custom.ml b/ocaml/idl/ocaml_backend/gen_empty_custom.ml index ec45fa77a45..b8e7f683c1c 100644 --- a/ocaml/idl/ocaml_backend/gen_empty_custom.ml +++ b/ocaml/idl/ocaml_backend/gen_empty_custom.ml @@ -36,12 +36,12 @@ let _task_id = "task_id" *) -let operation_requires_side_effect ({ msg_tag = tag } as msg) = +let operation_requires_side_effect ({ msg_tag = tag } as msg) = (match msg.DT.msg_force_custom (* this flag always forces msg into custom_actions.ml *) - with None -> false | Some (mode) -> - if mode=RW then true (*RW=force both setters and getters into custom_actions *) - else (*{Static/Dynamic}RO=force only getters into custom_actions *) - (match msg with + with None -> false | Some (mode) -> + if mode=RW then true (*RW=force both setters and getters into custom_actions *) + else (*{Static/Dynamic}RO=force only getters into custom_actions *) + (match msg with | { msg_tag = FromField((Setter|Add|Remove), _) } -> false | { msg_tag = FromObject(Make|Delete) } -> false | _ -> true) @@ -71,20 +71,20 @@ let gen_debug_module name_override result_type_override body_override api : O.Mo let result_type = match result_type_override with - None -> - begin - match x.msg_custom_marshaller, x.msg_result with - | true, _ -> "Rpc.t" - | _, Some (ty, _) -> OU.alias_of_ty ty - | _, None -> "unit" - end - | Some t -> t in + None -> + begin + match x.msg_custom_marshaller, x.msg_result with + | true, _ -> "Rpc.t" + | _, Some (ty, _) -> OU.alias_of_ty ty + | _, None -> "unit" + end + | Some t -> t in let body = match body_override with - None -> [ "raise (Not_implemented \""^x.msg_name^"\")" ] - | Some b -> b in - + None -> [ "raise (Not_implemented \""^x.msg_name^"\")" ] + | Some b -> b in + O.Let.make ~name:x.msg_name ~params:(Gen_common.context_arg :: args) @@ -97,8 +97,8 @@ let gen_debug_module name_override result_type_override body_override api : O.Mo let messages = List.filter (fun x -> not (DU.has_been_removed x.DT.msg_lifecycle)) 
obj.messages in let fields = List.map (fun x -> O.Module.Let (operation obj x)) messages in O.Module.make - ~name:(OU.ocaml_of_obj_name obj.DT.name) - ~elements:fields () + ~name:(OU.ocaml_of_obj_name obj.DT.name) + ~elements:fields () in O.Module.make @@ -117,7 +117,7 @@ let gen_signature signature_name result_type_override api : O.Signature.t = an implementation of everything. *) let gen_release_module api : O.Module.t = let obj (obj: obj) = O.Module.make - ~name:(OU.ocaml_of_obj_name obj.DT.name) ~elements:[] () in + ~name:(OU.ocaml_of_obj_name obj.DT.name) ~elements:[] () in O.Module.make ~name:release_module_name ~elements:(List.map (fun x -> O.Module.Module (obj x)) (Dm_api.objects_of_api api)) () diff --git a/ocaml/idl/ocaml_backend/gen_rbac.ml b/ocaml/idl/ocaml_backend/gen_rbac.ml index 00d11810b7e..52351ea0058 100644 --- a/ocaml/idl/ocaml_backend/gen_rbac.ml +++ b/ocaml/idl/ocaml_backend/gen_rbac.ml @@ -24,53 +24,53 @@ module Client = Gen_client open DT -let rec role_idx = function - | (_,[])->(-1) - |(e1,e2::xs)-> if e1=e2 then 0 else 1+(role_idx (e1,xs)) +let rec role_idx = function + | (_,[])->(-1) + |(e1,e2::xs)-> if e1=e2 then 0 else 1+(role_idx (e1,xs)) let internal_role_local_root = "_local_root_" (* the output of this function is used as input by the automatic tests *) let writer_csv static_roles_permissions static_permissions_roles = - (Printf.sprintf "%s,PERMISSION/ROLE,%s\n" - (let t = Debug.gettimestring () in (String.sub t 0 ((String.length t)-1))) - (* role titles are ordered by roles in roles_all *) - (List.fold_left (fun rr r->rr^r^",") "" Datamodel.roles_all) - ) - ^List.fold_left - (fun acc (permission,roles) -> - (Printf.sprintf ",%s," permission) - ^(List.fold_left - (fun acc role -> if (List.exists (fun r->r=role) roles) then "X,"^acc else ","^acc) - "" - (List.rev Datamodel.roles_all) (* Xs are ordered by roles in roles_all *) - ) - ^"\n" - ^acc - ) - "" - static_permissions_roles + (Printf.sprintf "%s,PERMISSION/ROLE,%s\n" + (let t = 
Debug.gettimestring () in (String.sub t 0 ((String.length t)-1))) + (* role titles are ordered by roles in roles_all *) + (List.fold_left (fun rr r->rr^r^",") "" Datamodel.roles_all) + ) + ^List.fold_left + (fun acc (permission,roles) -> + (Printf.sprintf ",%s," permission) + ^(List.fold_left + (fun acc role -> if (List.exists (fun r->r=role) roles) then "X,"^acc else ","^acc) + "" + (List.rev Datamodel.roles_all) (* Xs are ordered by roles in roles_all *) + ) + ^"\n" + ^acc + ) + "" + static_permissions_roles let hash2uuid str = - let h = Digest.string str in - let hex = Digest.to_hex h in - let int_array hex = - let l = ref [] in - Scanf.sscanf - hex - "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" - (fun a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 -> - l := [a0;a1;a2;a3;a4;a5;a6;a7;a8;a9;a10;a11;a12;a13;a14;a15;]); - Array.of_list !l - in - Uuid.string_of_uuid (Uuid.uuid_of_int_array (int_array hex)) + let h = Digest.string str in + let hex = Digest.to_hex h in + let int_array hex = + let l = ref [] in + Scanf.sscanf + hex + "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x" + (fun a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 -> + l := [a0;a1;a2;a3;a4;a5;a6;a7;a8;a9;a10;a11;a12;a13;a14;a15;]); + Array.of_list !l + in + Uuid.string_of_uuid (Uuid.uuid_of_int_array (int_array hex)) let replace_char _str c1 c2 = - let str = String.copy _str in (*defensive copy*) - for i=0 to String.length str -1 do - if str.[i]=c1 then str.[i]<-c2 else () - done; - str + let str = String.copy _str in (*defensive copy*) + for i=0 to String.length str -1 do + if str.[i]=c1 then str.[i]<-c2 else () + done; + str let role_uuid name = hash2uuid name let ref_prefix = Ref.ref_prefix @@ -78,27 +78,27 @@ let role_ref name = (ref_prefix ^ (role_uuid name)) let permission_description = "A basic permission" let permission_name wire_name = - let s1 =replace_char (Printf.sprintf "permission_%s" wire_name) '.' 
'_' in - let s2 = replace_char s1 '/' '_' in - let s3 = Stdext.Xstringext.String.replace "*" "WILDCHAR" s2 in - Stdext.Xstringext.String.replace ":" "_" s3 + let s1 =replace_char (Printf.sprintf "permission_%s" wire_name) '.' '_' in + let s2 = replace_char s1 '/' '_' in + let s3 = Stdext.Xstringext.String.replace "*" "WILDCHAR" s2 in + Stdext.Xstringext.String.replace ":" "_" s3 let permission_index = ref 0 let writer_permission name nperms = - let permission_uuid = role_uuid name in - (*let permission_ref = role_ref name in*) - let permission_name_label = - (String.lowercase name) (* lowercase here asked by GUI team *) - in - permission_index := !permission_index+1; - let permission_number = (Printf.sprintf "%i/%i" !permission_index nperms) in - (Printf.sprintf "let %s = \n { (* %s *)\n" (permission_name name) permission_number) - (*^(Printf.sprintf " role_ref = \"%s\";\n" permission_ref)*) - ^(Printf.sprintf " role_uuid = \"%s\";\n" permission_uuid) - ^(Printf.sprintf " role_name_label = \"%s\";\n" permission_name_label) - ^(Printf.sprintf " role_name_description = permission_description;\n") - ^(Printf.sprintf " role_subroles = []; (* permission cannot have any subroles *)\n") - ^(Printf.sprintf " }\n") + let permission_uuid = role_uuid name in + (*let permission_ref = role_ref name in*) + let permission_name_label = + (String.lowercase name) (* lowercase here asked by GUI team *) + in + permission_index := !permission_index+1; + let permission_number = (Printf.sprintf "%i/%i" !permission_index nperms) in + (Printf.sprintf "let %s = \n { (* %s *)\n" (permission_name name) permission_number) + (*^(Printf.sprintf " role_ref = \"%s\";\n" permission_ref)*) + ^(Printf.sprintf " role_uuid = \"%s\";\n" permission_uuid) + ^(Printf.sprintf " role_name_label = \"%s\";\n" permission_name_label) + ^(Printf.sprintf " role_name_description = permission_description;\n") + ^(Printf.sprintf " role_subroles = []; (* permission cannot have any subroles *)\n") + ^(Printf.sprintf " 
}\n") let role_label role = replace_char (Printf.sprintf "role_%s" role) '-' '_' (*let subroles_label role = (Printf.sprintf "subroles_of_%s" (role_label role))*) @@ -106,34 +106,34 @@ let permissions_label role = (Printf.sprintf "permissions_of_%s" (role_label rol let role_index = ref 0 let writer_role name nroles = - let role_uuid = - if name = Datamodel.role_pool_admin - (* pool-admin role has a fixed uuid because it's the default role in Datamodel subject's roles field *) - then Constants.rbac_pool_admin_uuid - (* all the other roles use a hash as uuid *) - else role_uuid name - in - (*let role_ref = role_ref name in*) - let role_name_label = - (String.lowercase name) (* lowercase here asked by GUI team *) - in - role_index := !role_index+1; - let role_number = (Printf.sprintf "%i/%i" !role_index nroles) in - let role_description = - try List.assoc role_name_label Datamodel.role_description - with Not_found -> - failwith (Printf.sprintf - "Check Datamodel.role_description: there's no role description for role %s" - role_name_label - ) - in - (Printf.sprintf "let %s = \n { (* %s *)\n" (role_label name) role_number) - (*^(Printf.sprintf " role_ref = \"%s\";\n" role_ref)*) - ^(Printf.sprintf " role_uuid = \"%s\";\n" role_uuid) - ^(Printf.sprintf " role_name_label = \"%s\";\n" role_name_label) - ^(Printf.sprintf " role_name_description = \"%s\";\n" role_description) - ^(Printf.sprintf " role_subroles = get_refs %s;\n" (permissions_label name)) - ^(Printf.sprintf " }\n") + let role_uuid = + if name = Datamodel.role_pool_admin + (* pool-admin role has a fixed uuid because it's the default role in Datamodel subject's roles field *) + then Constants.rbac_pool_admin_uuid + (* all the other roles use a hash as uuid *) + else role_uuid name + in + (*let role_ref = role_ref name in*) + let role_name_label = + (String.lowercase name) (* lowercase here asked by GUI team *) + in + role_index := !role_index+1; + let role_number = (Printf.sprintf "%i/%i" !role_index nroles) in 
+ let role_description = + try List.assoc role_name_label Datamodel.role_description + with Not_found -> + failwith (Printf.sprintf + "Check Datamodel.role_description: there's no role description for role %s" + role_name_label + ) + in + (Printf.sprintf "let %s = \n { (* %s *)\n" (role_label name) role_number) + (*^(Printf.sprintf " role_ref = \"%s\";\n" role_ref)*) + ^(Printf.sprintf " role_uuid = \"%s\";\n" role_uuid) + ^(Printf.sprintf " role_name_label = \"%s\";\n" role_name_label) + ^(Printf.sprintf " role_name_description = \"%s\";\n" role_description) + ^(Printf.sprintf " role_subroles = get_refs %s;\n" (permissions_label name)) + ^(Printf.sprintf " }\n") (* let get_ref name = @@ -142,129 +142,129 @@ let get_ref name = (* the output of this function generates ocaml/autogen/rbac-static.ml *) let writer_stdout static_roles_permissions static_permissions_roles = - let nperms = List.length static_permissions_roles in - let nroles = List.length static_roles_permissions in - (Printf.sprintf "(* This file contains relations between static roles and permissions *)") - ^(Printf.sprintf "\n(* Auto-generated from the role flags in the datamodel *)\n\n") - (* 0. ml header/imports *) - ^(Printf.sprintf "open Db_actions\n\n") - (* 1. the static permissions *) - ^(Printf.sprintf "let permission_description = \"%s\"\n\n" permission_description) - ^(List.fold_left - (fun acc (perm,_) -> acc^(writer_permission perm nperms)) - "" - static_permissions_roles - ) - (* 2. 
static_roles<->permissions *) - ^(List.fold_left - (fun acc (_role,perms) -> - (* role's list of permissions *) - let permissions_label = permissions_label _role in - (*let subroles_label = subroles_label _role in*) - acc^ - (Printf.sprintf "(* %i elements in %s *)\n" (List.length perms) permissions_label) - ^(Printf.sprintf "let %s = [" permissions_label) - ^(List.fold_left - (fun acc perm -> (Printf.sprintf "%s; " (permission_name perm))^acc) - "" - perms - ) - ^Printf.sprintf "]\n\n" -(* (* role's list of permission refs *) - ^(Printf.sprintf "(* %i elements in %s *)\n" (List.length perms) subroles_label) - ^(Printf.sprintf "let %s = [" subroles_label) - ^(List.fold_left - (fun acc perm -> (Printf.sprintf "\"%s\"; " (role_ref perm))^acc) - "" - perms - ) - ^Printf.sprintf "]\n\n" -*) - ) - "" - static_roles_permissions - ) - (* 3. all static permissions *) - ^("let all_static_permissions = permissions_of_role_pool_admin\n") - (* 4. list of static roles *) - ^"let get_refs permissions = List.map (fun p->Ref.of_string (Ref.ref_prefix ^ p.role_uuid)) permissions\n\n" - ^(List.fold_left - (fun acc (role,_) -> acc^(writer_role role nroles)) - "" - static_roles_permissions - ) - (* 5. all static roles *) - ^("let all_static_roles = \n[\n") - ^(List.fold_left - (fun acc (role,_) -> acc^(Printf.sprintf " %s;\n" (role_label role))) - "" - static_roles_permissions - ) - ^("]\n") + let nperms = List.length static_permissions_roles in + let nroles = List.length static_roles_permissions in + (Printf.sprintf "(* This file contains relations between static roles and permissions *)") + ^(Printf.sprintf "\n(* Auto-generated from the role flags in the datamodel *)\n\n") + (* 0. ml header/imports *) + ^(Printf.sprintf "open Db_actions\n\n") + (* 1. the static permissions *) + ^(Printf.sprintf "let permission_description = \"%s\"\n\n" permission_description) + ^(List.fold_left + (fun acc (perm,_) -> acc^(writer_permission perm nperms)) + "" + static_permissions_roles + ) + (* 2. 
static_roles<->permissions *) + ^(List.fold_left + (fun acc (_role,perms) -> + (* role's list of permissions *) + let permissions_label = permissions_label _role in + (*let subroles_label = subroles_label _role in*) + acc^ + (Printf.sprintf "(* %i elements in %s *)\n" (List.length perms) permissions_label) + ^(Printf.sprintf "let %s = [" permissions_label) + ^(List.fold_left + (fun acc perm -> (Printf.sprintf "%s; " (permission_name perm))^acc) + "" + perms + ) + ^Printf.sprintf "]\n\n" + (* (* role's list of permission refs *) + ^(Printf.sprintf "(* %i elements in %s *)\n" (List.length perms) subroles_label) + ^(Printf.sprintf "let %s = [" subroles_label) + ^(List.fold_left + (fun acc perm -> (Printf.sprintf "\"%s\"; " (role_ref perm))^acc) + "" + perms + ) + ^Printf.sprintf "]\n\n" + *) + ) + "" + static_roles_permissions + ) + (* 3. all static permissions *) + ^("let all_static_permissions = permissions_of_role_pool_admin\n") + (* 4. list of static roles *) + ^"let get_refs permissions = List.map (fun p->Ref.of_string (Ref.ref_prefix ^ p.role_uuid)) permissions\n\n" + ^(List.fold_left + (fun acc (role,_) -> acc^(writer_role role nroles)) + "" + static_roles_permissions + ) + (* 5. 
all static roles *) + ^("let all_static_roles = \n[\n") + ^(List.fold_left + (fun acc (role,_) -> acc^(Printf.sprintf " %s;\n" (role_label role))) + "" + static_roles_permissions + ) + ^("]\n") (* This function maps a string xperm and an extra-str-list into *) (* a dictionary entry (xperm,extra-str-list::original-str-list), *) (* and returns the resulting dictionary *) let rec concat = function - | (xperm,rs,[]) -> - let (r1,r2)=(List.partition (fun (r,_)->r=internal_role_local_root) rs) in - let r,perms = match r1 with []->(internal_role_local_root,[])|r1::_->r1 in - ((r,xperm::perms)::r2) - | (xperm,rs,xr::extra_rs) -> - let (r1,r2)=(List.partition (fun (r,_)->r=xr) rs) in - let r,perms = match r1 with []->(xr,[])|r1::_->r1 in - concat (xperm,((r,xperm::perms)::r2),extra_rs) + | (xperm,rs,[]) -> + let (r1,r2)=(List.partition (fun (r,_)->r=internal_role_local_root) rs) in + let r,perms = match r1 with []->(internal_role_local_root,[])|r1::_->r1 in + ((r,xperm::perms)::r2) + | (xperm,rs,xr::extra_rs) -> + let (r1,r2)=(List.partition (fun (r,_)->r=xr) rs) in + let r,perms = match r1 with []->(xr,[])|r1::_->r1 in + concat (xperm,((r,xperm::perms)::r2),extra_rs) let get_key_permission_name permission key_name = permission ^ "/key:" ^ key_name let add_permission_to_roles roles_permissions (obj: obj) (x: message) = - let msg_allowed_roles = x.msg_allowed_roles in - let msg_map_keys_roles = x.msg_map_keys_roles in - let wire_name = DU.wire_name ~sync:true obj x in - match msg_allowed_roles with - | None -> ( - (*roles_permissions (*<-in case no-role messages are allowed, use this*)*) - (* a message should have at least one role *) - failwith (Printf.sprintf "No roles for message %s" wire_name); - ) - | Some(allowed_roles) -> - let with_msg_roles_permissions = - (concat (wire_name,roles_permissions,allowed_roles)) - in - List.fold_left - (fun rsps (k,rs)-> - let wire_name_key = get_key_permission_name wire_name k in - match rs with - |None->failwith (Printf.sprintf "No 
roles for key %s" wire_name_key) - |Some(allowed_roles)->(concat (wire_name_key, rsps, allowed_roles)) - ) - with_msg_roles_permissions - msg_map_keys_roles + let msg_allowed_roles = x.msg_allowed_roles in + let msg_map_keys_roles = x.msg_map_keys_roles in + let wire_name = DU.wire_name ~sync:true obj x in + match msg_allowed_roles with + | None -> ( + (*roles_permissions (*<-in case no-role messages are allowed, use this*)*) + (* a message should have at least one role *) + failwith (Printf.sprintf "No roles for message %s" wire_name); + ) + | Some(allowed_roles) -> + let with_msg_roles_permissions = + (concat (wire_name,roles_permissions,allowed_roles)) + in + List.fold_left + (fun rsps (k,rs)-> + let wire_name_key = get_key_permission_name wire_name k in + match rs with + |None->failwith (Printf.sprintf "No roles for key %s" wire_name_key) + |Some(allowed_roles)->(concat (wire_name_key, rsps, allowed_roles)) + ) + with_msg_roles_permissions + msg_map_keys_roles let get_http_permissions_roles = - List.fold_left - (fun acc (http_permission,(_,_,_,_,some_roles,sub_actions))-> acc @ - let roles = Stdext.Pervasiveext.default [] some_roles in - (Datamodel.rbac_http_permission_prefix ^ http_permission, roles) - :: - (List.map (* sub_actions for this http_permission *) - (fun (sub_action,some_roles)-> - let roles = Stdext.Pervasiveext.default [] some_roles in - (Datamodel.rbac_http_permission_prefix ^ http_permission - ^ "/" ^ sub_action, roles) - ) - sub_actions - ) - ) - [] - Datamodel.http_actions + List.fold_left + (fun acc (http_permission,(_,_,_,_,some_roles,sub_actions))-> acc @ + let roles = Stdext.Pervasiveext.default [] some_roles in + (Datamodel.rbac_http_permission_prefix ^ http_permission, roles) + :: + (List.map (* sub_actions for this http_permission *) + (fun (sub_action,some_roles)-> + let roles = Stdext.Pervasiveext.default [] some_roles in + (Datamodel.rbac_http_permission_prefix ^ http_permission + ^ "/" ^ sub_action, roles) + ) + sub_actions + ) + ) 
+ [] + Datamodel.http_actions let get_extra_permissions_roles = - List.map - (fun (p,rs)->(p,Stdext.Pervasiveext.default [] rs)) - Datamodel.extra_permissions - + List.map + (fun (p,rs)->(p,Stdext.Pervasiveext.default [] rs)) + Datamodel.extra_permissions + (* Returns a (permission, static_role list) list generated from datamodel.ml *) let gen_roles_of_permissions roles_permissions = (* @@ -274,68 +274,68 @@ let apicalls obj = let allmsg = List.map (fun obj -> String.concat "" (objmsgs obj)) all_objs in allmsg *) - let rec _permissions_roles = function - | (acc,[]) -> acc - | (acc,(role,permissions)::rps) -> - _permissions_roles ((concat (role,acc,permissions)),rps) - in - (* sort roles in each api-call/permission *) - let sort_fn a b = (* 0 if equal, + if a>b, - if a (permission,List.sort sort_fn roles)) - (_permissions_roles ([],roles_permissions)) - ) - in - permissions_roles + let rec _permissions_roles = function + | (acc,[]) -> acc + | (acc,(role,permissions)::rps) -> + _permissions_roles ((concat (role,acc,permissions)),rps) + in + (* sort roles in each api-call/permission *) + let sort_fn a b = (* 0 if equal, + if a>b, - if a (permission,List.sort sort_fn roles)) + (_permissions_roles ([],roles_permissions)) + ) + in + permissions_roles (* Returns a (static_role,permission list) list generated from datamodel.ml *) let gen_permissions_of_static_roles highapi = - let api = Client.client_api ~sync:true highapi in - let all_objs = Dm_api.objects_of_api api in + let api = Client.client_api ~sync:true highapi in + let all_objs = Dm_api.objects_of_api api in + + let rec get_roles_permissions_of_objs = function + | (acc,[]) -> acc + | (acc,obj::objs) -> + begin + let rec get_roles_permissions_of_obj_msgs = function + | (acc,[]) -> acc + | (acc,msg::msgs) -> + get_roles_permissions_of_obj_msgs + ((add_permission_to_roles acc obj msg),msgs) + in + get_roles_permissions_of_objs + ((get_roles_permissions_of_obj_msgs (acc,obj.messages)),objs) + end + in + let 
api_roles_permissions = + (get_roles_permissions_of_objs ([],all_objs)) (*api*) + in + let roles_permissions = (*api+http+extra*) + List.rev + (List.fold_left + (List.fold_left (fun arps (hr,hps) -> (concat (hr,arps,hps)))) + api_roles_permissions + [get_http_permissions_roles;get_extra_permissions_roles] + ) + in + + let _permissions_roles = gen_roles_of_permissions roles_permissions in + let _,permissions_roles = (* ignore the _local_root_ permission *) + List.partition (fun (r,_)->r=internal_role_local_root) _permissions_roles + in - let rec get_roles_permissions_of_objs = function - | (acc,[]) -> acc - | (acc,obj::objs) -> - begin - let rec get_roles_permissions_of_obj_msgs = function - | (acc,[]) -> acc - | (acc,msg::msgs) -> - get_roles_permissions_of_obj_msgs - ((add_permission_to_roles acc obj msg),msgs) - in - get_roles_permissions_of_objs - ((get_roles_permissions_of_obj_msgs (acc,obj.messages)),objs) - end - in - let api_roles_permissions = - (get_roles_permissions_of_objs ([],all_objs)) (*api*) - in - let roles_permissions = (*api+http+extra*) - List.rev - (List.fold_left - (List.fold_left (fun arps (hr,hps) -> (concat (hr,arps,hps)))) - api_roles_permissions - [get_http_permissions_roles;get_extra_permissions_roles] - ) - in - - let _permissions_roles = gen_roles_of_permissions roles_permissions in - let _,permissions_roles = (* ignore the _local_root_ permission *) - List.partition (fun (r,_)->r=internal_role_local_root) _permissions_roles - in - - if !Gen_server.enable_debugging - then begin (* for rbac_static.csv *) - writer_csv roles_permissions permissions_roles - end - else begin (* for rbac_static.ml *) - let _,roles_permissions = (* ignore the _local_root_ internal role *) - List.partition (fun (r,_)->r=internal_role_local_root) roles_permissions - in - writer_stdout roles_permissions permissions_roles - end + if !Gen_server.enable_debugging + then begin (* for rbac_static.csv *) + writer_csv roles_permissions permissions_roles + end + else 
begin (* for rbac_static.ml *) + let _,roles_permissions = (* ignore the _local_root_ internal role *) + List.partition (fun (r,_)->r=internal_role_local_root) roles_permissions + in + writer_stdout roles_permissions permissions_roles + end diff --git a/ocaml/idl/ocaml_backend/gen_server.ml b/ocaml/idl/ocaml_backend/gen_server.ml index c3058276b5a..38e6af0b478 100644 --- a/ocaml/idl/ocaml_backend/gen_server.ml +++ b/ocaml/idl/ocaml_backend/gen_server.ml @@ -36,17 +36,17 @@ let is_session_arg arg = ((binding = "session_id") && (converter = "ref_session")) let from_rpc ?(ignore=false) arg = - let binding = O.string_of_param arg in - let converter = O.type_of_param arg in - Printf.sprintf "let %s%s = %s_of_rpc %s_rpc in" (if ignore then "_" else "") binding converter binding + let binding = O.string_of_param arg in + let converter = O.type_of_param arg in + Printf.sprintf "let %s%s = %s_of_rpc %s_rpc in" (if ignore then "_" else "") binding converter binding let read_msg_parameter msg_parameter = - from_rpc - + from_rpc + let debug msg args = if !enable_debugging - then "D.debug \""^(String.escaped msg)^"\" " ^ (String.concat " " args) ^ ";" else "" - + then "D.debug \""^(String.escaped msg)^"\" " ^ (String.concat " " args) ^ ";" else "" + let has_default_args args = let arg_has_default arg = match arg.DT.param_default with @@ -65,8 +65,8 @@ let count_mandatory_message_parameters (msg: message) = match params with | [] -> 0 | head::tail -> ((match head.param_default with - | None -> 1 - | Some x -> 0) + + | None -> 1 + | Some x -> 0) + (count_mandatory_parameters tail)) in count_mandatory_parameters msg.msg_params @@ -76,14 +76,14 @@ let operation (obj: obj) (x: message) = let msg_params_without_default_values = List.filter (fun p -> p.DT.param_default=None) msg_params in let msg_without_default_values = {x with DT.msg_params=msg_params_without_default_values} in - + let all_args = Client.args_of_message obj x in let args_without_default_values = 
Client.args_of_message obj msg_without_default_values in - + (* Constructors use a on the wire *) - let is_ctor = x.msg_tag = FromObject(Make) && Client.use_structure_in_ctor in - + let is_ctor = x.msg_tag = FromObject(Make) && Client.use_structure_in_ctor in + (* Result marshaller converts the result to a string for the Task table *) let result_marshaller = match x.msg_custom_marshaller, x.msg_result with | true, _ -> "(fun x -> x)" @@ -98,9 +98,9 @@ let operation (obj: obj) (x: message) = let wire_name = DU.wire_name ~sync:true obj x in let alternative_wire_name = DU.alternative_wire_name ~sync:true obj x in - let orig_string_args = - if is_ctor then [O.string_of_param Client.session;"__structure"] - else List.map O.string_of_param args_without_default_values in + let orig_string_args = + if is_ctor then [O.string_of_param Client.session;"__structure"] + else List.map O.string_of_param args_without_default_values in let string_args = List.map (fun s -> Printf.sprintf "%s_rpc" s) orig_string_args in @@ -120,37 +120,37 @@ let operation (obj: obj) (x: message) = let binding = O.string_of_param (Client.param_of_field f) in let converter = Printf.sprintf "%s_of_rpc" (OU.alias_of_ty f.DT.ty) in let lookup_expr = - match f.DT.default_value with - None -> Printf.sprintf "(my_assoc \"%s\" __structure)" (DU.wire_name_of_field f) - | Some default -> - Printf.sprintf "(if (List.mem_assoc \"%s\" __structure) then (my_assoc \"%s\" __structure) else %s)" - (DU.wire_name_of_field f) (DU.wire_name_of_field f) - (Datamodel_values.to_ocaml_string default) in + match f.DT.default_value with + None -> Printf.sprintf "(my_assoc \"%s\" __structure)" (DU.wire_name_of_field f) + | Some default -> + Printf.sprintf "(if (List.mem_assoc \"%s\" __structure) then (my_assoc \"%s\" __structure) else %s)" + (DU.wire_name_of_field f) (DU.wire_name_of_field f) + (Datamodel_values.to_ocaml_string default) in Printf.sprintf " let %s = %s %s in" binding converter lookup_expr in - String.concat "\n" + 
String.concat "\n" ("let __structure = match __structure_rpc with Dict d -> d | _ -> failwith \"bad __structure\" in" :: - (List.map of_field fields)) in - + (List.map of_field fields)) in + (* impl_fn = something like "VM.make ~__context" *) - let impl_fn = + let impl_fn = (* filter out the session_id *) let args_without_session = List.filter (function O.Named("session_id", _) -> false | _ -> true) all_args in - Printf.sprintf "%s.%s %s %s" - (OU.ocaml_of_obj_name obj.DT.name) - x.msg_name - ("~__context:" ^ Gen_common.context_with_correct_database) - (String.concat "" (List.map (fun arg -> " ~" ^ (O.string_of_param arg)) args_without_session)) in - + Printf.sprintf "%s.%s %s %s" + (OU.ocaml_of_obj_name obj.DT.name) + x.msg_name + ("~__context:" ^ Gen_common.context_with_correct_database) + (String.concat "" (List.map (fun arg -> " ~" ^ (O.string_of_param arg)) args_without_session)) in + let has_async = Client.has_async x in - + let comments = List.concat [ - if Gen_empty_custom.operation_requires_side_effect x - then [ "(* has side-effect (with locks and no automatic DB action) *)" ] - else [ "(* has no side-effect; should be handled by DB action *) "]; - if has_async - then [ "(* has asynchronous mode *)" ] - else [ "(* has no asynchronous mode *)" ] - ] in + if Gen_empty_custom.operation_requires_side_effect x + then [ "(* has side-effect (with locks and no automatic DB action) *)" ] + else [ "(* has no side-effect; should be handled by DB action *) "]; + if has_async + then [ "(* has asynchronous mode *)" ] + else [ "(* has no asynchronous mode *)" ] + ] in (* Generate the unmarshalling code *) let rec add_counts i l = @@ -158,16 +158,16 @@ let operation (obj: obj) (x: message) = [] -> [] | x::xs -> (i,x)::(add_counts (i+1) xs) in let has_session_arg = - if is_ctor then is_session_arg Client.session - else List.exists (fun a->is_session_arg a) args_without_default_values + if is_ctor then is_session_arg Client.session + else List.exists (fun a->is_session_arg 
a) args_without_default_values in let rbac_check_begin = if has_session_arg then [ - "let arg_names = "^(List.fold_right (fun arg args -> "\""^arg^"\"::"^args) orig_string_args (if is_non_constructor_with_defaults then ((List.fold_right (fun dp ss->"\""^(dp.DT.param_name)^"\"::"^ss) msg_params_with_default_values "")^"[]") else "[]"))^" in"; - "let key_names = "^(List.fold_right (fun arg args -> "\""^arg^"\"::"^args) (List.map (fun (k,_)->k) x.msg_map_keys_roles) "[]")^" in"; - "let rbac __context fn = Rbac.check session_id __call ~args:(arg_names,__params) ~keys:key_names ~__context ~fn in"] + "let arg_names = "^(List.fold_right (fun arg args -> "\""^arg^"\"::"^args) orig_string_args (if is_non_constructor_with_defaults then ((List.fold_right (fun dp ss->"\""^(dp.DT.param_name)^"\"::"^ss) msg_params_with_default_values "")^"[]") else "[]"))^" in"; + "let key_names = "^(List.fold_right (fun arg args -> "\""^arg^"\"::"^args) (List.map (fun (k,_)->k) x.msg_map_keys_roles) "[]")^" in"; + "let rbac __context fn = Rbac.check session_id __call ~args:(arg_names,__params) ~keys:key_names ~__context ~fn in"] else [ - "let rbac __context fn = fn() in" + "let rbac __context fn = fn() in" ] in let rbac_check_end = if has_session_arg then [] else [] in @@ -178,201 +178,201 @@ let operation (obj: obj) (x: message) = ( (* If we're a constructor then unmarshall all the fields from the constructor record, passed as a struct *) if is_ctor then [from_rpc Client.session; from_ctor_record] - (* Otherwise, go read non-default fields from pattern match; if we have default fields then we need to - get those from the 'default_fields' arg *) + (* Otherwise, go read non-default fields from pattern match; if we have default fields then we need to + get those from the 'default_fields' arg *) else List.map (fun a -> from_rpc ~ignore:(ignore && not (is_session_arg a)) a) args_without_default_values) (* and for every default value we try to get this from default_args or default it *) @ ( List.map 
- (fun (param_count, default_param) -> - let param_name = OU.ocaml_of_record_name default_param.DT.param_name in - let param_type = OU.alias_of_ty default_param.DT.param_type in - let try_and_get_default = Printf.sprintf "Server_helpers.nth %d default_args" param_count in - let default_value = - match default_param.DT.param_default with - None -> "** EXPECTED DEFAULT VALUE IN THIS PARAM **" - | Some default -> - Datamodel_values.to_ocaml_string default in - Printf.sprintf "let %s = %s_of_rpc (try %s with _ -> %s) in" - param_name param_type try_and_get_default default_value - ) - (add_counts 1 msg_params_with_default_values)) + (fun (param_count, default_param) -> + let param_name = OU.ocaml_of_record_name default_param.DT.param_name in + let param_type = OU.alias_of_ty default_param.DT.param_type in + let try_and_get_default = Printf.sprintf "Server_helpers.nth %d default_args" param_count in + let default_value = + match default_param.DT.param_default with + None -> "** EXPECTED DEFAULT VALUE IN THIS PARAM **" + | Some default -> + Datamodel_values.to_ocaml_string default in + Printf.sprintf "let %s = %s_of_rpc (try %s with _ -> %s) in" + param_name param_type try_and_get_default default_value + ) + (add_counts 1 msg_params_with_default_values)) in - + let may_be_side_effecting msg = match msg.msg_tag with - FromField (Setter, _ ) | FromField (Add, _) | FromField(Remove, _) -> true - | FromField _ -> false - | FromObject Make | FromObject Delete | FromObject (Private Copy) -> true - | FromObject _ -> false - | Custom -> true in - - let session_check_exp = - if x.msg_session - then [ "Session_check.check " ^ (string_of_bool x.msg_pool_internal) ^ " session_id;" ] - else [] - in - - let gen_body () = match x.DT.msg_forward_to with - | Some Extension name -> - [ - "Server_helpers.forward_extension ~__context rbac call" - ] - | Some HostExtension name -> - [ - "let host = ref_host_of_rpc host_rpc in"; - "let call_string = Jsonrpc.string_of_call call in"; - "let 
marshaller = "^result_marshaller^" in"; - "let local_op = fun ~__context ->(rbac __context (fun()->(Custom.Host.call_extension ~__context:(Context.check_for_foreign_database ~__context) ~host ~call:call_string))) in"; - "let supports_async = true in"; - "let generate_task_for = true in"; - "let forward_op = fun ~local_fn ~__context -> (rbac __context (fun()-> (Forward.Host.call_extension ~__context:(Context.check_for_foreign_database ~__context) ~host ~call:call_string) )) in"; - "let resp = Server_helpers.do_dispatch ~session_id ~forward_op __async supports_async __call local_op marshaller fd http_req __label generate_task_for in"; - "if resp.Rpc.success then"; - " let rpc = Jsonrpc.response_of_string (string_of_rpc resp.contents) in"; - " try"; - " let _ = "^result_unmarshaller^" rpc.contents in"; - " rpc"; - " with"; - " | _ -> API.response_of_failure Api_errors.internal_error [string_of_rpc resp.Rpc.contents]"; - "else"; - " Server_helpers.unknown_rpc_failure __call"; - ] - | None -> - let module_prefix = if (Gen_empty_custom.operation_requires_side_effect x) then _custom else _db_defaults in - let common_let_decs = - [ - "let marshaller = "^result_marshaller^" in"; - "let local_op = fun ~__context ->(rbac __context (fun()->("^module_prefix^"."^impl_fn^"))) in"; - "let supports_async = "^(if has_async then "true" else "false")^" in"; - "let generate_task_for = "^(string_of_bool (not (List.mem obj.name DM.no_task_id_for)))^" in" ] in - let side_effect_let_decs = - if Gen_empty_custom.operation_requires_side_effect x then - [ - Printf.sprintf "let forward_op = fun ~local_fn ~__context -> (rbac __context (fun()-> (%s.%s) )) in" _forward impl_fn - ] - else - [ - Printf.sprintf "%s \"%s\";" - (if may_be_side_effecting x then "ApiLogSideEffect.debug" else "ApiLogRead.debug") - wire_name - ] in - - let body_exp = - [ - Printf.sprintf "let resp = Server_helpers.do_dispatch %s %s __async supports_async __call local_op marshaller fd http_req __label generate_task_for in" 
- (if x.msg_session then "~session_id" else "") - (if Gen_empty_custom.operation_requires_side_effect x then "~forward_op" else ""); - (* "P.debug \"Server RPC response: %s\" (Rpc.to_string (resp.Rpc.contents));"; *) - "resp" - ] in - common_let_decs @ side_effect_let_decs @ body_exp in - - let all = - let all_list = - if not (DU.has_been_removed x.DT.msg_lifecycle) then - (comments @ unmarshall_code @ session_check_exp @ rbac_check_begin @ gen_body () @ rbac_check_end) - else - (comments @ ["let session_id = ref_session_of_rpc session_id_rpc in"] @ session_check_exp @ ["response_of_failure Api_errors.message_removed []"]) in - String.concat "\n " ("" :: all_list) in - - name_pattern_match ^ "\n" - ^ " begin match __params with\n" - ^ " | " ^ arg_pattern ^ " -> " ^ all ^ "\n" - ^ " | _ ->\n" - ^ " Server_helpers.parameter_count_mismatch_failure __call " ^ "\"" ^ (string_of_int (count_mandatory_message_parameters x)) ^ "\"" ^ " (string_of_int ((List.length __params) - " ^ (if x.msg_session then "1" else "0") ^ "))\n" - ^ " end" + FromField (Setter, _ ) | FromField (Add, _) | FromField(Remove, _) -> true + | FromField _ -> false + | FromObject Make | FromObject Delete | FromObject (Private Copy) -> true + | FromObject _ -> false + | Custom -> true in + + let session_check_exp = + if x.msg_session + then [ "Session_check.check " ^ (string_of_bool x.msg_pool_internal) ^ " session_id;" ] + else [] + in + + let gen_body () = match x.DT.msg_forward_to with + | Some Extension name -> + [ + "Server_helpers.forward_extension ~__context rbac call" + ] + | Some HostExtension name -> + [ + "let host = ref_host_of_rpc host_rpc in"; + "let call_string = Jsonrpc.string_of_call call in"; + "let marshaller = "^result_marshaller^" in"; + "let local_op = fun ~__context ->(rbac __context (fun()->(Custom.Host.call_extension ~__context:(Context.check_for_foreign_database ~__context) ~host ~call:call_string))) in"; + "let supports_async = true in"; + "let generate_task_for = true in"; + 
"let forward_op = fun ~local_fn ~__context -> (rbac __context (fun()-> (Forward.Host.call_extension ~__context:(Context.check_for_foreign_database ~__context) ~host ~call:call_string) )) in"; + "let resp = Server_helpers.do_dispatch ~session_id ~forward_op __async supports_async __call local_op marshaller fd http_req __label generate_task_for in"; + "if resp.Rpc.success then"; + " let rpc = Jsonrpc.response_of_string (string_of_rpc resp.contents) in"; + " try"; + " let _ = "^result_unmarshaller^" rpc.contents in"; + " rpc"; + " with"; + " | _ -> API.response_of_failure Api_errors.internal_error [string_of_rpc resp.Rpc.contents]"; + "else"; + " Server_helpers.unknown_rpc_failure __call"; + ] + | None -> + let module_prefix = if (Gen_empty_custom.operation_requires_side_effect x) then _custom else _db_defaults in + let common_let_decs = + [ + "let marshaller = "^result_marshaller^" in"; + "let local_op = fun ~__context ->(rbac __context (fun()->("^module_prefix^"."^impl_fn^"))) in"; + "let supports_async = "^(if has_async then "true" else "false")^" in"; + "let generate_task_for = "^(string_of_bool (not (List.mem obj.name DM.no_task_id_for)))^" in" ] in + let side_effect_let_decs = + if Gen_empty_custom.operation_requires_side_effect x then + [ + Printf.sprintf "let forward_op = fun ~local_fn ~__context -> (rbac __context (fun()-> (%s.%s) )) in" _forward impl_fn + ] + else + [ + Printf.sprintf "%s \"%s\";" + (if may_be_side_effecting x then "ApiLogSideEffect.debug" else "ApiLogRead.debug") + wire_name + ] in + + let body_exp = + [ + Printf.sprintf "let resp = Server_helpers.do_dispatch %s %s __async supports_async __call local_op marshaller fd http_req __label generate_task_for in" + (if x.msg_session then "~session_id" else "") + (if Gen_empty_custom.operation_requires_side_effect x then "~forward_op" else ""); + (* "P.debug \"Server RPC response: %s\" (Rpc.to_string (resp.Rpc.contents));"; *) + "resp" + ] in + common_let_decs @ side_effect_let_decs @ body_exp in + 
+ let all = + let all_list = + if not (DU.has_been_removed x.DT.msg_lifecycle) then + (comments @ unmarshall_code @ session_check_exp @ rbac_check_begin @ gen_body () @ rbac_check_end) + else + (comments @ ["let session_id = ref_session_of_rpc session_id_rpc in"] @ session_check_exp @ ["response_of_failure Api_errors.message_removed []"]) in + String.concat "\n " ("" :: all_list) in + + name_pattern_match ^ "\n" + ^ " begin match __params with\n" + ^ " | " ^ arg_pattern ^ " -> " ^ all ^ "\n" + ^ " | _ ->\n" + ^ " Server_helpers.parameter_count_mismatch_failure __call " ^ "\"" ^ (string_of_int (count_mandatory_message_parameters x)) ^ "\"" ^ " (string_of_int ((List.length __params) - " ^ (if x.msg_session then "1" else "0") ^ "))\n" + ^ " end" (* ------------------------------------------------------------------------------------------ Code to generate whole module - ------------------------------------------------------------------------------------------ *) + ------------------------------------------------------------------------------------------ *) let gen_module api : O.Module.t = (* For testing purposes the ocaml client and server are kept in sync *) let api = Client.client_api ~sync:true api in let obj (obj: obj) = List.map (operation obj) obj.messages in - let all_objs = Dm_api.objects_of_api api in + let all_objs = Dm_api.objects_of_api api in O.Module.make ~name:module_name ~args:[_custom ^ " : Custom_actions." ^ Gen_empty_custom.signature_name; - _forward ^ " : Custom_actions." ^ Gen_empty_custom.signature_name] + _forward ^ " : Custom_actions." 
^ Gen_empty_custom.signature_name] ~preamble:[ "module D = Debug.Make(struct let name = \"dispatcher\" end)"; "module ApiLogRead = Debug.Make(struct let name = \"api_readonly\" end)"; "module ApiLogSideEffect = Debug.Make(struct let name = \"api_effect\" end)" -(* "exception Invalid_operation"; *) + (* "exception Invalid_operation"; *) ] ~elements:[ O.Module.Let ( - O.Let.make - ~name: "dispatch_call" - ~params: [ O.Anon(Some "http_req", "Http.Request.t"); - O.Anon(Some "fd", "Unix.file_descr"); - O.Anon(Some "call", "Rpc.call") ] - ~ty: "response" - ~body: ( - [ - "let __call, __params = call.Rpc.name, call.Rpc.params in"; - "List.iter (fun p -> let s = Rpc.to_string p in if not (Stdext.Encodings.UTF8_XML.is_valid s) then"; - "raise (Api_errors.Server_error(Api_errors.invalid_value, [\"Invalid UTF-8 string in parameter\"; s]))) __params;"; - "let __async = Server_helpers.is_async __call in"; - "let __label = __call in"; - "let __call = if __async then Server_helpers.remove_async_prefix __call else __call in"; - "let subtask_of = if http_req.Http.Request.task <> None then http_req.Http.Request.task else http_req.Http.Request.subtask_of in"; - "let http_other_config = Context.get_http_other_config http_req in"; - "Server_helpers.exec_with_new_task (\"dispatch:\"^__call^\"\") ~http_other_config ?subtask_of:(Stdext.Pervasiveext.may Ref.of_string subtask_of) (fun __context ->"; + O.Let.make + ~name: "dispatch_call" + ~params: [ O.Anon(Some "http_req", "Http.Request.t"); + O.Anon(Some "fd", "Unix.file_descr"); + O.Anon(Some "call", "Rpc.call") ] + ~ty: "response" + ~body: ( + [ + "let __call, __params = call.Rpc.name, call.Rpc.params in"; + "List.iter (fun p -> let s = Rpc.to_string p in if not (Stdext.Encodings.UTF8_XML.is_valid s) then"; + "raise (Api_errors.Server_error(Api_errors.invalid_value, [\"Invalid UTF-8 string in parameter\"; s]))) __params;"; + "let __async = Server_helpers.is_async __call in"; + "let __label = __call in"; + "let __call = if __async then 
Server_helpers.remove_async_prefix __call else __call in"; + "let subtask_of = if http_req.Http.Request.task <> None then http_req.Http.Request.task else http_req.Http.Request.subtask_of in"; + "let http_other_config = Context.get_http_other_config http_req in"; + "Server_helpers.exec_with_new_task (\"dispatch:\"^__call^\"\") ~http_other_config ?subtask_of:(Stdext.Pervasiveext.may Ref.of_string subtask_of) (fun __context ->"; (* "if not (Hashtbl.mem supress_printing_for_these_messages __call) then "; debug "%s %s" [ "__call"; "(if __async then \"(async)\" else \"\")" ]; *) - "Server_helpers.dispatch_exn_wrapper (fun () -> (match __call with "; - ] @ (List.flatten (List.map obj all_objs)) @ [ - "| \"system.listMethods\" -> "; - " success (rpc_of_string_set [" ] @ - begin - let objmsgs obj = List.map (fun msg -> Printf.sprintf "\"%s\";" (DU.wire_name ~sync:true obj msg)) obj.messages in - let allmsg = List.map (fun obj -> String.concat "" (objmsgs obj)) all_objs in - allmsg - end @ [ - " ])"; - "| func -> "; - " if (try Scanf.sscanf func \"system.isAlive:%s\" (fun _ -> true) with _ -> false)"; - " then Rpc.success (List.hd __params)"; - " else begin"; - " if (try Scanf.sscanf func \"unknown-message-%s\" (fun _ -> false) with _ -> true)"; - " then " ^ (debug "This is not a built-in rpc \"%s\"" [ "__call" ]); - " begin match __params with"; - " | session_id_rpc::_->"; - " let session_id = ref_session_of_rpc session_id_rpc in"; - " Session_check.check false session_id;"; - " (* based on the Host.call_extension call *)"; - " let arg_names = \"session_id\"::__call::[] in"; - " let key_names = [] in"; - " let rbac __context fn = Rbac.check session_id \"Host.call_extension\" ~args:(arg_names,__params) ~keys:key_names ~__context ~fn in"; - " Server_helpers.forward_extension ~__context rbac { call with Rpc.name = __call }"; - " | _ ->"; - " Server_helpers.unknown_rpc_failure func "; - " end"; - " end"; - ")))"; - ] - ) () + "Server_helpers.dispatch_exn_wrapper (fun () -> 
(match __call with "; + ] @ (List.flatten (List.map obj all_objs)) @ [ + "| \"system.listMethods\" -> "; + " success (rpc_of_string_set [" ] @ + begin + let objmsgs obj = List.map (fun msg -> Printf.sprintf "\"%s\";" (DU.wire_name ~sync:true obj msg)) obj.messages in + let allmsg = List.map (fun obj -> String.concat "" (objmsgs obj)) all_objs in + allmsg + end @ [ + " ])"; + "| func -> "; + " if (try Scanf.sscanf func \"system.isAlive:%s\" (fun _ -> true) with _ -> false)"; + " then Rpc.success (List.hd __params)"; + " else begin"; + " if (try Scanf.sscanf func \"unknown-message-%s\" (fun _ -> false) with _ -> true)"; + " then " ^ (debug "This is not a built-in rpc \"%s\"" [ "__call" ]); + " begin match __params with"; + " | session_id_rpc::_->"; + " let session_id = ref_session_of_rpc session_id_rpc in"; + " Session_check.check false session_id;"; + " (* based on the Host.call_extension call *)"; + " let arg_names = \"session_id\"::__call::[] in"; + " let key_names = [] in"; + " let rbac __context fn = Rbac.check session_id \"Host.call_extension\" ~args:(arg_names,__params) ~keys:key_names ~__context ~fn in"; + " Server_helpers.forward_extension ~__context rbac { call with Rpc.name = __call }"; + " | _ ->"; + " Server_helpers.unknown_rpc_failure func "; + " end"; + " end"; + ")))"; + ] + ) () ); - O.Module.Let ( - O.Let.make - ~name: "dispatch" - ~params: [ - O.Anon(Some "http_req", "Http.Request.t"); - O.Anon(Some "fd", "Unix.file_descr"); - O.Anon(Some "body", "string") - ] - ~ty: "unit" - ~body: [ - "let call = Xmlrpc.call_of_string body in"; - " dispatch_call http_req fd call" - ] ()) - ] () + O.Module.Let ( + O.Let.make + ~name: "dispatch" + ~params: [ + O.Anon(Some "http_req", "Http.Request.t"); + O.Anon(Some "fd", "Unix.file_descr"); + O.Anon(Some "body", "string") + ] + ~ty: "unit" + ~body: [ + "let call = Xmlrpc.call_of_string body in"; + " dispatch_call http_req fd call" + ] ()) + ] () diff --git a/ocaml/idl/ocaml_backend/gen_test.ml 
b/ocaml/idl/ocaml_backend/gen_test.ml index 10365d2ec3e..e20ba3a1afa 100644 --- a/ocaml/idl/ocaml_backend/gen_test.ml +++ b/ocaml/idl/ocaml_backend/gen_test.ml @@ -23,55 +23,55 @@ module O = Ocaml_syntax let print s = output_string stdout (s^"\n") let rec gen_test_type highapi ty = - let rec aux = function - | DT.String -> "\"teststring\"" - | DT.Int -> "123456789123456789L" - | DT.Float -> "0.123456789" - | DT.Bool -> "true" - | DT.DateTime -> "(Date.of_string \"20120101T00:00:00Z\")" - | DT.Enum (_,(x,_)::_) -> Printf.sprintf "(%s)" (OU.constructor_of x) - | DT.Set (DT.Enum (x,y)) -> - Printf.sprintf "[ %s ]" - (String.concat ";" - (List.map (fun (x,y) -> OU.constructor_of x) y)) - | DT.Set x -> Printf.sprintf "[ %s ]" (aux x) - | DT.Map (x,y) -> Printf.sprintf "[ (%s,%s) ]" (aux x) (aux y) - | DT.Ref x -> Printf.sprintf "(Ref.of_string \"OpaqueRef:foo\")" - | DT.Record x -> gen_record_type highapi x - | _ -> failwith "Invalid type" - in - aux ty + let rec aux = function + | DT.String -> "\"teststring\"" + | DT.Int -> "123456789123456789L" + | DT.Float -> "0.123456789" + | DT.Bool -> "true" + | DT.DateTime -> "(Date.of_string \"20120101T00:00:00Z\")" + | DT.Enum (_,(x,_)::_) -> Printf.sprintf "(%s)" (OU.constructor_of x) + | DT.Set (DT.Enum (x,y)) -> + Printf.sprintf "[ %s ]" + (String.concat ";" + (List.map (fun (x,y) -> OU.constructor_of x) y)) + | DT.Set x -> Printf.sprintf "[ %s ]" (aux x) + | DT.Map (x,y) -> Printf.sprintf "[ (%s,%s) ]" (aux x) (aux y) + | DT.Ref x -> Printf.sprintf "(Ref.of_string \"OpaqueRef:foo\")" + | DT.Record x -> gen_record_type highapi x + | _ -> failwith "Invalid type" + in + aux ty (** Generate a list of modules for each record kind *) and gen_record_type highapi record = - let obj_name = OU.ocaml_of_record_name record in - let all_fields = DU.fields_of_obj (Dm_api.get_obj_by_name highapi ~objname:record) in - let field fld = OU.ocaml_of_record_field (obj_name :: fld.DT.full_name) in - let map_fields fn = String.concat "; " 
(List.map (fun field -> fn field) all_fields) in - let regular_def fld = sprintf "%s=%s" (field fld) (gen_test_type highapi fld.DT.ty) in - sprintf "{ %s }" (map_fields regular_def) + let obj_name = OU.ocaml_of_record_name record in + let all_fields = DU.fields_of_obj (Dm_api.get_obj_by_name highapi ~objname:record) in + let field fld = OU.ocaml_of_record_field (obj_name :: fld.DT.full_name) in + let map_fields fn = String.concat "; " (List.map (fun field -> fn field) all_fields) in + let regular_def fld = sprintf "%s=%s" (field fld) (gen_test_type highapi fld.DT.ty) in + sprintf "{ %s }" (map_fields regular_def) let gen_test highapi = - let all_types = DU.Types.of_objects (Dm_api.objects_of_api highapi) in - let all_types = Gen_api.add_set_enums all_types in - ignore(all_types); - List.iter (List.iter print) - (List.between [""] [ - ["open API"]; - ["let _ ="]; - List.concat (List.map (fun ty -> - [ - sprintf "let oc = open_out \"rpc-light_%s.xml\" in" (OU.alias_of_ty ty); - sprintf "let x = %s in" (gen_test_type highapi ty); - sprintf "Printf.fprintf oc \"%%s\" (Xmlrpc.to_string (API.rpc_of_%s x));" (OU.alias_of_ty ty); - "close_out oc;"; - sprintf "let oc = open_out \"xml-light2_%s.xml\" in" (OU.alias_of_ty ty); - sprintf "Printf.fprintf oc \"%%s\" (Xml.to_string (API.Legacy.To.%s x));" (OU.alias_of_ty ty); - "close_out oc;"; -(* sprintf "let s = Xml.to_string (API.Legacy.To.%s x) in" (OU.alias_of_ty ty);*) -(* sprintf "let y =" *) - ] - ) all_types) - ]) + let all_types = DU.Types.of_objects (Dm_api.objects_of_api highapi) in + let all_types = Gen_api.add_set_enums all_types in + ignore(all_types); + List.iter (List.iter print) + (List.between [""] [ + ["open API"]; + ["let _ ="]; + List.concat (List.map (fun ty -> + [ + sprintf "let oc = open_out \"rpc-light_%s.xml\" in" (OU.alias_of_ty ty); + sprintf "let x = %s in" (gen_test_type highapi ty); + sprintf "Printf.fprintf oc \"%%s\" (Xmlrpc.to_string (API.rpc_of_%s x));" (OU.alias_of_ty ty); + "close_out oc;"; + 
sprintf "let oc = open_out \"xml-light2_%s.xml\" in" (OU.alias_of_ty ty); + sprintf "Printf.fprintf oc \"%%s\" (Xml.to_string (API.Legacy.To.%s x));" (OU.alias_of_ty ty); + "close_out oc;"; + (* sprintf "let s = Xml.to_string (API.Legacy.To.%s x) in" (OU.alias_of_ty ty);*) + (* sprintf "let y =" *) + ] + ) all_types) + ]) diff --git a/ocaml/idl/ocaml_backend/htmldoc.ml b/ocaml/idl/ocaml_backend/htmldoc.ml index b8925a0602f..dba80f66a20 100644 --- a/ocaml/idl/ocaml_backend/htmldoc.ml +++ b/ocaml/idl/ocaml_backend/htmldoc.ml @@ -13,24 +13,24 @@ *) (** Custom doc generator for our generated code *) -(* Based on the example in the manual: +(* Based on the example in the manual: http://caml.inria.fr/pub/docs/manual-ocaml/manual029.html#s:ocamldoc-custom-tags *) class my_gen = object(self) inherit Odoc_html.html (** Return HTML code for the given text of a bar tag. *) - method html_of_lock (t: Odoc_info.text) = + method html_of_lock (t: Odoc_info.text) = (* Decode the locks annotation, stored as s-expresions *) let txt = Odoc_info.string_of_text t in - let locks = - match SExpr_TS.of_string txt with - | SExpr.Node kv -> - List.map (function SExpr.Node [ SExpr.String k; SExpr.String v ] -> k,v - | _ -> failwith "Failed to parse lock comment") kv - | _ -> failwith "Failed to parse lock comment" in + let locks = + match SExpr_TS.of_string txt with + | SExpr.Node kv -> + List.map (function SExpr.Node [ SExpr.String k; SExpr.String v ] -> k,v + | _ -> failwith "Failed to parse lock comment") kv + | _ -> failwith "Failed to parse lock comment" in - if locks = [] + if locks = [] then "No locks held" else Printf.sprintf "With the following locks held: %s" (String.concat " " (List.map (fun (k, v) -> k ^ "." 
^ v) locks)) diff --git a/ocaml/idl/ocaml_backend/json.ml b/ocaml/idl/ocaml_backend/json.ml index 5d9630ca1b9..5c454815bed 100644 --- a/ocaml/idl/ocaml_backend/json.ml +++ b/ocaml/idl/ocaml_backend/json.ml @@ -19,44 +19,44 @@ open Stdext.Xstringext let escape_string s = String.escaped ~rules:[('\n',"\\n");('"',"\\\"");('\\',"\\\\")] s -let rec xmlrpc_to_json xml = - let float_to_string f = - match classify_float f with - | FP_normal | FP_subnormal -> Printf.sprintf "%0.0f" f - | FP_nan -> "NaN" - | FP_infinite -> if f>0.0 then "Infinity" else "-Infinity" - | FP_zero -> "0.0" +let rec xmlrpc_to_json xml = + let float_to_string f = + match classify_float f with + | FP_normal | FP_subnormal -> Printf.sprintf "%0.0f" f + | FP_nan -> "NaN" + | FP_infinite -> if f>0.0 then "Infinity" else "-Infinity" + | FP_zero -> "0.0" in match xml with - | Element("i4",_,[PCData i]) -> i - | Element("boolean",_,[PCData b]) -> b - | Element("string",_,[PCData s]) -> "\""^(escape_string s)^"\"" - | PCData s -> "\""^(escape_string s)^"\"" - | Element("double",_,[PCData d]) -> float_to_string (float_of_string d) - | Element("dateTime.iso8601",_,[PCData d]) -> "\""^d^"\"" - | Element("base64",_,[PCData b]) -> "\""^b^"\"" - | Element("struct",_,list) -> - let map_func elts = - match elts with - | Element("member",_,[Element("name",_,[PCData n]);Element("value",_,[v])]) -> - "\""^n^"\":"^(xmlrpc_to_json v) - | Element("member",_,[Element("name",_,[PCData n]);Element("value",_,[])]) -> - "\""^n^"\":\"\"" - | Element(n,_,_) -> ( Printf.fprintf stderr "%s" ("Bad XMLRPC (expecting member, got "^n^")"); "") - | _ -> failwith "Bad XMLRPC" - in - let elts = List.map map_func list in - "{"^(String.concat "," elts)^"}" - | Element("array",_,[Element("data",_,values)]) -> - let values = List.map (function - |Element("value",_,[v]) -> xmlrpc_to_json v - | Element("value",_,[]) -> "\"\"" - | _ -> failwith "Bad XMLRPC" - ) values in - "["^(String.concat "," values)^"]" - | Element(n,_,_) -> 
(Printf.fprintf stderr "%s" ("Bad XMLRPC (expecting something, got "^n^")"); "") - - - - - + | Element("i4",_,[PCData i]) -> i + | Element("boolean",_,[PCData b]) -> b + | Element("string",_,[PCData s]) -> "\""^(escape_string s)^"\"" + | PCData s -> "\""^(escape_string s)^"\"" + | Element("double",_,[PCData d]) -> float_to_string (float_of_string d) + | Element("dateTime.iso8601",_,[PCData d]) -> "\""^d^"\"" + | Element("base64",_,[PCData b]) -> "\""^b^"\"" + | Element("struct",_,list) -> + let map_func elts = + match elts with + | Element("member",_,[Element("name",_,[PCData n]);Element("value",_,[v])]) -> + "\""^n^"\":"^(xmlrpc_to_json v) + | Element("member",_,[Element("name",_,[PCData n]);Element("value",_,[])]) -> + "\""^n^"\":\"\"" + | Element(n,_,_) -> ( Printf.fprintf stderr "%s" ("Bad XMLRPC (expecting member, got "^n^")"); "") + | _ -> failwith "Bad XMLRPC" + in + let elts = List.map map_func list in + "{"^(String.concat "," elts)^"}" + | Element("array",_,[Element("data",_,values)]) -> + let values = List.map (function + |Element("value",_,[v]) -> xmlrpc_to_json v + | Element("value",_,[]) -> "\"\"" + | _ -> failwith "Bad XMLRPC" + ) values in + "["^(String.concat "," values)^"]" + | Element(n,_,_) -> (Printf.fprintf stderr "%s" ("Bad XMLRPC (expecting something, got "^n^")"); "") + + + + + diff --git a/ocaml/idl/ocaml_backend/locking.ml b/ocaml/idl/ocaml_backend/locking.ml index b9d25945830..ac4ca585494 100644 --- a/ocaml/idl/ocaml_backend/locking.ml +++ b/ocaml/idl/ocaml_backend/locking.ml @@ -26,64 +26,64 @@ module Client = Gen_client open DT (** For a particular operation, decide which locks are required *) -let of_message (obj: obj) (x: message) = +let of_message (obj: obj) (x: message) = let this_class = obj.DT.name in let self = [ this_class, Client._self ] in let session = [ DM._session, Client._session_id ] in - (* Take all the arguments of type Ref x, select those which are part of + (* Take all the arguments of type Ref x, select those which 
are part of a relationship (associated with a foreign Set(Ref _) and compute the list of foreign objects to lock *) - let from_ref_relationships (x: obj) = + let from_ref_relationships (x: obj) = let fields = DU.fields_of_obj x in let consider = function - | { DT.ty = DT.Ref obj; field_name = field_name } as arg -> - let this_end = this_class, field_name in - begin - try - let class', name' = DU.Relations.other_end_of DM.all_api this_end in - if obj <> class' then failwith "relationship lookup failed"; - let fld' = Dm_api.get_field_by_name DM.all_api ~objname:class' ~fieldname:name' in - if fld'.DT.field_has_effect - then [ class', OU.ocaml_of_record_field arg.full_name ] - else [] - with Failure(_) -> [] - end - | _ -> [] in - List.concat (List.map consider fields) in + | { DT.ty = DT.Ref obj; field_name = field_name } as arg -> + let this_end = this_class, field_name in + begin + try + let class', name' = DU.Relations.other_end_of DM.all_api this_end in + if obj <> class' then failwith "relationship lookup failed"; + let fld' = Dm_api.get_field_by_name DM.all_api ~objname:class' ~fieldname:name' in + if fld'.DT.field_has_effect + then [ class', OU.ocaml_of_record_field arg.full_name ] + else [] + with Failure(_) -> [] + end + | _ -> [] in + List.concat (List.map consider fields) in let session = if x.msg_session && this_class = DM._session then session else [] in let all = match x.msg_tag with - | FromField(_, { DT.field_has_effect = false }) -> + | FromField(_, { DT.field_has_effect = false }) -> (* for simple cases, the database handles these. 
Recall that for - Set(Ref x) types, the magic is in the constructor of the other - end of the relation *) + Set(Ref x) types, the magic is in the constructor of the other + end of the relation *) [] - | FromField(_, { DT.field_has_effect = true }) -> + | FromField(_, { DT.field_has_effect = true }) -> self - | FromObject(Make) -> + | FromObject(Make) -> (* Lock this instance and, for any Ref x argument where it corresponds to the - 'N' side of a 1-N relationship and where the '1' side field has - field_has_effect = true. + 'N' side of a 1-N relationship and where the '1' side field has + field_has_effect = true. Example: creating a VBD with a VM ref *) from_ref_relationships obj (* NB self doesn't exist yet *) - | FromObject(Delete) -> + | FromObject(Delete) -> (* Lock this instance and, for any Ref x field which is in a relationship, - lock those other objects if they have field_has_effect = true - Example: deleting a VBD - (Note: deleting a VM containing a Set(Ref VBD) won't cause a side-effect + lock those other objects if they have field_has_effect = true + Example: deleting a VBD + (Note: deleting a VM containing a Set(Ref VBD) won't cause a side-effect on the VBD because we currently ban fields of type Ref _ having - field_has_effect = true) *) + field_has_effect = true) *) (* XXX: need to fetch IDs from the database *) self (* self @ (from_ref_relationships obj) *) - | FromObject(_) -> + | FromObject(_) -> (* Database handles everything else *) [] - | Custom -> + | Custom -> (* Lock the session and the "self" parameter (NB not every message has a self, eg login) *) let self_lock = try [ this_class, DU.find_self_parameter x ] with _ -> [] in session @ self_lock diff --git a/ocaml/idl/ocaml_backend/locking_helpers.ml b/ocaml/idl/ocaml_backend/locking_helpers.ml index 17a5fa94e86..c0e3c5d17be 100644 --- a/ocaml/idl/ocaml_backend/locking_helpers.ml +++ b/ocaml/idl/ocaml_backend/locking_helpers.ml @@ -21,146 +21,146 @@ open Listext module D = Debug.Make(struct 
let name = "locking_helpers" end) open D -type resource = - | Lock of string - | Process of string * int +type resource = + | Lock of string + | Process of string * int let string_of_resource = function - | Lock x -> Printf.sprintf "Lock(%s)" x - | Process (name, pid) -> Printf.sprintf "Process(%s, %d)" name pid + | Lock x -> Printf.sprintf "Lock(%s)" x + | Process (name, pid) -> Printf.sprintf "Process(%s, %d)" name pid let kill_resource = function - | Lock x -> debug "There is no way to forcibly remove Lock(%s)" x - | Process (name, pid) -> - info "Sending SIGKILL to %s pid %d" name pid; - Unix.kill pid Sys.sigkill + | Lock x -> debug "There is no way to forcibly remove Lock(%s)" x + | Process (name, pid) -> + info "Sending SIGKILL to %s pid %d" name pid; + Unix.kill pid Sys.sigkill module Thread_state = struct - type time = float - type t = { - acquired_resources: (resource * time) list; - task: API.ref_task; - name: string; - waiting_for: (resource * time) option; - } - let empty = { - acquired_resources = []; - task = Ref.null; - name = ""; - waiting_for = None; - } - let m = Mutex.create () - module IntMap = Map.Make(struct type t = int let compare = compare end) - let thread_states = ref IntMap.empty - - let get_acquired_resources_by_task task = - let snapshot = Mutex.execute m (fun () -> !thread_states) in - let all, _ = IntMap.partition (fun _ ts -> ts.task = task) snapshot in - List.map fst (IntMap.fold (fun _ ts acc -> ts.acquired_resources @ acc) all []) - - let get_all_acquired_resources () = - let snapshot = Mutex.execute m (fun () -> !thread_states) in - List.map fst (IntMap.fold (fun _ ts acc -> ts.acquired_resources @ acc) snapshot []) - - let me () = Thread.id (Thread.self ()) - - let update f = - let id = me () in - let snapshot = Mutex.execute m (fun () -> !thread_states) in - let ts = - if IntMap.mem id snapshot - then f (IntMap.find id snapshot) - else f empty in - Mutex.execute m - (fun () -> - thread_states := if ts = empty then 
IntMap.remove id !thread_states else IntMap.add id ts !thread_states - ) - - let with_named_thread name task f = - update (fun ts -> { ts with name = name; task = task }); - finally f - (fun () -> update (fun ts -> { ts with name = ""; task = Ref.null })) - - let now () = Unix.gettimeofday () - let waiting_for resource = - update (fun ts -> { ts with waiting_for = Some (resource, now()) }) - - let acquired resource = - update (fun ts -> { ts with waiting_for = None; acquired_resources = (resource, now()) :: ts.acquired_resources }) - - let released resource = - update (fun ts -> { ts with acquired_resources = List.filter (fun (r,_) -> r <> resource) ts.acquired_resources }) - - let to_graphviz () = - let t' = now () in - let snapshot = Mutex.execute m (fun () -> !thread_states) in - (* Map from thread ids -> record rows *) - let threads = IntMap.map (fun ts -> - [ ts.name ]::[ Ref.really_pretty_and_small ts.task ]::(List.map (fun (r, t) -> [ string_of_resource r; Printf.sprintf "%.0f" (t' -. 
t) ]) ts.acquired_resources) - ) snapshot in - let resources_of_ts ts = - List.map fst ts.acquired_resources @ - (Opt.default [] (Opt.map (fun (r, _) -> [ r ]) ts.waiting_for)) in - let all_resources = - List.setify - (IntMap.fold (fun _ ts acc -> resources_of_ts ts @ acc ) snapshot [] ) in - - let resources_to_ids = List.combine all_resources (Range.to_list (Range.make 0 (List.length all_resources))) in - let resources_to_sll = List.map - (function - | Lock x as y -> y, [ [ "lock" ]; [ x ] ] - | Process (name, pid) as y -> y, [ [ "process" ]; [ name ]; [ string_of_int pid ] ] - ) all_resources in - - let resources_to_threads = - IntMap.fold (fun id ts acc -> - List.map (fun (r, _) -> id, List.assoc r resources_to_ids) ts.acquired_resources @ acc - ) snapshot [] in - let threads_to_resources = - IntMap.fold (fun id ts acc -> - match ts.waiting_for with - | None -> acc - | Some (r, _) -> (id, List.assoc r resources_to_ids) :: acc - ) snapshot [] in - let label_of_sll sll = - let bar = String.concat " | " in - bar (List.map (fun sl -> "{" ^ (bar sl) ^ "}") sll) in - let all = [ - "digraph Resources {"; - "node [shape=Mrecord];" - ] @ (IntMap.fold (fun id sll acc -> Printf.sprintf "t%d [label=\"%s\"];" id (label_of_sll sll) :: acc) threads []) @ [ - "node [shape=record];" - ] @ (List.map (fun (resource, id) -> Printf.sprintf "r%d [style=filled label=\"%s\"];" id (label_of_sll (List.assoc resource resources_to_sll))) resources_to_ids) @ - (List.map (fun (t, r) -> Printf.sprintf "t%d -> r%d" t r) threads_to_resources) @ - (List.map (fun (t, r) -> Printf.sprintf "r%d -> t%d" r t) resources_to_threads) @ [ - "rankdir=LR"; - "overlap=false"; - "label=\"Threads and resources\""; - "fontsize=12"; - "}"; - ] in - String.concat "\n" all + type time = float + type t = { + acquired_resources: (resource * time) list; + task: API.ref_task; + name: string; + waiting_for: (resource * time) option; + } + let empty = { + acquired_resources = []; + task = Ref.null; + name = ""; + 
waiting_for = None; + } + let m = Mutex.create () + module IntMap = Map.Make(struct type t = int let compare = compare end) + let thread_states = ref IntMap.empty + + let get_acquired_resources_by_task task = + let snapshot = Mutex.execute m (fun () -> !thread_states) in + let all, _ = IntMap.partition (fun _ ts -> ts.task = task) snapshot in + List.map fst (IntMap.fold (fun _ ts acc -> ts.acquired_resources @ acc) all []) + + let get_all_acquired_resources () = + let snapshot = Mutex.execute m (fun () -> !thread_states) in + List.map fst (IntMap.fold (fun _ ts acc -> ts.acquired_resources @ acc) snapshot []) + + let me () = Thread.id (Thread.self ()) + + let update f = + let id = me () in + let snapshot = Mutex.execute m (fun () -> !thread_states) in + let ts = + if IntMap.mem id snapshot + then f (IntMap.find id snapshot) + else f empty in + Mutex.execute m + (fun () -> + thread_states := if ts = empty then IntMap.remove id !thread_states else IntMap.add id ts !thread_states + ) + + let with_named_thread name task f = + update (fun ts -> { ts with name = name; task = task }); + finally f + (fun () -> update (fun ts -> { ts with name = ""; task = Ref.null })) + + let now () = Unix.gettimeofday () + let waiting_for resource = + update (fun ts -> { ts with waiting_for = Some (resource, now()) }) + + let acquired resource = + update (fun ts -> { ts with waiting_for = None; acquired_resources = (resource, now()) :: ts.acquired_resources }) + + let released resource = + update (fun ts -> { ts with acquired_resources = List.filter (fun (r,_) -> r <> resource) ts.acquired_resources }) + + let to_graphviz () = + let t' = now () in + let snapshot = Mutex.execute m (fun () -> !thread_states) in + (* Map from thread ids -> record rows *) + let threads = IntMap.map (fun ts -> + [ ts.name ]::[ Ref.really_pretty_and_small ts.task ]::(List.map (fun (r, t) -> [ string_of_resource r; Printf.sprintf "%.0f" (t' -. 
t) ]) ts.acquired_resources) + ) snapshot in + let resources_of_ts ts = + List.map fst ts.acquired_resources @ + (Opt.default [] (Opt.map (fun (r, _) -> [ r ]) ts.waiting_for)) in + let all_resources = + List.setify + (IntMap.fold (fun _ ts acc -> resources_of_ts ts @ acc ) snapshot [] ) in + + let resources_to_ids = List.combine all_resources (Range.to_list (Range.make 0 (List.length all_resources))) in + let resources_to_sll = List.map + (function + | Lock x as y -> y, [ [ "lock" ]; [ x ] ] + | Process (name, pid) as y -> y, [ [ "process" ]; [ name ]; [ string_of_int pid ] ] + ) all_resources in + + let resources_to_threads = + IntMap.fold (fun id ts acc -> + List.map (fun (r, _) -> id, List.assoc r resources_to_ids) ts.acquired_resources @ acc + ) snapshot [] in + let threads_to_resources = + IntMap.fold (fun id ts acc -> + match ts.waiting_for with + | None -> acc + | Some (r, _) -> (id, List.assoc r resources_to_ids) :: acc + ) snapshot [] in + let label_of_sll sll = + let bar = String.concat " | " in + bar (List.map (fun sl -> "{" ^ (bar sl) ^ "}") sll) in + let all = [ + "digraph Resources {"; + "node [shape=Mrecord];" + ] @ (IntMap.fold (fun id sll acc -> Printf.sprintf "t%d [label=\"%s\"];" id (label_of_sll sll) :: acc) threads []) @ [ + "node [shape=record];" + ] @ (List.map (fun (resource, id) -> Printf.sprintf "r%d [style=filled label=\"%s\"];" id (label_of_sll (List.assoc resource resources_to_sll))) resources_to_ids) @ + (List.map (fun (t, r) -> Printf.sprintf "t%d -> r%d" t r) threads_to_resources) @ + (List.map (fun (t, r) -> Printf.sprintf "r%d -> t%d" r t) resources_to_threads) @ [ + "rankdir=LR"; + "overlap=false"; + "label=\"Threads and resources\""; + "fontsize=12"; + "}"; + ] in + String.concat "\n" all end module Named_mutex = struct - type t = { - name: string; - m: Mutex.t; - } - let create name = { - name = name; - m = Mutex.create (); - } - let execute (x:t) f = - let r = Lock x.name in - Thread_state.waiting_for r; - Mutex.execute x.m - 
(fun () -> - Thread_state.acquired r; - finally - f - (fun () -> Thread_state.released r) - ) + type t = { + name: string; + m: Mutex.t; + } + let create name = { + name = name; + m = Mutex.create (); + } + let execute (x:t) f = + let r = Lock x.name in + Thread_state.waiting_for r; + Mutex.execute x.m + (fun () -> + Thread_state.acquired r; + finally + f + (fun () -> Thread_state.released r) + ) end (* We store the locks in a hashtable whose keys are VM references and values are abstract @@ -181,7 +181,7 @@ let locks : (API.ref_VM, token) Hashtbl.t = Hashtbl.create 10 exception Lock_not_held (** Raised by assert_locked if we screw up the use of 'with_lock' *) let assert_locked (target: API.ref_VM) (token: token) = - let held = + let held = Mutex.execute mutex (fun () -> Hashtbl.fold (fun target' token' acc -> acc || ((target = target') && (token = token'))) locks false) in if not held then begin @@ -200,13 +200,13 @@ let acquire_lock (target: API.ref_VM) = Thread_state.waiting_for (lock_of_vm target); let token = Mutex.execute mutex - (fun () -> - while already_locked () do Condition.wait cvar mutex done; - let token = !next_token in - next_token := Int64.add !next_token 1L; - Hashtbl.replace locks target token; - token - ) in + (fun () -> + while already_locked () do Condition.wait cvar mutex done; + let token = !next_token in + next_token := Int64.add !next_token 1L; + Hashtbl.replace locks target token; + token + ) in debug "Acquired lock on VM %s with token %Ld" (Ref.string_of target) token; Thread_state.acquired (lock_of_vm target); token @@ -215,21 +215,21 @@ let acquire_lock (target: API.ref_VM) = exception Internal_error_vm_not_locked exception Internal_error_token_mismatch -let release_lock (target: API.ref_VM) (token: token) = +let release_lock (target: API.ref_VM) (token: token) = Mutex.execute mutex (fun () -> if not(Hashtbl.mem locks target) then begin - error "VM %s not locked at all: lock cannot be released" (Ref.string_of target); - raise 
Internal_error_vm_not_locked + error "VM %s not locked at all: lock cannot be released" (Ref.string_of target); + raise Internal_error_vm_not_locked end; let token' = Hashtbl.find locks target in if token <> token' then begin - error "VM %s locked with token %Ld: cannot be unlocked by token %Ld" (Ref.string_of target) token' token; - raise Internal_error_token_mismatch + error "VM %s locked with token %Ld: cannot be unlocked by token %Ld" (Ref.string_of target) token' token; + raise Internal_error_token_mismatch end; Hashtbl.remove locks target; Condition.broadcast cvar ); Thread_state.released (lock_of_vm target); debug "Released lock on VM %s with token %Ld" (Ref.string_of target) token - + diff --git a/ocaml/idl/ocaml_backend/locking_helpers.mli b/ocaml/idl/ocaml_backend/locking_helpers.mli index 23d7a74473f..b2e8d5c9dd9 100644 --- a/ocaml/idl/ocaml_backend/locking_helpers.mli +++ b/ocaml/idl/ocaml_backend/locking_helpers.mli @@ -13,9 +13,9 @@ *) (** Represents a type of resource a thread has either allocated or is waiting for. *) -type resource = - | Lock of string (** e.g. a per-VM lock or a queue *) - | Process of string * int (** e.g. an stunnel process with the given pid *) +type resource = + | Lock of string (** e.g. a per-VM lock or a queue *) + | Process of string * int (** e.g. 
an stunnel process with the given pid *) (** Best-effort attempt to kill a resource *) val kill_resource: resource -> unit @@ -23,25 +23,25 @@ val kill_resource: resource -> unit (** Records per-thread diagnostic information *) module Thread_state : sig - (** Called when a thread becomes associated with a particular task *) - val with_named_thread: string -> API.ref_task -> (unit -> 'a) -> 'a + (** Called when a thread becomes associated with a particular task *) + val with_named_thread: string -> API.ref_task -> (unit -> 'a) -> 'a - (** Called when a thread is about to block waiting for a resource to be free *) - val waiting_for: resource -> unit + (** Called when a thread is about to block waiting for a resource to be free *) + val waiting_for: resource -> unit - (** Called when a thread acquires a resource *) - val acquired: resource -> unit + (** Called when a thread acquires a resource *) + val acquired: resource -> unit - (** Called when a thread releases a resource *) - val released: resource -> unit + (** Called when a thread releases a resource *) + val released: resource -> unit - val get_all_acquired_resources: unit -> resource list - val get_acquired_resources_by_task: API.ref_task -> resource list - val to_graphviz: unit -> string + val get_all_acquired_resources: unit -> resource list + val get_acquired_resources_by_task: API.ref_task -> resource list + val to_graphviz: unit -> string end module Named_mutex : sig - type t - val create: string -> t - val execute: t -> (unit -> 'a) -> 'a + type t + val create: string -> t + val execute: t -> (unit -> 'a) -> 'a end diff --git a/ocaml/idl/ocaml_backend/ocaml_syntax.ml b/ocaml/idl/ocaml_backend/ocaml_syntax.ml index 40ea9bd3b8f..9808f6d939d 100644 --- a/ocaml/idl/ocaml_backend/ocaml_syntax.ml +++ b/ocaml/idl/ocaml_backend/ocaml_syntax.ml @@ -15,11 +15,11 @@ (**********************************************************************************) -type item = +type item = | Indent of item list | Line of string 
-let string_of_item x = +let string_of_item x = let rec indent prefix = function | Line x -> prefix ^ x ^ "\n" | Indent x -> String.concat "" (List.map (indent (prefix ^ " ")) x) in @@ -43,7 +43,7 @@ let type_of_param = function module Val = struct type t = { name: string; params: param list } - let item_of x = + let item_of x = let param = function | Anon (_, ty) -> ty | Named (name, ty) -> name ^ ":" ^ ty in @@ -53,55 +53,55 @@ end module Let = struct type t = { name: string; params: param list; ty: string; body: string list; doc: string } - let make ?(doc="") ~name ~params ~ty ~body () = + let make ?(doc="") ~name ~params ~ty ~body () = { name = name; params = params; ty = ty; body = body; doc = doc } let val_of x = { Val.name = x.name; params = x.params @ [ Anon (None, x.ty) ] } - let items_of ?(prefix="let") x = + let items_of ?(prefix="let") x = let param = function | Anon (None, ty) -> "(_:" ^ ty ^ ")" | Anon (Some x, ty) -> "(" ^ x ^ ": " ^ ty ^ ")" | Named (name, ty) -> "~" ^ name in [ Line ("(** " ^ x.doc ^ " *)"); - Line (prefix ^ " " ^ x.name ^ " " ^ - (String.concat " " (List.map param x.params)) ^ " ="); + Line (prefix ^ " " ^ x.name ^ " " ^ + (String.concat " " (List.map param x.params)) ^ " ="); Indent (List.map (fun x -> Line x) x.body) ] end module Type = struct type t = { name: string; body: string } - + let item_of x = Line ("type " ^ x.name ^ " = " ^ x.body) end module Module = struct type e = Let of Let.t | Module of t | Type of Type.t and t = { name: string; (** OCaml module name *) - preamble: string list; (** Convenient place for helper functions, opens etc *) - letrec: bool; (** True for all the let bindings to be mutually recursive*) - args: string list; (** for functor *) - elements: e list } + preamble: string list; (** Convenient place for helper functions, opens etc *) + letrec: bool; (** True for all the let bindings to be mutually recursive*) + args: string list; (** for functor *) + elements: e list } - let make ?(preamble=[]) 
?(letrec=false) ?(args=[]) ~name ~elements () = - { name = name; preamble = preamble; letrec = letrec; + let make ?(preamble=[]) ?(letrec=false) ?(args=[]) ~name ~elements () = + { name = name; preamble = preamble; letrec = letrec; args = args; elements = elements } - let rec items_of x = + let rec items_of x = let e = function | Let y -> Let.items_of ~prefix:(if x.letrec then "and" else "let") y | Module x -> items_of x | Type x -> [ Type.item_of x ] in - let opening = "module " ^ x.name ^ " = " ^ - (if x.args = [] - then "" - else String.concat " " (List.map (fun x -> "functor(" ^ x ^ ") ->") x.args)) ^ - "struct" in + let opening = "module " ^ x.name ^ " = " ^ + (if x.args = [] + then "" + else String.concat " " (List.map (fun x -> "functor(" ^ x ^ ") ->") x.args)) ^ + "struct" in [ Line opening; Indent ( - List.map (fun x -> Line x) x.preamble @ - ( if x.letrec then [ Line "let rec __unused () = ()" ] else [] ) @ - (List.concat (List.map e x.elements)) + List.map (fun x -> Line x) x.preamble @ + ( if x.letrec then [ Line "let rec __unused () = ()" ] else [] ) @ + (List.concat (List.map e x.elements)) ); Line "end" ] @@ -111,8 +111,8 @@ end module Signature = struct type e = Val of Val.t | Module of t | Type of Type.t and t = { name: string; elements: e list } - - let rec items_of ?(toplevel=true) x = + + let rec items_of ?(toplevel=true) x = let e = function | Val x -> [ Val.item_of x ] | Module x -> items_of ~toplevel:false x @@ -121,12 +121,12 @@ module Signature = struct then Line ("module type " ^ x.name ^ " = sig") else Line ("module " ^ x.name ^ " : sig"); Indent ( - List.concat (List.map e x.elements) + List.concat (List.map e x.elements) ); Line "end" ] - let rec of_module (x: Module.t) = + let rec of_module (x: Module.t) = let e = function | Module.Let x -> Val (Let.val_of x) | Module.Type x -> Type x diff --git a/ocaml/idl/ocaml_backend/ocaml_utils.ml b/ocaml/idl/ocaml_backend/ocaml_utils.ml index 8ddde65594e..f32e1369ac7 100644 --- 
a/ocaml/idl/ocaml_backend/ocaml_utils.ml +++ b/ocaml/idl/ocaml_backend/ocaml_utils.ml @@ -20,10 +20,10 @@ let keywords = [ "mod"; "type"; "class"; "ref"; "and" ] let escape x = if List.mem x keywords then - "_" ^ x + "_" ^ x else match x.[0] with - | 'a' .. 'z' | '_' -> x - | _ -> "_" ^ x + | 'a' .. 'z' | '_' -> x + | _ -> "_" ^ x (** Escape enum names to make them readable polymorphic variant type constructors. *) @@ -33,29 +33,29 @@ let constructor_of string = | _ -> "" in let string = if List.mem string keywords then "_" ^ string else string in let list = match String.explode string with - '0'..'9' :: _ as list -> "`_" :: List.map remove_non_alphanum list + '0'..'9' :: _ as list -> "`_" :: List.map remove_non_alphanum list | list -> "`" :: List.map remove_non_alphanum list in String.concat "" list (* generates: vM *) let ocaml_of_record_name x = - escape (String.uncapitalize x) + escape (String.uncapitalize x) (* generates: _VM *) let ocaml_of_record_name_rpc x = - escape x + escape x (* generates: _VM_foo *) -let ocaml_of_record_field_rpc x = - escape (String.concat "_" x) +let ocaml_of_record_field_rpc x = + escape (String.concat "_" x) (* generates: vM_foo *) let ocaml_of_record_field = function - | [] -> failwith "ocaml_of_record_field" - | h :: t -> ocaml_of_record_field_rpc (String.uncapitalize h :: t) + | [] -> failwith "ocaml_of_record_field" + | h :: t -> ocaml_of_record_field_rpc (String.uncapitalize h :: t) let ocaml_of_module_name x = - String.capitalize x + String.capitalize x (** Convert an IDL enum into a polymorhic variant. 
*) let ocaml_of_enum list = @@ -64,16 +64,16 @@ let ocaml_of_enum list = (** Convert an IDL type to a function name; we need to generate functions to marshal/unmarshal from XML for each unique IDL type *) let rec alias_of_ty ?(prefix="") = function - | String -> "string" - | Int -> "int64" - | Float -> "float" - | Bool -> "bool" - | DateTime -> "datetime" - | Set ty -> sprintf "%s_set" (alias_of_ty ty) - | Enum(name, _) -> String.uncapitalize name - | Map(k, v) -> sprintf "%s_to_%s_map" (alias_of_ty k) (alias_of_ty v) - | Ref x -> sprintf "ref_%s" x - | Record x -> sprintf "%s_t" (ocaml_of_record_name x) + | String -> "string" + | Int -> "int64" + | Float -> "float" + | Bool -> "bool" + | DateTime -> "datetime" + | Set ty -> sprintf "%s_set" (alias_of_ty ty) + | Enum(name, _) -> String.uncapitalize name + | Map(k, v) -> sprintf "%s_to_%s_map" (alias_of_ty k) (alias_of_ty v) + | Ref x -> sprintf "ref_%s" x + | Record x -> sprintf "%s_t" (ocaml_of_record_name x) (** Convert an IDL type into a string containing OCaml code representing the type. 
*) @@ -87,16 +87,16 @@ let rec ocaml_of_ty = function | Set x -> ocaml_of_ty x ^ " list" | Enum(name, cs) -> ocaml_of_enum (List.map fst cs) | Map(l, r) -> "("^alias_of_ty l^" * "^alias_of_ty r^") list" -(* | Ref "session" -> "Uuid.cookie" *) + (* | Ref "session" -> "Uuid.cookie" *) | Ref ty -> "[`"^ty^"] Ref.t" | Record x -> failwith "ocaml_of_ty got a record" (** Take an object name and return the corresponding ocaml name *) -let ocaml_of_obj_name x = - if x = "" +let ocaml_of_obj_name x = + if x = "" then failwith "Empty object name" else (match x.[0] with - | 'A'..'Z' | 'a'..'z' -> String.capitalize x - | _ -> "M_" ^ x) + | 'A'..'Z' | 'a'..'z' -> String.capitalize x + | _ -> "M_" ^ x) diff --git a/ocaml/idl/ocaml_backend/rbac.ml b/ocaml/idl/ocaml_backend/rbac.ml index d08021c4fec..9984918f105 100644 --- a/ocaml/idl/ocaml_backend/rbac.ml +++ b/ocaml/idl/ocaml_backend/rbac.ml @@ -21,21 +21,21 @@ let trackid session_id = (Context.trackid_of_session (Some session_id)) (* From the Requirements: -1) Since we rely on an external directory/authentication service, disabling this - external authentication effectively disables RBAC too. When disabled like - this only the local root account can be used so the system "fails secure". + 1) Since we rely on an external directory/authentication service, disabling this + external authentication effectively disables RBAC too. When disabled like + this only the local root account can be used so the system "fails secure". Given this we do not need any separate mechanism to enable/disable RBAC. -2) At all times the RBAC policy will be applied; the license state will only + 2) At all times the RBAC policy will be applied; the license state will only affect the Pool Administrator's ability to modify the subject -> role mapping. -3) The local superuser (root) has the "Pool Admin" role. -4) If a subject has no roles assigned then, authentication will fail with an + 3) The local superuser (root) has the "Pool Admin" role. 
+ 4) If a subject has no roles assigned then, authentication will fail with an error such as PERMISSION_DENIED. -5) To guarantee the new role takes effect, the user should be logged out and + 5) To guarantee the new role takes effect, the user should be logged out and forced to log back in again (requires "Logout active user connections" permission) - (So, there's no need to update the session immediately after modifying either the + (So, there's no need to update the session immediately after modifying either the Subject.roles field or the Roles.subroles field, or for this function to immediately reflect any modifications in these fields without the user logging out and in again) @@ -58,10 +58,10 @@ let trackid session_id = (Context.trackid_of_session (Some session_id)) * all_sids(s) = Union(subject(s), groups(subject(s)) * and denied otherwise. -Intuitively the 3 nested Unions above come from the 3 one -> many relationships: -1. a session (i.e. a user) has multiple subject IDs; -2. a subject ID has multiple roles; -3. a role has multiple permissions. + Intuitively the 3 nested Unions above come from the 3 one -> many relationships: + 1. a session (i.e. a user) has multiple subject IDs; + 2. a subject ID has multiple roles; + 3. a role has multiple permissions. 
*) @@ -72,224 +72,224 @@ module Permission_set = Set.Make(String) (* This flag enables efficient look-up of the permission set *) let use_efficient_permission_set = true -let permission_set permission_list = - List.fold_left - (fun set r->Permission_set.add r set) - Permission_set.empty - permission_list +let permission_set permission_list = + List.fold_left + (fun set r->Permission_set.add r set) + Permission_set.empty + permission_list let create_session_permissions_tbl ~session_id ~rbac_permissions = - if use_efficient_permission_set - && Pool_role.is_master () (* Create this structure on the master only, *) - (* so as to avoid heap-leaking on the slaves *) - then begin - debug "Creating permission-set tree for session %s" - (Context.trackid_of_session (Some session_id)); - let permission_tree = (permission_set rbac_permissions) in - Hashtbl.replace session_permissions_tbl session_id permission_tree; - Some(permission_tree) - end - else - None + if use_efficient_permission_set + && Pool_role.is_master () (* Create this structure on the master only, *) + (* so as to avoid heap-leaking on the slaves *) + then begin + debug "Creating permission-set tree for session %s" + (Context.trackid_of_session (Some session_id)); + let permission_tree = (permission_set rbac_permissions) in + Hashtbl.replace session_permissions_tbl session_id permission_tree; + Some(permission_tree) + end + else + None let destroy_session_permissions_tbl ~session_id = - if use_efficient_permission_set then - Hashtbl.remove - session_permissions_tbl - session_id + if use_efficient_permission_set then + Hashtbl.remove + session_permissions_tbl + session_id (* create a key permission name that can be in the session *) let get_key_permission_name permission key_name = - permission ^ "/key:" ^ key_name + permission ^ "/key:" ^ key_name (* create a key-error permission name that is never in the session *) let get_keyERR_permission_name permission err = - permission ^ "/keyERR:" ^ err + permission ^ 
"/keyERR:" ^ err let permission_of_action ?args ~keys _action = - (* all permissions are in lowercase, see gen_rbac.writer_ *) - let action = (String.lowercase _action) in - if (List.length keys) < 1 - then (* most actions do not use rbac-guarded map keys in the arguments *) - action - - else (* only actions with rbac-guarded map keys fall here *) - match args with - |None -> begin (* this should never happen *) - debug "DENYING access: no args for keyed-action %s" action; - get_keyERR_permission_name action "DENY_NOARGS" (* will always deny *) - end - |Some (arg_keys,arg_values) -> - if (List.length arg_keys) <> (List.length arg_values) - then begin (* this should never happen *) - debug "DENYING access: arg_keys and arg_values lengths don't match: arg_keys=[%s], arg_values=[%s]" - ((List.fold_left (fun ss s->ss^s^",") "" arg_keys)) - ((List.fold_left (fun ss s->ss^(Rpc.to_string s)^",") "" arg_values)) - ; - get_keyERR_permission_name action "DENY_WRGLEN" (* will always deny *) - end - else (* keys and values have the same length *) - let rec get_permission_name_of_keys arg_keys arg_values = - match arg_keys,arg_values with - |[],[]|_,[]|[],_-> (* this should never happen *) - begin - debug "DENYING access: no 'key' argument in the action %s" action; - get_keyERR_permission_name action "DENY_NOKEY" (* deny by default *) - end - |k::ks,v::vs-> - if k<>"key" (* "key" is defined in datamodel_utils.ml *) - then - (get_permission_name_of_keys ks vs) - else (* found "key" in args *) - match v with - | Rpc.String key_name_in_args -> - begin - (*debug "key_name_in_args=%s, keys=[%s]" key_name_in_args ((List.fold_left (fun ss s->ss^s^",") "" keys)) ;*) - try - let key_name = - List.find - (fun key_name -> - if Stdext.Xstringext.String.endswith "*" key_name - then begin (* resolve wildcards at the end *) - Stdext.Xstringext.String.startswith - (String.sub key_name 0 ((String.length key_name) - 1)) - key_name_in_args - end - else (* no wildcards to resolve *) - key_name = 
key_name_in_args - ) - keys - in - get_key_permission_name action (String.lowercase key_name) - with Not_found -> (* expected, key_in_args is not rbac-protected *) - action - end - |_ -> begin (* this should never happen *) - debug "DENYING access: wrong XML value [%s] in the 'key' argument of action %s" (Rpc.to_string v) action; - get_keyERR_permission_name action "DENY_NOVALUE" - end - in - get_permission_name_of_keys arg_keys arg_values + (* all permissions are in lowercase, see gen_rbac.writer_ *) + let action = (String.lowercase _action) in + if (List.length keys) < 1 + then (* most actions do not use rbac-guarded map keys in the arguments *) + action + + else (* only actions with rbac-guarded map keys fall here *) + match args with + |None -> begin (* this should never happen *) + debug "DENYING access: no args for keyed-action %s" action; + get_keyERR_permission_name action "DENY_NOARGS" (* will always deny *) + end + |Some (arg_keys,arg_values) -> + if (List.length arg_keys) <> (List.length arg_values) + then begin (* this should never happen *) + debug "DENYING access: arg_keys and arg_values lengths don't match: arg_keys=[%s], arg_values=[%s]" + ((List.fold_left (fun ss s->ss^s^",") "" arg_keys)) + ((List.fold_left (fun ss s->ss^(Rpc.to_string s)^",") "" arg_values)) + ; + get_keyERR_permission_name action "DENY_WRGLEN" (* will always deny *) + end + else (* keys and values have the same length *) + let rec get_permission_name_of_keys arg_keys arg_values = + match arg_keys,arg_values with + |[],[]|_,[]|[],_-> (* this should never happen *) + begin + debug "DENYING access: no 'key' argument in the action %s" action; + get_keyERR_permission_name action "DENY_NOKEY" (* deny by default *) + end + |k::ks,v::vs-> + if k<>"key" (* "key" is defined in datamodel_utils.ml *) + then + (get_permission_name_of_keys ks vs) + else (* found "key" in args *) + match v with + | Rpc.String key_name_in_args -> + begin + (*debug "key_name_in_args=%s, keys=[%s]" 
key_name_in_args ((List.fold_left (fun ss s->ss^s^",") "" keys)) ;*) + try + let key_name = + List.find + (fun key_name -> + if Stdext.Xstringext.String.endswith "*" key_name + then begin (* resolve wildcards at the end *) + Stdext.Xstringext.String.startswith + (String.sub key_name 0 ((String.length key_name) - 1)) + key_name_in_args + end + else (* no wildcards to resolve *) + key_name = key_name_in_args + ) + keys + in + get_key_permission_name action (String.lowercase key_name) + with Not_found -> (* expected, key_in_args is not rbac-protected *) + action + end + |_ -> begin (* this should never happen *) + debug "DENYING access: wrong XML value [%s] in the 'key' argument of action %s" (Rpc.to_string v) action; + get_keyERR_permission_name action "DENY_NOVALUE" + end + in + get_permission_name_of_keys arg_keys arg_values let is_permission_in_session ~session_id ~permission ~session = - let find_linear elem set = List.exists (fun e -> e = elem) set in - let find_log elem set = Permission_set.mem elem set in - if use_efficient_permission_set then - begin (* use efficient log look-up of permissions *) - let permission_tree = - try Some(Hashtbl.find session_permissions_tbl session_id) - with Not_found -> begin - create_session_permissions_tbl - ~session_id - ~rbac_permissions:session.API.session_rbac_permissions - end - in match permission_tree with - | Some(permission_tree) -> find_log permission permission_tree - | None -> find_linear permission session.API.session_rbac_permissions - end - else (* use linear look-up of permissions *) - find_linear permission session.API.session_rbac_permissions + let find_linear elem set = List.exists (fun e -> e = elem) set in + let find_log elem set = Permission_set.mem elem set in + if use_efficient_permission_set then + begin (* use efficient log look-up of permissions *) + let permission_tree = + try Some(Hashtbl.find session_permissions_tbl session_id) + with Not_found -> begin + create_session_permissions_tbl + ~session_id 
+ ~rbac_permissions:session.API.session_rbac_permissions + end + in match permission_tree with + | Some(permission_tree) -> find_log permission permission_tree + | None -> find_linear permission session.API.session_rbac_permissions + end + else (* use linear look-up of permissions *) + find_linear permission session.API.session_rbac_permissions open Db_actions (* look up the list generated in xapi_session.get_permissions *) let is_access_allowed ~__context ~session_id ~permission = - (* always allow local system access *) - if Session_check.is_local_session __context session_id - then true + (* always allow local system access *) + if Session_check.is_local_session __context session_id + then true - (* normal user session *) - else - let session = DB_Action.Session.get_record ~__context ~self:session_id in - (* the root user can always execute anything *) - if session.API.session_is_local_superuser - then true + (* normal user session *) + else + let session = DB_Action.Session.get_record ~__context ~self:session_id in + (* the root user can always execute anything *) + if session.API.session_is_local_superuser + then true - (* not root user, so let's decide if permission is allowed or denied *) - else - is_permission_in_session ~session_id ~permission ~session + (* not root user, so let's decide if permission is allowed or denied *) + else + is_permission_in_session ~session_id ~permission ~session (* Execute fn if rbac access is allowed for action, otherwise fails. 
*) let nofn = fun () -> () let check ?(extra_dmsg="") ?(extra_msg="") ?args ?(keys=[]) ~__context ~fn session_id action = - let permission = permission_of_action action ?args ~keys in - - if (is_access_allowed ~__context ~session_id ~permission) - then (* allow access to action *) - begin - let sexpr_of_args = - Rbac_audit.allowed_pre_fn ~__context ~action ?args () - in - try - let result = (fn ()) (* call rbac-protected function *) - in - Rbac_audit.allowed_post_fn_ok ~__context ~session_id ~action - ~permission ?sexpr_of_args ?args ~result (); - result - with error-> (* catch all exceptions *) - begin - Rbac_audit.allowed_post_fn_error ~__context ~session_id ~action - ~permission ?sexpr_of_args ?args ~error (); - raise error - end - end - else begin (* deny access to action *) - let allowed_roles_string = - try - let allowed_roles = Xapi_role.get_by_permission_name_label ~__context ~label:permission in - List.fold_left - (fun acc allowed_role -> acc ^ (if acc = "" then "" else ", ") ^ - (Xapi_role.get_name_label ~__context ~self:allowed_role)) - "" allowed_roles - with e -> debug "Could not obtain allowed roles for %s (%s)" permission (ExnHelper.string_of_exn e); - "" - in - let msg = (Printf.sprintf "No permission in user session. 
(Roles with this permission: %s)%s" - allowed_roles_string extra_msg) in - debug "%s[%s]: %s %s %s" action permission msg (trackid session_id) extra_dmsg; - Rbac_audit.denied ~__context ~session_id ~action ~permission - ?args (); - raise (Api_errors.Server_error - (Api_errors.rbac_permission_denied,[permission;msg])) - end + let permission = permission_of_action action ?args ~keys in + + if (is_access_allowed ~__context ~session_id ~permission) + then (* allow access to action *) + begin + let sexpr_of_args = + Rbac_audit.allowed_pre_fn ~__context ~action ?args () + in + try + let result = (fn ()) (* call rbac-protected function *) + in + Rbac_audit.allowed_post_fn_ok ~__context ~session_id ~action + ~permission ?sexpr_of_args ?args ~result (); + result + with error-> (* catch all exceptions *) + begin + Rbac_audit.allowed_post_fn_error ~__context ~session_id ~action + ~permission ?sexpr_of_args ?args ~error (); + raise error + end + end + else begin (* deny access to action *) + let allowed_roles_string = + try + let allowed_roles = Xapi_role.get_by_permission_name_label ~__context ~label:permission in + List.fold_left + (fun acc allowed_role -> acc ^ (if acc = "" then "" else ", ") ^ + (Xapi_role.get_name_label ~__context ~self:allowed_role)) + "" allowed_roles + with e -> debug "Could not obtain allowed roles for %s (%s)" permission (ExnHelper.string_of_exn e); + "" + in + let msg = (Printf.sprintf "No permission in user session. 
(Roles with this permission: %s)%s" + allowed_roles_string extra_msg) in + debug "%s[%s]: %s %s %s" action permission msg (trackid session_id) extra_dmsg; + Rbac_audit.denied ~__context ~session_id ~action ~permission + ?args (); + raise (Api_errors.Server_error + (Api_errors.rbac_permission_denied,[permission;msg])) + end let get_session_of_context ~__context ~permission = - try (Context.get_session_id __context) - with Failure _ -> raise (Api_errors.Server_error - (Api_errors.rbac_permission_denied,[permission;"no session in context"])) + try (Context.get_session_id __context) + with Failure _ -> raise (Api_errors.Server_error + (Api_errors.rbac_permission_denied,[permission;"no session in context"])) let assert_permission_name ~__context ~permission = - let session_id = get_session_of_context ~__context ~permission in - check ~__context ~fn:nofn session_id permission + let session_id = get_session_of_context ~__context ~permission in + check ~__context ~fn:nofn session_id permission let assert_permission ~__context ~permission = - assert_permission_name ~__context ~permission:permission.role_name_label + assert_permission_name ~__context ~permission:permission.role_name_label (* this is necessary to break dependency cycle between rbac and taskhelper *) let init_task_helper_rbac_has_permission_fn = - if !TaskHelper.rbac_assert_permission_fn = None - then TaskHelper.rbac_assert_permission_fn := Some(assert_permission) + if !TaskHelper.rbac_assert_permission_fn = None + then TaskHelper.rbac_assert_permission_fn := Some(assert_permission) let has_permission_name ~__context ~permission = - let session_id = get_session_of_context ~__context ~permission in - is_access_allowed ~__context ~session_id ~permission + let session_id = get_session_of_context ~__context ~permission in + is_access_allowed ~__context ~session_id ~permission let has_permission ~__context ~permission = - has_permission_name ~__context ~permission:permission.role_name_label + has_permission_name 
~__context ~permission:permission.role_name_label -let check_with_new_task ?(extra_dmsg="") ?(extra_msg="") ?(task_desc="check") - ?args ~fn session_id action = - let task_desc = task_desc^":"^action in - Server_helpers.exec_with_new_task task_desc - (fun __context -> - check ~extra_dmsg ~extra_msg ~__context ?args ~fn session_id action - ) +let check_with_new_task ?(extra_dmsg="") ?(extra_msg="") ?(task_desc="check") + ?args ~fn session_id action = + let task_desc = task_desc^":"^action in + Server_helpers.exec_with_new_task task_desc + (fun __context -> + check ~extra_dmsg ~extra_msg ~__context ?args ~fn session_id action + ) (* used by xapi_http.ml to decide if rbac checks should be applied *) let is_rbac_enabled_for_http_action http_action_name = - not - (List.mem http_action_name Datamodel.public_http_actions_with_no_rbac_check) + not + (List.mem http_action_name Datamodel.public_http_actions_with_no_rbac_check) diff --git a/ocaml/idl/ocaml_backend/rbac_audit.ml b/ocaml/idl/ocaml_backend/rbac_audit.ml index 191add9226a..08b92a82b07 100644 --- a/ocaml/idl/ocaml_backend/rbac_audit.ml +++ b/ocaml/idl/ocaml_backend/rbac_audit.ml @@ -43,169 +43,169 @@ open Db_actions open Db_filter_types -let is_http action = - Stdext.Xstringext.String.startswith Datamodel.rbac_http_permission_prefix action +let is_http action = + Stdext.Xstringext.String.startswith Datamodel.rbac_http_permission_prefix action let call_type_of ~action = - if is_http action then "HTTP" else "API" + if is_http action then "HTTP" else "API" let str_local_session = "LOCAL_SESSION" let str_local_superuser = "LOCAL_SUPERUSER" let get_subject_common ~__context ~session_id ~fnname - ~fn_if_local_session ~fn_if_local_superuser ~fn_if_subject = - try - if Session_check.is_local_session __context session_id - then (fn_if_local_session ()) - else - if (DB_Action.Session.get_is_local_superuser ~__context ~self:session_id) - then (fn_if_local_superuser ()) - else (fn_if_subject ()) - with - | e -> begin - 
D.debug "error %s for %s:%s" - fnname (trackid session_id) (ExnHelper.string_of_exn e); - "" (* default value returned after an internal error *) - end + ~fn_if_local_session ~fn_if_local_superuser ~fn_if_subject = + try + if Session_check.is_local_session __context session_id + then (fn_if_local_session ()) + else + if (DB_Action.Session.get_is_local_superuser ~__context ~self:session_id) + then (fn_if_local_superuser ()) + else (fn_if_subject ()) + with + | e -> begin + D.debug "error %s for %s:%s" + fnname (trackid session_id) (ExnHelper.string_of_exn e); + "" (* default value returned after an internal error *) + end let get_subject_identifier __context session_id = - get_subject_common ~__context ~session_id - ~fnname:"get_subject_identifier" - ~fn_if_local_session:(fun()->str_local_session) - ~fn_if_local_superuser:(fun()->str_local_superuser) - ~fn_if_subject:( - fun()->DB_Action.Session.get_auth_user_sid ~__context ~self:session_id - ) + get_subject_common ~__context ~session_id + ~fnname:"get_subject_identifier" + ~fn_if_local_session:(fun()->str_local_session) + ~fn_if_local_superuser:(fun()->str_local_superuser) + ~fn_if_subject:( + fun()->DB_Action.Session.get_auth_user_sid ~__context ~self:session_id + ) let get_subject_name __context session_id = - get_subject_common ~__context ~session_id - ~fnname:"get_subject_name" - ~fn_if_local_session:(fun()-> - (* we are in emergency mode here, do not call DB_Action: - - local sessions are not in the normal DB - - local sessions do not have a username field - - DB_Action will block forever trying to access an inaccessible master - *) - "" - ) - ~fn_if_local_superuser:(fun()-> - DB_Action.Session.get_auth_user_name ~__context ~self:session_id - ) - ~fn_if_subject:(fun()-> - DB_Action.Session.get_auth_user_name ~__context ~self:session_id - ) + get_subject_common ~__context ~session_id + ~fnname:"get_subject_name" + ~fn_if_local_session:(fun()-> + (* we are in emergency mode here, do not call DB_Action: + - local 
sessions are not in the normal DB + - local sessions do not have a username field + - DB_Action will block forever trying to access an inaccessible master + *) + "" + ) + ~fn_if_local_superuser:(fun()-> + DB_Action.Session.get_auth_user_name ~__context ~self:session_id + ) + ~fn_if_subject:(fun()-> + DB_Action.Session.get_auth_user_name ~__context ~self:session_id + ) (*given a ref-value, return a human-friendly value associated with that ref*) let get_obj_of_ref_common obj_ref fn = - let indexrec = Ref_index.lookup obj_ref in - match indexrec with - | None -> None - | Some indexrec -> fn indexrec + let indexrec = Ref_index.lookup obj_ref in + match indexrec with + | None -> None + | Some indexrec -> fn indexrec let get_obj_of_ref obj_ref = - get_obj_of_ref_common obj_ref - (fun irec -> Some(irec.Ref_index.name_label, irec.Ref_index.uuid, irec.Ref_index._ref)) + get_obj_of_ref_common obj_ref + (fun irec -> Some(irec.Ref_index.name_label, irec.Ref_index.uuid, irec.Ref_index._ref)) let get_obj_name_of_ref obj_ref = - get_obj_of_ref_common obj_ref (fun irec -> irec.Ref_index.name_label) + get_obj_of_ref_common obj_ref (fun irec -> irec.Ref_index.name_label) let get_obj_uuid_of_ref obj_ref = - get_obj_of_ref_common obj_ref (fun irec -> Some(irec.Ref_index.uuid)) + get_obj_of_ref_common obj_ref (fun irec -> Some(irec.Ref_index.uuid)) let get_obj_ref_of_ref obj_ref = - get_obj_of_ref_common obj_ref (fun irec -> Some(irec.Ref_index._ref)) + get_obj_of_ref_common obj_ref (fun irec -> Some(irec.Ref_index._ref)) let get_sexpr_arg name name_of_ref uuid_of_ref ref_value : SExpr.t = - SExpr.Node - ( (* s-expr lib should properly escape malicious values *) - (SExpr.String name):: - (SExpr.String name_of_ref):: - (SExpr.String uuid_of_ref):: - (SExpr.String ref_value):: - [] - ) + SExpr.Node + ( (* s-expr lib should properly escape malicious values *) + (SExpr.String name):: + (SExpr.String name_of_ref):: + (SExpr.String uuid_of_ref):: + (SExpr.String ref_value):: + [] + ) (* 
given a list of (name,'',ref-value) triplets, *) (* map '' -> friendly-value of ref-value. *) (* used on the master to map missing reference names from slaves *) let get_obj_names_of_refs (obj_ref_list : SExpr.t list) : SExpr.t list= - List.map - (fun obj_ref -> - match obj_ref with - |SExpr.Node (SExpr.String name::SExpr.String "":: - SExpr.String ""::SExpr.String ref_value::[]) -> - get_sexpr_arg - name - (match (get_obj_name_of_ref ref_value) with - | None -> "" (* ref_value is not a ref! *) - | Some obj_name -> obj_name (* the missing name *) - ) - (match (get_obj_uuid_of_ref ref_value) with - | None -> "" (* ref_value is not a ref! *) - | Some obj_uuid -> obj_uuid (* the missing uuid *) - ) - ref_value - |_->obj_ref (* do nothing if not a triplet *) - ) - obj_ref_list + List.map + (fun obj_ref -> + match obj_ref with + |SExpr.Node (SExpr.String name::SExpr.String "":: + SExpr.String ""::SExpr.String ref_value::[]) -> + get_sexpr_arg + name + (match (get_obj_name_of_ref ref_value) with + | None -> "" (* ref_value is not a ref! *) + | Some obj_name -> obj_name (* the missing name *) + ) + (match (get_obj_uuid_of_ref ref_value) with + | None -> "" (* ref_value is not a ref! *) + | Some obj_uuid -> obj_uuid (* the missing uuid *) + ) + ref_value + |_->obj_ref (* do nothing if not a triplet *) + ) + obj_ref_list (* unwrap the audit record and add names to the arg refs *) (* this is necessary because we can only obtain the ref names *) (* on the master, and http audit records can come from slaves *) let populate_audit_record_with_obj_names_of_refs line = - try - let sexpr_idx = (String.index line ']') + 1 in - let before_sexpr_str = String.sub line 0 sexpr_idx in - (* remove the [...] 
prefix *) - let sexpr_str = Stdext.Xstringext.String.sub_to_end line sexpr_idx in - let sexpr = SExpr_TS.of_string sexpr_str in - match sexpr with - |SExpr.Node els -> begin - if List.length els = 0 - then line - else - let (args:SExpr.t) = List.hd (List.rev els) in - (match List.partition (fun (e:SExpr.t) ->e<>args) els with - |prefix, ((SExpr.Node arg_list)::[]) -> - (* paste together the prefix of original audit record *) - before_sexpr_str^" "^ - (SExpr.string_of - (SExpr.Node ( - prefix@ - ((SExpr.Node (get_obj_names_of_refs arg_list)):: - []) - )) - ) - |prefix,_->line - ) - end - |_->line - with e -> - D.debug "error populating audit record arg names: %s" - (ExnHelper.string_of_exn e) - ; - line + try + let sexpr_idx = (String.index line ']') + 1 in + let before_sexpr_str = String.sub line 0 sexpr_idx in + (* remove the [...] prefix *) + let sexpr_str = Stdext.Xstringext.String.sub_to_end line sexpr_idx in + let sexpr = SExpr_TS.of_string sexpr_str in + match sexpr with + |SExpr.Node els -> begin + if List.length els = 0 + then line + else + let (args:SExpr.t) = List.hd (List.rev els) in + (match List.partition (fun (e:SExpr.t) ->e<>args) els with + |prefix, ((SExpr.Node arg_list)::[]) -> + (* paste together the prefix of original audit record *) + before_sexpr_str^" "^ + (SExpr.string_of + (SExpr.Node ( + prefix@ + ((SExpr.Node (get_obj_names_of_refs arg_list)):: + []) + )) + ) + |prefix,_->line + ) + end + |_->line + with e -> + D.debug "error populating audit record arg names: %s" + (ExnHelper.string_of_exn e) + ; + line let action_params_whitelist = - [ (* manual params asked by audit report team *) - ("host.create",["hostname";"address";"external_auth_type";"external_auth_service_name";"edition"]); - ("VM.create",["is_a_template";"memory_target";"memory_static_max";"memory_dynamic_max";"memory_dynamic_min";"memory_static_min";"ha_always_run";"ha_restart_priority"]); - ("host.set_address",["value"]); - ("VM.migrate",["dest";"live"]); - 
("VM.start_on",["start_paused";"force"]); - ("VM.start",["start_paused";"force"]); - ("pool.create_VLAN",["device";"vLAN"]); - ("pool.join_force",["master_address"]); - ("pool.join",["master_address"]); - ("pool.enable_external_auth",["service_name";"auth_type"]); - ("host.enable_external_auth",["service_name";"auth_type"]); - ("subject.create",["subject_identifier";"other_config"]); - ("subject.create.other_config",["subject-name"]); + [ (* manual params asked by audit report team *) + ("host.create",["hostname";"address";"external_auth_type";"external_auth_service_name";"edition"]); + ("VM.create",["is_a_template";"memory_target";"memory_static_max";"memory_dynamic_max";"memory_dynamic_min";"memory_static_min";"ha_always_run";"ha_restart_priority"]); + ("host.set_address",["value"]); + ("VM.migrate",["dest";"live"]); + ("VM.start_on",["start_paused";"force"]); + ("VM.start",["start_paused";"force"]); + ("pool.create_VLAN",["device";"vLAN"]); + ("pool.join_force",["master_address"]); + ("pool.join",["master_address"]); + ("pool.enable_external_auth",["service_name";"auth_type"]); + ("host.enable_external_auth",["service_name";"auth_type"]); + ("subject.create",["subject_identifier";"other_config"]); + ("subject.create.other_config",["subject-name"]); (* used for VMPP alert logs *) ("message.create",["name";"body"]); ("VMPP.create_alert",["name";"data"]); - ] + ] let action_params_zip = [ (* params that should be compressed *) @@ -218,23 +218,23 @@ let zip data = (* todo: remove i/o, make this more efficient *) let zdata = ref "" in Stdext.Pervasiveext.finally (fun ()-> - Stdext.Unixext.atomic_write_to_file tmp_path 0o600 - (fun fd -> - Gzip.compress fd (fun fd -> - let len = String.length data in - let written = Unix.write fd data 0 len in - if written <> len then failwith (Printf.sprintf "zip: wrote only %i bytes of %i" written len) - ) - ); - let fd_in = Unix.openfile tmp_path [ Unix.O_RDONLY] 0o400 in - Stdext.Pervasiveext.finally - (fun ()-> - let 
cin=Unix.in_channel_of_descr fd_in in - let cin_len = in_channel_length cin in - zdata := (String.create cin_len); - for i = 1 to cin_len do !zdata.[i-1] <- input_char cin done; - ) - (fun ()->Unix.close fd_in) + Stdext.Unixext.atomic_write_to_file tmp_path 0o600 + (fun fd -> + Gzip.compress fd (fun fd -> + let len = String.length data in + let written = Unix.write fd data 0 len in + if written <> len then failwith (Printf.sprintf "zip: wrote only %i bytes of %i" written len) + ) + ); + let fd_in = Unix.openfile tmp_path [ Unix.O_RDONLY] 0o400 in + Stdext.Pervasiveext.finally + (fun ()-> + let cin=Unix.in_channel_of_descr fd_in in + let cin_len = in_channel_length cin in + zdata := (String.create cin_len); + for i = 1 to cin_len do !zdata.[i-1] <- input_char cin done; + ) + (fun ()->Unix.close fd_in) ) (fun ()-> Sys.remove tmp_path) ; @@ -246,46 +246,46 @@ let zip data = (* todo: remove i/o, make this more efficient *) (* manual ref getters *) let get_subject_other_config_subject_name __context self = - try - List.assoc - "subject-name" - (DB_Action.Subject.get_other_config ~__context ~self:(Ref.of_string self)) - with e -> - D.debug "couldn't get Subject.other_config.subject-name for ref %s: %s" self (ExnHelper.string_of_exn e); - "" + try + List.assoc + "subject-name" + (DB_Action.Subject.get_other_config ~__context ~self:(Ref.of_string self)) + with e -> + D.debug "couldn't get Subject.other_config.subject-name for ref %s: %s" self (ExnHelper.string_of_exn e); + "" let get_role_name_label __context self = - try - (*Xapi_role.get_name_label ~__context ~self:(Ref.of_string self)*) - (*DB_Action.Role.get_name_label ~__context ~self:(Ref.of_string self)*) - let ps=(Rbac_static.all_static_roles@Rbac_static.all_static_permissions) in - let p=List.find (fun p->Ref.ref_prefix^p.role_uuid=self) ps in - p.role_name_label - with e -> - D.debug "couldn't get Role.name_label for ref %s: %s" self (ExnHelper.string_of_exn e); - "" + try + (*Xapi_role.get_name_label ~__context 
~self:(Ref.of_string self)*) + (*DB_Action.Role.get_name_label ~__context ~self:(Ref.of_string self)*) + let ps=(Rbac_static.all_static_roles@Rbac_static.all_static_permissions) in + let p=List.find (fun p->Ref.ref_prefix^p.role_uuid=self) ps in + p.role_name_label + with e -> + D.debug "couldn't get Role.name_label for ref %s: %s" self (ExnHelper.string_of_exn e); + "" let action_param_ref_getter_fn = - [ (* manual override on ref getters *) - ("subject.destroy",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref)]); - ("subject.remove_from_roles",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref);"role",(fun _ctx _ref->get_role_name_label _ctx _ref)]); - ("subject.add_to_roles",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref);"role",(fun _ctx _ref->get_role_name_label _ctx _ref)]); - ] + [ (* manual override on ref getters *) + ("subject.destroy",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref)]); + ("subject.remove_from_roles",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref);"role",(fun _ctx _ref->get_role_name_label _ctx _ref)]); + ("subject.add_to_roles",["self",(fun _ctx _ref->get_subject_other_config_subject_name _ctx _ref);"role",(fun _ctx _ref->get_role_name_label _ctx _ref)]); + ] (* get a namevalue directly from db, instead from db_cache *) -let get_db_namevalue __context name action _ref = - if List.mem_assoc action action_param_ref_getter_fn - then ( - let params=List.assoc action action_param_ref_getter_fn in - if List.mem_assoc name params then ( - let getter_fn=List.assoc name params in - getter_fn __context _ref - ) - else - "" (* default value empty *) - ) - else - "" (* default value empty *) +let get_db_namevalue __context name action _ref = + if List.mem_assoc action action_param_ref_getter_fn + then ( + let params=List.assoc action action_param_ref_getter_fn in + if List.mem_assoc name params then ( + let getter_fn=List.assoc name 
params in + getter_fn __context _ref + ) + else + "" (* default value empty *) + ) + else + "" (* default value empty *) (* Map selected xapi call arguments into audit sexpr arguments. Not all parameters are mapped into audit log arguments because @@ -302,249 +302,249 @@ let rec sexpr_args_of __context name rpc_value action = false ) in - (* heuristic 1: print descriptive arguments in the xapi call *) - if (List.mem name ["name";"label";"description";"name_label";"name_description";"new_name"]) (* param for any action *) || (is_selected_action_param action_params_whitelist) (* action+param pair *) - then - ( match rpc_value with - | Rpc.String value -> - Some (get_sexpr_arg - name - (if is_selected_action_param action_params_zip - then (zip value) - else value - ) - "" "" - ) - | Rpc.Dict _ -> - Some (SExpr.Node - ( - (SExpr.String name) - ::(SExpr.Node (sexpr_of_parameters __context (action^"."^name) (Some (["__structure"],[rpc_value])))) - ::(SExpr.String "") - ::(SExpr.String "") - ::[] - ) - ) - | _-> (*D.debug "sexpr_args_of:value=%s" (Xml.to_string xml_value);*) - (*None*) - Some (get_sexpr_arg name (Rpc.to_string rpc_value) "" "") - ) - else - (* heuristic 2: print uuid/refs arguments in the xapi call *) - match rpc_value with - | Rpc.String value -> ( - let name_uuid_ref = get_obj_of_ref value in - match name_uuid_ref with - | None -> - if Stdext.Xstringext.String.startswith Ref.ref_prefix value - then (* it's a ref, just not in the db cache *) - Some (get_sexpr_arg name (get_db_namevalue __context name action value) "" value) - else (* ignore values that are not a ref *) - None - | Some(_name_of_ref_value, uuid_of_ref_value, ref_of_ref_value) -> - let name_of_ref_value = (match _name_of_ref_value with|None->(get_db_namevalue __context name action ref_of_ref_value)|Some ""->(get_db_namevalue __context name action ref_of_ref_value)|Some a -> a) in - Some (get_sexpr_arg name name_of_ref_value uuid_of_ref_value ref_of_ref_value) - ) - |_-> None + (* heuristic 
1: print descriptive arguments in the xapi call *) + if (List.mem name ["name";"label";"description";"name_label";"name_description";"new_name"]) (* param for any action *) || (is_selected_action_param action_params_whitelist) (* action+param pair *) + then + ( match rpc_value with + | Rpc.String value -> + Some (get_sexpr_arg + name + (if is_selected_action_param action_params_zip + then (zip value) + else value + ) + "" "" + ) + | Rpc.Dict _ -> + Some (SExpr.Node + ( + (SExpr.String name) + ::(SExpr.Node (sexpr_of_parameters __context (action^"."^name) (Some (["__structure"],[rpc_value])))) + ::(SExpr.String "") + ::(SExpr.String "") + ::[] + ) + ) + | _-> (*D.debug "sexpr_args_of:value=%s" (Xml.to_string xml_value);*) + (*None*) + Some (get_sexpr_arg name (Rpc.to_string rpc_value) "" "") + ) + else + (* heuristic 2: print uuid/refs arguments in the xapi call *) + match rpc_value with + | Rpc.String value -> ( + let name_uuid_ref = get_obj_of_ref value in + match name_uuid_ref with + | None -> + if Stdext.Xstringext.String.startswith Ref.ref_prefix value + then (* it's a ref, just not in the db cache *) + Some (get_sexpr_arg name (get_db_namevalue __context name action value) "" value) + else (* ignore values that are not a ref *) + None + | Some(_name_of_ref_value, uuid_of_ref_value, ref_of_ref_value) -> + let name_of_ref_value = (match _name_of_ref_value with|None->(get_db_namevalue __context name action ref_of_ref_value)|Some ""->(get_db_namevalue __context name action ref_of_ref_value)|Some a -> a) in + Some (get_sexpr_arg name name_of_ref_value uuid_of_ref_value ref_of_ref_value) + ) + |_-> None and -(* Given an action and its parameters, *) -(* return the marshalled uuid params and corresponding names *) -(*let rec*) sexpr_of_parameters __context action args : SExpr.t list = - match args with - | None -> [] - | Some (str_names,rpc_values) -> - begin - if (List.length str_names) <> (List.length rpc_values) - then - ( (* debug mode *) - D.debug "cannot 
marshall arguments for the action %s: name and value list lengths don't match. str_names=[%s], xml_values=[%s]" action ((List.fold_left (fun ss s->ss^s^",") "" str_names)) ((List.fold_left (fun ss s->ss^(Rpc.to_string s)^",") "" rpc_values)); - [] - ) - else - List.fold_right2 - (fun str_name rpc_value (params:SExpr.t list) -> - if str_name = "session_id" - then params (* ignore session_id param *) - else - (* if it is a constructor structure, need to rewrap params *) - if str_name = "__structure" - then match rpc_value with - | Rpc.Dict d -> - let names = List.map fst d in - let values = List.map snd d in - let myparam = sexpr_of_parameters __context action (Some (names,values)) in - myparam@params - | rpc_value -> - (match (sexpr_args_of __context str_name rpc_value action) - with None->params|Some p->p::params - ) - else - (* the expected list of xml arguments *) - begin - (match (sexpr_args_of __context str_name rpc_value action) - with None->params|Some p->p::params - ) - end - ) - str_names - rpc_values - [] - end + (* Given an action and its parameters, *) + (* return the marshalled uuid params and corresponding names *) + (*let rec*) sexpr_of_parameters __context action args : SExpr.t list = + match args with + | None -> [] + | Some (str_names,rpc_values) -> + begin + if (List.length str_names) <> (List.length rpc_values) + then + ( (* debug mode *) + D.debug "cannot marshall arguments for the action %s: name and value list lengths don't match. 
str_names=[%s], xml_values=[%s]" action ((List.fold_left (fun ss s->ss^s^",") "" str_names)) ((List.fold_left (fun ss s->ss^(Rpc.to_string s)^",") "" rpc_values)); + [] + ) + else + List.fold_right2 + (fun str_name rpc_value (params:SExpr.t list) -> + if str_name = "session_id" + then params (* ignore session_id param *) + else + (* if it is a constructor structure, need to rewrap params *) + if str_name = "__structure" + then match rpc_value with + | Rpc.Dict d -> + let names = List.map fst d in + let values = List.map snd d in + let myparam = sexpr_of_parameters __context action (Some (names,values)) in + myparam@params + | rpc_value -> + (match (sexpr_args_of __context str_name rpc_value action) + with None->params|Some p->p::params + ) + else + (* the expected list of xml arguments *) + begin + (match (sexpr_args_of __context str_name rpc_value action) + with None->params|Some p->p::params + ) + end + ) + str_names + rpc_values + [] + end let has_to_audit action = - let has_side_effect action = - not (Stdext.Xstringext.String.has_substr action ".get") (* TODO: a bit slow? *) - in - (!Xapi_globs.log_getter || (has_side_effect action)) - && - not ( (* these actions are ignored *) - List.mem action - [ (* list of _actions_ filtered out from the audit log *) - "session.local_logout"; (* session logout have their own *) - "session.logout"; (* rbac_audit calls, because after logout *) - (* the session is destroyed and no audit is possible*) - "event.next"; (* this action is just spam in the audit log*) - "event.from"; (* spam *) - "http/get_rrd_updates"; (* spam *) - "http/post_remote_db_access"; (* spam *) - "host.tickle_heartbeat"; (* spam *) - ] - ) + let has_side_effect action = + not (Stdext.Xstringext.String.has_substr action ".get") (* TODO: a bit slow? 
*) + in + (!Xapi_globs.log_getter || (has_side_effect action)) + && + not ( (* these actions are ignored *) + List.mem action + [ (* list of _actions_ filtered out from the audit log *) + "session.local_logout"; (* session logout have their own *) + "session.logout"; (* rbac_audit calls, because after logout *) + (* the session is destroyed and no audit is possible*) + "event.next"; (* this action is just spam in the audit log*) + "event.from"; (* spam *) + "http/get_rrd_updates"; (* spam *) + "http/post_remote_db_access"; (* spam *) + "host.tickle_heartbeat"; (* spam *) + ] + ) let wrap fn = - try fn () - with e -> (* never bubble up the error here *) - D.debug "ignoring %s" (ExnHelper.string_of_exn e) + try fn () + with e -> (* never bubble up the error here *) + D.debug "ignoring %s" (ExnHelper.string_of_exn e) (* Extra info required for the WLB audit report. *) let add_dummy_args __context action args = - match args with - | None -> args - | Some (str_names, rpc_values) -> begin - let rec find_self str_names rpc_values = - match str_names, rpc_values with - | ("self"::_), (rpc::_) -> rpc - | (_::str_names'), (_::rpc_values') -> find_self str_names' rpc_values' - | _, _ -> raise Not_found - in - match action with - (* Add VDI info for VBD.destroy *) - | "VBD.destroy" -> begin - try - let vbd = API.ref_VBD_of_rpc (find_self str_names rpc_values) in - let vdi = DB_Action.VBD.get_VDI __context vbd in - Some (str_names@["VDI"], rpc_values@[API.rpc_of_ref_VDI vdi]) - with e -> - D.debug "couldn't get VDI ref for VBD: %s" (ExnHelper.string_of_exn e); - args - end - | _ -> args - end + match args with + | None -> args + | Some (str_names, rpc_values) -> begin + let rec find_self str_names rpc_values = + match str_names, rpc_values with + | ("self"::_), (rpc::_) -> rpc + | (_::str_names'), (_::rpc_values') -> find_self str_names' rpc_values' + | _, _ -> raise Not_found + in + match action with + (* Add VDI info for VBD.destroy *) + | "VBD.destroy" -> begin + try + let 
vbd = API.ref_VBD_of_rpc (find_self str_names rpc_values) in + let vdi = DB_Action.VBD.get_VDI __context vbd in + Some (str_names@["VDI"], rpc_values@[API.rpc_of_ref_VDI vdi]) + with e -> + D.debug "couldn't get VDI ref for VBD: %s" (ExnHelper.string_of_exn e); + args + end + | _ -> args + end let sexpr_of __context session_id allowed_denied ok_error result_error ?args ?sexpr_of_args action permission = - let result_error = - if result_error = "" then result_error else ":"^result_error - in - (*let (params:SExpr.t list) = (string_of_parameters action args) in*) - SExpr.Node ( - SExpr.String (trackid session_id):: + let result_error = + if result_error = "" then result_error else ":"^result_error + in + (*let (params:SExpr.t list) = (string_of_parameters action args) in*) + SExpr.Node ( + SExpr.String (trackid session_id):: SExpr.String (get_subject_identifier __context session_id):: SExpr.String (get_subject_name __context session_id):: - SExpr.String (allowed_denied):: - SExpr.String (ok_error ^ result_error):: + SExpr.String (allowed_denied):: + SExpr.String (ok_error ^ result_error):: SExpr.String (call_type_of action):: - (*SExpr.String (Helper_hostname.get_hostname ())::*) + (*SExpr.String (Helper_hostname.get_hostname ())::*) SExpr.String action:: (SExpr.Node ( - match sexpr_of_args with - | None -> (let args' = add_dummy_args __context action args in sexpr_of_parameters __context action args') - | Some sexpr_of_args -> sexpr_of_args - )):: - [] - ) + match sexpr_of_args with + | None -> (let args' = add_dummy_args __context action args in sexpr_of_parameters __context action args') + | Some sexpr_of_args -> sexpr_of_args + )):: + [] + ) let append_line = Audit.audit let fn_append_to_master_audit_log = ref None let audit_line_of __context session_id allowed_denied ok_error result_error action permission ?args ?sexpr_of_args () = - let _line = - (SExpr.string_of - (sexpr_of __context session_id allowed_denied - ok_error result_error ?args ?sexpr_of_args action 
permission - ) - ) - in - let line = Stdext.Xstringext.String.replace "\n" " " _line in (* no \n in line *) - let line = Stdext.Xstringext.String.replace "\r" " " line in (* no \r in line *) - - let audit_line = append_line "%s" line in - (*D.debug "line=%s, audit_line=%s" line audit_line;*) - match !fn_append_to_master_audit_log with - | None -> () - | Some fn -> fn __context action audit_line + let _line = + (SExpr.string_of + (sexpr_of __context session_id allowed_denied + ok_error result_error ?args ?sexpr_of_args action permission + ) + ) + in + let line = Stdext.Xstringext.String.replace "\n" " " _line in (* no \n in line *) + let line = Stdext.Xstringext.String.replace "\r" " " line in (* no \r in line *) + + let audit_line = append_line "%s" line in + (*D.debug "line=%s, audit_line=%s" line audit_line;*) + match !fn_append_to_master_audit_log with + | None -> () + | Some fn -> fn __context action audit_line let allowed_pre_fn ~__context ~action ?args () = - try - if (has_to_audit action) - (* for now, we only cache arg results for destroy actions *) - && (Stdext.Xstringext.String.has_substr action ".destroy") - then let args' = add_dummy_args __context action args in Some(sexpr_of_parameters __context action args') - else None - with e -> - D.debug "ignoring %s" (ExnHelper.string_of_exn e); - None + try + if (has_to_audit action) + (* for now, we only cache arg results for destroy actions *) + && (Stdext.Xstringext.String.has_substr action ".destroy") + then let args' = add_dummy_args __context action args in Some(sexpr_of_parameters __context action args') + else None + with e -> + D.debug "ignoring %s" (ExnHelper.string_of_exn e); + None let allowed_post_fn_ok ~__context ~session_id ~action ~permission ?sexpr_of_args ?args ?result () = - wrap (fun () -> - if has_to_audit action then - audit_line_of __context session_id "ALLOWED" "OK" "" action permission ?sexpr_of_args ?args () - ) + wrap (fun () -> + if has_to_audit action then + audit_line_of __context 
session_id "ALLOWED" "OK" "" action permission ?sexpr_of_args ?args () + ) let allowed_post_fn_error ~__context ~session_id ~action ~permission ?sexpr_of_args ?args ?error () = - wrap (fun () -> - if has_to_audit action then - let error_str = - match error with - | None -> "" - | Some error -> (ExnHelper.string_of_exn error) - in - audit_line_of __context session_id "ALLOWED" "ERROR" error_str action permission ?sexpr_of_args ?args () - ) - + wrap (fun () -> + if has_to_audit action then + let error_str = + match error with + | None -> "" + | Some error -> (ExnHelper.string_of_exn error) + in + audit_line_of __context session_id "ALLOWED" "ERROR" error_str action permission ?sexpr_of_args ?args () + ) + let denied ~__context ~session_id ~action ~permission ?args () = - wrap (fun () -> - if has_to_audit action then - audit_line_of __context session_id "DENIED" "" "" action permission ?args () - ) + wrap (fun () -> + if has_to_audit action then + audit_line_of __context session_id "DENIED" "" "" action permission ?args () + ) let session_create_or_destroy ~create ~__context ~session_id ~uname = wrap (fun () -> - let session_rec = DB_Action.Session.get_record ~__context ~self:session_id in - let s_is_intrapool = session_rec.API.session_pool in - let s_is_lsu = session_rec.API.session_is_local_superuser in - (* filters out intra-pool logins to avoid spamming the audit log *) - if (not s_is_intrapool) && (not s_is_lsu) then ( - let action = (if create then "session.create" else "session.destroy") in - let originator = session_rec.API.session_originator in - let sexpr_of_args = - (get_sexpr_arg "originator" originator "" ""):: - [] - in - let sexpr_of_args = - if create then - (get_sexpr_arg "uname" (match uname with None->""|Some u->u) "" ""):: - sexpr_of_args - else - sexpr_of_args - in - allowed_post_fn_ok ~__context ~session_id ~action ~sexpr_of_args ~permission:action () - ) - ) + let session_rec = DB_Action.Session.get_record ~__context ~self:session_id in + let 
s_is_intrapool = session_rec.API.session_pool in + let s_is_lsu = session_rec.API.session_is_local_superuser in + (* filters out intra-pool logins to avoid spamming the audit log *) + if (not s_is_intrapool) && (not s_is_lsu) then ( + let action = (if create then "session.create" else "session.destroy") in + let originator = session_rec.API.session_originator in + let sexpr_of_args = + (get_sexpr_arg "originator" originator "" ""):: + [] + in + let sexpr_of_args = + if create then + (get_sexpr_arg "uname" (match uname with None->""|Some u->u) "" ""):: + sexpr_of_args + else + sexpr_of_args + in + allowed_post_fn_ok ~__context ~session_id ~action ~sexpr_of_args ~permission:action () + ) + ) let session_destroy ~__context ~session_id = - session_create_or_destroy ~uname:None ~create:false ~__context ~session_id + session_create_or_destroy ~uname:None ~create:false ~__context ~session_id let session_create ~__context ~session_id ~uname = - session_create_or_destroy ~create:true ~__context ~session_id ~uname + session_create_or_destroy ~create:true ~__context ~session_id ~uname diff --git a/ocaml/idl/ocaml_backend/ref.ml b/ocaml/idl/ocaml_backend/ref.ml index 0cfe159a6b6..81f35aa98bc 100644 --- a/ocaml/idl/ocaml_backend/ref.ml +++ b/ocaml/idl/ocaml_backend/ref.ml @@ -12,13 +12,13 @@ * GNU Lesser General Public License for more details. *) open Stdext - + (** Internally, a reference is simply a string. 
*) type 'a t = string let ref_prefix = "OpaqueRef:" -let make () = +let make () = let uuid = Uuid.string_of_uuid (Uuid.make_uuid ()) in ref_prefix ^ uuid @@ -34,26 +34,26 @@ let of_string x = x (* a dummy reference is a reference of an object which is not in database *) let dummy_sep = '|' let dummy_prefix = "DummyRef:" - + open Xstringext let make_dummy task_name = let uuid = Uuid.string_of_uuid (Uuid.make_uuid ()) in - dummy_prefix ^ String.of_char dummy_sep ^ uuid ^ String.of_char dummy_sep ^ task_name + dummy_prefix ^ String.of_char dummy_sep ^ uuid ^ String.of_char dummy_sep ^ task_name let is_dummy x = String.startswith dummy_prefix x - -let name_of_dummy x = + +let name_of_dummy x = match String.split ~limit:3 dummy_sep x with - | [_;_;name] -> name - | l -> failwith (Printf.sprintf "Ref.name_of_dummy: %s is not a valid dummy reference (%i)" x (List.length l)) + | [_;_;name] -> name + | l -> failwith (Printf.sprintf "Ref.name_of_dummy: %s is not a valid dummy reference (%i)" x (List.length l)) (* we do not show the name when we pretty print the dummy reference *) let pretty_string_of_dummy x = match String.split ~limit:3 dummy_sep x with - | [_;uuid;_] -> dummy_prefix ^ uuid - | l -> failwith (Printf.sprintf "Ref.pretty_string_of_dummy: %s is not a valid dummy reference (%i)" x (List.length l)) + | [_;uuid;_] -> dummy_prefix ^ uuid + | l -> failwith (Printf.sprintf "Ref.pretty_string_of_dummy: %s is not a valid dummy reference (%i)" x (List.length l)) let really_pretty_and_small x = let s, prelen, c = @@ -61,7 +61,7 @@ let really_pretty_and_small x = (pretty_string_of_dummy x, String.length dummy_prefix, 'D') else (string_of x, String.length ref_prefix, 'R') - in + in try let r = String.create 14 in r.[0] <- c; r.[1] <- ':'; diff --git a/ocaml/idl/ocaml_backend/server_helpers.ml b/ocaml/idl/ocaml_backend/server_helpers.ml index be9526433d4..1626da8f2d9 100644 --- a/ocaml/idl/ocaml_backend/server_helpers.ml +++ b/ocaml/idl/ocaml_backend/server_helpers.ml @@ 
-25,7 +25,7 @@ let my_assoc fld assoc_list = try List.assoc fld assoc_list with - Not_found -> raise (Dispatcher_FieldNotFound fld) + Not_found -> raise (Dispatcher_FieldNotFound fld) exception Nth (* should never be thrown externally *) let rec nth n l = @@ -42,74 +42,74 @@ let supress_printing_for_these_messages : (string,unit) Hashtbl.t = let tbl = Hashtbl.create 20 in List.iter (fun k -> Hashtbl.replace tbl k ()) ["host.tickle_heartbeat"; "session.login_with_password"; "session.logout"; "session.local_logout"; "session.slave_local_login"; "session.slave_local_login_with_password"]; tbl - -let is_async x = + +let is_async x = String.length x > async_length && (String.sub x 0 async_length = async_wire_name) -let remove_async_prefix x = +let remove_async_prefix x = String.sub x async_length (String.length x - async_length) let unknown_rpc_failure func = - API.response_of_failure Api_errors.message_method_unknown [func] + API.response_of_failure Api_errors.message_method_unknown [func] let parameter_count_mismatch_failure func expected received = - API.response_of_failure Api_errors.message_parameter_count_mismatch [func; expected; received] + API.response_of_failure Api_errors.message_parameter_count_mismatch [func; expected; received] (* Execute fn f in specified __context, marshalling result with "marshaller". If has_task is set then __context has a real task in it that has to be completed. *) let exec ?marshaller ?f_forward ~__context f = - (* NB: - 1. If we are a slave we process the call locally assuming the locks have - already been taken by the master - 2. If we are the master, locks are only necessary for the potentially-forwarded - (ie side-effecting) operations and not things like the database layer *) + (* NB: + 1. If we are a slave we process the call locally assuming the locks have + already been taken by the master + 2. 
If we are the master, locks are only necessary for the potentially-forwarded + (ie side-effecting) operations and not things like the database layer *) try - let result = - if not(Pool_role.is_master ()) - then f ~__context (* slaves process everything locally *) - else match f_forward with - | None -> - (* this operation cannot be forwarded (eg database lookup); do it now *) - f ~__context - | Some forward -> - (* use the forwarding layer (NB this might make a local call ultimately) *) - forward ~local_fn:f ~__context - in - begin match marshaller with - | None -> TaskHelper.complete ~__context None - | Some fn -> TaskHelper.complete ~__context (Some (fn result)) - end; - result - with - | Api_errors.Server_error (a,b) as e when a = Api_errors.task_cancelled -> - Backtrace.is_important e; - TaskHelper.cancel ~__context; - raise e - | e -> - Backtrace.is_important e; - TaskHelper.failed ~__context e; - raise e + let result = + if not(Pool_role.is_master ()) + then f ~__context (* slaves process everything locally *) + else match f_forward with + | None -> + (* this operation cannot be forwarded (eg database lookup); do it now *) + f ~__context + | Some forward -> + (* use the forwarding layer (NB this might make a local call ultimately) *) + forward ~local_fn:f ~__context + in + begin match marshaller with + | None -> TaskHelper.complete ~__context None + | Some fn -> TaskHelper.complete ~__context (Some (fn result)) + end; + result + with + | Api_errors.Server_error (a,b) as e when a = Api_errors.task_cancelled -> + Backtrace.is_important e; + TaskHelper.cancel ~__context; + raise e + | e -> + Backtrace.is_important e; + TaskHelper.failed ~__context e; + raise e (** WARNING: the context is destroyed when execution is finished if the task is not forwarded, in database and not called asynchronous. *) -(* FIXME: This function should not be used for external call : we should add a proper .mli file to hide it. 
*) +(* FIXME: This function should not be used for external call : we should add a proper .mli file to hide it. *) let exec_with_context ~__context ?marshaller ?f_forward ?(called_async=false) f = - Locking_helpers.Thread_state.with_named_thread (Context.get_task_name __context) (Context.get_task_id __context) - (fun () -> - Debug.with_thread_associated (Context.string_of_task __context) - (fun () -> - finally - (fun () -> - (* CP-982: promote tracking debug line to info status *) - if called_async then info "spawning a new thread to handle the current task%s" (Context.trackid ~with_brackets:true ~prefix:" " __context); - exec ?marshaller ?f_forward ~__context f) - (fun () -> - if not called_async - then Context.destroy __context - (* else debug "nothing more to process for this thread" *) - ) - ) - () - ) + Locking_helpers.Thread_state.with_named_thread (Context.get_task_name __context) (Context.get_task_id __context) + (fun () -> + Debug.with_thread_associated (Context.string_of_task __context) + (fun () -> + finally + (fun () -> + (* CP-982: promote tracking debug line to info status *) + if called_async then info "spawning a new thread to handle the current task%s" (Context.trackid ~with_brackets:true ~prefix:" " __context); + exec ?marshaller ?f_forward ~__context f) + (fun () -> + if not called_async + then Context.destroy __context + (* else debug "nothing more to process for this thread" *) + ) + ) + () + ) let dispatch_exn_wrapper f = try @@ -119,40 +119,40 @@ let dispatch_exn_wrapper f = let do_dispatch ?session_id ?forward_op ?self called_async supports_async called_fn_name op_fn marshaller_fn fd http_req label generate_task_for = - if (called_async && (not supports_async)) + if (called_async && (not supports_async)) then API.response_of_fault ("No async mode for this operation (rpc: "^called_fn_name^")") - else + else let __context = Context.of_http_req ?session_id ~generate_task_for ~supports_async ~label ~http_req ~fd in if called_async - then begin - 
(* Fork thread in which to execute async call *) - ignore (Thread.create - (fun () -> - exec_with_context ~__context ~called_async ?f_forward:forward_op ~marshaller:marshaller_fn op_fn) - ()); - (* Return task id immediately *) - Rpc.success (API.rpc_of_ref_task (Context.get_task_id __context)) - end else - let result = - exec_with_context ~__context ~called_async ?f_forward:forward_op ~marshaller:marshaller_fn op_fn - in - Rpc.success (marshaller_fn result) + then begin + (* Fork thread in which to execute async call *) + ignore (Thread.create + (fun () -> + exec_with_context ~__context ~called_async ?f_forward:forward_op ~marshaller:marshaller_fn op_fn) + ()); + (* Return task id immediately *) + Rpc.success (API.rpc_of_ref_task (Context.get_task_id __context)) + end else + let result = + exec_with_context ~__context ~called_async ?f_forward:forward_op ~marshaller:marshaller_fn op_fn + in + Rpc.success (marshaller_fn result) let exec_with_new_task ?http_other_config ?quiet ?subtask_of ?session_id ?task_in_database ?task_description ?origin task_name f = - exec_with_context - ~__context:(Context.make ?http_other_config ?quiet ?subtask_of ?session_id ?task_in_database ?task_description ?origin task_name) + exec_with_context + ~__context:(Context.make ?http_other_config ?quiet ?subtask_of ?session_id ?task_in_database ?task_description ?origin task_name) (fun ~__context -> f __context) - + let exec_with_forwarded_task ?http_other_config ?session_id ?origin task_id f = exec_with_context ~__context:(Context.from_forwarded_task ?http_other_config ?session_id ?origin task_id) - (fun ~__context -> f __context) + (fun ~__context -> f __context) let exec_with_subtask ~__context ?task_in_database ?task_description task_name f = - let subtask_of = Context.get_task_id __context in - let session_id = try Some (Context.get_session_id __context) with _ -> None in - let new_context = Context.make ~subtask_of ?session_id ?task_in_database ?task_description task_name in - 
exec_with_context ~__context:new_context f + let subtask_of = Context.get_task_id __context in + let session_id = try Some (Context.get_session_id __context) with _ -> None in + let new_context = Context.make ~subtask_of ?session_id ?task_in_database ?task_description task_name in + exec_with_context ~__context:new_context f let forward_extension ~__context rbac call = rbac __context (fun () -> Xapi_extensions.call_extension call) diff --git a/ocaml/idl/ocaml_backend/session_check.ml b/ocaml/idl/ocaml_backend/session_check.ml index 4d7a29c8328..668e0916c2d 100644 --- a/ocaml/idl/ocaml_backend/session_check.ml +++ b/ocaml/idl/ocaml_backend/session_check.ml @@ -23,44 +23,44 @@ open D let check_local_session_hook = ref None let is_local_session __context session_id = default false - (may (fun f -> f ~__context ~session_id) !check_local_session_hook) + (may (fun f -> f ~__context ~session_id) !check_local_session_hook) (* intra_pool_only is true iff the call that's invoking this check can only be called from host<->host intra-pool communication *) let check ~intra_pool_only ~session_id = Server_helpers.exec_with_new_task ~quiet:true "session_check" (fun __context -> (* First see if this is a "local" session *) - if is_local_session __context session_id + if is_local_session __context session_id then () (* debug "Session is in the local database" *) else - (* Assuming we're in master mode *) - (try - let pool = Db_actions.DB_Action.Session.get_pool ~__context ~self:session_id in - - (* If the session is not a pool login, but this call is only supported for pool logins then fail *) - if (not pool) && intra_pool_only then - raise (Api_errors.Server_error (Api_errors.internal_error,["Internal API call attempted with non-pool (external) session"])); - - (* If the session isn't a pool login, and we're a slave, fail *) - if (not pool) && (not (Pool_role.is_master ())) then raise Non_master_login_on_slave; - - if (Pool_role.is_master ()) then - 
Db_actions.DB_Action.Session.set_last_active ~__context ~self:session_id - ~value:(Stdext.Date.of_float (Unix.time())) - with - | Db_exn.DBCache_NotFound (_, tblname, reference) -> - debug "Session check failed: missing reference; tbl = %s, ref = %s" tblname reference; - raise (Api_errors.Server_error (Api_errors.session_invalid,[reference])) - | Non_master_login_on_slave -> - let master = Db_actions.DB_Action.Pool.get_master ~__context ~self: - (List.hd (Db_actions.DB_Action.Pool.get_all ~__context)) in - let address = Db_actions.DB_Action.Host.get_address ~__context ~self:master in - raise (Api_errors.Server_error (Api_errors.host_is_slave,[address])); - | Api_errors.Server_error(code, params) as e -> - debug "Session check failed: unexpected exception %s %s" code (String.concat " " params); - raise e - | exn -> - debug "Session check failed: unexpected exception '%s'" (Printexc.to_string exn); - raise (Api_errors.Server_error (Api_errors.session_invalid,[Ref.string_of session_id])) - ) + (* Assuming we're in master mode *) + (try + let pool = Db_actions.DB_Action.Session.get_pool ~__context ~self:session_id in + + (* If the session is not a pool login, but this call is only supported for pool logins then fail *) + if (not pool) && intra_pool_only then + raise (Api_errors.Server_error (Api_errors.internal_error,["Internal API call attempted with non-pool (external) session"])); + + (* If the session isn't a pool login, and we're a slave, fail *) + if (not pool) && (not (Pool_role.is_master ())) then raise Non_master_login_on_slave; + + if (Pool_role.is_master ()) then + Db_actions.DB_Action.Session.set_last_active ~__context ~self:session_id + ~value:(Stdext.Date.of_float (Unix.time())) + with + | Db_exn.DBCache_NotFound (_, tblname, reference) -> + debug "Session check failed: missing reference; tbl = %s, ref = %s" tblname reference; + raise (Api_errors.Server_error (Api_errors.session_invalid,[reference])) + | Non_master_login_on_slave -> + let master = 
Db_actions.DB_Action.Pool.get_master ~__context ~self: + (List.hd (Db_actions.DB_Action.Pool.get_all ~__context)) in + let address = Db_actions.DB_Action.Host.get_address ~__context ~self:master in + raise (Api_errors.Server_error (Api_errors.host_is_slave,[address])); + | Api_errors.Server_error(code, params) as e -> + debug "Session check failed: unexpected exception %s %s" code (String.concat " " params); + raise e + | exn -> + debug "Session check failed: unexpected exception '%s'" (Printexc.to_string exn); + raise (Api_errors.Server_error (Api_errors.session_invalid,[Ref.string_of session_id])) + ) ) diff --git a/ocaml/idl/ocaml_backend/taskHelper.ml b/ocaml/idl/ocaml_backend/taskHelper.ml index f9764e98368..4ac29bc6257 100644 --- a/ocaml/idl/ocaml_backend/taskHelper.ml +++ b/ocaml/idl/ocaml_backend/taskHelper.ml @@ -23,70 +23,70 @@ type t = API.ref_task let now () = Date.of_float (Unix.time ()) (* creates a new task *) -let make ~__context ~http_other_config ?(description="") ?session_id ?subtask_of label : (t * t Uuid.t) = +let make ~__context ~http_other_config ?(description="") ?session_id ?subtask_of label : (t * t Uuid.t) = let uuid = Uuid.make_uuid () in let uuid_str = Uuid.string_of_uuid uuid in let ref = Ref.make () in - (* we store in database only parent/child relationship between real tasks *) + (* we store in database only parent/child relationship between real tasks *) let subtaskid_of = match subtask_of with | Some task_id when not (Ref.is_dummy task_id) -> task_id | _e -> Ref.null in let (_ : unit) = Db_actions.DB_Action.Task.create - ~ref - ~__context - ~created:(Date.of_float (Unix.time())) - ~finished:(Date.of_float 0.0) - ~current_operations:[] - ~_type:"" - ~session:(Pervasiveext.default Ref.null session_id) - ~resident_on:(!Xapi_globs.localhost_ref) - ~status:`pending - ~result:"" ~progress:0. 
- ~error_info:[] - ~allowed_operations:[] - ~name_description:description ~name_label:label - ~stunnelpid:(-1L) ~forwarded:false ~forwarded_to:Ref.null - ~uuid:uuid_str ~externalpid:(-1L) - ~subtask_of:subtaskid_of - ~other_config:(List.map (fun (k, v) -> "http:" ^ k, v) http_other_config) - ~backtrace:(Sexplib.Sexp.to_string (Backtrace.(sexp_of_t empty))) in + ~ref + ~__context + ~created:(Date.of_float (Unix.time())) + ~finished:(Date.of_float 0.0) + ~current_operations:[] + ~_type:"" + ~session:(Pervasiveext.default Ref.null session_id) + ~resident_on:(!Xapi_globs.localhost_ref) + ~status:`pending + ~result:"" ~progress:0. + ~error_info:[] + ~allowed_operations:[] + ~name_description:description ~name_label:label + ~stunnelpid:(-1L) ~forwarded:false ~forwarded_to:Ref.null + ~uuid:uuid_str ~externalpid:(-1L) + ~subtask_of:subtaskid_of + ~other_config:(List.map (fun (k, v) -> "http:" ^ k, v) http_other_config) + ~backtrace:(Sexplib.Sexp.to_string (Backtrace.(sexp_of_t empty))) in ref, uuid let rbac_assert_permission_fn = ref None (* required to break dep-cycle with rbac.ml *) let assert_can_destroy ?(ok_if_no_session_in_context=false) ~__context task_id = let assert_permission_task_destroy_any () = (match !rbac_assert_permission_fn with - | None -> failwith "no taskhelper.rbac_assert_permission_fn" (* shouldn't ever happen *) - | Some fn -> fn ~__context ~permission:Rbac_static.permission_task_destroy_any + | None -> failwith "no taskhelper.rbac_assert_permission_fn" (* shouldn't ever happen *) + | Some fn -> fn ~__context ~permission:Rbac_static.permission_task_destroy_any ) in let context_session = try Some (Context.get_session_id __context) with Failure _ -> None in - match context_session with + match context_session with | None -> (* no session in context *) if ok_if_no_session_in_context then () (* only internal xapi calls (eg db_gc) have no session in contexts, so rbac can be ignored *) else assert_permission_task_destroy_any () (* will raise 
"no-session-in-context" exception *) | Some context_session -> - let is_own_task = - try - let task_session = Db_actions.DB_Action.Task.get_session ~__context ~self:task_id in - let task_auth_user_sid = Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:task_session in - let context_auth_user_sid = Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:context_session in - (*debug "task_auth_user_sid=%s,context_auth_user_sid=%s" task_auth_user_sid context_auth_user_sid;*) - (task_auth_user_sid = context_auth_user_sid) - with e -> - debug "assert_can_destroy: %s" (ExnHelper.string_of_exn e); - false - in - (*debug "IS_OWN_TASK=%b" is_own_task;*) - (* 1. any subject can destroy its own tasks *) - if not is_own_task then - (* 2. if not own task, has this session permission to destroy any tasks? *) - assert_permission_task_destroy_any () + let is_own_task = + try + let task_session = Db_actions.DB_Action.Task.get_session ~__context ~self:task_id in + let task_auth_user_sid = Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:task_session in + let context_auth_user_sid = Db_actions.DB_Action.Session.get_auth_user_sid ~__context ~self:context_session in + (*debug "task_auth_user_sid=%s,context_auth_user_sid=%s" task_auth_user_sid context_auth_user_sid;*) + (task_auth_user_sid = context_auth_user_sid) + with e -> + debug "assert_can_destroy: %s" (ExnHelper.string_of_exn e); + false + in + (*debug "IS_OWN_TASK=%b" is_own_task;*) + (* 1. any subject can destroy its own tasks *) + if not is_own_task then + (* 2. if not own task, has this session permission to destroy any tasks? 
*) + assert_permission_task_destroy_any () let destroy ~__context task_id = - if not (Ref.is_dummy task_id) + if not (Ref.is_dummy task_id) then ( assert_can_destroy ~ok_if_no_session_in_context:true ~__context task_id; Db_actions.DB_Action.Task.destroy ~__context ~self:task_id @@ -98,14 +98,14 @@ let destroy ~__context task_id = *) (* set the ref fn to break the cyclic dependency *) -let init () = +let init () = Context.__get_task_name := (fun ~__context self -> Db_actions.DB_Action.Task.get_name_label ~__context ~self); Context.__destroy_task := destroy; Context.__make_task := make let operate_on_db_task ~__context f = - if Context.task_in_database __context - then f (Context.get_task_id __context) + if Context.task_in_database __context + then f (Context.get_task_id __context) let set_description ~__context value = operate_on_db_task ~__context @@ -114,8 +114,8 @@ let set_description ~__context value = let add_to_other_config ~__context key value = operate_on_db_task ~__context (fun self -> - Db_actions.DB_Action.Task.remove_from_other_config ~__context ~self ~key; - Db_actions.DB_Action.Task.add_to_other_config ~__context ~self ~key ~value) + Db_actions.DB_Action.Task.remove_from_other_config ~__context ~self ~key; + Db_actions.DB_Action.Task.add_to_other_config ~__context ~self ~key ~value) let set_progress ~__context value = operate_on_db_task ~__context @@ -128,138 +128,138 @@ let set_external_pid ~__context pid = let clear_external_pid ~__context = set_external_pid ~__context (-1) let set_result_on_task ~__context task_id result = - match result with - | None -> () - | Some x -> Db_actions.DB_Action.Task.set_result ~__context ~self:task_id ~value:(Xmlrpc.to_string x) + match result with + | None -> () + | Some x -> Db_actions.DB_Action.Task.set_result ~__context ~self:task_id ~value:(Xmlrpc.to_string x) (** Only set the result without completing the task. 
Useful for vm import *) -let set_result ~__context result = +let set_result ~__context result = operate_on_db_task ~__context (fun t -> set_result_on_task ~__context t result) let status_to_string = function - | `pending -> "pending" - | `success -> "success" - | `failure -> "failure" - | `cancelling -> "cancelling" - | `cancelled -> "cancelled" + | `pending -> "pending" + | `success -> "success" + | `failure -> "failure" + | `cancelling -> "cancelling" + | `cancelled -> "cancelled" let status_is_completed task_status = - (task_status=`success) || (task_status=`failure) || (task_status=`cancelled) + (task_status=`success) || (task_status=`failure) || (task_status=`cancelled) let complete ~__context result = operate_on_db_task ~__context (fun self -> - let status = Db_actions.DB_Action.Task.get_status ~__context ~self in - if status = `pending then begin - Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[]; - Db_actions.DB_Action.Task.set_finished ~__context ~self ~value:(Date.of_float (Unix.time())); - Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; - set_result_on_task ~__context self result; - Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`success - end else - debug "the status of %s is: %s; cannot set it to `success" - (Ref.really_pretty_and_small self) - (status_to_string status)) + let status = Db_actions.DB_Action.Task.get_status ~__context ~self in + if status = `pending then begin + Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[]; + Db_actions.DB_Action.Task.set_finished ~__context ~self ~value:(Date.of_float (Unix.time())); + Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; + set_result_on_task ~__context self result; + Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`success + end else + debug "the status of %s is: %s; cannot set it to `success" + (Ref.really_pretty_and_small self) + (status_to_string status)) let set_cancellable 
~__context = operate_on_db_task ~__context (fun self -> Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[`cancel]) -let set_not_cancellable ~__context = +let set_not_cancellable ~__context = operate_on_db_task ~__context (fun self -> Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[]) let is_cancelling ~__context = - Context.task_in_database __context && + Context.task_in_database __context && let l = Db_actions.DB_Action.Task.get_current_operations ~__context ~self:(Context.get_task_id __context) in - List.exists (fun (_,x) -> x=`cancel) l + List.exists (fun (_,x) -> x=`cancel) l let raise_cancelled ~__context = - let task_id = Context.get_task_id __context in - raise Api_errors.(Server_error (task_cancelled, [Ref.string_of task_id])) + let task_id = Context.get_task_id __context in + raise Api_errors.(Server_error (task_cancelled, [Ref.string_of task_id])) let exn_if_cancelling ~__context = - if is_cancelling ~__context - then raise_cancelled ~__context + if is_cancelling ~__context + then raise_cancelled ~__context let cancel ~__context = operate_on_db_task ~__context (fun self -> - assert_can_destroy ~__context self; - let status = Db_actions.DB_Action.Task.get_status ~__context ~self in - if status = `pending then begin - Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; - Db_actions.DB_Action.Task.set_finished ~__context ~self ~value:(Date.of_float (Unix.time())); - Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`cancelled; - Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[] - end else - debug "the status of %s is %s; cannot set it to `cancelled" - (Ref.really_pretty_and_small self) - (status_to_string status)) + assert_can_destroy ~__context self; + let status = Db_actions.DB_Action.Task.get_status ~__context ~self in + if status = `pending then begin + Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; + 
Db_actions.DB_Action.Task.set_finished ~__context ~self ~value:(Date.of_float (Unix.time())); + Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`cancelled; + Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[] + end else + debug "the status of %s is %s; cannot set it to `cancelled" + (Ref.really_pretty_and_small self) + (status_to_string status)) let failed ~__context exn = let code, params = ExnHelper.error_of_exn exn in operate_on_db_task ~__context (fun self -> - let status = Db_actions.DB_Action.Task.get_status ~__context ~self in - if status = `pending then begin - Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; - Db_actions.DB_Action.Task.set_error_info ~__context ~self ~value:(code::params); - Db_actions.DB_Action.Task.set_backtrace ~__context ~self ~value:(Sexplib.Sexp.to_string (Backtrace.(sexp_of_t (get exn)))); - Db_actions.DB_Action.Task.set_finished ~__context ~self ~value:(Date.of_float (Unix.time())); - Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[]; - if code=Api_errors.task_cancelled - then Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`cancelled - else Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`failure - end else - debug "the status of %s is %s; cannot set it to %s" - (Ref.really_pretty_and_small self) - (status_to_string status) - (if code=Api_errors.task_cancelled then "`cancelled" else "`failure")) - - -type id = - | Sm of string - | Xenops of string * string (* queue name * id *) + let status = Db_actions.DB_Action.Task.get_status ~__context ~self in + if status = `pending then begin + Db_actions.DB_Action.Task.set_progress ~__context ~self ~value:1.; + Db_actions.DB_Action.Task.set_error_info ~__context ~self ~value:(code::params); + Db_actions.DB_Action.Task.set_backtrace ~__context ~self ~value:(Sexplib.Sexp.to_string (Backtrace.(sexp_of_t (get exn)))); + Db_actions.DB_Action.Task.set_finished ~__context ~self 
~value:(Date.of_float (Unix.time())); + Db_actions.DB_Action.Task.set_allowed_operations ~__context ~self ~value:[]; + if code=Api_errors.task_cancelled + then Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`cancelled + else Db_actions.DB_Action.Task.set_status ~__context ~self ~value:`failure + end else + debug "the status of %s is %s; cannot set it to %s" + (Ref.really_pretty_and_small self) + (status_to_string status) + (if code=Api_errors.task_cancelled then "`cancelled" else "`failure")) + + +type id = + | Sm of string + | Xenops of string * string (* queue name * id *) let id_to_task_tbl : (id, API.ref_task) Hashtbl.t = Hashtbl.create 10 let task_to_id_tbl : (API.ref_task, id) Hashtbl.t = Hashtbl.create 10 let task_tbl_m = Mutex.create () let id_to_task_exn id = - Mutex.execute task_tbl_m - (fun () -> - Hashtbl.find id_to_task_tbl id - ) + Mutex.execute task_tbl_m + (fun () -> + Hashtbl.find id_to_task_tbl id + ) let task_to_id_exn task = - Mutex.execute task_tbl_m - (fun () -> - Hashtbl.find task_to_id_tbl task - ) + Mutex.execute task_tbl_m + (fun () -> + Hashtbl.find task_to_id_tbl task + ) let register_task __context id = - let task = Context.get_task_id __context in - Mutex.execute task_tbl_m - (fun () -> - Hashtbl.replace id_to_task_tbl id task; - Hashtbl.replace task_to_id_tbl task id; - ); - (* Since we've bound the XenAPI Task to the xenopsd Task, and the xenopsd Task - is cancellable, mark the XenAPI Task as cancellable too. *) - set_cancellable ~__context; - () + let task = Context.get_task_id __context in + Mutex.execute task_tbl_m + (fun () -> + Hashtbl.replace id_to_task_tbl id task; + Hashtbl.replace task_to_id_tbl task id; + ); + (* Since we've bound the XenAPI Task to the xenopsd Task, and the xenopsd Task + is cancellable, mark the XenAPI Task as cancellable too. 
*) + set_cancellable ~__context; + () let unregister_task __context id = - (* The rest of the XenAPI Task won't be cancellable *) - set_not_cancellable ~__context; - Mutex.execute task_tbl_m - (fun () -> - let task = Hashtbl.find id_to_task_tbl id in - Hashtbl.remove id_to_task_tbl id; - Hashtbl.remove task_to_id_tbl task; - ); - () + (* The rest of the XenAPI Task won't be cancellable *) + set_not_cancellable ~__context; + Mutex.execute task_tbl_m + (fun () -> + let task = Hashtbl.find id_to_task_tbl id in + Hashtbl.remove id_to_task_tbl id; + Hashtbl.remove task_to_id_tbl task; + ); + () diff --git a/ocaml/idl/ocaml_backend/tasks.ml b/ocaml/idl/ocaml_backend/tasks.ml index e43b620bf9a..bf3336c0a09 100644 --- a/ocaml/idl/ocaml_backend/tasks.ml +++ b/ocaml/idl/ocaml_backend/tasks.ml @@ -18,31 +18,31 @@ module TaskSet = Set.Make(struct type t = API.ref_task let compare = compare end (* Return once none of the tasks have a `pending status. *) let wait_for_all ~rpc ~session_id ~tasks = - let classes = List.map - (fun task -> Printf.sprintf "task/%s" (Ref.string_of task)) - tasks - in - let timeout = 5.0 in - let rec wait ~token ~task_set = - if TaskSet.is_empty task_set then () - else begin - let open Event_types in - let event_from_rpc = Client.Event.from ~rpc ~session_id ~classes ~token ~timeout in - let event_from = Event_types.event_from_of_rpc event_from_rpc in - let records = List.map Event_helper.record_of_event event_from.events in - (* If any records indicate that a task is no longer pending, remove that task from the set. 
*) - let pending_task_set = List.fold_left (fun task_set' record -> - match record with - | Event_helper.Task (t, Some t_rec) -> - if (TaskSet.mem t task_set') && (t_rec.API.task_status <> `pending) then - TaskSet.remove t task_set' - else - task_set' - | _ -> task_set') task_set records in - wait ~token:(event_from.Event_types.token) ~task_set:pending_task_set - end - in - let token = "" in - let task_set = List.fold_left (fun task_set' task -> TaskSet.add task task_set') TaskSet.empty tasks in - wait ~token ~task_set + let classes = List.map + (fun task -> Printf.sprintf "task/%s" (Ref.string_of task)) + tasks + in + let timeout = 5.0 in + let rec wait ~token ~task_set = + if TaskSet.is_empty task_set then () + else begin + let open Event_types in + let event_from_rpc = Client.Event.from ~rpc ~session_id ~classes ~token ~timeout in + let event_from = Event_types.event_from_of_rpc event_from_rpc in + let records = List.map Event_helper.record_of_event event_from.events in + (* If any records indicate that a task is no longer pending, remove that task from the set. *) + let pending_task_set = List.fold_left (fun task_set' record -> + match record with + | Event_helper.Task (t, Some t_rec) -> + if (TaskSet.mem t task_set') && (t_rec.API.task_status <> `pending) then + TaskSet.remove t task_set' + else + task_set' + | _ -> task_set') task_set records in + wait ~token:(event_from.Event_types.token) ~task_set:pending_task_set + end + in + let token = "" in + let task_set = List.fold_left (fun task_set' task -> TaskSet.add task task_set') TaskSet.empty tasks in + wait ~token ~task_set diff --git a/ocaml/license/daily_license_check.ml b/ocaml/license/daily_license_check.ml index 514c151729b..12bbc6e4a8c 100644 --- a/ocaml/license/daily_license_check.ml +++ b/ocaml/license/daily_license_check.ml @@ -6,50 +6,50 @@ let seconds_per_day = 3600. *. 24. let seconds_per_30_days = 30. *. seconds_per_day let days_to_expiry now expiry = - expiry /. seconds_per_day -. now /. 
seconds_per_day + expiry /. seconds_per_day -. now /. seconds_per_day let get_hosts all_license_params threshold = - List.fold_left (fun acc (name_label, license_params) -> - let expiry = List.assoc "expiry" license_params in - let expiry = Stdext.Date.(to_float (of_string expiry)) in - if expiry < threshold then - name_label :: acc - else - acc - ) [] all_license_params + List.fold_left (fun acc (name_label, license_params) -> + let expiry = List.assoc "expiry" license_params in + let expiry = Stdext.Date.(to_float (of_string expiry)) in + if expiry < threshold then + name_label :: acc + else + acc + ) [] all_license_params let check_license now pool_license_state all_license_params = - let expiry = List.assoc "expiry" pool_license_state in - let expiry = Stdext.Date.(to_float (of_string expiry)) in - let days = days_to_expiry now expiry in - if days <= 0. then - Expired (get_hosts all_license_params now) - else if days <= 30. then - Expiring (get_hosts all_license_params (now +. seconds_per_30_days)) - else - Good + let expiry = List.assoc "expiry" pool_license_state in + let expiry = Stdext.Date.(to_float (of_string expiry)) in + let days = days_to_expiry now expiry in + if days <= 0. then + Expired (get_hosts all_license_params now) + else if days <= 30. then + Expiring (get_hosts all_license_params (now +. 
seconds_per_30_days)) + else + Good let get_info_from_db rpc session = - let pool = List.hd (XenAPI.Pool.get_all rpc session) in - let pool_license_state = XenAPI.Pool.get_license_state rpc session pool in - let hosts = XenAPI.Host.get_all_records rpc session in - let all_license_params = List.map (fun (_, host) -> - host.API.host_name_label, host.API.host_license_params) hosts in - pool, pool_license_state, all_license_params + let pool = List.hd (XenAPI.Pool.get_all rpc session) in + let pool_license_state = XenAPI.Pool.get_license_state rpc session pool in + let hosts = XenAPI.Host.get_all_records rpc session in + let all_license_params = List.map (fun (_, host) -> + host.API.host_name_label, host.API.host_license_params) hosts in + pool, pool_license_state, all_license_params let execute rpc session pool result = - let send_alert session pool msg body = - let (name, priority) = msg in - let pool_uuid = XenAPI.Pool.get_uuid rpc session pool in - ignore (XenAPI.Message.create rpc session name priority `Pool pool_uuid body) - in - match result with - | Good -> () - | Expiring hosts -> - let body = Printf.sprintf "The licenses of the following hosts are about to expire: %s" - (String.concat ", " hosts) in - send_alert session pool Api_messages.license_expires_soon body - | Expired hosts -> - let body = Printf.sprintf "The licenses of the following hosts have expired: %s" - (String.concat ", " hosts) in - send_alert session pool Api_messages.license_expired body + let send_alert session pool msg body = + let (name, priority) = msg in + let pool_uuid = XenAPI.Pool.get_uuid rpc session pool in + ignore (XenAPI.Message.create rpc session name priority `Pool pool_uuid body) + in + match result with + | Good -> () + | Expiring hosts -> + let body = Printf.sprintf "The licenses of the following hosts are about to expire: %s" + (String.concat ", " hosts) in + send_alert session pool Api_messages.license_expires_soon body + | Expired hosts -> + let body = Printf.sprintf 
"The licenses of the following hosts have expired: %s" + (String.concat ", " hosts) in + send_alert session pool Api_messages.license_expired body diff --git a/ocaml/license/daily_license_check_main.ml b/ocaml/license/daily_license_check_main.ml index dd183ffece6..a9506168997 100644 --- a/ocaml/license/daily_license_check_main.ml +++ b/ocaml/license/daily_license_check_main.ml @@ -1,27 +1,27 @@ module XenAPI = Client.Client let rpc xml = - let open Xmlrpc_client in - XMLRPC_protocol.rpc - ~srcstr:"daily-license-check" - ~dststr:"xapi" - ~transport:(Unix "/var/xapi/xapi") - ~http:(xmlrpc ~version:"1.0" "/") - xml + let open Xmlrpc_client in + XMLRPC_protocol.rpc + ~srcstr:"daily-license-check" + ~dststr:"xapi" + ~transport:(Unix "/var/xapi/xapi") + ~http:(xmlrpc ~version:"1.0" "/") + xml let _ = - let session = XenAPI.Session.login_with_password - ~rpc:rpc - ~uname:"" - ~pwd:"" - ~version:Xapi_globs.api_version_string - ~originator:"daily-license-check" - in - Stdext.Pervasiveext.finally - (fun () -> - let now = Unix.time () in - let pool, pool_license_state, all_license_params = Daily_license_check.get_info_from_db rpc session in - let result = Daily_license_check.check_license now pool_license_state all_license_params in - Daily_license_check.execute rpc session pool result - ) - (fun () -> XenAPI.Session.logout rpc session) + let session = XenAPI.Session.login_with_password + ~rpc:rpc + ~uname:"" + ~pwd:"" + ~version:Xapi_globs.api_version_string + ~originator:"daily-license-check" + in + Stdext.Pervasiveext.finally + (fun () -> + let now = Unix.time () in + let pool, pool_license_state, all_license_params = Daily_license_check.get_info_from_db rpc session in + let result = Daily_license_check.check_license now pool_license_state all_license_params in + Daily_license_check.execute rpc session pool result + ) + (fun () -> XenAPI.Session.logout rpc session) diff --git a/ocaml/license/license_init.ml b/ocaml/license/license_init.ml index 57e1f7a958b..300b1c769b7 
100644 --- a/ocaml/license/license_init.ml +++ b/ocaml/license/license_init.ml @@ -23,37 +23,37 @@ let fst4 (e,_,_,_) = e and lst4 (_,_,_,i) = i let find_min_edition allowed_editions = - List.fold_left - (fun a b -> - if (lst4 a) < (lst4 b) - then a else b) - ("","","",max_int) - allowed_editions - |> fst4 + List.fold_left + (fun a b -> + if (lst4 a) < (lst4 b) + then a else b) + ("","","",max_int) + allowed_editions + |> fst4 (* xapi calls this function upon startup *) let initialise ~__context ~host = - let module V6client = (val !v6client : V6clientS) in - - let set_licensing edition features additional = - debug "Setting license to %s" edition; - Db.Host.set_edition ~__context ~self:host ~value:edition; - (* Copy resulting license to the database *) - Xapi_host.copy_license_to_db ~__context ~host ~features ~additional in - - try - let edition = Db.Host.get_edition ~__context ~self:host in - let edition', features, additional = - V6client.apply_edition ~__context edition ["force", "true"] in - set_licensing edition' features additional - - with - | Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> - (* Couldn't communicate with v6d, so fall back to running in free/libre - * "xcp" mode, with all standard features enabled and no additional - * features advertised. 
This is the same as the "free" edition from v6d - * for most purposes but not for pool-join: see assert_restrictions_match - * in pre_join_checks in ocaml/xapi/xapi_pool.ml *) - set_licensing "free/libre" Features.all_features [] - - | _ -> () + let module V6client = (val !v6client : V6clientS) in + + let set_licensing edition features additional = + debug "Setting license to %s" edition; + Db.Host.set_edition ~__context ~self:host ~value:edition; + (* Copy resulting license to the database *) + Xapi_host.copy_license_to_db ~__context ~host ~features ~additional in + + try + let edition = Db.Host.get_edition ~__context ~self:host in + let edition', features, additional = + V6client.apply_edition ~__context edition ["force", "true"] in + set_licensing edition' features additional + + with + | Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> + (* Couldn't communicate with v6d, so fall back to running in free/libre + * "xcp" mode, with all standard features enabled and no additional + * features advertised. 
This is the same as the "free" edition from v6d + * for most purposes but not for pool-join: see assert_restrictions_match + * in pre_join_checks in ocaml/xapi/xapi_pool.ml *) + set_licensing "free/libre" Features.all_features [] + + | _ -> () diff --git a/ocaml/license/license_init.mli b/ocaml/license/license_init.mli index 1665e7c30ec..82e2b1d162d 100644 --- a/ocaml/license/license_init.mli +++ b/ocaml/license/license_init.mli @@ -13,7 +13,7 @@ *) (** Licensing initialisation * @group Licensing - *) +*) module type V6clientS = module type of V6client val v6client : (module V6clientS) ref diff --git a/ocaml/license/v6client.ml b/ocaml/license/v6client.ml index e285a3a0025..2e1ba87b4e8 100644 --- a/ocaml/license/v6client.ml +++ b/ocaml/license/v6client.ml @@ -24,77 +24,77 @@ let retry = ref true (* RPC function for communication with the v6 daemon *) let socket = Filename.concat "/var/lib/xcp" "v6" let v6rpc call = - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"v6d" ~transport:(Unix socket) ~http:(xmlrpc ~version:"1.0" "/") call + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"v6d" ~transport:(Unix socket) ~http:(xmlrpc ~version:"1.0" "/") call let rec apply_edition ~__context edition additional = - (* Get localhost's current license state. *) - let host = Helpers.get_localhost ~__context in - let license_server = Db.Host.get_license_server ~__context ~self:host in - let current_edition = Db.Host.get_edition ~__context ~self:host in - let current_license_params = Db.Host.get_license_params ~__context ~self:host in - (* Make sure the socket count in license_params is correct. - * At first boot, the key won't exist, and it may be wrong if we've restored - * a database dump from a different host. 
*) - let cpu_info = Db.Host.get_cpu_info ~__context ~self:host in - let socket_count = List.assoc "socket_count" cpu_info in - let current_license_params = - List.replace_assoc "sockets" socket_count current_license_params in - (* Construct the RPC params to be sent to v6d *) - let additional = ("current_edition", current_edition) :: - license_server @ current_license_params @ additional in - let params = [ Rpc.rpc_of_string (Context.string_of_task __context) - ; V6rpc.rpc_of_apply_edition_in - { V6rpc.edition_in = edition - ; V6rpc.additional_in = additional } ] in - try - let call = Rpc.call "apply_edition" params in - let response = try v6rpc call with _ -> raise V6DaemonFailure in - debug "response: %s" (Rpc.to_string response.Rpc.contents); - if response.Rpc.success then - let r = V6rpc.apply_edition_out_of_rpc response.Rpc.contents in - r.V6rpc.edition_out, r.V6rpc.features_out, r.V6rpc.additional_out - else - let e = V6errors.error_of_rpc response.Rpc.contents in - match e with - | s, _ when s = V6errors.v6d_failure -> - raise V6DaemonFailure - | name, args -> - raise (Api_errors.Server_error (name, args)) - with V6DaemonFailure -> - if !retry then begin - error "Apply_edition failed. Retrying once..."; - retry := false; - Thread.delay 2.; - apply_edition ~__context edition additional - end else begin - error "Apply_edition failed."; - retry := true; - raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) - end + (* Get localhost's current license state. *) + let host = Helpers.get_localhost ~__context in + let license_server = Db.Host.get_license_server ~__context ~self:host in + let current_edition = Db.Host.get_edition ~__context ~self:host in + let current_license_params = Db.Host.get_license_params ~__context ~self:host in + (* Make sure the socket count in license_params is correct. + * At first boot, the key won't exist, and it may be wrong if we've restored + * a database dump from a different host. 
*) + let cpu_info = Db.Host.get_cpu_info ~__context ~self:host in + let socket_count = List.assoc "socket_count" cpu_info in + let current_license_params = + List.replace_assoc "sockets" socket_count current_license_params in + (* Construct the RPC params to be sent to v6d *) + let additional = ("current_edition", current_edition) :: + license_server @ current_license_params @ additional in + let params = [ Rpc.rpc_of_string (Context.string_of_task __context) + ; V6rpc.rpc_of_apply_edition_in + { V6rpc.edition_in = edition + ; V6rpc.additional_in = additional } ] in + try + let call = Rpc.call "apply_edition" params in + let response = try v6rpc call with _ -> raise V6DaemonFailure in + debug "response: %s" (Rpc.to_string response.Rpc.contents); + if response.Rpc.success then + let r = V6rpc.apply_edition_out_of_rpc response.Rpc.contents in + r.V6rpc.edition_out, r.V6rpc.features_out, r.V6rpc.additional_out + else + let e = V6errors.error_of_rpc response.Rpc.contents in + match e with + | s, _ when s = V6errors.v6d_failure -> + raise V6DaemonFailure + | name, args -> + raise (Api_errors.Server_error (name, args)) + with V6DaemonFailure -> + if !retry then begin + error "Apply_edition failed. 
Retrying once..."; + retry := false; + Thread.delay 2.; + apply_edition ~__context edition additional + end else begin + error "Apply_edition failed."; + retry := true; + raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) + end let get_editions dbg = - try - let call = Rpc.call "get_editions" [Rpc.rpc_of_string dbg] in - let response = v6rpc call in - debug "response: %s" (Rpc.to_string response.Rpc.contents); - if response.Rpc.success then - let r = V6rpc.get_editions_out_of_rpc response.Rpc.contents in - r.V6rpc.editions - else - raise V6DaemonFailure - with _ -> - raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) + try + let call = Rpc.call "get_editions" [Rpc.rpc_of_string dbg] in + let response = v6rpc call in + debug "response: %s" (Rpc.to_string response.Rpc.contents); + if response.Rpc.success then + let r = V6rpc.get_editions_out_of_rpc response.Rpc.contents in + r.V6rpc.editions + else + raise V6DaemonFailure + with _ -> + raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) let get_version dbg = - try - let call = Rpc.call "get_version" [Rpc.rpc_of_string dbg] in - let response = v6rpc call in - debug "response: %s" (Rpc.to_string response.Rpc.contents); - if response.Rpc.success then - Rpc.string_of_rpc response.Rpc.contents - else - raise V6DaemonFailure - with _ -> - raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) + try + let call = Rpc.call "get_version" [Rpc.rpc_of_string dbg] in + let response = v6rpc call in + debug "response: %s" (Rpc.to_string response.Rpc.contents); + if response.Rpc.success then + Rpc.string_of_rpc response.Rpc.contents + else + raise V6DaemonFailure + with _ -> + raise (Api_errors.Server_error (Api_errors.v6d_failure, [])) diff --git a/ocaml/license/v6client.mli b/ocaml/license/v6client.mli index f5177adbe87..f282c5329e0 100644 --- a/ocaml/license/v6client.mli +++ b/ocaml/license/v6client.mli @@ -15,11 +15,11 @@ * Client module to interact with the licensing daemon [v6d]. 
* [v6d] controls which features are enabled for a given "edition". * @group Licensing - *) +*) (** Call the [apply_edition] function on the v6d *) val apply_edition : __context:Context.t -> string -> (string * string) list -> - string * Features.feature list * (string * string) list + string * Features.feature list * (string * string) list (** Call the [get_editions] function on the v6d *) val get_editions : string -> (string * string * string * int) list diff --git a/ocaml/license/v6daemon.ml b/ocaml/license/v6daemon.ml index 6a2cf415c37..6602efcbdb6 100644 --- a/ocaml/license/v6daemon.ml +++ b/ocaml/license/v6daemon.ml @@ -20,39 +20,39 @@ module D=Debug.Make(struct let name="v6daemon" end) open D let xmlrpc_handler process req bio _ = - Debug.with_thread_associated "v6d_handler" (fun () -> - let path = match String.split '/' req.Http.Request.uri with - | x::path::_ -> path - | _ -> failwith "Unknown path" - in - debug "path=%s" path; - let body = Http_svr.read_body req bio in - let s = Buf_io.fd_of bio in - let rpc = Xmlrpc.call_of_string body in - debug "Request name: %s" rpc.Rpc.name ; - List.iter (fun param -> - debug "Request param: %s" (Rpc.to_string param) - ) rpc.Rpc.params ; - let result = process rpc in - debug "Response: %s" (Rpc.to_string result.Rpc.contents); - let str = Xmlrpc.string_of_response result in - Http_svr.response_str req s str - ) () + Debug.with_thread_associated "v6d_handler" (fun () -> + let path = match String.split '/' req.Http.Request.uri with + | x::path::_ -> path + | _ -> failwith "Unknown path" + in + debug "path=%s" path; + let body = Http_svr.read_body req bio in + let s = Buf_io.fd_of bio in + let rpc = Xmlrpc.call_of_string body in + debug "Request name: %s" rpc.Rpc.name ; + List.iter (fun param -> + debug "Request param: %s" (Rpc.to_string param) + ) rpc.Rpc.params ; + let result = process rpc in + debug "Response: %s" (Rpc.to_string result.Rpc.contents); + let str = Xmlrpc.string_of_response result in + 
Http_svr.response_str req s str + ) () let server = Http_svr.Server.empty () let startup process = - Debug.with_thread_associated "daemon_init" (fun () -> - info "(Re)starting v6d..."; - (* unix socket *) - let unix_socket_path = Filename.concat "/var/lib/xcp" "v6" in - Stdext.Unixext.mkdir_safe (Filename.dirname unix_socket_path) 0o700; - Stdext.Unixext.unlink_safe unix_socket_path; - let domain_sock = Http_svr.bind (Unix.ADDR_UNIX(unix_socket_path)) "unix_rpc" in - Http_svr.start server domain_sock; - Http_svr.Server.add_handler server Http.Post "/" (Http_svr.BufIO (xmlrpc_handler process)); - ignore Daemon.(notify State.Ready); - (* keep daemon alive *) - Stdext.Threadext.keep_alive () - ) () + Debug.with_thread_associated "daemon_init" (fun () -> + info "(Re)starting v6d..."; + (* unix socket *) + let unix_socket_path = Filename.concat "/var/lib/xcp" "v6" in + Stdext.Unixext.mkdir_safe (Filename.dirname unix_socket_path) 0o700; + Stdext.Unixext.unlink_safe unix_socket_path; + let domain_sock = Http_svr.bind (Unix.ADDR_UNIX(unix_socket_path)) "unix_rpc" in + Http_svr.start server domain_sock; + Http_svr.Server.add_handler server Http.Post "/" (Http_svr.BufIO (xmlrpc_handler process)); + ignore Daemon.(notify State.Ready); + (* keep daemon alive *) + Stdext.Threadext.keep_alive () + ) () diff --git a/ocaml/license/v6errors.ml b/ocaml/license/v6errors.ml index 75575119e14..78ddc595a4c 100644 --- a/ocaml/license/v6errors.ml +++ b/ocaml/license/v6errors.ml @@ -3,7 +3,7 @@ exception Error of error let to_string = function | Error (name, args) -> - Printf.sprintf "V6error(%s, [ %a ])" name (fun () -> String.concat "; ") args + Printf.sprintf "V6error(%s, [ %a ])" name (fun () -> String.concat "; ") args | e -> Printexc.to_string e diff --git a/ocaml/license/v6rpc.ml b/ocaml/license/v6rpc.ml index 2bb6a2ddb57..43bae8ee28d 100644 --- a/ocaml/license/v6rpc.ml +++ b/ocaml/license/v6rpc.ml @@ -16,83 +16,83 @@ module D = Debug.Make(struct let name="v6rpc" end) open D type 
apply_edition_in = { - edition_in: string; - additional_in: (string * string) list; + edition_in: string; + additional_in: (string * string) list; } with rpc type apply_edition_out = { - edition_out: string; - features_out: Features.feature list; - additional_out: (string * string) list; + edition_out: string; + features_out: Features.feature list; + additional_out: (string * string) list; } with rpc type names = string * string * string * int with rpc type get_editions_out = { - editions: names list; + editions: names list; } with rpc module type V6api = sig - (* dbg_str -> edition -> additional_params -> enabled_features, additional_params *) - val apply_edition : string -> string -> (string * string) list -> - string * Features.feature list * (string * string) list - (* dbg_str -> list of editions (name, marketing name, short name) *) - val get_editions : string -> (string * string * string * int) list - (* dbg_str -> result *) - val get_version : string -> string - (* () -> version *) - val reopen_logs : unit -> bool + (* dbg_str -> edition -> additional_params -> enabled_features, additional_params *) + val apply_edition : string -> string -> (string * string) list -> + string * Features.feature list * (string * string) list + (* dbg_str -> list of editions (name, marketing name, short name) *) + val get_editions : string -> (string * string * string * int) list + (* dbg_str -> result *) + val get_version : string -> string + (* () -> version *) + val reopen_logs : unit -> bool end module V6process = functor(V: V6api) -> struct - let process call = - let response = - try match call.Rpc.name with - | "apply_edition" -> - let dbg_rpc, arg_rpc = match call.Rpc.params with - | [a;b] -> (a,b) - | _ -> - debug "Error in apply_edition rpc" ; - raise (V6errors.Error ("unmarshalling_error", [])) in - let arg = apply_edition_in_of_rpc arg_rpc in - let dbg = Rpc.string_of_rpc dbg_rpc in - let edition, features, additional_params = - V.apply_edition dbg arg.edition_in 
arg.additional_in in - let response = rpc_of_apply_edition_out - {edition_out = edition; features_out = features; additional_out = additional_params} in - Rpc.success response - | "get_editions" -> - let dbg_rpc = match call.Rpc.params with - | [a] -> a - | _ -> - debug "Error in get_editions rpc" ; - raise (V6errors.Error ("unmarshalling_error", [])) in - let dbg = Rpc.string_of_rpc dbg_rpc in - let response = rpc_of_get_editions_out {editions = V.get_editions dbg} in - Rpc.success response - | "get_version" -> - let dbg_rpc = match call.Rpc.params with - | [a] -> a - | _ -> - debug "Error in get_version rpc" ; - raise (V6errors.Error ("unmarshalling_error", [])) in - let dbg = Rpc.string_of_rpc dbg_rpc in - let response = Rpc.rpc_of_string (V.get_version dbg) in - Rpc.success response - | "reopen-logs" -> - let response = Rpc.rpc_of_bool (V.reopen_logs ()) in - Rpc.success response - | x -> failwith ("unknown RPC: " ^ x) - with - | V6errors.Error e as exn -> - error "%s" (V6errors.to_string exn); - Debug.log_backtrace exn (Backtrace.get exn); - Rpc.failure (V6errors.rpc_of_error e) - | e -> - Debug.log_backtrace e (Backtrace.get e); - let e = Printexc.to_string e in - error "Error: %s" e; - Rpc.failure (V6errors.rpc_of_error (V6errors.v6d_failure, [e])) - in - response + let process call = + let response = + try match call.Rpc.name with + | "apply_edition" -> + let dbg_rpc, arg_rpc = match call.Rpc.params with + | [a;b] -> (a,b) + | _ -> + debug "Error in apply_edition rpc" ; + raise (V6errors.Error ("unmarshalling_error", [])) in + let arg = apply_edition_in_of_rpc arg_rpc in + let dbg = Rpc.string_of_rpc dbg_rpc in + let edition, features, additional_params = + V.apply_edition dbg arg.edition_in arg.additional_in in + let response = rpc_of_apply_edition_out + {edition_out = edition; features_out = features; additional_out = additional_params} in + Rpc.success response + | "get_editions" -> + let dbg_rpc = match call.Rpc.params with + | [a] -> a + | _ -> + debug 
"Error in get_editions rpc" ; + raise (V6errors.Error ("unmarshalling_error", [])) in + let dbg = Rpc.string_of_rpc dbg_rpc in + let response = rpc_of_get_editions_out {editions = V.get_editions dbg} in + Rpc.success response + | "get_version" -> + let dbg_rpc = match call.Rpc.params with + | [a] -> a + | _ -> + debug "Error in get_version rpc" ; + raise (V6errors.Error ("unmarshalling_error", [])) in + let dbg = Rpc.string_of_rpc dbg_rpc in + let response = Rpc.rpc_of_string (V.get_version dbg) in + Rpc.success response + | "reopen-logs" -> + let response = Rpc.rpc_of_bool (V.reopen_logs ()) in + Rpc.success response + | x -> failwith ("unknown RPC: " ^ x) + with + | V6errors.Error e as exn -> + error "%s" (V6errors.to_string exn); + Debug.log_backtrace exn (Backtrace.get exn); + Rpc.failure (V6errors.rpc_of_error e) + | e -> + Debug.log_backtrace e (Backtrace.get e); + let e = Printexc.to_string e in + error "Error: %s" e; + Rpc.failure (V6errors.rpc_of_error (V6errors.v6d_failure, [e])) + in + response end diff --git a/ocaml/license/v6rpc.mli b/ocaml/license/v6rpc.mli index a9aaa2d74ea..7b66a8774f1 100644 --- a/ocaml/license/v6rpc.mli +++ b/ocaml/license/v6rpc.mli @@ -16,31 +16,31 @@ (** The RPC interface of the licensing daemon *) module type V6api = - sig - (* dbg_str -> edition -> additional_params -> enabled_features, additional_params *) - val apply_edition : string -> string -> (string * string) list -> - string * Features.feature list * (string * string) list - (* dbg_str -> list of editions *) - val get_editions : string -> (string * string * string * int) list - (* dbg_str -> result *) - val get_version : string -> string - (* () -> version *) - val reopen_logs : unit -> bool - end +sig + (* dbg_str -> edition -> additional_params -> enabled_features, additional_params *) + val apply_edition : string -> string -> (string * string) list -> + string * Features.feature list * (string * string) list + (* dbg_str -> list of editions *) + val get_editions : 
string -> (string * string * string * int) list + (* dbg_str -> result *) + val get_version : string -> string + (* () -> version *) + val reopen_logs : unit -> bool +end (** RPC handler module *) module V6process : functor (V : V6api) -> - sig - (** Process an RPC call *) - val process : Rpc.call -> Rpc.response - end +sig + (** Process an RPC call *) + val process : Rpc.call -> Rpc.response +end (** {2 Marshaling functions} *) (** Definition of [apply_edition] RPC *) type apply_edition_in = { - edition_in: string; (** The requested edition *) - additional_in: (string * string) list; (** Additional parameters *) + edition_in: string; (** The requested edition *) + additional_in: (string * string) list; (** Additional parameters *) } (** Convert RPC into {!apply_edition_in} structure *) @@ -51,9 +51,9 @@ val rpc_of_apply_edition_in : apply_edition_in -> Rpc.t (** Return type of the [apply_edition] RPC *) type apply_edition_out = { - edition_out: string; (** The edition that was applied *) - features_out: Features.feature list; (** The features that are now enabled *) - additional_out: (string * string) list; (** Additional parameters *) + edition_out: string; (** The edition that was applied *) + features_out: Features.feature list; (** The features that are now enabled *) + additional_out: (string * string) list; (** Additional parameters *) } (** Convert RPC into {!apply_edition_out} structure *) @@ -67,12 +67,12 @@ val rpc_of_apply_edition_out : apply_edition_out -> Rpc.t - Long name of the edition; - Abbreviation of the edition name; - Edition order number. 
- *) +*) type names = string * string * string * int (** Return type of the [get_editions] RPC *) type get_editions_out = { - editions: names list; (** List of all available editions *) + editions: names list; (** List of all available editions *) } (** Convert RPC into {!get_editions_out} structure *) diff --git a/ocaml/mpathalert/mpathalert.ml b/ocaml/mpathalert/mpathalert.ml index 161c49160ad..a2648f4ffeb 100644 --- a/ocaml/mpathalert/mpathalert.ml +++ b/ocaml/mpathalert/mpathalert.ml @@ -25,285 +25,285 @@ let delay = ref 120. let lock = Mutex.create () let with_global_lock (f:unit -> unit) = Mutex.execute lock f -let time_of_float x = - let time = Unix.gmtime x in - Printf.sprintf "%04d%02d%02dT%02d:%02d:%02dZ" - (time.Unix.tm_year+1900) - (time.Unix.tm_mon+1) - time.Unix.tm_mday - time.Unix.tm_hour - time.Unix.tm_min - time.Unix.tm_sec - -let stdout_m = Mutex.create () +let time_of_float x = + let time = Unix.gmtime x in + Printf.sprintf "%04d%02d%02dT%02d:%02d:%02dZ" + (time.Unix.tm_year+1900) + (time.Unix.tm_mon+1) + time.Unix.tm_mday + time.Unix.tm_hour + time.Unix.tm_min + time.Unix.tm_sec + +let stdout_m = Mutex.create () let debug (fmt: ('a , unit, string, unit) format4) = - if !print_debug then - Threadext.Mutex.execute stdout_m - (fun () -> - Printf.kprintf - (fun s -> Printf.printf "%s [%d] %s\n" (time_of_float (Unix.gettimeofday ())) (Thread.id (Thread.self ())) s; flush stdout) fmt) - else - Printf.kprintf (fun s -> ()) fmt - + if !print_debug then + Threadext.Mutex.execute stdout_m + (fun () -> + Printf.kprintf + (fun s -> Printf.printf "%s [%d] %s\n" (time_of_float (Unix.gettimeofday ())) (Thread.id (Thread.self ())) s; flush stdout) fmt) + else + Printf.kprintf (fun s -> ()) fmt + type t = { - host: [`host] Uuid.t; - host_name: string; - pbd: [`pbd] Uuid.t; - timestamp: float; - scsi_id: string; - current: int; - max: int } + host: [`host] Uuid.t; + host_name: string; + pbd: [`pbd] Uuid.t; + timestamp: float; + scsi_id: string; + current: int; + 
max: int } let to_string alert = - if alert.pbd <> Uuid.null then - Printf.sprintf "[%s] host=%s; host-name=\"%s\"; pbd=%s; scsi_id=%s; current=%d; max=%d" - (time_of_float alert.timestamp) (String.escaped (Uuid.to_string alert.host)) - alert.host_name (Uuid.to_string alert.pbd) alert.scsi_id alert.current alert.max - else - Printf.sprintf "[%s] host=%s; host-name=\"%s\"; root=true; current=%d; max=%d" - (time_of_float alert.timestamp) (String.escaped (Uuid.to_string alert.host)) - alert.host_name alert.current alert.max + if alert.pbd <> Uuid.null then + Printf.sprintf "[%s] host=%s; host-name=\"%s\"; pbd=%s; scsi_id=%s; current=%d; max=%d" + (time_of_float alert.timestamp) (String.escaped (Uuid.to_string alert.host)) + alert.host_name (Uuid.to_string alert.pbd) alert.scsi_id alert.current alert.max + else + Printf.sprintf "[%s] host=%s; host-name=\"%s\"; root=true; current=%d; max=%d" + (time_of_float alert.timestamp) (String.escaped (Uuid.to_string alert.host)) + alert.host_name alert.current alert.max (* execute f within an active session *) let rec retry_with_session f rpc x = - let session = - let rec aux () = - try Client.Session.login_with_password ~rpc ~uname:"" ~pwd:"" ~version:"1.4" ~originator:"mpathalert" - with _ -> Thread.delay !delay; aux () in - aux () in - try - f rpc session x - with e -> - begin try Client.Session.logout ~rpc ~session_id:session with _ -> () end; - debug "Got '%s', trying with a new session ..." (Printexc.to_string e); - Thread.delay !delay; - retry_with_session f rpc x + let session = + let rec aux () = + try Client.Session.login_with_password ~rpc ~uname:"" ~pwd:"" ~version:"1.4" ~originator:"mpathalert" + with _ -> Thread.delay !delay; aux () in + aux () in + try + f rpc session x + with e -> + begin try Client.Session.logout ~rpc ~session_id:session with _ -> () end; + debug "Got '%s', trying with a new session ..." 
(Printexc.to_string e); + Thread.delay !delay; + retry_with_session f rpc x let keep_mpath = List.filter (fun (key, value) -> Xstringext.String.startswith "mpath-" key) (* create a list of alerts from a PBD event *) let create_pbd_alerts rpc session snapshot (pbd_ref, pbd_rec, timestamp) = - let aux (key, value) = - let scsi_id = String.sub_to_end key 6 in - let current, max = Scanf.sscanf value "[%d, %d]" (fun current max -> current, max) in - let host = Uuid.of_string (Client.Host.get_uuid rpc session pbd_rec.API.pBD_host) in - let host_name = Client.Host.get_name_label rpc session pbd_rec.API.pBD_host in - let pbd = Uuid.of_string pbd_rec.API.pBD_uuid in - let alert = { - host = host; - host_name = host_name; - pbd = pbd; - timestamp = timestamp; - scsi_id = scsi_id; - current = current; - max = max - } in - debug "Alert '%s' created from %s=%s" (to_string alert) key value; - alert in - - let diff = List.set_difference (keep_mpath pbd_rec.API.pBD_other_config) snapshot in - List.map aux diff - + let aux (key, value) = + let scsi_id = String.sub_to_end key 6 in + let current, max = Scanf.sscanf value "[%d, %d]" (fun current max -> current, max) in + let host = Uuid.of_string (Client.Host.get_uuid rpc session pbd_rec.API.pBD_host) in + let host_name = Client.Host.get_name_label rpc session pbd_rec.API.pBD_host in + let pbd = Uuid.of_string pbd_rec.API.pBD_uuid in + let alert = { + host = host; + host_name = host_name; + pbd = pbd; + timestamp = timestamp; + scsi_id = scsi_id; + current = current; + max = max + } in + debug "Alert '%s' created from %s=%s" (to_string alert) key value; + alert in + + let diff = List.set_difference (keep_mpath pbd_rec.API.pBD_other_config) snapshot in + List.map aux diff + (* create a list of alerts from a host event *) let create_host_alerts rpc session snapshot (host_ref, host_rec, timestamp) = - let aux (key, value) = - let scsi_id = "n/a" in - let current, max = Scanf.sscanf value "[%d, %d]" (fun current max -> current, max) in - 
let host = Uuid.of_string host_rec.API.host_uuid in - let host_name = host_rec.API.host_name_label in - let pbd = Uuid.null in - let alert = { - host = host; - host_name = host_name; - pbd = pbd; - timestamp = timestamp; - scsi_id = scsi_id; - current = current; - max = max - } in - debug "Alert '%s' created from %s=%s" (to_string alert) key value; - alert in - - let diff = List.set_difference (keep_mpath host_rec.API.host_other_config) snapshot in - List.map aux diff + let aux (key, value) = + let scsi_id = "n/a" in + let current, max = Scanf.sscanf value "[%d, %d]" (fun current max -> current, max) in + let host = Uuid.of_string host_rec.API.host_uuid in + let host_name = host_rec.API.host_name_label in + let pbd = Uuid.null in + let alert = { + host = host; + host_name = host_name; + pbd = pbd; + timestamp = timestamp; + scsi_id = scsi_id; + current = current; + max = max + } in + debug "Alert '%s' created from %s=%s" (to_string alert) key value; + alert in + + let diff = List.set_difference (keep_mpath host_rec.API.host_other_config) snapshot in + List.map aux diff let listener rpc session queue = - let snapshot = Hashtbl.create 48 in - let update_snapshot r other_config = - let r = Ref.string_of r in - if Hashtbl.mem snapshot r then - debug "Update an entry of the snapshot table: %s" r - else - debug "Add a new entry to the snapshot table: %s" r; - Hashtbl.replace snapshot r other_config in - let remove_from_snapshot r = - let r = Ref.string_of r in - debug "Remove an entry to the snapshot table: %s" r; - Hashtbl.remove snapshot r in - let get_snapshot r = Hashtbl.find snapshot (Ref.string_of r) in - - Client.Event.register rpc session ["pbd"; "host"]; - - (* populate the snapshot cache *) - let pbds = Client.PBD.get_all_records rpc session in - List.iter (fun (pbd_ref, pbd_rec) -> update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config)) pbds; - let hosts = Client.Host.get_all_records rpc session in - List.iter (fun (host_ref, host_rec) -> 
update_snapshot host_ref (keep_mpath host_rec.API.host_other_config)) hosts; - - (* proceed events *) - let proceed event = - match Event_helper.record_of_event event with - | Event_helper.PBD (pbd_ref, pbd_rec_opt) -> - begin match event.op, pbd_rec_opt with - | `add, Some pbd_rec -> - debug "Processing an ADD event"; - update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config) - | `del, _ -> - debug "Processing a DEL event"; - remove_from_snapshot pbd_ref - | `_mod, Some pbd_rec -> - let alerts = create_pbd_alerts rpc session (get_snapshot pbd_ref) (pbd_ref, pbd_rec, float_of_string event.ts) in - debug "Processing a MOD event"; - List.iter (fun alert -> with_global_lock (fun () -> Queue.push alert queue)) alerts; - update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config) - | _ -> () (* this should never happens *) - end - | Event_helper.Host (host_ref, host_rec_opt) -> - begin match event.op, host_rec_opt with - | `add, Some host_rec -> - debug "Processing an ADD event"; - update_snapshot host_ref (keep_mpath host_rec.API.host_other_config) - | `del, _ -> - debug "Processing a DEL event"; - remove_from_snapshot host_ref - | `_mod, Some host_rec -> - debug "Processing a MOD event"; - let alerts = create_host_alerts rpc session (get_snapshot host_ref) (host_ref, host_rec, float_of_string event.ts) in - List.iter (fun alert -> with_global_lock (fun () -> Queue.push alert queue)) alerts; - update_snapshot host_ref (keep_mpath host_rec.API.host_other_config) - | _ -> () (* this should never happens *) - end - | _ -> () (* this should never happen *) in - - (* infinite loop *) - while true do - let events = Event_types.events_of_rpc (Client.Event.next rpc session) in - List.iter proceed events - done + let snapshot = Hashtbl.create 48 in + let update_snapshot r other_config = + let r = Ref.string_of r in + if Hashtbl.mem snapshot r then + debug "Update an entry of the snapshot table: %s" r + else + debug "Add a new entry to the snapshot table: %s" r; + 
Hashtbl.replace snapshot r other_config in + let remove_from_snapshot r = + let r = Ref.string_of r in + debug "Remove an entry to the snapshot table: %s" r; + Hashtbl.remove snapshot r in + let get_snapshot r = Hashtbl.find snapshot (Ref.string_of r) in + + Client.Event.register rpc session ["pbd"; "host"]; + + (* populate the snapshot cache *) + let pbds = Client.PBD.get_all_records rpc session in + List.iter (fun (pbd_ref, pbd_rec) -> update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config)) pbds; + let hosts = Client.Host.get_all_records rpc session in + List.iter (fun (host_ref, host_rec) -> update_snapshot host_ref (keep_mpath host_rec.API.host_other_config)) hosts; + + (* proceed events *) + let proceed event = + match Event_helper.record_of_event event with + | Event_helper.PBD (pbd_ref, pbd_rec_opt) -> + begin match event.op, pbd_rec_opt with + | `add, Some pbd_rec -> + debug "Processing an ADD event"; + update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config) + | `del, _ -> + debug "Processing a DEL event"; + remove_from_snapshot pbd_ref + | `_mod, Some pbd_rec -> + let alerts = create_pbd_alerts rpc session (get_snapshot pbd_ref) (pbd_ref, pbd_rec, float_of_string event.ts) in + debug "Processing a MOD event"; + List.iter (fun alert -> with_global_lock (fun () -> Queue.push alert queue)) alerts; + update_snapshot pbd_ref (keep_mpath pbd_rec.API.pBD_other_config) + | _ -> () (* this should never happens *) + end + | Event_helper.Host (host_ref, host_rec_opt) -> + begin match event.op, host_rec_opt with + | `add, Some host_rec -> + debug "Processing an ADD event"; + update_snapshot host_ref (keep_mpath host_rec.API.host_other_config) + | `del, _ -> + debug "Processing a DEL event"; + remove_from_snapshot host_ref + | `_mod, Some host_rec -> + debug "Processing a MOD event"; + let alerts = create_host_alerts rpc session (get_snapshot host_ref) (host_ref, host_rec, float_of_string event.ts) in + List.iter (fun alert -> with_global_lock (fun 
() -> Queue.push alert queue)) alerts; + update_snapshot host_ref (keep_mpath host_rec.API.host_other_config) + | _ -> () (* this should never happens *) + end + | _ -> () (* this should never happen *) in + + (* infinite loop *) + while true do + let events = Event_types.events_of_rpc (Client.Event.next rpc session) in + List.iter proceed events + done let state_of_the_world rpc session = - debug "Generating the current state of the world"; - let pbds = Client.PBD.get_all_records rpc session in - let pbd_alerts = List.flatten (List.map (fun (pbd_ref, pbd_rec) -> create_pbd_alerts rpc session [] (pbd_ref, pbd_rec, Unix.gettimeofday ())) pbds) in - let hosts = Client.Host.get_all_records rpc session in - let host_alerts = List.flatten (List.map (fun (host_ref, host_rec) -> create_host_alerts rpc session [] (host_ref, host_rec, Unix.gettimeofday ())) hosts) in - let alerts = List.filter (fun alert -> alert.current <> alert.max) (pbd_alerts @ host_alerts) in - debug "State of the world generated"; - alerts + debug "Generating the current state of the world"; + let pbds = Client.PBD.get_all_records rpc session in + let pbd_alerts = List.flatten (List.map (fun (pbd_ref, pbd_rec) -> create_pbd_alerts rpc session [] (pbd_ref, pbd_rec, Unix.gettimeofday ())) pbds) in + let hosts = Client.Host.get_all_records rpc session in + let host_alerts = List.flatten (List.map (fun (host_ref, host_rec) -> create_host_alerts rpc session [] (host_ref, host_rec, Unix.gettimeofday ())) hosts) in + let alerts = List.filter (fun alert -> alert.current <> alert.max) (pbd_alerts @ host_alerts) in + debug "State of the world generated"; + alerts let sender rpc session (delay, msg, queue) = - debug "Start sender with delay=%.0f seconds" delay; - let pool_uuid = - let _, pool_rec = List.hd (Client.Pool.get_all_records rpc session) in - pool_rec.API.pool_uuid in - - let tmp = Buffer.create 1024 in - - (* Hashtable containing all the broken scsi_id saw since the last wake up *) - let 
broken_history = Hashtbl.create 32 in - let update_broken_history alert = - if alert.max <> alert.current then begin - debug "Updating '%s' in the broken history" (to_string alert); - Hashtbl.replace broken_history (alert.pbd, alert.scsi_id) () - end else begin - debug "Removing '%s' of the broken history" (to_string alert); - Hashtbl.remove broken_history (alert.pbd, alert.scsi_id) - end in - let remember_broken_history state_of_the_world = - debug "Cleaning and re-populating the broken history"; - Hashtbl.clear broken_history; - List.iter update_broken_history state_of_the_world in - let was_broken pbd scsi_id = - Hashtbl.mem broken_history (pbd, scsi_id) in - - (* if the alert scsi_id was broken or is broken, generates the alert; then, update the history of broken scsi_id *) - let interesting_alert = ref false in - let proceed alert = - if was_broken alert.pbd alert.scsi_id || alert.current <> alert.max then begin - debug "Adding '%s' to the temp buffer as was_broken=%b and is_broken=%b" (to_string alert) (was_broken alert.pbd alert.scsi_id) (alert.current <> alert.max); - interesting_alert := true; - Buffer.add_string tmp (to_string alert ^ "\n") - end else - debug "Ignoring '%s' as was_broken=%b and is_broken=%b" (to_string alert) (was_broken alert.pbd alert.scsi_id) (alert.current <> alert.max); - update_broken_history alert in - - while true do - debug "Wake up"; - - let state_of_the_world = state_of_the_world rpc session in - - with_global_lock (fun () -> - if not (Queue.is_empty queue) then begin - - (* write everything on a tempary buffer *) - Buffer.clear tmp; - - (* update the state of the world *) - if state_of_the_world <> [] then begin - let alert_msgs = List.map to_string state_of_the_world in - Buffer.add_string tmp (Printf.sprintf "Unhealthy paths:\n%s\n" (String.concat "\n" alert_msgs)) - end; - - (* update the received events *) - Buffer.add_string tmp (Printf.sprintf "Events received during the last %.0f seconds:\n" delay); - - 
interesting_alert := false; - while not (Queue.is_empty queue) do - proceed (Queue.pop queue) - done; - - (* if an intersting alert had been proceeded, then commit our changes to the msg buffer *) - if !interesting_alert then - Buffer.add_buffer msg tmp; - end); - - if Buffer.length msg <> 0 then begin - let (name, priority) = Api_messages.multipath_periodic_alert in - let (_:API.ref_message) = Client.Message.create rpc session name priority `Pool pool_uuid (Buffer.contents msg) in - remember_broken_history state_of_the_world; - Buffer.clear msg; - end; - - Thread.delay delay; - done + debug "Start sender with delay=%.0f seconds" delay; + let pool_uuid = + let _, pool_rec = List.hd (Client.Pool.get_all_records rpc session) in + pool_rec.API.pool_uuid in + + let tmp = Buffer.create 1024 in + + (* Hashtable containing all the broken scsi_id saw since the last wake up *) + let broken_history = Hashtbl.create 32 in + let update_broken_history alert = + if alert.max <> alert.current then begin + debug "Updating '%s' in the broken history" (to_string alert); + Hashtbl.replace broken_history (alert.pbd, alert.scsi_id) () + end else begin + debug "Removing '%s' of the broken history" (to_string alert); + Hashtbl.remove broken_history (alert.pbd, alert.scsi_id) + end in + let remember_broken_history state_of_the_world = + debug "Cleaning and re-populating the broken history"; + Hashtbl.clear broken_history; + List.iter update_broken_history state_of_the_world in + let was_broken pbd scsi_id = + Hashtbl.mem broken_history (pbd, scsi_id) in + + (* if the alert scsi_id was broken or is broken, generates the alert; then, update the history of broken scsi_id *) + let interesting_alert = ref false in + let proceed alert = + if was_broken alert.pbd alert.scsi_id || alert.current <> alert.max then begin + debug "Adding '%s' to the temp buffer as was_broken=%b and is_broken=%b" (to_string alert) (was_broken alert.pbd alert.scsi_id) (alert.current <> alert.max); + interesting_alert 
:= true; + Buffer.add_string tmp (to_string alert ^ "\n") + end else + debug "Ignoring '%s' as was_broken=%b and is_broken=%b" (to_string alert) (was_broken alert.pbd alert.scsi_id) (alert.current <> alert.max); + update_broken_history alert in + + while true do + debug "Wake up"; + + let state_of_the_world = state_of_the_world rpc session in + + with_global_lock (fun () -> + if not (Queue.is_empty queue) then begin + + (* write everything on a tempary buffer *) + Buffer.clear tmp; + + (* update the state of the world *) + if state_of_the_world <> [] then begin + let alert_msgs = List.map to_string state_of_the_world in + Buffer.add_string tmp (Printf.sprintf "Unhealthy paths:\n%s\n" (String.concat "\n" alert_msgs)) + end; + + (* update the received events *) + Buffer.add_string tmp (Printf.sprintf "Events received during the last %.0f seconds:\n" delay); + + interesting_alert := false; + while not (Queue.is_empty queue) do + proceed (Queue.pop queue) + done; + + (* if an intersting alert had been proceeded, then commit our changes to the msg buffer *) + if !interesting_alert then + Buffer.add_buffer msg tmp; + end); + + if Buffer.length msg <> 0 then begin + let (name, priority) = Api_messages.multipath_periodic_alert in + let (_:API.ref_message) = Client.Message.create rpc session name priority `Pool pool_uuid (Buffer.contents msg) in + remember_broken_history state_of_the_world; + Buffer.clear msg; + end; + + Thread.delay delay; + done let _ = - let pidfile = ref "/var/run/mpathalert.pid" in - let daemonize = ref false in - - Arg.parse (Arg.align [ - "-debug", Arg.Set print_debug, " Print debug messages"; - "-delay", Arg.Set_float delay, Printf.sprintf " Set the delay, in seconds, between 2 consecutive alerts (default is %.0f)" !delay; - "-daemon", Arg.Set daemonize, " Create a daemon"; - "-pidfile", Arg.Set_string pidfile, Printf.sprintf " Set the pid file (default is %s)" !pidfile ]) - (fun _ -> failwith "Invalid argument") - "Usage: mpathalert [-debug] 
[-delay time to wait between alerts] [-daemon] [-pidfile filename]"; - - if !daemonize then - Unixext.daemonize (); - - Unixext.mkdir_rec (Filename.dirname !pidfile) 0o755; - Unixext.pidfile_write !pidfile; - - let rpc xml = - let open Xmlrpc_client in - let http = xmlrpc ~version:"1.0" "/" in - XMLRPC_protocol.rpc ~srcstr:"mpathalert" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http xml in - let queue = Queue.create () in - let msg = Buffer.create 1024 in - - let (t1:Thread.t) = Thread.create (retry_with_session listener rpc) queue in - let (t2:Thread.t) = Thread.create (retry_with_session sender rpc) (!delay, msg, queue) in - - Thread.join t1; - Thread.join t2 + let pidfile = ref "/var/run/mpathalert.pid" in + let daemonize = ref false in + + Arg.parse (Arg.align [ + "-debug", Arg.Set print_debug, " Print debug messages"; + "-delay", Arg.Set_float delay, Printf.sprintf " Set the delay, in seconds, between 2 consecutive alerts (default is %.0f)" !delay; + "-daemon", Arg.Set daemonize, " Create a daemon"; + "-pidfile", Arg.Set_string pidfile, Printf.sprintf " Set the pid file (default is %s)" !pidfile ]) + (fun _ -> failwith "Invalid argument") + "Usage: mpathalert [-debug] [-delay time to wait between alerts] [-daemon] [-pidfile filename]"; + + if !daemonize then + Unixext.daemonize (); + + Unixext.mkdir_rec (Filename.dirname !pidfile) 0o755; + Unixext.pidfile_write !pidfile; + + let rpc xml = + let open Xmlrpc_client in + let http = xmlrpc ~version:"1.0" "/" in + XMLRPC_protocol.rpc ~srcstr:"mpathalert" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http xml in + let queue = Queue.create () in + let msg = Buffer.create 1024 in + + let (t1:Thread.t) = Thread.create (retry_with_session listener rpc) queue in + let (t2:Thread.t) = Thread.create (retry_with_session sender rpc) (!delay, msg, queue) in + + Thread.join t1; + Thread.join t2 diff --git a/ocaml/perftest/apiperf.ml b/ocaml/perftest/apiperf.ml index 
c93b65c42ef..2db6deae7de 100644 --- a/ocaml/perftest/apiperf.ml +++ b/ocaml/perftest/apiperf.ml @@ -20,7 +20,7 @@ open Xstringext open Pervasiveext open Client -let time f = +let time f = let start = Unix.gettimeofday () in f (); Unix.gettimeofday () -. start @@ -33,25 +33,25 @@ let threads = ref 1 let url = ref ("file://" ^ "/var/lib/xcp" ^ "xapi") -type url = +type url = | Http of string * int | Https of string * int | Uds of string -let url_of_string x = - let host_and_port_of_string default_port x = +let url_of_string x = + let host_and_port_of_string default_port x = match String.split ':' x with | [ host; port ] -> host, int_of_string port | [ host ] -> host, default_port in match String.explode x with | 'h' :: 't' :: 't' :: 'p' :: 's' :: ':' :: '/' :: '/' :: rest -> - let host, port = host_and_port_of_string 443 (String.implode rest) in - Https(host, port) + let host, port = host_and_port_of_string 443 (String.implode rest) in + Https(host, port) | 'h' :: 't' :: 't' :: 'p' :: ':' :: '/' :: '/' :: rest -> - let host, port = host_and_port_of_string 80 (String.implode rest) in - Http(host, port) + let host, port = host_and_port_of_string 80 (String.implode rest) in + Http(host, port) | 'f' :: 'i' :: 'l' :: 'e' :: ':' :: '/' :: '/' :: rest -> - Uds(String.implode rest) + Uds(String.implode rest) | _ -> failwith (Printf.sprintf "Unknown URL: %s; was expecting https:// http:// or file://" x) let string_of_url = function @@ -59,23 +59,23 @@ let string_of_url = function | Http(host, port) -> Printf.sprintf "http://%s:%d/" host port | Uds path -> Printf.sprintf "file://%s" path -let rpc_of_url = - let open Xmlrpcclient in - let http = xmlrpc ~version:"1.0" "/" in - function - | Http(host, port) -> fun xml -> - XML_protocol.rpc ~transport:(TCP(host, port)) ~http xml - | Https(host, port) -> fun xml -> - XML_protocol.rpc ~transport:(SSL(SSL.make ~use_stunnel_cache:!use_stunnel_cache (), host, port)) ~http xml - | Uds filename -> fun xml -> - XML_protocol.rpc 
~transport:(Unix filename) ~http xml +let rpc_of_url = + let open Xmlrpcclient in + let http = xmlrpc ~version:"1.0" "/" in + function + | Http(host, port) -> fun xml -> + XML_protocol.rpc ~transport:(TCP(host, port)) ~http xml + | Https(host, port) -> fun xml -> + XML_protocol.rpc ~transport:(SSL(SSL.make ~use_stunnel_cache:!use_stunnel_cache (), host, port)) ~http xml + | Uds filename -> fun xml -> + XML_protocol.rpc ~transport:(Unix filename) ~http xml open API open XMLRPC let server_failure code args = raise (Api_errors.Server_error (code, args)) -let rpc_wrapper rpc name args = +let rpc_wrapper rpc name args = match From.methodResponse(rpc(To.methodCall name args)) with | Fault _ -> invalid_arg "Client.rpc (Fault _)" | Success [] -> XMLRPC.To.structure [] (* dummy value *) @@ -86,44 +86,44 @@ let rpc_wrapper rpc name args = let get_log ~rpc ~session_id ~host = let session_id = API.To.ref_session session_id in let host = API.To.ref_host host in - + API.From.string "return value of host.get_log" (rpc_wrapper rpc "host.get_log'" [ session_id; host ]) (* Use the Host.query_data_source API to test the speed of the forwarding engine *) -let test rpc session hosts nthreads time_limit = +let test rpc session hosts nthreads time_limit = let test_started = Unix.gettimeofday () in let n = ref 0 in let sigma_x = ref 0. in let m = Mutex.create () in - let samples xs = + let samples xs = Mutex.execute m (fun () -> - n := !n + (List.length xs); - sigma_x := List.fold_left (+.) !sigma_x xs + n := !n + (List.length xs); + sigma_x := List.fold_left (+.) !sigma_x xs ) in - let body () = + let body () = while Unix.gettimeofday () -. 
test_started < time_limit do - let one host = time - (fun () -> - try - if !master then begin - (* Use the invalid XMLRPC request *) - try - ignore(get_log rpc session host) - with Api_errors.Server_error(code, params) when code = Api_errors.message_method_unknown -> () - end else begin - (* Use the valid XMLRPC request so it is forwarded *) - try - ignore(Client.Host.get_log rpc session host) - with Api_errors.Server_error(code, params) when code = Api_errors.not_implemented -> () - end - with e -> - Printf.fprintf stderr "%s\n" (Printexc.to_string e); - flush stderr; - raise e - ) in + let one host = time + (fun () -> + try + if !master then begin + (* Use the invalid XMLRPC request *) + try + ignore(get_log rpc session host) + with Api_errors.Server_error(code, params) when code = Api_errors.message_method_unknown -> () + end else begin + (* Use the valid XMLRPC request so it is forwarded *) + try + ignore(Client.Host.get_log rpc session host) + with Api_errors.Server_error(code, params) when code = Api_errors.not_implemented -> () + end + with e -> + Printf.fprintf stderr "%s\n" (Printexc.to_string e); + flush stderr; + raise e + ) in let times = List.map one hosts in samples times done in @@ -139,12 +139,12 @@ let time = ref 30. 
let _ = Arg.parse [ "-master", (Arg.Set master), (Printf.sprintf "test the master only [default:%b]" !master); - "-slaves", (Arg.Set_int slave_limit), (Printf.sprintf "number of slaves to forward requests to (round-robin) [default:%d]" !slave_limit); - "-threads", (Arg.Set_int threads), (Printf.sprintf "number of parallel threads to run [default:%d]" !threads); - "-time", (Arg.Set_float time), (Printf.sprintf "set test time in seconds [default:%.2f]" !time); - "-cache", (Arg.Set use_stunnel_cache), (Printf.sprintf "use the stunnel client cache [default:%b]" !use_stunnel_cache); - "-url", (Arg.Set_string url), (Printf.sprintf "specify the URL to use [default:%s]" !url); - ] + "-slaves", (Arg.Set_int slave_limit), (Printf.sprintf "number of slaves to forward requests to (round-robin) [default:%d]" !slave_limit); + "-threads", (Arg.Set_int threads), (Printf.sprintf "number of parallel threads to run [default:%d]" !threads); + "-time", (Arg.Set_float time), (Printf.sprintf "set test time in seconds [default:%.2f]" !time); + "-cache", (Arg.Set use_stunnel_cache), (Printf.sprintf "use the stunnel client cache [default:%b]" !use_stunnel_cache); + "-url", (Arg.Set_string url), (Printf.sprintf "specify the URL to use [default:%s]" !url); + ] (fun x -> Printf.fprintf stderr "Skipping unknown argument: %s\n" x) "Test the performance of the XMLRPC request forwarding engine"; let url = url_of_string !url in @@ -160,10 +160,10 @@ let _ = let pool = List.hd (Client.Pool.get_all rpc session) in let master_host = Client.Pool.get_master rpc session pool in let slave_hosts = List.filter (fun h -> h <> master_host) hosts in - + let hosts_to_test = if !master then [ master_host ] else (fst (List.chop !slave_limit slave_hosts)) in test rpc session hosts_to_test !threads !time ) (fun () -> Client.Session.logout rpc session) - + diff --git a/ocaml/perftest/createVM.ml b/ocaml/perftest/createVM.ml index b5561d7e6f7..0a7f0b72145 100644 --- a/ocaml/perftest/createVM.ml +++ 
b/ocaml/perftest/createVM.ml @@ -24,71 +24,71 @@ let make_iscsi_ip pool = Printf.sprintf "192.168.%d.200" (pool.ipbase+2) let find_iscsi_iso session_id = - let vdis = Client.VDI.get_all rpc session_id in - try - Some (List.find (fun vdi -> Client.VDI.get_name_label rpc session_id vdi = iscsi_vm_iso) vdis) - with _ -> None + let vdis = Client.VDI.get_all rpc session_id in + try + Some (List.find (fun vdi -> Client.VDI.get_name_label rpc session_id vdi = iscsi_vm_iso) vdis) + with _ -> None (** Create the VM with the iscsi iso attached *) let make_iscsi session_id pool network = - try - let iscsi_iso = match find_iscsi_iso session_id with - | Some vdi -> vdi - | None -> failwith "iSCSI VM iso not found" in - let template = List.hd (Client.VM.get_by_name_label rpc session_id iscsi_vm_template) in - let newvm = Client.VM.clone rpc session_id template "ISCSI target server" in - Client.VM.provision rpc session_id newvm; - let _ (* isovbd *) = Client.VBD.create rpc session_id newvm iscsi_iso "0" true `RO `CD false false [] "" [] in - let realpool = List.hd (Client.Pool.get_all rpc session_id) in - let defaultsr = Client.Pool.get_default_SR rpc session_id realpool in + try + let iscsi_iso = match find_iscsi_iso session_id with + | Some vdi -> vdi + | None -> failwith "iSCSI VM iso not found" in + let template = List.hd (Client.VM.get_by_name_label rpc session_id iscsi_vm_template) in + let newvm = Client.VM.clone rpc session_id template "ISCSI target server" in + Client.VM.provision rpc session_id newvm; + let _ (* isovbd *) = Client.VBD.create rpc session_id newvm iscsi_iso "0" true `RO `CD false false [] "" [] in + let realpool = List.hd (Client.Pool.get_all rpc session_id) in + let defaultsr = Client.Pool.get_default_SR rpc session_id realpool in - for i = 0 to pool.iscsi_luns - 1 do - let storage_vdi_label = Printf.sprintf "SCSI VDI %d" i in - let storage_vdi = Client.VDI.create rpc session_id storage_vdi_label "" defaultsr sr_disk_size `user false false 
[oc_key,pool.key] [] [] [] in - let userdevice = Printf.sprintf "%d" (i+1) in - ignore(Client.VBD.create rpc session_id newvm storage_vdi userdevice false `RW `Disk false false [] "" []) - done; + for i = 0 to pool.iscsi_luns - 1 do + let storage_vdi_label = Printf.sprintf "SCSI VDI %d" i in + let storage_vdi = Client.VDI.create rpc session_id storage_vdi_label "" defaultsr sr_disk_size `user false false [oc_key,pool.key] [] [] [] in + let userdevice = Printf.sprintf "%d" (i+1) in + ignore(Client.VBD.create rpc session_id newvm storage_vdi userdevice false `RW `Disk false false [] "" []) + done; - Client.VM.set_PV_bootloader rpc session_id newvm "pygrub"; - Client.VM.set_PV_args rpc session_id newvm (Printf.sprintf "net_ip=%s net_mask=255.255.255.0" (make_iscsi_ip pool)); - Client.VM.set_HVM_boot_policy rpc session_id newvm ""; - let (_ : API.ref_VIF) = Client.VIF.create rpc session_id "0" network newvm "" 1500L [oc_key,pool.key] "" [] `network_default [] [] in - Client.VM.add_to_other_config rpc session_id newvm oc_key pool.key; - let localhost_uuid = Xapi_inventory.lookup "INSTALLATION_UUID" in - Client.VM.start_on rpc session_id newvm (Client.Host.get_by_uuid rpc session_id localhost_uuid) false false; - Some newvm - with e -> - debug "Caught exception with iscsi VM: %s" (Printexc.to_string e); - None + Client.VM.set_PV_bootloader rpc session_id newvm "pygrub"; + Client.VM.set_PV_args rpc session_id newvm (Printf.sprintf "net_ip=%s net_mask=255.255.255.0" (make_iscsi_ip pool)); + Client.VM.set_HVM_boot_policy rpc session_id newvm ""; + let (_ : API.ref_VIF) = Client.VIF.create rpc session_id "0" network newvm "" 1500L [oc_key,pool.key] "" [] `network_default [] [] in + Client.VM.add_to_other_config rpc session_id newvm oc_key pool.key; + let localhost_uuid = Xapi_inventory.lookup "INSTALLATION_UUID" in + Client.VM.start_on rpc session_id newvm (Client.Host.get_by_uuid rpc session_id localhost_uuid) false false; + Some newvm + with e -> + debug "Caught exception 
with iscsi VM: %s" (Printexc.to_string e); + None let make ~rpc ~session_id ~pool ~vm ~networks ~storages = - let wintemplate = List.hd (Client.VM.get_by_name_label ~rpc ~session_id ~label:innertemplate) in - let host_refs = Array.of_list (Client.Host.get_all ~rpc ~session_id) in - for i = 0 to (Array.length storages) - 1 do - Printf.printf "Creating %d VMs in SR %d\n%!" vm.num i; - for j = 0 to vm.num - 1 do - let newname = Printf.sprintf "VM %d%s%s" j (if Array.length storages > 1 then Printf.sprintf " in SR %d" i else "") (if vm.tag <> "" then " - "^vm.tag else "") in - let clone = Client.VM.clone ~rpc ~session_id ~vm:wintemplate ~new_name:newname in - Client.VM.add_tags ~rpc ~session_id ~self:clone ~value:vm.tag; - Client.VM.remove_from_other_config ~rpc ~session_id ~self:clone ~key:"disks"; - for userdevice = 0 to vm.vbds - 1 do - Printf.printf " - creating VDI %d for VM %d on SR %d of %d\n%!" userdevice j i (Array.length storages); - let newdisk = Client.VDI.create ~rpc ~session_id ~name_label:"Guest disk" ~name_description:"" ~sR:storages.(i) - ~virtual_size:4194304L ~_type:`user ~sharable:false ~read_only:false ~xenstore_data:[] ~other_config:[] - ~sm_config:[] ~tags:[] in - ignore(Client.VBD.create ~rpc ~session_id ~vM:clone ~vDI:newdisk ~userdevice:(string_of_int userdevice) ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~other_config:[]) - done; - Client.VM.provision ~rpc ~session_id ~vm:clone; - for device = 0 to (min vm.vifs (Array.length networks)) - 1 do - ignore(Client.VIF.create ~rpc ~session_id ~device:(string_of_int device) ~network:networks.(device) ~vM:clone ~mAC:"" - ~mTU:1500L ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]) - done; - Client.VM.set_memory_static_min ~rpc ~session_id ~self:clone ~value:16777216L; - Client.VM.set_memory_dynamic_min ~rpc ~session_id ~self:clone 
~value:16777216L; - Client.VM.set_memory_dynamic_max ~rpc ~session_id ~self:clone ~value:16777216L; - Client.VM.set_memory_static_max ~rpc ~session_id ~self:clone ~value:16777216L; - if vm.has_affinity && Array.length storages = Array.length host_refs - then Client.VM.set_affinity ~rpc ~session_id ~self:clone ~value:host_refs.(i); - done - done + let wintemplate = List.hd (Client.VM.get_by_name_label ~rpc ~session_id ~label:innertemplate) in + let host_refs = Array.of_list (Client.Host.get_all ~rpc ~session_id) in + for i = 0 to (Array.length storages) - 1 do + Printf.printf "Creating %d VMs in SR %d\n%!" vm.num i; + for j = 0 to vm.num - 1 do + let newname = Printf.sprintf "VM %d%s%s" j (if Array.length storages > 1 then Printf.sprintf " in SR %d" i else "") (if vm.tag <> "" then " - "^vm.tag else "") in + let clone = Client.VM.clone ~rpc ~session_id ~vm:wintemplate ~new_name:newname in + Client.VM.add_tags ~rpc ~session_id ~self:clone ~value:vm.tag; + Client.VM.remove_from_other_config ~rpc ~session_id ~self:clone ~key:"disks"; + for userdevice = 0 to vm.vbds - 1 do + Printf.printf " - creating VDI %d for VM %d on SR %d of %d\n%!" 
userdevice j i (Array.length storages); + let newdisk = Client.VDI.create ~rpc ~session_id ~name_label:"Guest disk" ~name_description:"" ~sR:storages.(i) + ~virtual_size:4194304L ~_type:`user ~sharable:false ~read_only:false ~xenstore_data:[] ~other_config:[] + ~sm_config:[] ~tags:[] in + ignore(Client.VBD.create ~rpc ~session_id ~vM:clone ~vDI:newdisk ~userdevice:(string_of_int userdevice) ~bootable:false + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~other_config:[]) + done; + Client.VM.provision ~rpc ~session_id ~vm:clone; + for device = 0 to (min vm.vifs (Array.length networks)) - 1 do + ignore(Client.VIF.create ~rpc ~session_id ~device:(string_of_int device) ~network:networks.(device) ~vM:clone ~mAC:"" + ~mTU:1500L ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]) + done; + Client.VM.set_memory_static_min ~rpc ~session_id ~self:clone ~value:16777216L; + Client.VM.set_memory_dynamic_min ~rpc ~session_id ~self:clone ~value:16777216L; + Client.VM.set_memory_dynamic_max ~rpc ~session_id ~self:clone ~value:16777216L; + Client.VM.set_memory_static_max ~rpc ~session_id ~self:clone ~value:16777216L; + if vm.has_affinity && Array.length storages = Array.length host_refs + then Client.VM.set_affinity ~rpc ~session_id ~self:clone ~value:host_refs.(i); + done + done diff --git a/ocaml/perftest/createpool.ml b/ocaml/perftest/createpool.ml index daa2198f1f7..6c2818b1432 100644 --- a/ocaml/perftest/createpool.ml +++ b/ocaml/perftest/createpool.ml @@ -36,11 +36,11 @@ let get_network_num_from_interface pool i = running out of space. In particular the hybrid thin/thick behaviour of LVHD won't work so we can't use LVM over iSCSI or FC. It's probably easiest to include a whitelist here rather than find an EQL array to test this. 
*) -let sr_is_suitable session_id sr = +let sr_is_suitable session_id sr = let t = String.lowercase (Client.SR.get_type rpc session_id sr) in t = "ext" || t = "nfs" -let default_sr_must_be_suitable session_id = +let default_sr_must_be_suitable session_id = let realpool = List.hd (Client.Pool.get_all rpc session_id) in let defaultsr = Client.Pool.get_default_SR rpc session_id realpool in if not (sr_is_suitable session_id defaultsr) @@ -51,15 +51,15 @@ let initialise session_id template pool = let networks_to_create = pool.interfaces_per_host - pool.bonds in debug "Creating %d networks..." networks_to_create; let networks = Array.init networks_to_create (fun i -> - Client.Network.create ~rpc ~session_id ~name_label:(Printf.sprintf "perftestnet%d" i) ~name_description:"" ~mTU:1500L ~other_config:[oc_key,pool.key] ~tags:[]) + Client.Network.create ~rpc ~session_id ~name_label:(Printf.sprintf "perftestnet%d" i) ~name_description:"" ~mTU:1500L ~other_config:[oc_key,pool.key] ~tags:[]) in (* Set up the template - create the VIFs *) debug "Setting up the template. 
Creating VIFs on networks"; - let interfaces = Array.init pool.interfaces_per_host (fun i -> - let net = networks.(get_network_num_from_interface pool i) in - Client.VIF.create ~rpc ~session_id ~device:(string_of_int i) ~network:net ~vM:template ~mAC:"" ~mTU:1500L - ~other_config:[oc_key,pool.key] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]) + let interfaces = Array.init pool.interfaces_per_host (fun i -> + let net = networks.(get_network_num_from_interface pool i) in + Client.VIF.create ~rpc ~session_id ~device:(string_of_int i) ~network:net ~vM:template ~mAC:"" ~mTU:1500L + ~other_config:[oc_key,pool.key] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]) in (* Create a disk for local storage *) @@ -67,11 +67,11 @@ let initialise session_id template pool = default_sr_must_be_suitable session_id; let realpool = List.hd (Client.Pool.get_all rpc session_id) in let defaultsr = Client.Pool.get_default_SR rpc session_id realpool in - let newdisk = Client.VDI.create ~rpc ~session_id ~name_label:"SDK storage" ~name_description:"" ~sR:defaultsr - ~virtual_size:sr_disk_size ~_type:`user ~sharable:false ~read_only:false ~xenstore_data:[] ~other_config:[oc_key,pool.key] - ~sm_config:[] ~tags:[] in + let newdisk = Client.VDI.create ~rpc ~session_id ~name_label:"SDK storage" ~name_description:"" ~sR:defaultsr + ~virtual_size:sr_disk_size ~_type:`user ~sharable:false ~read_only:false ~xenstore_data:[] ~other_config:[oc_key,pool.key] + ~sm_config:[] ~tags:[] in let (_: API.ref_VBD) = Client.VBD.create ~rpc ~session_id ~vM:template ~vDI:newdisk ~userdevice:sr_disk_device ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~other_config:[oc_key,pool.key] + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] 
~other_config:[oc_key,pool.key] in debug "Setting up xenstore keys"; @@ -79,8 +79,8 @@ let initialise session_id template pool = Client.VM.set_PV_args ~rpc ~session_id ~self:template ~value:"noninteractive"; (* no password setting step *) Client.VM.add_to_xenstore_data ~rpc ~session_id ~self:template ~key:"vm-data/provision/interfaces/0/admin" ~value:"true"; Array.iteri (fun i net -> - Client.VM.add_to_xenstore_data ~rpc ~session_id ~self:template ~key:(Printf.sprintf "vm-data/provision/interfaces/%d/mode" i) ~value:"static"; - Client.VM.add_to_xenstore_data ~rpc ~session_id ~self:template ~key:(Printf.sprintf "vm-data/provision/interfaces/%d/netmask" i) ~value:"255.255.255.0") interfaces; + Client.VM.add_to_xenstore_data ~rpc ~session_id ~self:template ~key:(Printf.sprintf "vm-data/provision/interfaces/%d/mode" i) ~value:"static"; + Client.VM.add_to_xenstore_data ~rpc ~session_id ~self:template ~key:(Printf.sprintf "vm-data/provision/interfaces/%d/netmask" i) ~value:"255.255.255.0") interfaces; debug "Setting memory to 128 Megs"; Client.VM.set_memory_static_min rpc session_id template (Int64.mul 128L 1048576L); @@ -102,12 +102,12 @@ let reset_template session_id template = (* Destroy template's sr disk *) let vbds = Client.VM.get_VBDs rpc session_id template in List.iter (fun vbd -> - if List.mem_assoc oc_key (Client.VBD.get_other_config rpc session_id vbd) then begin - let vdi = Client.VBD.get_VDI rpc session_id vbd in - assert (List.mem_assoc oc_key (Client.VDI.get_other_config rpc session_id vdi)); - Client.VDI.destroy rpc session_id vdi; - try Client.VBD.destroy rpc session_id vbd with _ -> (); - end) vbds; + if List.mem_assoc oc_key (Client.VBD.get_other_config rpc session_id vbd) then begin + let vdi = Client.VBD.get_VDI rpc session_id vbd in + assert (List.mem_assoc oc_key (Client.VDI.get_other_config rpc session_id vdi)); + Client.VDI.destroy rpc session_id vdi; + try Client.VBD.destroy rpc session_id vbd with _ -> (); + end) vbds; (* Remove xenstore keys 
*) Client.VM.set_xenstore_data rpc session_id template []; @@ -119,349 +119,349 @@ let uninitialise session_id template key = debug "Shutting down and uninstalling any VMs"; let vms = Client.VM.get_all rpc session_id in List.iter (fun vm -> - let is_a_template = Client.VM.get_is_a_template rpc session_id vm in - let is_control_domain = Client.VM.get_is_control_domain rpc session_id vm in - let is_managed = try (List.assoc oc_key (Client.VM.get_other_config rpc session_id vm)) = key with _ -> false in - let running = Client.VM.get_power_state rpc session_id vm = `Running in - if not is_a_template && not is_control_domain && is_managed then begin - if running then Client.VM.hard_shutdown rpc session_id vm; - let vbds = Client.VM.get_VBDs rpc session_id vm in - let vdis = List.map (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in - List.iter (fun vdi -> try Client.VDI.destroy rpc session_id vdi with _ -> ()) vdis; - List.iter (fun vbd -> try Client.VBD.destroy rpc session_id vbd with _ -> ()) vbds; - List.iter (fun vif -> try Client.VIF.destroy rpc session_id vif with _ -> ()) (Client.VM.get_VIFs rpc session_id vm); - Client.VM.destroy rpc session_id vm - end) vms; + let is_a_template = Client.VM.get_is_a_template rpc session_id vm in + let is_control_domain = Client.VM.get_is_control_domain rpc session_id vm in + let is_managed = try (List.assoc oc_key (Client.VM.get_other_config rpc session_id vm)) = key with _ -> false in + let running = Client.VM.get_power_state rpc session_id vm = `Running in + if not is_a_template && not is_control_domain && is_managed then begin + if running then Client.VM.hard_shutdown rpc session_id vm; + let vbds = Client.VM.get_VBDs rpc session_id vm in + let vdis = List.map (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in + List.iter (fun vdi -> try Client.VDI.destroy rpc session_id vdi with _ -> ()) vdis; + List.iter (fun vbd -> try Client.VBD.destroy rpc session_id vbd with _ -> ()) vbds; + List.iter (fun vif -> try 
Client.VIF.destroy rpc session_id vif with _ -> ()) (Client.VM.get_VIFs rpc session_id vm); + Client.VM.destroy rpc session_id vm + end) vms; (* Destroy networks *) debug "Destroying networks"; let nets = Client.Network.get_all_records rpc session_id in - let mynets = List.filter (fun (_,r) -> - List.mem_assoc oc_key r.API.network_other_config && + let mynets = List.filter (fun (_,r) -> + List.mem_assoc oc_key r.API.network_other_config && List.assoc oc_key r.API.network_other_config = key) nets in List.iter (fun (net,_) -> Client.Network.destroy rpc session_id net) mynets; let nets = Client.Network.get_all_records rpc session_id in debug "Destroying any bridges"; let ic = Unix.open_process_in "ifconfig -a | grep \"^xapi\" | awk '{print $1}'" in - let netdevs = + let netdevs = let rec doline () = - try - let x = input_line ic in - x :: doline () + try + let x = input_line ic in + x :: doline () with _ -> [] in doline () in - List.iter (fun netdev -> - if not (List.exists (fun (_,net) -> net.API.network_bridge = netdev) nets) - then begin - ignore(Sys.command (Printf.sprintf "ifconfig %s down 2>/dev/null" netdev)); - ignore(Sys.command (Printf.sprintf "brctl delbr %s 2>/dev/null" netdev)) - end) netdevs - + List.iter (fun netdev -> + if not (List.exists (fun (_,net) -> net.API.network_bridge = netdev) nets) + then begin + ignore(Sys.command (Printf.sprintf "ifconfig %s down 2>/dev/null" netdev)); + ignore(Sys.command (Printf.sprintf "brctl delbr %s 2>/dev/null" netdev)) + end) netdevs + let destroy_sdk_pool session_id sdkname key = let template = List.hd (Client.VM.get_by_name_label rpc session_id sdkname) in uninitialise session_id template key -let describe_pool template_name pool_name key = +let describe_pool template_name pool_name key = let pool = Scenario.get pool_name in let pool = {pool with key=key} in - Printf.sprintf "Base template: %s" template_name :: (description_of_pool pool) + Printf.sprintf "Base template: %s" template_name :: (description_of_pool 
pool) let iscsi_vm_iso_must_exist session_id = - (* The iSCSI VM iso must exist *) - if CreateVM.find_iscsi_iso session_id = None - then failwith (Printf.sprintf "The iSCSI target VM iso could not be found (%s)" CreateVM.iscsi_vm_iso) + (* The iSCSI VM iso must exist *) + if CreateVM.find_iscsi_iso session_id = None + then failwith (Printf.sprintf "The iSCSI target VM iso could not be found (%s)" CreateVM.iscsi_vm_iso) let create_sdk_pool session_id sdkname pool_name key ipbase = - iscsi_vm_iso_must_exist session_id; - default_sr_must_be_suitable session_id; - let pool = List.find (fun p -> p.id = pool_name) pools in - let pool = {pool with key=key; ipbase=ipbase} in - - let template = - try List.hd (Client.VM.get_by_name_label rpc session_id sdkname) - with _ -> debug ~out:stderr "template '%s' not found" sdkname; exit 1 - in - let uuid = Client.VM.get_uuid rpc session_id template in - debug "Creating test pool '%s' using SDK template uuid=%s" pool.id uuid; - - (* Clear up any leftover state on the template *) - reset_template session_id template; - - let interfaces = initialise session_id template pool in - - Printf.printf "Creating iSCSI target VM serving %d LUNs\n%!" 
pool.iscsi_luns; - let (_ : API.ref_VM option) = CreateVM.make_iscsi session_id pool (Client.VIF.get_network rpc session_id interfaces.(2)) in - - debug "Creating %d SDK VMs" pool.hosts; - let hosts = Array.init pool.hosts ( - fun i -> - let n = i + 1 in - let vm = Client.VM.clone rpc session_id template (Printf.sprintf "perftestpool%d" n) in - Client.VM.provision rpc session_id vm; - Array.iteri (fun i _ -> - ignore(Client.VM.add_to_xenstore_data rpc session_id vm (Printf.sprintf "vm-data/provision/interfaces/%d/ip" i) - (Printf.sprintf "192.168.%d.%d" (i+pool.ipbase) n))) interfaces; - vm) - in - - debug "Setting memory on master to be 256 Megs"; - Client.VM.set_memory_static_max rpc session_id hosts.(0) (Int64.mul 256L 1048576L); - Client.VM.set_memory_static_min rpc session_id hosts.(0) (Int64.mul 256L 1048576L); - Client.VM.set_memory_dynamic_max rpc session_id hosts.(0) (Int64.mul 256L 1048576L); - Client.VM.set_memory_dynamic_min rpc session_id hosts.(0) (Int64.mul 256L 1048576L); - - Client.VM.add_to_other_config rpc session_id hosts.(0) master_of_pool pool.key; - Client.VM.add_to_other_config rpc session_id hosts.(0) management_ip (Printf.sprintf "192.168.%d.1" pool.ipbase); - - let localhost_uuid = Xapi_inventory.lookup "INSTALLATION_UUID" in - Array.iteri (fun i host -> debug "Starting VM %d" i; Client.VM.start_on rpc session_id host (Client.Host.get_by_uuid rpc session_id localhost_uuid) false false) hosts; - - ignore(Sys.command (Printf.sprintf "ifconfig %s 192.168.%d.200 up" (Client.Network.get_bridge rpc session_id (Client.VIF.get_network rpc session_id interfaces.(0))) pool.ipbase)); - - reset_template session_id template; - - debug "Guests are now booting..."; - let pingable = Array.make (Array.length hosts) false in - let firstboot = Array.make (Array.length hosts) false in - let string_of_status () = - String.implode - (Array.to_list - (Array.mapi (fun i ping -> - let boot = firstboot.(i) in match ping, boot with - | false, false -> '.' 
- | true, false -> 'P' - | true, true -> 'B' - | _, _ -> '?') pingable)) in - - let has_guest_booted i vm = - let ip = Printf.sprintf "192.168.%d.%d" pool.ipbase (i+1) in - let is_pingable () = - if pingable.(i) then true else begin - if Sys.command (Printf.sprintf "ping -W 1 -c 1 %s 2>/dev/null >/dev/null" ip) = 0 then begin - pingable.(i) <- true; - debug "Individual host status: %s" (string_of_status ()); - true - end else false - end in - let firstbooted () = - if firstboot.(i) then true else begin - let rpc = remoterpc ip in - try - let s = Client.Session.login_with_password rpc "root" "xensource" "1.1" "perftest" in - finally - (fun () -> - let host = List.hd (Client.Host.get_all rpc s) in (* only one host because it hasn't joined the pool yet *) - let other_config = Client.Host.get_other_config rpc s host in - let key = "firstboot-complete" in - (* Since these are 'fresh' hosts which have never booted, the key goes from missing -> present *) - if List.mem_assoc key other_config then begin - firstboot.(i) <- true; - debug "Individual host status: %s" (string_of_status ()); - true; - end else false - ) - (fun () -> Client.Session.logout rpc s) - with _ -> false - end in - is_pingable () && (firstbooted ()) in - - let wait_until_guests_have_booted () = - for i = 0 to Array.length pingable - 1 do - pingable.(i) <- false; - done; - let finished = ref false in - while not !finished do - finished := List.fold_left (&&) true (Array.to_list (Array.mapi has_guest_booted hosts)); - Unix.sleep 20; - done in - - wait_until_guests_have_booted (); - debug "Guests have booted; issuing Pool.joins."; - - let host_uuids = Array.mapi (fun i vm -> - let n = i + 1 in - let rpc = remoterpc (Printf.sprintf "192.168.%d.%d" pool.ipbase n) in - let s = Client.Session.login_with_password rpc "root" "xensource" "1.1" "perftest" in - let h = List.hd (Client.Host.get_all rpc s) in - let u = Client.Host.get_uuid rpc s h in - debug "Setting name of host %d" n; - Client.Host.set_name_label 
rpc s h (Printf.sprintf "perftest host %d" i); - if i<>0 then begin - debug "Joining to pool"; - Client.Pool.join rpc s (Printf.sprintf "192.168.%d.1" pool.ipbase) "root" "xensource" - end; - u - ) hosts in - - let poolrpc = remoterpc (Printf.sprintf "192.168.%d.1" pool.ipbase) in - let poolses = Client.Session.login_with_password poolrpc "root" "xensource" "1.1" "perftest" in - - let vpool=List.hd (Client.Pool.get_all poolrpc poolses) in - Client.Pool.add_to_other_config poolrpc poolses vpool "scenario" pool_name; - - debug "Waiting for all hosts to become live and enabled"; - let hosts = Array.of_list (Client.Host.get_all poolrpc poolses) in - let live = Array.make (Array.length hosts) false in - let enabled = Array.make (Array.length hosts) false in - let string_of_status () = - String.implode - (Array.to_list - (Array.mapi (fun i live -> - let enabled = enabled.(i) in match live, enabled with - | false, false -> '.' - | true, false -> 'L' - | true, true -> 'E' - | _, _ -> '?') live)) in - - let has_host_booted rpc session_id i host = - try - if live.(i) && enabled.(i) then true else begin - let metrics = Client.Host.get_metrics rpc session_id host in - let live' = Client.Host_metrics.get_live rpc session_id metrics in - let enabled' = Client.Host.get_enabled rpc session_id host in - if live.(i) <> live' || enabled.(i) <> enabled' then debug "Individual host status: %s" (string_of_status ()); - live.(i) <- live'; - enabled.(i) <- enabled'; - live' && enabled' - end - with _ -> false in - let finished = ref false in - while not !finished do - Unix.sleep 20; - finished := List.fold_left (&&) true (Array.to_list (Array.mapi (has_host_booted poolrpc poolses) hosts)); - done; - debug "All hosts are ready."; - - let mypool = List.hd (Client.Pool.get_all poolrpc poolses) in - let master = Client.Pool.get_master poolrpc poolses mypool in - - let iscsi_vm_ip = CreateVM.make_iscsi_ip pool in - - let xml = try - Client.SR.probe ~rpc:poolrpc ~session_id:poolses ~host:master 
- ~device_config:["target",iscsi_vm_ip] - ~sm_config:[] - ~_type:"lvmoiscsi" - with Api_errors.Server_error("SR_BACKEND_FAILURE_96",[a;b;xml]) -> - xml - in - let iqns = parse_sr_probe_for_iqn xml in - if iqns = [] then failwith "iSCSI target VM failed again - maybe you should fix it this time?"; - let iqn = List.hd iqns in - let xml = try - Client.SR.probe ~rpc:poolrpc ~session_id:poolses ~host:master - ~device_config:["target",iscsi_vm_ip; "targetIQN",iqn] - ~sm_config:[] - ~_type:"lvmoiscsi" - with Api_errors.Server_error("SR_BACKEND_FAILURE_107",[a;b;xml]) -> - xml - in - - (* Create an SR for each LUN found *) - Printf.printf "Creating LVMoISCSI SRs (one for each of %d LUNs)\n%!" pool.iscsi_luns; - let scsiids = Array.of_list (parse_sr_probe_for_scsiids xml) in - if Array.length scsiids <> pool.iscsi_luns then failwith (Printf.sprintf "We created %d VDIs on the iSCSI target VM but found %d LUNs" pool.iscsi_luns (Array.length scsiids)); - let lun_srs = Array.init pool.iscsi_luns - (fun i -> - Printf.printf " - Creating shared LVMoISCSI SR %d...\n%!" 
i; - let name_label = Printf.sprintf "LVMoISCSI-%d" i in - Client.SR.create poolrpc poolses master ["target",iscsi_vm_ip; "targetIQN",iqn; "SCSIid",scsiids.(i)] - 0L name_label "" "lvmoiscsi" "" true []) - in - - let local_srs = Array.mapi (fun i host_uuid -> - let h = Client.Host.get_by_uuid poolrpc poolses host_uuid in - let name_label = Printf.sprintf "Local LVM on host %d" i in - Client.SR.create poolrpc poolses h ["device","/dev/"^sr_disk_device] 0L name_label "" "lvm" "" false []) host_uuids - in - - let pifs = Client.PIF.get_all poolrpc poolses in - - let bondednets = Array.init pool.bonds (fun i -> - Client.Network.create poolrpc poolses (Printf.sprintf "Network associated with bond%d" i) "" 1500L [] []) - in - - let unused_nets = ref (List.setify (List.map (fun pif -> Client.PIF.get_network poolrpc poolses pif) pifs)) in - - (* Reconfigure the master's networking last as this will be the most destructive *) - let master_uuid = Client.Host.get_uuid poolrpc poolses master in - let slave_uuids = List.filter (fun x -> x <> master_uuid) (Array.to_list host_uuids) in - let host_uuids = Array.of_list (slave_uuids @ [ master_uuid ]) in - - let (_ : API.ref_Bond array array) = Array.mapi (fun i host_uuid -> - let host_ref = Client.Host.get_by_uuid poolrpc poolses host_uuid in - let pifs = List.filter (fun pif -> Client.PIF.get_host poolrpc poolses pif = host_ref) pifs in - Array.init pool.bonds (fun bnum -> - let device = Printf.sprintf "eth%d" (bnum*2) in - let device2 = Printf.sprintf "eth%d" (bnum*2 + 1) in - let master = List.find (fun pif -> Client.PIF.get_device poolrpc poolses pif = device) pifs in - let pifs = List.filter (fun pif -> let d = Client.PIF.get_device poolrpc poolses pif in d=device || d=device2) pifs in - let nets = List.map (fun pif -> Client.PIF.get_network poolrpc poolses pif) pifs in - unused_nets := List.filter (fun net -> not (List.mem net nets)) !unused_nets; - let mac = Client.PIF.get_MAC poolrpc poolses master in - let bond = 
Client.Bond.create poolrpc poolses bondednets.(bnum) pifs mac `balanceslb [] in - let bondpif = Client.Bond.get_master poolrpc poolses bond in - Client.PIF.reconfigure_ip poolrpc poolses bondpif `Static (Client.PIF.get_IP poolrpc poolses master) "255.255.255.0" "" ""; - if Client.PIF.get_management poolrpc poolses master then begin - (try Client.Host.management_reconfigure poolrpc poolses bondpif; - with _ -> ()); - debug "Reconfigured management interface to be on the bond."; - (* In case we've lost our network connection *) - wait_until_guests_have_booted (); - end; - bond - ) - ) host_uuids in - debug "Waiting for all guests to be pingable again."; - wait_until_guests_have_booted (); - debug "Successfully pinged all virtual hosts."; - (* We'll use the Windows XP SP3 template to create the VMs required *) - - let nets_for_vms = !unused_nets @ (Array.to_list bondednets) in - - debug "Nets for VMs: %s" (String.concat "," (List.map (fun net -> Client.Network.get_name_label poolrpc poolses net) nets_for_vms)); - - let networks = Array.of_list nets_for_vms in - - Printf.printf "Creating VMs (%s)\n%!" 
(if pool.use_shared_storage then "on shared storage" else "on local storage"); - let storages = if pool.use_shared_storage then lun_srs else local_srs in - List.iter (fun vm -> CreateVM.make ~rpc:poolrpc ~session_id:poolses ~networks ~storages ~pool ~vm) pool.vms + iscsi_vm_iso_must_exist session_id; + default_sr_must_be_suitable session_id; + let pool = List.find (fun p -> p.id = pool_name) pools in + let pool = {pool with key=key; ipbase=ipbase} in + + let template = + try List.hd (Client.VM.get_by_name_label rpc session_id sdkname) + with _ -> debug ~out:stderr "template '%s' not found" sdkname; exit 1 + in + let uuid = Client.VM.get_uuid rpc session_id template in + debug "Creating test pool '%s' using SDK template uuid=%s" pool.id uuid; + + (* Clear up any leftover state on the template *) + reset_template session_id template; + + let interfaces = initialise session_id template pool in + + Printf.printf "Creating iSCSI target VM serving %d LUNs\n%!" pool.iscsi_luns; + let (_ : API.ref_VM option) = CreateVM.make_iscsi session_id pool (Client.VIF.get_network rpc session_id interfaces.(2)) in + + debug "Creating %d SDK VMs" pool.hosts; + let hosts = Array.init pool.hosts ( + fun i -> + let n = i + 1 in + let vm = Client.VM.clone rpc session_id template (Printf.sprintf "perftestpool%d" n) in + Client.VM.provision rpc session_id vm; + Array.iteri (fun i _ -> + ignore(Client.VM.add_to_xenstore_data rpc session_id vm (Printf.sprintf "vm-data/provision/interfaces/%d/ip" i) + (Printf.sprintf "192.168.%d.%d" (i+pool.ipbase) n))) interfaces; + vm) + in + + debug "Setting memory on master to be 256 Megs"; + Client.VM.set_memory_static_max rpc session_id hosts.(0) (Int64.mul 256L 1048576L); + Client.VM.set_memory_static_min rpc session_id hosts.(0) (Int64.mul 256L 1048576L); + Client.VM.set_memory_dynamic_max rpc session_id hosts.(0) (Int64.mul 256L 1048576L); + Client.VM.set_memory_dynamic_min rpc session_id hosts.(0) (Int64.mul 256L 1048576L); + + 
Client.VM.add_to_other_config rpc session_id hosts.(0) master_of_pool pool.key; + Client.VM.add_to_other_config rpc session_id hosts.(0) management_ip (Printf.sprintf "192.168.%d.1" pool.ipbase); + + let localhost_uuid = Xapi_inventory.lookup "INSTALLATION_UUID" in + Array.iteri (fun i host -> debug "Starting VM %d" i; Client.VM.start_on rpc session_id host (Client.Host.get_by_uuid rpc session_id localhost_uuid) false false) hosts; + + ignore(Sys.command (Printf.sprintf "ifconfig %s 192.168.%d.200 up" (Client.Network.get_bridge rpc session_id (Client.VIF.get_network rpc session_id interfaces.(0))) pool.ipbase)); + + reset_template session_id template; + + debug "Guests are now booting..."; + let pingable = Array.make (Array.length hosts) false in + let firstboot = Array.make (Array.length hosts) false in + let string_of_status () = + String.implode + (Array.to_list + (Array.mapi (fun i ping -> + let boot = firstboot.(i) in match ping, boot with + | false, false -> '.' + | true, false -> 'P' + | true, true -> 'B' + | _, _ -> '?') pingable)) in + + let has_guest_booted i vm = + let ip = Printf.sprintf "192.168.%d.%d" pool.ipbase (i+1) in + let is_pingable () = + if pingable.(i) then true else begin + if Sys.command (Printf.sprintf "ping -W 1 -c 1 %s 2>/dev/null >/dev/null" ip) = 0 then begin + pingable.(i) <- true; + debug "Individual host status: %s" (string_of_status ()); + true + end else false + end in + let firstbooted () = + if firstboot.(i) then true else begin + let rpc = remoterpc ip in + try + let s = Client.Session.login_with_password rpc "root" "xensource" "1.1" "perftest" in + finally + (fun () -> + let host = List.hd (Client.Host.get_all rpc s) in (* only one host because it hasn't joined the pool yet *) + let other_config = Client.Host.get_other_config rpc s host in + let key = "firstboot-complete" in + (* Since these are 'fresh' hosts which have never booted, the key goes from missing -> present *) + if List.mem_assoc key other_config then begin + 
firstboot.(i) <- true; + debug "Individual host status: %s" (string_of_status ()); + true; + end else false + ) + (fun () -> Client.Session.logout rpc s) + with _ -> false + end in + is_pingable () && (firstbooted ()) in + + let wait_until_guests_have_booted () = + for i = 0 to Array.length pingable - 1 do + pingable.(i) <- false; + done; + let finished = ref false in + while not !finished do + finished := List.fold_left (&&) true (Array.to_list (Array.mapi has_guest_booted hosts)); + Unix.sleep 20; + done in + + wait_until_guests_have_booted (); + debug "Guests have booted; issuing Pool.joins."; + + let host_uuids = Array.mapi (fun i vm -> + let n = i + 1 in + let rpc = remoterpc (Printf.sprintf "192.168.%d.%d" pool.ipbase n) in + let s = Client.Session.login_with_password rpc "root" "xensource" "1.1" "perftest" in + let h = List.hd (Client.Host.get_all rpc s) in + let u = Client.Host.get_uuid rpc s h in + debug "Setting name of host %d" n; + Client.Host.set_name_label rpc s h (Printf.sprintf "perftest host %d" i); + if i<>0 then begin + debug "Joining to pool"; + Client.Pool.join rpc s (Printf.sprintf "192.168.%d.1" pool.ipbase) "root" "xensource" + end; + u + ) hosts in + + let poolrpc = remoterpc (Printf.sprintf "192.168.%d.1" pool.ipbase) in + let poolses = Client.Session.login_with_password poolrpc "root" "xensource" "1.1" "perftest" in + + let vpool=List.hd (Client.Pool.get_all poolrpc poolses) in + Client.Pool.add_to_other_config poolrpc poolses vpool "scenario" pool_name; + + debug "Waiting for all hosts to become live and enabled"; + let hosts = Array.of_list (Client.Host.get_all poolrpc poolses) in + let live = Array.make (Array.length hosts) false in + let enabled = Array.make (Array.length hosts) false in + let string_of_status () = + String.implode + (Array.to_list + (Array.mapi (fun i live -> + let enabled = enabled.(i) in match live, enabled with + | false, false -> '.' 
+ | true, false -> 'L' + | true, true -> 'E' + | _, _ -> '?') live)) in + + let has_host_booted rpc session_id i host = + try + if live.(i) && enabled.(i) then true else begin + let metrics = Client.Host.get_metrics rpc session_id host in + let live' = Client.Host_metrics.get_live rpc session_id metrics in + let enabled' = Client.Host.get_enabled rpc session_id host in + if live.(i) <> live' || enabled.(i) <> enabled' then debug "Individual host status: %s" (string_of_status ()); + live.(i) <- live'; + enabled.(i) <- enabled'; + live' && enabled' + end + with _ -> false in + let finished = ref false in + while not !finished do + Unix.sleep 20; + finished := List.fold_left (&&) true (Array.to_list (Array.mapi (has_host_booted poolrpc poolses) hosts)); + done; + debug "All hosts are ready."; + + let mypool = List.hd (Client.Pool.get_all poolrpc poolses) in + let master = Client.Pool.get_master poolrpc poolses mypool in + + let iscsi_vm_ip = CreateVM.make_iscsi_ip pool in + + let xml = try + Client.SR.probe ~rpc:poolrpc ~session_id:poolses ~host:master + ~device_config:["target",iscsi_vm_ip] + ~sm_config:[] + ~_type:"lvmoiscsi" + with Api_errors.Server_error("SR_BACKEND_FAILURE_96",[a;b;xml]) -> + xml + in + let iqns = parse_sr_probe_for_iqn xml in + if iqns = [] then failwith "iSCSI target VM failed again - maybe you should fix it this time?"; + let iqn = List.hd iqns in + let xml = try + Client.SR.probe ~rpc:poolrpc ~session_id:poolses ~host:master + ~device_config:["target",iscsi_vm_ip; "targetIQN",iqn] + ~sm_config:[] + ~_type:"lvmoiscsi" + with Api_errors.Server_error("SR_BACKEND_FAILURE_107",[a;b;xml]) -> + xml + in + + (* Create an SR for each LUN found *) + Printf.printf "Creating LVMoISCSI SRs (one for each of %d LUNs)\n%!" 
pool.iscsi_luns; + let scsiids = Array.of_list (parse_sr_probe_for_scsiids xml) in + if Array.length scsiids <> pool.iscsi_luns then failwith (Printf.sprintf "We created %d VDIs on the iSCSI target VM but found %d LUNs" pool.iscsi_luns (Array.length scsiids)); + let lun_srs = Array.init pool.iscsi_luns + (fun i -> + Printf.printf " - Creating shared LVMoISCSI SR %d...\n%!" i; + let name_label = Printf.sprintf "LVMoISCSI-%d" i in + Client.SR.create poolrpc poolses master ["target",iscsi_vm_ip; "targetIQN",iqn; "SCSIid",scsiids.(i)] + 0L name_label "" "lvmoiscsi" "" true []) + in + + let local_srs = Array.mapi (fun i host_uuid -> + let h = Client.Host.get_by_uuid poolrpc poolses host_uuid in + let name_label = Printf.sprintf "Local LVM on host %d" i in + Client.SR.create poolrpc poolses h ["device","/dev/"^sr_disk_device] 0L name_label "" "lvm" "" false []) host_uuids + in + + let pifs = Client.PIF.get_all poolrpc poolses in + + let bondednets = Array.init pool.bonds (fun i -> + Client.Network.create poolrpc poolses (Printf.sprintf "Network associated with bond%d" i) "" 1500L [] []) + in + + let unused_nets = ref (List.setify (List.map (fun pif -> Client.PIF.get_network poolrpc poolses pif) pifs)) in + + (* Reconfigure the master's networking last as this will be the most destructive *) + let master_uuid = Client.Host.get_uuid poolrpc poolses master in + let slave_uuids = List.filter (fun x -> x <> master_uuid) (Array.to_list host_uuids) in + let host_uuids = Array.of_list (slave_uuids @ [ master_uuid ]) in + + let (_ : API.ref_Bond array array) = Array.mapi (fun i host_uuid -> + let host_ref = Client.Host.get_by_uuid poolrpc poolses host_uuid in + let pifs = List.filter (fun pif -> Client.PIF.get_host poolrpc poolses pif = host_ref) pifs in + Array.init pool.bonds (fun bnum -> + let device = Printf.sprintf "eth%d" (bnum*2) in + let device2 = Printf.sprintf "eth%d" (bnum*2 + 1) in + let master = List.find (fun pif -> Client.PIF.get_device poolrpc poolses pif = 
device) pifs in + let pifs = List.filter (fun pif -> let d = Client.PIF.get_device poolrpc poolses pif in d=device || d=device2) pifs in + let nets = List.map (fun pif -> Client.PIF.get_network poolrpc poolses pif) pifs in + unused_nets := List.filter (fun net -> not (List.mem net nets)) !unused_nets; + let mac = Client.PIF.get_MAC poolrpc poolses master in + let bond = Client.Bond.create poolrpc poolses bondednets.(bnum) pifs mac `balanceslb [] in + let bondpif = Client.Bond.get_master poolrpc poolses bond in + Client.PIF.reconfigure_ip poolrpc poolses bondpif `Static (Client.PIF.get_IP poolrpc poolses master) "255.255.255.0" "" ""; + if Client.PIF.get_management poolrpc poolses master then begin + (try Client.Host.management_reconfigure poolrpc poolses bondpif; + with _ -> ()); + debug "Reconfigured management interface to be on the bond."; + (* In case we've lost our network connection *) + wait_until_guests_have_booted (); + end; + bond + ) + ) host_uuids in + debug "Waiting for all guests to be pingable again."; + wait_until_guests_have_booted (); + debug "Successfully pinged all virtual hosts."; + (* We'll use the Windows XP SP3 template to create the VMs required *) + + let nets_for_vms = !unused_nets @ (Array.to_list bondednets) in + + debug "Nets for VMs: %s" (String.concat "," (List.map (fun net -> Client.Network.get_name_label poolrpc poolses net) nets_for_vms)); + + let networks = Array.of_list nets_for_vms in + + Printf.printf "Creating VMs (%s)\n%!" 
(if pool.use_shared_storage then "on shared storage" else "on local storage"); + let storages = if pool.use_shared_storage then lun_srs else local_srs in + List.iter (fun vm -> CreateVM.make ~rpc:poolrpc ~session_id:poolses ~networks ~storages ~pool ~vm) pool.vms let create_pool session_id sdkname pool_name key ipbase = - iscsi_vm_iso_must_exist session_id; - default_sr_must_be_suitable session_id; - let pool = Scenario.get pool_name in - let pool = { pool with key=key } in - if pool.Scenario.hosts <> 1 - then begin - debug ~out:stderr "At the moment, multiple host pool is supported only for SDK pool"; - exit 1 - end; - let host = List.hd (Client.Host.get_all ~rpc ~session_id) in - - (* 1/ forget the local lvm storages *) - List.iter (fun lvm_sr -> - List.iter - (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) - (Client.SR.get_PBDs ~rpc ~session_id ~self:lvm_sr); - Client.SR.forget ~rpc ~session_id ~sr:lvm_sr) - (Client.SR.get_by_name_label ~rpc ~session_id ~label:"Local storage"); - - (* 2/ create an default ext storage *) - let storages = - match Client.SR.get_by_name_label ~rpc ~session_id ~label:"Local vhd" with - | [] -> - [| Client.SR.create ~rpc ~session_id ~_type:"ext" ~name_label:"Local vhd" ~name_description:"" ~device_config:["device","/dev/sda3"] - ~host ~physical_size:Scenario.sr_disk_size ~shared:true ~sm_config:[] ~content_type:"" |] - | l -> Array.of_list l - in - let pool_ref = List.hd (Client.Pool.get_all ~rpc ~session_id) in - Client.Pool.set_default_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); - Client.Pool.set_crash_dump_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); - Client.Pool.set_suspend_image_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); - - (* 3/ building the VMs *) - let networks = Array.of_list (Client.Network.get_all ~rpc ~session_id) in - List.iter (fun vm -> CreateVM.make ~rpc ~session_id ~networks ~storages ~pool ~vm) pool.vms + iscsi_vm_iso_must_exist session_id; + default_sr_must_be_suitable 
session_id; + let pool = Scenario.get pool_name in + let pool = { pool with key=key } in + if pool.Scenario.hosts <> 1 + then begin + debug ~out:stderr "At the moment, multiple host pool is supported only for SDK pool"; + exit 1 + end; + let host = List.hd (Client.Host.get_all ~rpc ~session_id) in + + (* 1/ forget the local lvm storages *) + List.iter (fun lvm_sr -> + List.iter + (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) + (Client.SR.get_PBDs ~rpc ~session_id ~self:lvm_sr); + Client.SR.forget ~rpc ~session_id ~sr:lvm_sr) + (Client.SR.get_by_name_label ~rpc ~session_id ~label:"Local storage"); + + (* 2/ create an default ext storage *) + let storages = + match Client.SR.get_by_name_label ~rpc ~session_id ~label:"Local vhd" with + | [] -> + [| Client.SR.create ~rpc ~session_id ~_type:"ext" ~name_label:"Local vhd" ~name_description:"" ~device_config:["device","/dev/sda3"] + ~host ~physical_size:Scenario.sr_disk_size ~shared:true ~sm_config:[] ~content_type:"" |] + | l -> Array.of_list l + in + let pool_ref = List.hd (Client.Pool.get_all ~rpc ~session_id) in + Client.Pool.set_default_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); + Client.Pool.set_crash_dump_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); + Client.Pool.set_suspend_image_SR ~rpc ~session_id ~self:pool_ref ~value:storages.(0); + + (* 3/ building the VMs *) + let networks = Array.of_list (Client.Network.get_all ~rpc ~session_id) in + List.iter (fun vm -> CreateVM.make ~rpc ~session_id ~networks ~storages ~pool ~vm) pool.vms diff --git a/ocaml/perftest/cumulative_time.ml b/ocaml/perftest/cumulative_time.ml index 105cb27ebb2..e22a7597d1d 100644 --- a/ocaml/perftest/cumulative_time.ml +++ b/ocaml/perftest/cumulative_time.ml @@ -17,19 +17,19 @@ open Listext open Perfdebug open Graphutil -let _ = +let _ = let inputs = ref [] in let format = ref `X11 in let separate_graphs = ref false in let graphic_filename = ref "" in Arg.parse [ - "-format", Arg.Symbol ([ "eps"; "gif"; "x11" 
], - (function - | "eps" -> format := `Eps - | "gif" -> format := `Gif - | "x11" -> format := `X11 - | _ -> failwith "huh ?")), - " Set output format (default: X11)"; + "-format", Arg.Symbol ([ "eps"; "gif"; "x11" ], + (function + | "eps" -> format := `Eps + | "gif" -> format := `Gif + | "x11" -> format := `X11 + | _ -> failwith "huh ?")), + " Set output format (default: X11)"; "-output", Arg.Set_string graphic_filename, " Set default output file (for non-X11 modes)"; "-separate", Arg.Set separate_graphs, " Plot each data series on separate axes"; ] @@ -42,50 +42,50 @@ let _ = let output_files = List.map (fun _ -> Filename.temp_file "cumulative" "dat") inputs in let all = List.combine inputs output_files in - finally + finally (fun () -> let max_readings = ref 0 in List.iter - (fun ((info,points), output_file) -> - let (_: string) = get_result info in - let num_points = List.length points in + (fun ((info,points), output_file) -> + let (_: string) = get_result info in + let num_points = List.length points in - max_readings := max num_points !max_readings; - - Unixext.with_file output_file [ Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_CREAT ] 0o644 - (fun fd -> - let points_array = Array.of_list (List.rev points) in - let cumulative = ref 0. in - for i=0 to num_points-1 do - cumulative := points_array.(i) +. !cumulative; - Unixext.really_write_string fd (Printf.sprintf "%d %f %f\n" (i+1) !cumulative points_array.(i)) - done - ) - ) all; + max_readings := max num_points !max_readings; + + Unixext.with_file output_file [ Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_CREAT ] 0o644 + (fun fd -> + let points_array = Array.of_list (List.rev points) in + let cumulative = ref 0. in + for i=0 to num_points-1 do + cumulative := points_array.(i) +. 
!cumulative; + Unixext.really_write_string fd (Printf.sprintf "%d %f %f\n" (i+1) !cumulative points_array.(i)) + done + ) + ) all; (* Plot a line for (a) elapsed time and (b) this particular duration *) let ls = List.flatten (List.mapi - (fun i ((info,floats), output) -> - let graph_one_label = Printf.sprintf "Cumulative time, SR %d (left axis)" (i+1) in - let graph_two_label = Printf.sprintf "Time per VM, SR %d (right axis)" (i+1) in - [{ Gnuplot.filename = output; title = graph_one_label; graphname = get_result info; field=2; yaxis=1; scale=1./.3600.; style="lines" }; - { Gnuplot.filename = output; title = graph_two_label; graphname = get_result info; field=3; yaxis=2; scale=1.; style="lines" }] - ) all) in + (fun i ((info,floats), output) -> + let graph_one_label = Printf.sprintf "Cumulative time, SR %d (left axis)" (i+1) in + let graph_two_label = Printf.sprintf "Time per VM, SR %d (right axis)" (i+1) in + [{ Gnuplot.filename = output; title = graph_one_label; graphname = get_result info; field=2; yaxis=1; scale=1./.3600.; style="lines" }; + { Gnuplot.filename = output; title = graph_two_label; graphname = get_result info; field=3; yaxis=2; scale=1.; style="lines" }] + ) all) in List.iter (fun result -> - let g = { Gnuplot.xlabel = Printf.sprintf "Number of %s" (string_of_result result); - ylabel = "Elapsed time (h)"; - y2label = Some "Duration (s)"; - lines = List.filter (fun l -> l.Gnuplot.graphname = result) ls; - log_x_axis = false; - xrange = Some(0., float_of_int !max_readings); - normal_probability_y_axis = None; - } in - let output = match !format with - | `Eps -> Gnuplot.Ps (Printf.sprintf "%s-%s.eps" !graphic_filename result) - | `Gif -> Gnuplot.Gif (Printf.sprintf "%s-%s.gif" !graphic_filename result) - | `X11 -> Gnuplot.X11 in - ignore (Gnuplot.render g output) - ) (get_result_types inputs) + let g = { Gnuplot.xlabel = Printf.sprintf "Number of %s" (string_of_result result); + ylabel = "Elapsed time (h)"; + y2label = Some "Duration (s)"; + lines = 
List.filter (fun l -> l.Gnuplot.graphname = result) ls; + log_x_axis = false; + xrange = Some(0., float_of_int !max_readings); + normal_probability_y_axis = None; + } in + let output = match !format with + | `Eps -> Gnuplot.Ps (Printf.sprintf "%s-%s.eps" !graphic_filename result) + | `Gif -> Gnuplot.Gif (Printf.sprintf "%s-%s.gif" !graphic_filename result) + | `X11 -> Gnuplot.X11 in + ignore (Gnuplot.render g output) + ) (get_result_types inputs) ) (fun () -> List.iter (fun f -> Unixext.unlink_safe f) output_files) diff --git a/ocaml/perftest/gnuplot.ml b/ocaml/perftest/gnuplot.ml index 7183bcc6f5e..610716e7705 100644 --- a/ocaml/perftest/gnuplot.ml +++ b/ocaml/perftest/gnuplot.ml @@ -35,32 +35,32 @@ type t = { lines: line list; } -type output = +type output = | Ps of string | Gif of string | X11 -let make_normal_probability_tics tics = - Printf.sprintf "set ytics (%s)" - (String.concat ", " (List.map (fun tic -> Printf.sprintf "\"%.2f\" invnorm(%f)" tic tic) tics)) +let make_normal_probability_tics tics = + Printf.sprintf "set ytics (%s)" + (String.concat ", " (List.map (fun tic -> Printf.sprintf "\"%.2f\" invnorm(%f)" tic tic) tics)) -let make_log_tics tics = - Printf.sprintf "set xtics (%s)" - (String.concat ", " (List.map (fun tic -> Printf.sprintf "\"%.2f\" %f" tic tic) tics)) +let make_log_tics tics = + Printf.sprintf "set xtics (%s)" + (String.concat ", " (List.map (fun tic -> Printf.sprintf "\"%.2f\" %f" tic tic) tics)) -let invnorm (x: t) (y: string) = +let invnorm (x: t) (y: string) = if x.normal_probability_y_axis = None then y else Printf.sprintf "invnorm(%s)" y -let render (x: t) output = +let render (x: t) output = let line (y: line) = let field = if x.normal_probability_y_axis = None then Printf.sprintf "($%d*%f)" y.field y.scale else Printf.sprintf "(invnorm($%d*%f))" y.field y.scale in Printf.sprintf "\"%s\" using 1:%s axis x1y%d title \"%s\" with %s" y.filename field y.yaxis y.title y.style in let config = [ - Printf.sprintf "set terminal %s" + 
Printf.sprintf "set terminal %s" (match output with | Ps _ -> "postscript eps enhanced color" | Gif _ -> "gif" @@ -74,26 +74,26 @@ let render (x: t) output = Printf.sprintf "set xlabel \"%s\"" x.xlabel; Printf.sprintf "set ylabel \"%s\"" x.ylabel; ] @ (match x.y2label with - | None -> [] - | Some label -> [ - Printf.sprintf "set y2label \"%s\"" label; - "set ytics nomirror"; - "set y2tics auto"; - "set y2range [0:]"; - ]) + | None -> [] + | Some label -> [ + Printf.sprintf "set y2label \"%s\"" label; + "set ytics nomirror"; + "set y2tics auto"; + "set y2range [0:]"; + ]) @ (match x.normal_probability_y_axis with - | Some (min, max) -> - [ make_normal_probability_tics [ 0.001; 0.01; 0.05; 0.1; 0.2; 0.3; 0.4; 0.5; 0.6; 0.7; 0.8; 0.9; 0.95; 0.99; 0.999 ]; - Printf.sprintf "set yrange [invnorm(%f):invnorm(%f)]" min max ] - | None -> []) + | Some (min, max) -> + [ make_normal_probability_tics [ 0.001; 0.01; 0.05; 0.1; 0.2; 0.3; 0.4; 0.5; 0.6; 0.7; 0.8; 0.9; 0.95; 0.99; 0.999 ]; + Printf.sprintf "set yrange [invnorm(%f):invnorm(%f)]" min max ] + | None -> []) @ (match x.log_x_axis with - | true -> - [ "set logscale x"; - "set grid"; - "set xtics (\"1\" 1, \"2\" 2, \"3\" 3, \"4\" 4, \"5\" 5, \"6\" 6, \"7\" 7, \"8\" 8, \"9\" 9, \"10\" 10, \"11\" 11, \"12\" 12, \"13\" 13, \"14\" 14, \"15\" 15, \"20\" 20, \"30\" 30)" ]; - | false -> - [] - ) + | true -> + [ "set logscale x"; + "set grid"; + "set xtics (\"1\" 1, \"2\" 2, \"3\" 3, \"4\" 4, \"5\" 5, \"6\" 6, \"7\" 7, \"8\" 8, \"9\" 9, \"10\" 10, \"11\" 11, \"12\" 12, \"13\" 13, \"14\" 14, \"15\" 15, \"20\" 20, \"30\" 30)" ]; + | false -> + [] + ) @ [ (if x.log_x_axis then "set logscale x" else ""); (match x.xrange with @@ -101,7 +101,7 @@ let render (x: t) output = | Some(min, max) -> Printf.sprintf "set xrange [%f:%f]" min max); Printf.sprintf "plot %s" (String.concat ", " (List.map line x.lines)) ] in - + let f = Filename.temp_file "gnuplot" "gnuplot" in Stdext.Unixext.write_string_to_file f (String.concat "\n" config); 
finally diff --git a/ocaml/perftest/graphutil.ml b/ocaml/perftest/graphutil.ml index 2adb7e0fe6a..9c8232b713d 100644 --- a/ocaml/perftest/graphutil.ml +++ b/ocaml/perftest/graphutil.ml @@ -22,43 +22,43 @@ type short_info = string * string * string type info = short_info * float list let merge_infos (infos:info list) = - let names = Listext.List.setify (List.map (fun ((file,result,subtest),_) -> (file,result,subtest)) infos) in - let floats ((file,result,subtest) as i)= i, List.flatten (List.map (fun ((f,r,s),fl) -> if file=f && result=r && subtest=s then fl else []) infos) in - let merge_infos = List.map floats names in - debug "Available data:"; - List.iter (fun ((f,r,s),fl) -> debug "\t* in file: %s \t%s \t%s \t-- %i points" f r s (List.length fl)) merge_infos; - merge_infos + let names = Listext.List.setify (List.map (fun ((file,result,subtest),_) -> (file,result,subtest)) infos) in + let floats ((file,result,subtest) as i)= i, List.flatten (List.map (fun ((f,r,s),fl) -> if file=f && result=r && subtest=s then fl else []) infos) in + let merge_infos = List.map floats names in + debug "Available data:"; + List.iter (fun ((f,r,s),fl) -> debug "\t* in file: %s \t%s \t%s \t-- %i points" f r s (List.length fl)) merge_infos; + merge_infos let clone_cnt = ref 0 let info_from_raw_result ?(separate=false) file result : info list = match result.rawresult with - | StartTest floats | ShutdownTest floats -> [ (file, result.resultname, result.subtest), floats] - | CloneTest floats -> - (* Pretend that we got the data from separate files, so they are considered as separate data series *) - let file = Printf.sprintf "%s-%d" file !clone_cnt in - (* Make the resultnames distinct to force the lines onto separate graphs *) - let resultname = if separate then (Printf.sprintf "%s-%d" result.resultname !clone_cnt) else result.resultname in - let subtest = result.subtest in - clone_cnt := !clone_cnt+1; - [ (file,resultname,subtest), floats ] - | _ -> [] + | StartTest floats | 
ShutdownTest floats -> [ (file, result.resultname, result.subtest), floats] + | CloneTest floats -> + (* Pretend that we got the data from separate files, so they are considered as separate data series *) + let file = Printf.sprintf "%s-%d" file !clone_cnt in + (* Make the resultnames distinct to force the lines onto separate graphs *) + let resultname = if separate then (Printf.sprintf "%s-%d" result.resultname !clone_cnt) else result.resultname in + let subtest = result.subtest in + clone_cnt := !clone_cnt+1; + [ (file,resultname,subtest), floats ] + | _ -> [] -let floats_from_file fname = +let floats_from_file fname = let floats = ref [] in Unixext.readfile_line (fun line -> floats := float_of_string (String.strip String.isspace line) :: !floats) fname; !floats let get_info ?(separate=false) files : info list = - let aux f = - match Testtypes.from_string (Unixext.string_of_file f) with - | None -> [ (f, "", ""), floats_from_file f] - | Some results -> List.flatten (List.map (info_from_raw_result ~separate f) results) - in - merge_infos (List.flatten (List.map aux files)) + let aux f = + match Testtypes.from_string (Unixext.string_of_file f) with + | None -> [ (f, "", ""), floats_from_file f] + | Some results -> List.flatten (List.map (info_from_raw_result ~separate f) results) + in + merge_infos (List.flatten (List.map aux files)) let short_info_to_string ((file,result,subtest) : short_info) = - Printf.sprintf "%s.%s.%s" result subtest file + Printf.sprintf "%s.%s.%s" result subtest file let short_info_to_title ((_,_,subtest) : short_info) = subtest @@ -68,28 +68,28 @@ let get_result_types (all_info:info list) = Listext.List.setify (List.map (fun ( (_,result,_),_) -> result) all_info) let replace_assoc r n l = - if List.mem_assoc r l - then (r,n) :: (List.remove_assoc r l) - else (r,n) :: l + if List.mem_assoc r l + then (r,n) :: (List.remove_assoc r l) + else (r,n) :: l let get_op op extremum (infos:info list) = - let mem : (string * float) list ref = ref [] in 
- let aux ((_,result,_),floats) = - if List.mem_assoc result !mem - then mem := (result, List.fold_left op (List.assoc result !mem) floats) :: (List.remove_assoc result !mem) - else mem := (result, List.fold_left op extremum floats) :: !mem - in - List.iter aux infos; - !mem + let mem : (string * float) list ref = ref [] in + let aux ((_,result,_),floats) = + if List.mem_assoc result !mem + then mem := (result, List.fold_left op (List.assoc result !mem) floats) :: (List.remove_assoc result !mem) + else mem := (result, List.fold_left op extremum floats) :: !mem + in + List.iter aux infos; + !mem let get_min = get_op min max_float let get_max = get_op max min_float let string_of_result = function - | "startall" -> "sequential VM.start" - | "stopall" -> "sequential VM.stop" - | "parallel_startall" -> "parallel VM.start" - | "parallel_stopall" -> "parallel VM.stop" - | "clone" -> "parallel VM.clone" - | s when (String.startswith "clone-" s) -> "parallel VM.clone" - | _ -> "???" + | "startall" -> "sequential VM.start" + | "stopall" -> "sequential VM.stop" + | "parallel_startall" -> "parallel VM.start" + | "parallel_stopall" -> "parallel VM.stop" + | "clone" -> "parallel VM.clone" + | s when (String.startswith "clone-" s) -> "parallel VM.clone" + | _ -> "???" diff --git a/ocaml/perftest/histogram.ml b/ocaml/perftest/histogram.ml index 9e68e2f0e26..bc243236205 100644 --- a/ocaml/perftest/histogram.ml +++ b/ocaml/perftest/histogram.ml @@ -16,7 +16,7 @@ open Perfdebug open Statistics open Graphutil -let _ = +let _ = let sigma = ref 0.1 in let inputs = ref [] in let format = ref `X11 in @@ -27,13 +27,13 @@ let _ = let min_percentile = ref 1. in let max_percentile = ref 95. 
in Arg.parse [ - "-format", Arg.Symbol ([ "eps"; "gif"; "x11" ], - (function - | "eps" -> format := `Eps - | "gif" -> format := `Gif - | "x11" -> format := `X11 - | _ -> failwith "huh ?")), - " Set output format (default: X11)"; + "-format", Arg.Symbol ([ "eps"; "gif"; "x11" ], + (function + | "eps" -> format := `Eps + | "gif" -> format := `Gif + | "x11" -> format := `X11 + | _ -> failwith "huh ?")), + " Set output format (default: X11)"; "-output", Arg.Set_string graphic_filename, " Set default output file (for non-X11 modes)"; "-sigma", Arg.Set_float sigma, Printf.sprintf " Set sigma for the gaussian (default %f)" !sigma; "-integrate", Arg.Set integrate, Printf.sprintf " Integrate the probability density function (default: %b)" !integrate; @@ -52,82 +52,82 @@ let _ = let output_files = List.map (fun _ -> Filename.temp_file "histogram" "dat") inputs in let all = List.combine inputs output_files in - Stdext.Pervasiveext.finally + Stdext.Pervasiveext.finally (fun () -> (* Write some summary statistics on stderr *) - List.iter - (fun (info, points) -> - debug ~out:stderr "%s has lognormal mean %f +/- %f" (short_info_to_string info) (LogNormal.mean points) (LogNormal.sigma points); - ) inputs; + List.iter + (fun (info, points) -> + debug ~out:stderr "%s has lognormal mean %f +/- %f" (short_info_to_string info) (LogNormal.mean points) (LogNormal.sigma points); + ) inputs; let min_point = get_min inputs in let max_point = get_max inputs in (* To make sure that each added gaussian really adds 1 unit of area, we extend the bins - 3 sigmas to the left and right *) - let min_point = List.map (fun (r,n) -> r, n -. 3. *. sigma) min_point + 3 sigmas to the left and right *) + let min_point = List.map (fun (r,n) -> r, n -. 3. *. sigma) min_point and max_point = List.map (fun (r,n) -> r, n +. 3. *. 
sigma) max_point in (* Attempt to zoom the graph in on the 10% to 90% region *) - let xrange_min = ref max_point + let xrange_min = ref max_point and xrange_max = ref min_point in List.iter - (fun ((info,points), output_file) -> - let result = get_result info in - let x = Hist.make (List.assoc result min_point) (List.assoc result max_point) 1000 in + (fun ((info,points), output_file) -> + let result = get_result info in + let x = Hist.make (List.assoc result min_point) (List.assoc result max_point) 1000 in + + (* -- Apply the Weierstrass transform -- *) + + (* NB Each call to Hist.convolve (i.e. each VM timing measured) increases the total area under the curve by 1. + By dividing through by 'n' (where 'n' is the total number of VMs i.e. points) we make the total area under + the curve equal 1 so we can consider the result as a probability density function. In particular this means + we can directly compare curves for 10, 100, 1000 measurements without worrying about scale factors and + also trade speed for estimation accuracy. *) + let num_points = float_of_int (List.length points) in - (* -- Apply the Weierstrass transform -- *) - - (* NB Each call to Hist.convolve (i.e. each VM timing measured) increases the total area under the curve by 1. - By dividing through by 'n' (where 'n' is the total number of VMs i.e. points) we make the total area under - the curve equal 1 so we can consider the result as a probability density function. In particular this means - we can directly compare curves for 10, 100, 1000 measurements without worrying about scale factors and - also trade speed for estimation accuracy. *) - let num_points = float_of_int (List.length points) in - - List.iter (fun y -> Hist.convolve x (fun z -> (gaussian y sigma z) /. num_points)) points; - (* Sanity-check: area under histogram should be almost 1.0 *) - let total_area = Hist.fold x (fun bin_start bin_end height acc -> (bin_end -. bin_start) *. height +. acc) 0. in - if abs_float (1. -. 
total_area) > 0.01 - then debug ~out:stderr "WARNING: area under histogram should be 1.0 but is %f" total_area; + List.iter (fun y -> Hist.convolve x (fun z -> (gaussian y sigma z) /. num_points)) points; + (* Sanity-check: area under histogram should be almost 1.0 *) + let total_area = Hist.fold x (fun bin_start bin_end height acc -> (bin_end -. bin_start) *. height +. acc) 0. in + if abs_float (1. -. total_area) > 0.01 + then debug ~out:stderr "WARNING: area under histogram should be 1.0 but is %f" total_area; - let cumulative = Hist.integrate x in - let t_10 = Hist.find_x cumulative 0.1 in - let t_80 = Hist.find_x cumulative 0.8 in - let t_90 = Hist.find_x cumulative 0.9 in - let t_95 = Hist.find_x cumulative 0.95 in - debug ~out:stderr "10th percentile: %f" t_10; - debug ~out:stderr "80th percentile: %f" t_80; - debug ~out:stderr "90th percentile: %f" t_90; - debug ~out:stderr "95th percentile: %f" t_95; - debug ~out:stderr "Clipping data between %.0f and %.0f percentiles" !min_percentile !max_percentile; - xrange_min := replace_assoc result (min (List.assoc result !xrange_min) (Hist.find_x cumulative (!min_percentile /. 100.))) !xrange_min; - xrange_max := replace_assoc result (max (List.assoc result !xrange_max) (Hist.find_x cumulative (!max_percentile /. 100.))) !xrange_max; + let cumulative = Hist.integrate x in + let t_10 = Hist.find_x cumulative 0.1 in + let t_80 = Hist.find_x cumulative 0.8 in + let t_90 = Hist.find_x cumulative 0.9 in + let t_95 = Hist.find_x cumulative 0.95 in + debug ~out:stderr "10th percentile: %f" t_10; + debug ~out:stderr "80th percentile: %f" t_80; + debug ~out:stderr "90th percentile: %f" t_90; + debug ~out:stderr "95th percentile: %f" t_95; + debug ~out:stderr "Clipping data between %.0f and %.0f percentiles" !min_percentile !max_percentile; + xrange_min := replace_assoc result (min (List.assoc result !xrange_min) (Hist.find_x cumulative (!min_percentile /. 
100.))) !xrange_min; + xrange_max := replace_assoc result (max (List.assoc result !xrange_max) (Hist.find_x cumulative (!max_percentile /. 100.))) !xrange_max; - let x = if !integrate then Hist.integrate x else x in - Stdext.Unixext.with_file output_file [ Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_CREAT ] 0o644 (Hist.to_gnuplot x) - ) all; + let x = if !integrate then Hist.integrate x else x in + Stdext.Unixext.with_file output_file [ Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_CREAT ] 0o644 (Hist.to_gnuplot x) + ) all; let ls = List.map (fun ((info,floats), output) -> { Gnuplot.filename = output; title = short_info_to_title info; graphname = get_result info; field = 2; yaxis=1; scale=1.; style="linespoints" }) all in - let ylabel = - if !integrate - then "Cumulative probability" - else "Estimate of the probability density function" in + let ylabel = + if !integrate + then "Cumulative probability" + else "Estimate of the probability density function" in List.iter (fun result -> - let g = { Gnuplot.xlabel = Printf.sprintf "Time for %s XenAPI calls to complete / seconds" (string_of_result result); - ylabel = ylabel; - y2label = None; - lines = List.filter (fun l -> l.Gnuplot.graphname = result) ls; - log_x_axis = !log_axis; - xrange = Some(List.assoc result !xrange_min, List.assoc result !xrange_max); - normal_probability_y_axis = if !normal then Some(!min_percentile /. 100., !max_percentile /. 100.) 
else None; - } in - let output = match !format with - | `Eps -> Gnuplot.Ps (Printf.sprintf "%s-%s.eps" !graphic_filename result) - | `Gif -> Gnuplot.Gif (Printf.sprintf "%s-%s.gif" !graphic_filename result) - | `X11 -> Gnuplot.X11 in - ignore (Gnuplot.render g output) - ) (get_result_types inputs) + let g = { Gnuplot.xlabel = Printf.sprintf "Time for %s XenAPI calls to complete / seconds" (string_of_result result); + ylabel = ylabel; + y2label = None; + lines = List.filter (fun l -> l.Gnuplot.graphname = result) ls; + log_x_axis = !log_axis; + xrange = Some(List.assoc result !xrange_min, List.assoc result !xrange_max); + normal_probability_y_axis = if !normal then Some(!min_percentile /. 100., !max_percentile /. 100.) else None; + } in + let output = match !format with + | `Eps -> Gnuplot.Ps (Printf.sprintf "%s-%s.eps" !graphic_filename result) + | `Gif -> Gnuplot.Gif (Printf.sprintf "%s-%s.gif" !graphic_filename result) + | `X11 -> Gnuplot.X11 in + ignore (Gnuplot.render g output) + ) (get_result_types inputs) ) (fun () -> List.iter Stdext.Unixext.unlink_safe output_files) diff --git a/ocaml/perftest/perfdebug.ml b/ocaml/perftest/perfdebug.ml index 8fcb74f541a..4511acb82c0 100644 --- a/ocaml/perftest/perfdebug.ml +++ b/ocaml/perftest/perfdebug.ml @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -let stdout_m = Mutex.create () +let stdout_m = Mutex.create () let debug ?(out=stdout) (fmt: ('a , unit, string, unit) format4) = Stdext.Threadext.Mutex.execute stdout_m (fun () -> - Printf.kprintf (fun s -> Printf.fprintf out "%s\n" s; flush stdout) fmt + Printf.kprintf (fun s -> Printf.fprintf out "%s\n" s; flush stdout) fmt ) diff --git a/ocaml/perftest/perftest.ml b/ocaml/perftest/perftest.ml index d39ae26ebfb..91f1c79dbfe 100644 --- a/ocaml/perftest/perftest.ml +++ b/ocaml/perftest/perftest.ml @@ -20,7 +20,7 @@ open Perfdebug let xenrtfname = ref "perftest-xenrt.log" -let marshall_xenrt pool metadata results = +let marshall_xenrt pool metadata results = let oc = open_out !xenrtfname in Printf.fprintf oc "\n"; Printf.fprintf oc "%s\n" (Scenario.xml_of_scenario pool); @@ -28,17 +28,17 @@ let marshall_xenrt pool metadata results = List.iter (fun (k,v) -> Printf.fprintf oc " %s%s\n" k v) metadata; Printf.fprintf oc " \n \n"; List.iter (fun r -> - Printf.fprintf oc " %f\n" r.resultname r.subtest r.xenrtresult) results; - Printf.fprintf oc " \n"; + Printf.fprintf oc " %f\n" r.resultname r.subtest r.xenrtresult) results; + Printf.fprintf oc " \n"; close_out oc let rawfname = ref "" let marshall_raw (raw_results:Testtypes.result list) = - if !rawfname <> "" then - let oc = open_out !rawfname in - Printf.fprintf oc "%s" (Testtypes.to_string raw_results); - close_out oc + if !rawfname <> "" then + let oc = open_out !rawfname in + Printf.fprintf oc "%s" (Testtypes.to_string raw_results); + close_out oc let marshall pool metadata results = marshall_raw results; @@ -63,15 +63,15 @@ let _ = let possible_modes = [ "initpool"; "destroypool"; "run"; "describe"; ] in Arg.parse (Arg.align - [ "-template", Arg.Set_string template_name, Printf.sprintf " Clone VMs from named base template (default is %s)" !template_name; - "-scenario", Arg.Set_string scenario, Printf.sprintf " Choose scenario (default is %s; possibilities are %s" !scenario (string_of_set (Scenario.get_all ())); 
- "-key", Arg.Set_string key, " Key name to identify the Pool instance"; - "-ipbase", Arg.Set_int ipbase, Printf.sprintf " Choose base IP address (default is %d for 192.168.%d.1)" !ipbase !ipbase; - "-xenrtoutput", Arg.Set_string xenrtfname, " Set output filename for xenrt (defaults to perftest-xenrt.log)"; - "-rawoutput", Arg.Set_string rawfname, " Set output filename for raw results (by default, do not output results)"; - "-runall", Arg.Set run_all, Printf.sprintf " Run tests %s (tests run by default are %s)" (string_of_set Tests.testnames) (string_of_set Tests.runtestnames); - "-iter", Arg.Set_int iter, Printf.sprintf " Number of iterations (default is %i)" !iter; - ]) (fun x -> if !mode = "" then mode := x else debug ~out:stderr "Ignoring unexpected argument: %s" x) + [ "-template", Arg.Set_string template_name, Printf.sprintf " Clone VMs from named base template (default is %s)" !template_name; + "-scenario", Arg.Set_string scenario, Printf.sprintf " Choose scenario (default is %s; possibilities are %s" !scenario (string_of_set (Scenario.get_all ())); + "-key", Arg.Set_string key, " Key name to identify the Pool instance"; + "-ipbase", Arg.Set_int ipbase, Printf.sprintf " Choose base IP address (default is %d for 192.168.%d.1)" !ipbase !ipbase; + "-xenrtoutput", Arg.Set_string xenrtfname, " Set output filename for xenrt (defaults to perftest-xenrt.log)"; + "-rawoutput", Arg.Set_string rawfname, " Set output filename for raw results (by default, do not output results)"; + "-runall", Arg.Set run_all, Printf.sprintf " Run tests %s (tests run by default are %s)" (string_of_set Tests.testnames) (string_of_set Tests.runtestnames); + "-iter", Arg.Set_int iter, Printf.sprintf " Number of iterations (default is %i)" !iter; + ]) (fun x -> if !mode = "" then mode := x else debug ~out:stderr "Ignoring unexpected argument: %s" x) (Printf.sprintf "Configure and run a simulated test\nUsage: %s -key %s" Sys.argv.(0) (string_of_set possible_modes)); if not(List.mem !mode 
possible_modes) then begin @@ -90,32 +90,32 @@ let _ = try match !mode with | "describe" -> - let lines = Createpool.describe_pool !template_name !scenario !key in - List.iter (fun x -> debug "* %s" x) lines + let lines = Createpool.describe_pool !template_name !scenario !key in + List.iter (fun x -> debug "* %s" x) lines | _ -> - let session = Client.Session.login_with_password rpc "root" "xenroot" "1.2" "perftest" in - let (_: API.string_to_string_map) = get_metadata rpc session in - Stdext.Pervasiveext.finally - (fun () -> - let pool = Scenario.get !scenario in - match !mode with - | "initpool" when pool.Scenario.sdk -> - Createpool.create_sdk_pool session !template_name !scenario !key !ipbase - | "initpool" -> - Createpool.create_pool session !template_name !scenario !key !ipbase - | "destroypool" when pool.Scenario.sdk -> - Createpool.destroy_sdk_pool session !template_name !key - | "destroypool" -> - debug ~out:stderr "Not yet implemented ... "; - | "run" -> - let newrpc = if pool.Scenario.sdk then remoterpc (Printf.sprintf "192.168.%d.1" !ipbase) else rpc in - let session = if pool.Scenario.sdk then Client.Session.login_with_password newrpc "root" "xensource" "1.2" "perftest" else session in - Stdext.Pervasiveext.finally - (fun () -> marshall pool (get_metadata newrpc session) (Tests.run newrpc session !key !run_all !iter)) - (fun () -> if pool.Scenario.sdk then Client.Session.logout newrpc session) - | _ -> failwith (Printf.sprintf "unknown mode: %s" !mode) - ) (fun () -> Client.Session.logout rpc session) + let session = Client.Session.login_with_password rpc "root" "xenroot" "1.2" "perftest" in + let (_: API.string_to_string_map) = get_metadata rpc session in + Stdext.Pervasiveext.finally + (fun () -> + let pool = Scenario.get !scenario in + match !mode with + | "initpool" when pool.Scenario.sdk -> + Createpool.create_sdk_pool session !template_name !scenario !key !ipbase + | "initpool" -> + Createpool.create_pool session !template_name !scenario !key 
!ipbase + | "destroypool" when pool.Scenario.sdk -> + Createpool.destroy_sdk_pool session !template_name !key + | "destroypool" -> + debug ~out:stderr "Not yet implemented ... "; + | "run" -> + let newrpc = if pool.Scenario.sdk then remoterpc (Printf.sprintf "192.168.%d.1" !ipbase) else rpc in + let session = if pool.Scenario.sdk then Client.Session.login_with_password newrpc "root" "xensource" "1.2" "perftest" else session in + Stdext.Pervasiveext.finally + (fun () -> marshall pool (get_metadata newrpc session) (Tests.run newrpc session !key !run_all !iter)) + (fun () -> if pool.Scenario.sdk then Client.Session.logout newrpc session) + | _ -> failwith (Printf.sprintf "unknown mode: %s" !mode) + ) (fun () -> Client.Session.logout rpc session) with Api_errors.Server_error(code, params) -> debug ~out:stderr "Caught API error: %s [ %s ]" code (String.concat "; " params) diff --git a/ocaml/perftest/perfutil.ml b/ocaml/perftest/perfutil.ml index 067d9acf27f..f0ce2c74bc0 100644 --- a/ocaml/perftest/perfutil.ml +++ b/ocaml/perftest/perfutil.ml @@ -17,19 +17,19 @@ open Client open Stdext.Xstringext let rpc xml = - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~srcstr:"perftest" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http:(xmlrpc ~version:"1.0" "/") xml + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~srcstr:"perftest" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http:(xmlrpc ~version:"1.0" "/") xml let remoterpc host xml = - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~srcstr:"perftest" ~dststr:"remotexapi" ~transport:(SSL(SSL.make (), host, 443)) ~http:(xmlrpc ~version:"1.1" "/") xml + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~srcstr:"perftest" ~dststr:"remotexapi" ~transport:(SSL(SSL.make (), host, 443)) ~http:(xmlrpc ~version:"1.1" "/") xml (* Rewrite the provisioning XML fragment to create all disks on a new, specified SR. 
This is cut-n-pasted from cli_util.ml *) -let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = +let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = let rewrite_xml xml newsrname = let rewrite_disk = function | Xml.Element("disk",params,[]) -> - Xml.Element("disk",List.map (fun (x,y) -> if x<>"sr" then (x,y) else ("sr",newsrname)) params,[]) + Xml.Element("disk",List.map (fun (x,y) -> if x<>"sr" then (x,y) else ("sr",newsrname)) params,[]) | x -> x in match xml with @@ -45,32 +45,32 @@ let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = Client.VM.add_to_other_config rpc session_id new_vm "disks" (Xml.to_string newdisks) end -let parse_sr_probe_for_iqn (xml: string) : string list = +let parse_sr_probe_for_iqn (xml: string) : string list = match Xml.parse_string xml with | Xml.Element("iscsi-target-iqns", _, children) -> - let parse_tgts = function - | Xml.Element("TGT", _, children) -> - let parse_kv = function - | Xml.Element(key, _, [ Xml.PCData v ]) -> - key, String.strip String.isspace v (* remove whitespace at both ends *) - | _ -> failwith "Malformed key/value pair" in - let all = List.map parse_kv children in - List.assoc "TargetIQN" all - | _ -> failwith "Malformed or missing " in - List.map parse_tgts children + let parse_tgts = function + | Xml.Element("TGT", _, children) -> + let parse_kv = function + | Xml.Element(key, _, [ Xml.PCData v ]) -> + key, String.strip String.isspace v (* remove whitespace at both ends *) + | _ -> failwith "Malformed key/value pair" in + let all = List.map parse_kv children in + List.assoc "TargetIQN" all + | _ -> failwith "Malformed or missing " in + List.map parse_tgts children | _ -> failwith "Missing element" let parse_sr_probe_for_scsiids (xml : string) : string list = match Xml.parse_string xml with | Xml.Element("iscsi-target", _, children) -> - let parse_luns = function - | Xml.Element("LUN", _, children) -> - let parse_kv = function - | Xml.Element(key, _, [ Xml.PCData v ]) -> - key, 
String.strip String.isspace v (* remove whitespace at both ends *) - | _ -> failwith "Malformed key/value pair" in - let all = List.map parse_kv children in - List.assoc "SCSIid" all - | _ -> failwith "Malformed or missing " in - List.map parse_luns children + let parse_luns = function + | Xml.Element("LUN", _, children) -> + let parse_kv = function + | Xml.Element(key, _, [ Xml.PCData v ]) -> + key, String.strip String.isspace v (* remove whitespace at both ends *) + | _ -> failwith "Malformed key/value pair" in + let all = List.map parse_kv children in + List.assoc "SCSIid" all + | _ -> failwith "Malformed or missing " in + List.map parse_luns children | _ -> failwith "Missing element" diff --git a/ocaml/perftest/scenario.ml b/ocaml/perftest/scenario.ml index 16aadb00145..79ca6e6e4ff 100644 --- a/ocaml/perftest/scenario.ml +++ b/ocaml/perftest/scenario.ml @@ -13,113 +13,113 @@ *) (* VMs *) type vm = - { vbds: int; - vifs: int; - tag: string; - num: int; - has_affinity: bool; - } + { vbds: int; + vifs: int; + tag: string; + num: int; + has_affinity: bool; + } let default_vm num = - { vbds = 1; - vifs = 4; - tag = "everything"; - num = num; - has_affinity = true - } + { vbds = 1; + vifs = 4; + tag = "everything"; + num = num; + has_affinity = true + } -let string_of_vm (x: vm) = - let vbds = Printf.sprintf "%s VBDs" (if x.vbds = 0 then "no" else string_of_int x.vbds) in - let vifs = Printf.sprintf "%s VIFs" (if x.vifs = 0 then "no" else string_of_int x.vifs) in - Printf.sprintf "%d VMs per host (tag %s) with %s, %s and affinity%s set" x.num x.tag vbds vifs (if x.has_affinity then "" else " not") +let string_of_vm (x: vm) = + let vbds = Printf.sprintf "%s VBDs" (if x.vbds = 0 then "no" else string_of_int x.vbds) in + let vifs = Printf.sprintf "%s VIFs" (if x.vifs = 0 then "no" else string_of_int x.vifs) in + Printf.sprintf "%d VMs per host (tag %s) with %s, %s and affinity%s set" x.num x.tag vbds vifs (if x.has_affinity then "" else " not") (* Pools *) type pool = - 
{ id: string; - sdk: bool; - hosts: int; - interfaces_per_host: int; - vms: vm list; - bonds: int; (* Needs to be less than or equal to interfaces_per_host / 2 *) - key: string; - ipbase: int; - iscsi_luns: int; - use_shared_storage: bool; - } + { id: string; + sdk: bool; + hosts: int; + interfaces_per_host: int; + vms: vm list; + bonds: int; (* Needs to be less than or equal to interfaces_per_host / 2 *) + key: string; + ipbase: int; + iscsi_luns: int; + use_shared_storage: bool; + } -let default = - { id="default"; - sdk=true; - hosts=1; - interfaces_per_host=6; - vms = - [ (default_vm 20); - { (default_vm 20) with vifs = 0; tag = "novifs" }; - { (default_vm 20) with vbds = 0; tag = "novbds" }; - { (default_vm 20) with vifs = 0; vbds = 0; tag = "novbdsnovifs" } - ]; - bonds=2; - key=""; - ipbase=0; - iscsi_luns=1; - use_shared_storage=false; +let default = + { id="default"; + sdk=true; + hosts=1; + interfaces_per_host=6; + vms = + [ (default_vm 20); + { (default_vm 20) with vifs = 0; tag = "novifs" }; + { (default_vm 20) with vbds = 0; tag = "novbds" }; + { (default_vm 20) with vifs = 0; vbds = 0; tag = "novbdsnovifs" } + ]; + bonds=2; + key=""; + ipbase=0; + iscsi_luns=1; + use_shared_storage=false; } let description_of_pool (x: pool) = - [ Printf.sprintf "Scenario: %s" x.id; - Printf.sprintf "Key: %s" x.key; - Printf.sprintf "%d hosts, each with %d network interfaces, %d of which are paired into %d bonds" - x.hosts x.interfaces_per_host (x.bonds * 2) x.bonds; - ] @ (List.map string_of_vm x.vms) + [ Printf.sprintf "Scenario: %s" x.id; + Printf.sprintf "Key: %s" x.key; + Printf.sprintf "%d hosts, each with %d network interfaces, %d of which are paired into %d bonds" + x.hosts x.interfaces_per_host (x.bonds * 2) x.bonds; + ] @ (List.map string_of_vm x.vms) let pools = - [ { default with id="pool0"; hosts=1 }; - { default with id="pool1"; hosts=4 }; - { default with id="pool2"; hosts=16}; - { default with id="pool3"; hosts=48}; - { default with - id="real1"; - 
hosts=1; - sdk=false; - bonds=0; - interfaces_per_host=0; - vms = [ { (default_vm 50) with tag = "" } ]}; - { default with - id="xendesktop"; - hosts=8; - vms = [ { (default_vm 50) with vbds = 0; vifs = 1; tag = "xendesktop"; has_affinity = false } ]}; - { default with - id="empty"; - hosts=1; (* we won't be starting VMs in the clone test so we don't need any hosts *) - vms = [ { (default_vm 1) with tag = "winxp-gold"; vifs = 1; vbds = 1 } ]; (* 1 per host *) - iscsi_luns=6; - use_shared_storage=true;} - ] + [ { default with id="pool0"; hosts=1 }; + { default with id="pool1"; hosts=4 }; + { default with id="pool2"; hosts=16}; + { default with id="pool3"; hosts=48}; + { default with + id="real1"; + hosts=1; + sdk=false; + bonds=0; + interfaces_per_host=0; + vms = [ { (default_vm 50) with tag = "" } ]}; + { default with + id="xendesktop"; + hosts=8; + vms = [ { (default_vm 50) with vbds = 0; vifs = 1; tag = "xendesktop"; has_affinity = false } ]}; + { default with + id="empty"; + hosts=1; (* we won't be starting VMs in the clone test so we don't need any hosts *) + vms = [ { (default_vm 1) with tag = "winxp-gold"; vifs = 1; vbds = 1 } ]; (* 1 per host *) + iscsi_luns=6; + use_shared_storage=true;} + ] let get_all () = List.map (fun p -> p.id) pools let get name = List.find (fun p -> p.id=name) pools let xml_of_scenario s = - String.concat "\n" + String.concat "\n" ([""; - (Printf.sprintf " %s" s.id); - (Printf.sprintf " %s" s.key); - (Printf.sprintf " %b" s.sdk); - (Printf.sprintf " %d" s.hosts); - (Printf.sprintf " %d" s.interfaces_per_host); - (Printf.sprintf " ")] - @ + (Printf.sprintf " %s" s.id); + (Printf.sprintf " %s" s.key); + (Printf.sprintf " %b" s.sdk); + (Printf.sprintf " %d" s.hosts); + (Printf.sprintf " %d" s.interfaces_per_host); + (Printf.sprintf " ")] + @ (List.map (fun vm -> Printf.sprintf " " vm.vbds vm.vifs vm.tag vm.num vm.has_affinity) s.vms) - @ + @ [" "; Printf.sprintf " %d" s.bonds; Printf.sprintf " %d" s.ipbase; "" ]) - + let oc_key = 
"perftestsetup" let sr_disk_size = Int64.mul 1048576L 2093049L (* limit of 1 vhd ~2 terabytes (megs, gigs, t.. what?) *) let sr_disk_device = "xvde" - + diff --git a/ocaml/perftest/statistics.ml b/ocaml/perftest/statistics.ml index 684d9f23a3d..043d74b902c 100644 --- a/ocaml/perftest/statistics.ml +++ b/ocaml/perftest/statistics.ml @@ -18,21 +18,21 @@ let pi = atan 1. *. 4. let gaussian mu sigma x = 1.0 /. (sigma *. sqrt (2.0 *. pi)) *. exp (-.(((x -. mu) ) ** 2.0 ) /. (2.0 *. sigma *. sigma)) module Hist = struct - type t = { + type t = { bin_start: float array; bin_end: float array; bin_count: float array; (* height of each bin: multiply by width to get area *) } (** Initialise a histogram covering values from [min:max] in 'n' uniform steps *) - let make (min: float) (max: float) (n: int) = + let make (min: float) (max: float) (n: int) = let range = max -. min in { bin_start = Array.init n (fun i -> range /. (float_of_int n) *. (float_of_int i) +. min); bin_end = Array.init n (fun i -> range /. (float_of_int n) *. (float_of_int (i + 1)) +. min); bin_count = Array.init n (fun _ -> 0.); } - let integrate (x: t) = + let integrate (x: t) = let n = Array.length x.bin_start in let result = make x.bin_start.(0) x.bin_end.(Array.length x.bin_end - 1) n in let area = ref 0. in @@ -45,7 +45,7 @@ module Hist = struct result (** Call 'f' with the start, end and height of each bin *) - let iter (x: t) (f: float -> float -> float -> unit) = + let iter (x: t) (f: float -> float -> float -> unit) = for i = 0 to Array.length x.bin_start - 1 do let width = x.bin_end.(i) -. x.bin_start.(i) in f x.bin_start.(i) x.bin_end.(i) (x.bin_count.(i) /. width) @@ -58,28 +58,28 @@ module Hist = struct !acc (** Write output to a file descriptor in gnuplot format *) - let to_gnuplot (x: t) (fd: Unix.file_descr) = + let to_gnuplot (x: t) (fd: Unix.file_descr) = iter x (fun bin_start bin_end height -> - let center = (bin_start +. bin_end) /. 
2.0 in - let line = Printf.sprintf "%f %f\n" center height in - let (_: int) = Unix.write fd line 0 (String.length line) in () - ) + let center = (bin_start +. bin_end) /. 2.0 in + let line = Printf.sprintf "%f %f\n" center height in + let (_: int) = Unix.write fd line 0 (String.length line) in () + ) exception Stop (** Add a sample point *) - let add (x: t) (y: float) = + let add (x: t) (y: float) = try for i = 0 to Array.length x.bin_start - 1 do - if x.bin_start.(i) <= y && (y <= x.bin_end.(i + 1)) then begin - x.bin_count.(i) <- x.bin_count.(i) +. 1.0; - raise Stop - end + if x.bin_start.(i) <= y && (y <= x.bin_end.(i + 1)) then begin + x.bin_count.(i) <- x.bin_count.(i) +. 1.0; + raise Stop + end done with Stop -> () (** Evaluate 'f' given the center of each bin and add the result to the bin count *) - let convolve (x: t) (f: float -> float) = + let convolve (x: t) (f: float -> float) = for i = 0 to Array.length x.bin_start - 1 do let center = (x.bin_start.(i) +. x.bin_end.(i)) /. 2.0 in let width = x.bin_end.(i) -. x.bin_start.(i) in @@ -88,11 +88,11 @@ module Hist = struct done (** Given a monotonically increasing histogram find the 'x' value given a 'y' *) - let find_x (x: t) (y: float) = + let find_x (x: t) (y: float) = match fold x (fun bin_start bin_end height acc -> match acc with - | Some x -> acc (* got it already *) - | None -> if height > y then Some ((bin_start +. bin_end) /. 2.) (* no interpolation *) else None - ) None with + | Some x -> acc (* got it already *) + | None -> if height > y then Some ((bin_start +. bin_end) /. 2.) (* no interpolation *) else None + ) None with | Some x -> x | None -> raise Not_found end @@ -100,15 +100,15 @@ end module Normal = struct let mean (points: float list) = List.fold_left (+.) 0. points /. (float_of_int (List.length points)) - let sigma (points: float list) = + let sigma (points: float list) = let sum_x = List.fold_left (+.) 0. points and sum_xx = List.fold_left (+.) 0. (List.map (fun x -> x *. 
x) points) in let n = float_of_int (List.length points) in sqrt (n *. sum_xx -. sum_x *. sum_x) /. n end - + module LogNormal = struct - let mean (points: float list) = + let mean (points: float list) = let points = List.map log points in let normal_sigma = Normal.sigma points in let normal_mean = Normal.mean points in diff --git a/ocaml/perftest/tests.ml b/ocaml/perftest/tests.ml index 9b010709672..2ccca33227d 100644 --- a/ocaml/perftest/tests.ml +++ b/ocaml/perftest/tests.ml @@ -15,7 +15,7 @@ open Client open Stdext -open Listext +open Listext open Threadext open Xstringext open Pervasiveext @@ -36,158 +36,158 @@ let time f = elapsed let subtest_string key tag = - if tag = "" - then key - else Printf.sprintf "%s (%s)" key tag + if tag = "" + then key + else Printf.sprintf "%s (%s)" key tag let startall rpc session_id test = - let vms = Client.VM.get_all_records rpc session_id in - let tags = List.map (fun (vm,vmr) -> vmr.API.vM_tags) vms in - let tags = List.setify (List.flatten tags) in - List.map - (fun tag -> - debug "Starting VMs with tag: %s" tag; - let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in - let vms = List.sort (fun (vm1,vmr1) (vm2,vmr2) -> compare vmr1.API.vM_affinity vmr2.API.vM_affinity) vms in - let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in - let times = List.map - (fun (vm,name_label,uuid) -> - debug "Starting VM uuid '%s' (%s)" uuid name_label; - let result = time (fun () -> Client.VM.start rpc session_id vm false false) in - debug "Elapsed time: %f" result; - result) - vms_names_uuids in - { - resultname=test.testname; - subtest=subtest_string test.key tag; - xenrtresult=(List.fold_left (+.) 
0.0 times); - rawresult=StartTest times - }) - tags + let vms = Client.VM.get_all_records rpc session_id in + let tags = List.map (fun (vm,vmr) -> vmr.API.vM_tags) vms in + let tags = List.setify (List.flatten tags) in + List.map + (fun tag -> + debug "Starting VMs with tag: %s" tag; + let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in + let vms = List.sort (fun (vm1,vmr1) (vm2,vmr2) -> compare vmr1.API.vM_affinity vmr2.API.vM_affinity) vms in + let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in + let times = List.map + (fun (vm,name_label,uuid) -> + debug "Starting VM uuid '%s' (%s)" uuid name_label; + let result = time (fun () -> Client.VM.start rpc session_id vm false false) in + debug "Elapsed time: %f" result; + result) + vms_names_uuids in + { + resultname=test.testname; + subtest=subtest_string test.key tag; + xenrtresult=(List.fold_left (+.) 0.0 times); + rawresult=StartTest times + }) + tags let parallel_with_vms async_op opname n vms rpc session_id test subtest_name = - (* Not starting in affinity order *) - let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in - - (* Manage a set of active tasks using the event system. This could be factored out into a more generic - service if/when necessary *) - - (* Start 'n' at a time *) - let active_tasks = ref [] in - let vms_to_start = ref vms_names_uuids in - let vm_to_start_time = Hashtbl.create 10 in - let tasks_to_vm = Hashtbl.create 10 in - let m = Mutex.create () in - let c = Condition.create () in - - let results = ref [] in - - (* Take a set of tasks which have finished, update structures and return true if there are no more active tasks - left. 
*) - let process_finished_tasks finished = - let to_delete = ref [] in - let finished = Mutex.execute m - (fun () -> - List.iter - (fun task -> - if List.mem task !active_tasks then begin - if not(Hashtbl.mem tasks_to_vm task) - then debug ~out:stderr "Ignoring completed task which doesn't correspond to a VM %s" opname - else begin - let uuid = Hashtbl.find tasks_to_vm task in - let started = Hashtbl.find vm_to_start_time uuid in - let time_taken = Unix.gettimeofday () -. started in - results := time_taken :: !results; - debug "%sing VM uuid '%s'" opname uuid; - debug "Elapsed time: %f" time_taken; - Hashtbl.remove vm_to_start_time uuid; - Hashtbl.remove tasks_to_vm task; - end; - active_tasks := List.filter (fun x -> x <> task) !active_tasks; - Condition.signal c; - to_delete := task :: !to_delete - end - ) finished; - !active_tasks = [] (* true if no active tasks left *) - ) in - List.iter (fun task -> Client.Task.destroy ~rpc ~session_id ~self:task) !to_delete; - finished in - - (* Run this in a thread body to create a thread which will process each task completion and then terminate when all the - tasks have finished. 
*) - let check_active_tasks () = - let classes = [ "task" ] in - finally - (fun () -> - let finished = ref false in - while not(!finished) do - Client.Event.register ~rpc ~session_id ~classes; - try - (* Need to check once after registering to avoid a race *) - let finished_tasks = - List.filter - (fun task -> Client.Task.get_status ~rpc ~session_id ~self:task <> `pending) - (Mutex.execute m (fun () -> !active_tasks)) in - finished := process_finished_tasks finished_tasks; - - while not(!finished) do - (* debug ~out:stderr "Polling for events (%d active tasks)" (Mutex.execute m (fun () -> List.length !active_tasks)); *) - let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in - let events = List.map Event_helper.record_of_event events in - let finished_tasks = - List.concat - (List.map - (function - | Event_helper.Task (t, Some t_rec) -> if t_rec.API.task_status <> `pending || t_rec.API.task_current_operations <> [] then [ t ] else [ ] - | Event_helper.Task (t, None) -> [ t ] - | _ -> []) events) in - finished := process_finished_tasks finished_tasks; - done - with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> - debug ~out:stderr "Caught EVENTS_LOST; reregistering"; - Client.Event.unregister ~rpc ~session_id ~classes - done) - (fun () -> Client.Event.unregister ~rpc ~session_id ~classes) in - - - let control_task = Client.Task.create rpc session_id ("Parallel VM "^opname^" test") "" in - active_tasks := [ control_task ]; - let thread = Thread.create check_active_tasks () in - - while !vms_to_start <> [] do - let start_one () = - let vm, name, uuid = List.hd !vms_to_start in - vms_to_start := List.tl !vms_to_start; - Mutex.execute m - (fun () -> - let task = async_op ~rpc ~session_id ~vm in - debug ~out:stderr "Issued VM %s for '%s'" opname uuid; - Hashtbl.add tasks_to_vm task uuid; - Hashtbl.add vm_to_start_time uuid (Unix.gettimeofday ()); - active_tasks := task :: !active_tasks; - ) in - (* Only start at most 'n' 
at once. Note that the active_task list includes a master control task *) - Mutex.execute m (fun () -> while List.length !active_tasks > n do Condition.wait c m done); - start_one () - done; - Client.Task.cancel ~rpc ~session_id ~task:control_task; - - debug ~out:stderr "Finished %sing VMs" opname; - Thread.join thread; - - {resultname=test.testname; subtest=subtest_name; xenrtresult=(List.fold_left (+.) 0.0 !results); rawresult=StartTest !results} + (* Not starting in affinity order *) + let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in + + (* Manage a set of active tasks using the event system. This could be factored out into a more generic + service if/when necessary *) + + (* Start 'n' at a time *) + let active_tasks = ref [] in + let vms_to_start = ref vms_names_uuids in + let vm_to_start_time = Hashtbl.create 10 in + let tasks_to_vm = Hashtbl.create 10 in + let m = Mutex.create () in + let c = Condition.create () in + + let results = ref [] in + + (* Take a set of tasks which have finished, update structures and return true if there are no more active tasks + left. *) + let process_finished_tasks finished = + let to_delete = ref [] in + let finished = Mutex.execute m + (fun () -> + List.iter + (fun task -> + if List.mem task !active_tasks then begin + if not(Hashtbl.mem tasks_to_vm task) + then debug ~out:stderr "Ignoring completed task which doesn't correspond to a VM %s" opname + else begin + let uuid = Hashtbl.find tasks_to_vm task in + let started = Hashtbl.find vm_to_start_time uuid in + let time_taken = Unix.gettimeofday () -. 
started in + results := time_taken :: !results; + debug "%sing VM uuid '%s'" opname uuid; + debug "Elapsed time: %f" time_taken; + Hashtbl.remove vm_to_start_time uuid; + Hashtbl.remove tasks_to_vm task; + end; + active_tasks := List.filter (fun x -> x <> task) !active_tasks; + Condition.signal c; + to_delete := task :: !to_delete + end + ) finished; + !active_tasks = [] (* true if no active tasks left *) + ) in + List.iter (fun task -> Client.Task.destroy ~rpc ~session_id ~self:task) !to_delete; + finished in + + (* Run this in a thread body to create a thread which will process each task completion and then terminate when all the + tasks have finished. *) + let check_active_tasks () = + let classes = [ "task" ] in + finally + (fun () -> + let finished = ref false in + while not(!finished) do + Client.Event.register ~rpc ~session_id ~classes; + try + (* Need to check once after registering to avoid a race *) + let finished_tasks = + List.filter + (fun task -> Client.Task.get_status ~rpc ~session_id ~self:task <> `pending) + (Mutex.execute m (fun () -> !active_tasks)) in + finished := process_finished_tasks finished_tasks; + + while not(!finished) do + (* debug ~out:stderr "Polling for events (%d active tasks)" (Mutex.execute m (fun () -> List.length !active_tasks)); *) + let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in + let events = List.map Event_helper.record_of_event events in + let finished_tasks = + List.concat + (List.map + (function + | Event_helper.Task (t, Some t_rec) -> if t_rec.API.task_status <> `pending || t_rec.API.task_current_operations <> [] then [ t ] else [ ] + | Event_helper.Task (t, None) -> [ t ] + | _ -> []) events) in + finished := process_finished_tasks finished_tasks; + done + with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> + debug ~out:stderr "Caught EVENTS_LOST; reregistering"; + Client.Event.unregister ~rpc ~session_id ~classes + done) + (fun () -> Client.Event.unregister ~rpc 
~session_id ~classes) in + + + let control_task = Client.Task.create rpc session_id ("Parallel VM "^opname^" test") "" in + active_tasks := [ control_task ]; + let thread = Thread.create check_active_tasks () in + + while !vms_to_start <> [] do + let start_one () = + let vm, name, uuid = List.hd !vms_to_start in + vms_to_start := List.tl !vms_to_start; + Mutex.execute m + (fun () -> + let task = async_op ~rpc ~session_id ~vm in + debug ~out:stderr "Issued VM %s for '%s'" opname uuid; + Hashtbl.add tasks_to_vm task uuid; + Hashtbl.add vm_to_start_time uuid (Unix.gettimeofday ()); + active_tasks := task :: !active_tasks; + ) in + (* Only start at most 'n' at once. Note that the active_task list includes a master control task *) + Mutex.execute m (fun () -> while List.length !active_tasks > n do Condition.wait c m done); + start_one () + done; + Client.Task.cancel ~rpc ~session_id ~task:control_task; + + debug ~out:stderr "Finished %sing VMs" opname; + Thread.join thread; + + {resultname=test.testname; subtest=subtest_name; xenrtresult=(List.fold_left (+.) 0.0 !results); rawresult=StartTest !results} (** @param n the maximum number of concurrent invocations of async_op *) -let parallel async_op opname n rpc session_id test = +let parallel async_op opname n rpc session_id test = let vms = Client.VM.get_all_records rpc session_id in let tags = List.map (fun (vm,vmr) -> vmr.API.vM_tags) vms in let tags = List.setify (List.flatten tags) in Printf.printf "Tags are [%s]\n%!" (String.concat "; " tags); List.map (fun tag -> - let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in - Printf.printf "%sing %d VMs with tag: %s\n%!" opname (List.length vms) tag; - parallel_with_vms async_op opname n vms rpc session_id test (subtest_string test.key tag) - ) tags + let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in + Printf.printf "%sing %d VMs with tag: %s\n%!" 
opname (List.length vms) tag; + parallel_with_vms async_op opname n vms rpc session_id test (subtest_string test.key tag) + ) tags let parallel_startall = parallel (Client.Async.VM.start ~start_paused:false ~force:false) "start" let parallel_stopall = parallel Client.Async.VM.hard_shutdown "stop" @@ -197,18 +197,18 @@ let stopall rpc session_id test = let tags = List.map (fun (vm,vmr) -> vmr.API.vM_tags) vms in let tags = List.setify (List.flatten tags) in List.map (fun tag -> - debug "Starting VMs with tag: %s" tag; - let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in - let vms = List.sort (fun (vm1,vmr1) (vm2,vmr2) -> compare vmr1.API.vM_affinity vmr2.API.vM_affinity) vms in - let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in - let times = List.map - (fun (vm,name_label,uuid) -> - debug "Stopping VM uuid '%s' (%s)" uuid name_label; - let result = time (fun () -> Client.VM.hard_shutdown rpc session_id vm) in - debug "Elapsed time: %f" result; - result) vms_names_uuids in - {resultname=test.testname; subtest=subtest_string test.key tag; xenrtresult=(List.fold_left (+.) 0.0 times); rawresult=ShutdownTest times} - ) tags + debug "Starting VMs with tag: %s" tag; + let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in + let vms = List.sort (fun (vm1,vmr1) (vm2,vmr2) -> compare vmr1.API.vM_affinity vmr2.API.vM_affinity) vms in + let vms_names_uuids = List.map (fun (vm,vmr) -> (vm,vmr.API.vM_name_label, vmr.API.vM_uuid)) vms in + let times = List.map + (fun (vm,name_label,uuid) -> + debug "Stopping VM uuid '%s' (%s)" uuid name_label; + let result = time (fun () -> Client.VM.hard_shutdown rpc session_id vm) in + debug "Elapsed time: %f" result; + result) vms_names_uuids in + {resultname=test.testname; subtest=subtest_string test.key tag; xenrtresult=(List.fold_left (+.) 
0.0 times); rawresult=ShutdownTest times} + ) tags let clone num_clones rpc session_id test = Printf.printf "Doing clone test\n%!"; @@ -217,89 +217,89 @@ let clone num_clones rpc session_id test = let tags = List.setify (List.flatten tags) in Printf.printf "Tags are [%s]\n%!" (String.concat "; " tags); List.flatten (List.map (fun tag -> - let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in - Printf.printf "We've got %d VMs\n%!" (List.length vms); - - (* Start a thread to clone each one n times *) - let body (vm, vmr, res, clone_refs) = - let name_label = vmr.API.vM_name_label in - Printf.printf "Performing %d clones of '%s' within thread...\n%!" num_clones name_label; - for j=0 to num_clones-1 do - let result = time (fun () -> - let clone = Client.VM.clone ~rpc ~session_id ~vm ~new_name:"clone" in - clone_refs := clone :: !clone_refs - ) in - Printf.printf "clone %d of '%s' finished: %f\n%!" j name_label result; - res := result :: !res - done - in - let threads_and_results = List.map (fun (vm,vmr) -> - let res : float list ref = ref [] in - let clones : API.ref_VM list ref = ref [] in - let t = Thread.create body (vm, vmr, res, clones) in - (t, (res, clones)) - ) vms in - let (threads, times_and_clones) = List.split threads_and_results in - let (times, clones) = List.split times_and_clones in - Printf.printf "Waiting for threads to finish...\n%!"; - List.iter (fun t -> Thread.join t) threads; - Printf.printf "Threads have finished\n%!"; - - (* times is a list of (list of floats, each being the time to clone a VM), one per SR *) - let times = List.map (fun x -> !x) times in - Printf.printf "Times are: [%s]\n%!" 
(String.concat ", " (List.map (fun x -> Printf.sprintf "[%s]" (String.concat ", " (List.map (fun x -> Printf.sprintf "%f" x) x))) times)); - let clones = List.map (fun x -> !x) clones in - - (* Output the results for cloning each gold VM as a separate record *) - let results = List.map - (fun x -> {resultname=test.testname; subtest=subtest_string test.key tag; xenrtresult=(List.fold_left (+.) 0.0 (List.flatten times)); rawresult=CloneTest x}) - times - in - - (* Best-effort clean-up *) - ignore_exn - (fun () -> - Printf.printf "Cleaning up...\n%!"; - (* Create a thread to clean up each set of clones *) - let threads = List.mapi - (fun i clones -> - Thread.create (fun clones -> - List.iteri - (fun j clone -> - Printf.printf "Thread %d destroying VM %d...\n%!" i j; - let vbds = Client.VM.get_VBDs ~rpc ~session_id ~self:clone in - let vdis = List.map (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in - List.iter (fun vdi -> Client.VDI.destroy ~rpc ~session_id ~self:vdi) vdis; - Client.VM.destroy ~rpc ~session_id ~self:clone - ) - clones - ) clones - ) clones in - Printf.printf "Waiting for clean-up threads to finish...\n%!"; - List.iter (fun t -> Thread.join t) threads; - Printf.printf "Clean-up threads have finished\n%!"; - ); - - (* Finally, return the results *) - results - ) tags) - -let recordssize rpc session_id test = + let vms = List.filter (fun (vm,vmr) -> List.mem tag vmr.API.vM_tags) vms in + Printf.printf "We've got %d VMs\n%!" (List.length vms); + + (* Start a thread to clone each one n times *) + let body (vm, vmr, res, clone_refs) = + let name_label = vmr.API.vM_name_label in + Printf.printf "Performing %d clones of '%s' within thread...\n%!" num_clones name_label; + for j=0 to num_clones-1 do + let result = time (fun () -> + let clone = Client.VM.clone ~rpc ~session_id ~vm ~new_name:"clone" in + clone_refs := clone :: !clone_refs + ) in + Printf.printf "clone %d of '%s' finished: %f\n%!" 
j name_label result; + res := result :: !res + done + in + let threads_and_results = List.map (fun (vm,vmr) -> + let res : float list ref = ref [] in + let clones : API.ref_VM list ref = ref [] in + let t = Thread.create body (vm, vmr, res, clones) in + (t, (res, clones)) + ) vms in + let (threads, times_and_clones) = List.split threads_and_results in + let (times, clones) = List.split times_and_clones in + Printf.printf "Waiting for threads to finish...\n%!"; + List.iter (fun t -> Thread.join t) threads; + Printf.printf "Threads have finished\n%!"; + + (* times is a list of (list of floats, each being the time to clone a VM), one per SR *) + let times = List.map (fun x -> !x) times in + Printf.printf "Times are: [%s]\n%!" (String.concat ", " (List.map (fun x -> Printf.sprintf "[%s]" (String.concat ", " (List.map (fun x -> Printf.sprintf "%f" x) x))) times)); + let clones = List.map (fun x -> !x) clones in + + (* Output the results for cloning each gold VM as a separate record *) + let results = List.map + (fun x -> {resultname=test.testname; subtest=subtest_string test.key tag; xenrtresult=(List.fold_left (+.) 0.0 (List.flatten times)); rawresult=CloneTest x}) + times + in + + (* Best-effort clean-up *) + ignore_exn + (fun () -> + Printf.printf "Cleaning up...\n%!"; + (* Create a thread to clean up each set of clones *) + let threads = List.mapi + (fun i clones -> + Thread.create (fun clones -> + List.iteri + (fun j clone -> + Printf.printf "Thread %d destroying VM %d...\n%!" 
i j; + let vbds = Client.VM.get_VBDs ~rpc ~session_id ~self:clone in + let vdis = List.map (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in + List.iter (fun vdi -> Client.VDI.destroy ~rpc ~session_id ~self:vdi) vdis; + Client.VM.destroy ~rpc ~session_id ~self:clone + ) + clones + ) clones + ) clones in + Printf.printf "Waiting for clean-up threads to finish...\n%!"; + List.iter (fun t -> Thread.join t) threads; + Printf.printf "Clean-up threads have finished\n%!"; + ); + + (* Finally, return the results *) + results + ) tags) + +let recordssize rpc session_id test = let doxmlrpctest (subtestname,testfn) = testfn (); let res = (Int64.to_float !Http_client.last_content_length) in - {resultname=test.testname; + {resultname=test.testname; subtest=subtestname; xenrtresult=res; rawresult=SizeTest res} in - List.map doxmlrpctest + List.map doxmlrpctest [("VM records", fun () -> ignore(Client.VM.get_all_records rpc session_id )); ("VBD records", fun () -> ignore(Client.VBD.get_all_records rpc session_id )); ("VIF records", fun () -> ignore(Client.VIF.get_all_records rpc session_id)); ("VDI records", fun () -> ignore(Client.VDI.get_all_records rpc session_id)); ("SR records", fun () -> ignore(Client.SR.get_all_records rpc session_id))] - + let tests key = [ {run=true; key=key; @@ -328,10 +328,10 @@ let tests key = [ ] let testnames = - List.map (fun t -> t.testname) (tests "") - + List.map (fun t -> t.testname) (tests "") + let runtestnames = - List.map (fun t -> t.testname) (List.filter (fun t -> t.run) (tests "")) + List.map (fun t -> t.testname) (List.filter (fun t -> t.run) (tests "")) let runone rpc session_id test = debug "Running test: %s" test.testname; @@ -340,15 +340,15 @@ let runone rpc session_id test = results let run rpc session_id key run_all iter = - let tests = - if run_all - then tests key - else List.filter (fun t -> t.run) (tests key) + let tests = + if run_all + then tests key + else List.filter (fun t -> t.run) (tests key) in let rec 
iter_tests n = - if n = 1 - then tests - else tests @ iter_tests (n-1) + if n = 1 + then tests + else tests @ iter_tests (n-1) in List.fold_left (fun acc test -> (runone rpc session_id test) @ acc) [] (iter_tests iter) diff --git a/ocaml/perftest/testtypes.ml b/ocaml/perftest/testtypes.ml index 2eb0b7b7257..8b8e15df34b 100644 --- a/ocaml/perftest/testtypes.ml +++ b/ocaml/perftest/testtypes.ml @@ -14,11 +14,11 @@ (* Test results *) -type resultdata = - | StartTest of float list - | SizeTest of float - | ShutdownTest of float list - | CloneTest of float list (* one float list per gold VM cloned *) +type resultdata = + | StartTest of float list + | SizeTest of float + | ShutdownTest of float list + | CloneTest of float list (* one float list per gold VM cloned *) type result = { resultname : string; @@ -32,14 +32,14 @@ let header = "RAW" let sep = ':' let to_string (results:result list) = - Printf.sprintf "%s%c%s" header sep (Marshal.to_string results [Marshal.No_sharing]) + Printf.sprintf "%s%c%s" header sep (Marshal.to_string results [Marshal.No_sharing]) let from_string s : result list option = - let open Stdext.Xstringext.String in - if startswith header s - then begin - match split ~limit:2 sep s with - | [_; r] -> Some (Marshal.from_string r 0) - | _ -> None - end else - None + let open Stdext.Xstringext.String in + if startswith header s + then begin + match split ~limit:2 sep s with + | [_; r] -> Some (Marshal.from_string r 0) + | _ -> None + end else + None diff --git a/ocaml/perftest/xmlrpcserver.ml b/ocaml/perftest/xmlrpcserver.ml index eb4ef23d4f6..b6f2d81815a 100644 --- a/ocaml/perftest/xmlrpcserver.ml +++ b/ocaml/perftest/xmlrpcserver.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(* +(* * extremely basic HTTP XMLRPC server *) @@ -28,7 +28,7 @@ module Json = struct let xmlrpc_to_json x = "" end -let whitelist = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.whitelist +let whitelist = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.whitelist let emergency_call_list = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.emergency_calls let counter = ref 0 @@ -40,11 +40,11 @@ let callback1 is_json req fd body xml = (* We now have the body string, the xml and the call name, and can also tell *) (* if we're a master or slave and whether the call came in on the unix domain socket or the tcp socket *) (* If we're a slave, and the call is from the unix domain socket, and the call *isn't* session.login_with_password, then forward *) - if !Xapi_globs.slave_emergency_mode && (not (List.mem call emergency_call_list)) + if !Xapi_globs.slave_emergency_mode && (not (List.mem call emergency_call_list)) then raise !Xapi_globs.emergency_mode_error; - if ((not (Pool_role.is_master ())) && (Context.is_unix_socket fd) && (not (List.mem call whitelist))) + if ((not (Pool_role.is_master ())) && (Context.is_unix_socket fd) && (not (List.mem call whitelist))) then - Printf.printf "would forward\n" + Printf.printf "would forward\n" (* forward req body xml *) @@ -52,14 +52,14 @@ let callback1 is_json req fd body xml = if Mutex.execute counter_m (fun () -> incr counter; !counter) mod 100 = 0 then (Printf.printf "."; flush stdout); - let response = Server.dispatch_xml req fd xml in - let translated = - match is_json,response with - true,XMLRPC.Success [Xml.Element("value",_,[x])] -> XMLRPC.Success [Xml.Element("value",[],[Xml.PCData (Json.xmlrpc_to_json x)])] - | _ -> response - in - XMLRPC.To.methodResponse translated - (* debug(fmt "response = %s" response); *) + let response = Server.dispatch_xml req fd xml in + let translated = + match is_json,response with + 
true,XMLRPC.Success [Xml.Element("value",_,[x])] -> XMLRPC.Success [Xml.Element("value",[],[Xml.PCData (Json.xmlrpc_to_json x)])] + | _ -> response + in + XMLRPC.To.methodResponse translated +(* debug(fmt "response = %s" response); *) @@ -69,12 +69,12 @@ let callback req bio = let xml = Xml.parse_string body in try let response = Xml.to_bigbuffer (callback1 false req fd (Some body) xml) in - Http_svr.response_fct req ~hdrs:[ "Content-Type: text/xml" ] fd (Bigbuffer.length response) - (fun fd -> Bigbuffer.to_fct response (fun s -> ignore(Unix.write fd s 0 (String.length s)))) - with + Http_svr.response_fct req ~hdrs:[ "Content-Type: text/xml" ] fd (Bigbuffer.length response) + (fun fd -> Bigbuffer.to_fct response (fun s -> ignore(Unix.write fd s 0 (String.length s)))) + with | (Api_errors.Server_error (err, params)) -> - Http_svr.response_str req ~hdrs:[ "Content-Type: text/xml" ] fd - (Xml.to_string (XMLRPC.To.methodResponse (XMLRPC.Failure(err, params)))) + Http_svr.response_str req ~hdrs:[ "Content-Type: text/xml" ] fd + (Xml.to_string (XMLRPC.To.methodResponse (XMLRPC.Failure(err, params)))) let register () = Http_svr.add_handler Post "/" callback @@ -84,33 +84,33 @@ let get_main_ip_address ~__context = (** Start the XML-RPC server. *) let _ = - let http_port = ref Xapi_globs.default_cleartext_port in - Arg.parse ([ - "-log", Arg.String (fun s -> - if s = "all" then - Logs.set_default Log.Debug [ "stderr" ] - else - Logs.add s [ "stderr" ]), - "open a logger to stderr to the argument key name"; - "-http-port", Arg.Set_int http_port, "set http port"; - ])(fun x -> printf "Warning, ignoring unknown argument: %s" x) - "Receive file uploads by HTTP"; - - printf "Starting server on port %d\n%!" 
!http_port; - printf "Whitelist length = %d; emergency call list = %d\n" (List.length whitelist) (List.length emergency_call_list); - try - register (); - let sockaddr = Unix.ADDR_INET(Unix.inet_addr_of_string Xapi_globs.ips_to_listen_on, !http_port) in - let inet_sock = Http_svr.bind sockaddr in - let threads = Http_svr.http_svr [ (inet_sock,"ur_inet") ] in - print_endline "Receiving upload requests on:"; - Printf.printf "http://%s:%d/upload\n" (get_main_ip_address ()) !http_port; - flush stdout; - while true do - Thread.delay 10. - done - with - | exn -> (eprintf "Caught exception: %s\n!" - (Printexc.to_string exn)) + let http_port = ref Xapi_globs.default_cleartext_port in + Arg.parse ([ + "-log", Arg.String (fun s -> + if s = "all" then + Logs.set_default Log.Debug [ "stderr" ] + else + Logs.add s [ "stderr" ]), + "open a logger to stderr to the argument key name"; + "-http-port", Arg.Set_int http_port, "set http port"; + ])(fun x -> printf "Warning, ignoring unknown argument: %s" x) + "Receive file uploads by HTTP"; + + printf "Starting server on port %d\n%!" !http_port; + printf "Whitelist length = %d; emergency call list = %d\n" (List.length whitelist) (List.length emergency_call_list); + try + register (); + let sockaddr = Unix.ADDR_INET(Unix.inet_addr_of_string Xapi_globs.ips_to_listen_on, !http_port) in + let inet_sock = Http_svr.bind sockaddr in + let threads = Http_svr.http_svr [ (inet_sock,"ur_inet") ] in + print_endline "Receiving upload requests on:"; + Printf.printf "http://%s:%d/upload\n" (get_main_ip_address ()) !http_port; + flush stdout; + while true do + Thread.delay 10. + done + with + | exn -> (eprintf "Caught exception: %s\n!" 
+ (Printexc.to_string exn)) diff --git a/ocaml/ptoken/genptoken.ml b/ocaml/ptoken/genptoken.ml index 6f9d116c7b1..c74b15603f7 100644 --- a/ocaml/ptoken/genptoken.ml +++ b/ocaml/ptoken/genptoken.ml @@ -12,12 +12,12 @@ let opt_target = ("-o", Arg.String set_target, "name of file to write to [ptoken let opts = [opt_force; opt_target] let _ = - Arg.parse opts (fun _ -> ()) "Generate a pool token"; - if Sys.file_exists !options.tgtfile - then if !options.force - then Sys.remove !options.tgtfile - else begin print_endline "File exists, use -f to replace it."; exit 1 end; - let uuid _ = Uuid.to_string (Uuid.make_uuid ()) in - let uuids = String.concat "/" [uuid (); uuid (); uuid ()] in - let f = open_out_gen [Open_wronly; Open_creat; Open_excl; Open_binary] 0o640 !options.tgtfile in - output_string f uuids + Arg.parse opts (fun _ -> ()) "Generate a pool token"; + if Sys.file_exists !options.tgtfile + then if !options.force + then Sys.remove !options.tgtfile + else begin print_endline "File exists, use -f to replace it."; exit 1 end; + let uuid _ = Uuid.to_string (Uuid.make_uuid ()) in + let uuids = String.concat "/" [uuid (); uuid (); uuid ()] in + let f = open_out_gen [Open_wronly; Open_creat; Open_excl; Open_binary] 0o640 !options.tgtfile in + output_string f uuids diff --git a/ocaml/rfb/rfb.ml b/ocaml/rfb/rfb.ml index 37f73aee8cd..2aafed36c86 100644 --- a/ocaml/rfb/rfb.ml +++ b/ocaml/rfb/rfb.ml @@ -14,7 +14,7 @@ open Stdext.Xstringext (* TODO: - 1. Check for overflow in UInt32/UInt64 + 1. 
Check for overflow in UInt32/UInt64 *) exception Truncated @@ -22,9 +22,9 @@ exception Truncated let _marshal (x: int list) = String.implode (List.map char_of_int (List.map (fun x -> x land 0xff) x)) let _unmarshal (x: string) = List.map int_of_char (String.explode x) -let blit src srcoff dst dstoff len = - (* Printf.printf "blit src_len=%d srcoff=%d dst_len=%d dstoff=%d len=%d\n" (String.length src) srcoff (String.length dst) dstoff len; *) - String.blit src srcoff dst dstoff len +let blit src srcoff dst dstoff len = + (* Printf.printf "blit src_len=%d srcoff=%d dst_len=%d dstoff=%d len=%d\n" (String.length src) srcoff (String.length dst) dstoff len; *) + String.blit src srcoff dst dstoff len module UInt16 = struct type t = int @@ -36,7 +36,7 @@ module UInt16 = struct let marshal (x: t) : string = _marshal [ x >> 8; x ] - let marshal_at (buf: string) (off: int) (x: t) = + let marshal_at (buf: string) (off: int) (x: t) = let raw = marshal x in blit raw 0 buf off 2; off + 2 @@ -56,17 +56,17 @@ module UInt32 = struct let (<<) = Int32.shift_left let (>>) = Int32.shift_right let (&&) = Int32.logand - - let marshal (x: t) : string = + + let marshal (x: t) : string = _marshal (List.map Int32.to_int [ x >> 24; x >> 16; x >> 8; x ]) - let marshal_at (buf: string) (off: int) (x: t) = + let marshal_at (buf: string) (off: int) (x: t) = let raw = marshal x in blit raw 0 buf off 4; off + 4 let unmarshal (x: string) : t = match List.map Int32.of_int (_unmarshal x) with | [ a; b; c; d ] -> (a << 24) || (b << 16) || (c << 8) || d | _ -> raise Truncated - + let prettyprint = string_of_int let to_int32 x = x let of_int32 x = x @@ -80,7 +80,7 @@ module UInt64 = struct let (>>) = Int64.shift_right let (&&) = Int64.logand - let marshal (x: t) : string = + let marshal (x: t) : string = _marshal (List.map Int64.to_int [ x >> 56; x >> 48; x >> 40; x >> 32; x >> 24; x >> 16; x >> 8; x ]) let unmarshal (x: string) : t = match List.map Int64.of_int (_unmarshal x) with | [ a; b; c; d; e; f; 
g; h ] -> (a << 56 ) || (b << 48) || (c << 40) || (d << 32) || (e << 24) || (f << 16) || (g << 8) || h @@ -92,18 +92,18 @@ module UInt64 = struct end (** Really read, raising End_of_file if no more data *) -let really_read fd n = +let really_read fd n = let buf = String.make n '\000' in - let rec rread fd buf ofs len = + let rec rread fd buf ofs len = let n = Unix.read fd buf ofs len in if n = 0 then raise End_of_file; if n < len then rread fd buf (ofs + n) (len - n) in rread fd buf 0 n; buf -let really_write fd buf = +let really_write fd buf = (* Printf.printf "About to write %d bytes [ %s ]\n" - (String.length buf) (String.concat " " (List.map (fun x -> Printf.sprintf "%02x" (int_of_char x)) (String.explode buf))); + (String.length buf) (String.concat " " (List.map (fun x -> Printf.sprintf "%02x" (int_of_char x)) (String.explode buf))); (*Unix.sleep 2; *) *) let len = Unix.write fd buf 0 (String.length buf) in @@ -115,7 +115,7 @@ module ProtocolVersion = struct exception Unmarshal_failure let marshal (x: t) = Printf.sprintf "RFB %03x.%03x\n" x.major x.minor - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let x = really_read s 12 in if not(String.startswith "RFB " x) then raise Unmarshal_failure; @@ -123,7 +123,7 @@ module ProtocolVersion = struct and minor = int_of_string (String.sub x 8 3) in { major = major; minor = minor } - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "ProtocolVersion major = %d minor = %d" x.major x.minor end @@ -131,7 +131,7 @@ module Error = struct type t = string let marshal (x: t) = UInt32.marshal (Int32.of_int (String.length x)) ^ x - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let len = UInt32.unmarshal (really_read s 4) in really_read s (Int32.to_int len) end @@ -147,19 +147,19 @@ module SecurityType = struct | NoSecurity -> UInt32.marshal 1l | VNCAuth -> UInt32.marshal 2l - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = 
match UInt32.unmarshal (really_read s 4) with | 0l -> Failed (Error.unmarshal s) | 1l -> NoSecurity - | 2l -> VNCAuth - | _ -> raise Unmarshal_failure + | 2l -> VNCAuth + | _ -> raise Unmarshal_failure end module ClientInit = struct type t = bool (* shared-flag *) let marshal (x: t) = if x then "x" else "\000" - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = match (really_read s 1).[0] with | '\000' -> false | _ -> true @@ -167,15 +167,15 @@ end module PixelFormat = struct type t = { bpp: int; - depth: int; - big_endian: bool; - true_colour: bool; - red_max: int; - green_max: int; - blue_max: int; - red_shift: int; - green_shift: int; - blue_shift: int } + depth: int; + big_endian: bool; + true_colour: bool; + red_max: int; + green_max: int; + blue_max: int; + red_shift: int; + green_shift: int; + blue_shift: int } let true_colour_default big_endian = { bpp = 32; depth = 24; big_endian = big_endian; true_colour = true; @@ -183,7 +183,7 @@ module PixelFormat = struct red_max = 0; green_max = 0; blue_max = 0; red_shift = 0; green_shift = 0; blue_shift = 0; } - let marshal (x: t) = + let marshal (x: t) = let bpp = String.make 1 (char_of_int x.bpp) in let depth = String.make 1 (char_of_int x.depth) in let big_endian = if x.big_endian then "x" else "\000" in @@ -194,9 +194,9 @@ module PixelFormat = struct let red_shift = String.make 1 (char_of_int x.red_shift) in let green_shift = String.make 1 (char_of_int x.green_shift) in let blue_shift = String.make 1 (char_of_int x.blue_shift) in - bpp ^ depth ^ big_endian ^ true_colour ^ - red_max ^ green_max ^ blue_max ^ red_shift ^ green_shift ^ blue_shift ^ - " " (* padding *) + bpp ^ depth ^ big_endian ^ true_colour ^ + red_max ^ green_max ^ blue_max ^ red_shift ^ green_shift ^ blue_shift ^ + " " (* padding *) let unmarshal (s: Unix.file_descr) = let buf = really_read s 16 in { bpp = int_of_char buf.[0]; @@ -215,10 +215,10 @@ end module ServerInit = struct type t = { width: int; height: int; - 
name: string; - pixelformat: PixelFormat.t } + name: string; + pixelformat: PixelFormat.t } - let marshal (x: t) = + let marshal (x: t) = let width = UInt16.marshal x.width in let height = UInt16.marshal x.height in let pixel = PixelFormat.marshal x.pixelformat in @@ -228,7 +228,7 @@ end module SetPixelFormat = struct type t = PixelFormat.t - let marshal (x: t) = + let marshal (x: t) = let ty = "\000" in let padding = "\000\000\000" in ty ^ padding ^ (PixelFormat.marshal x) @@ -237,15 +237,15 @@ module SetPixelFormat = struct ignore(really_read s 3); PixelFormat.unmarshal s - let prettyprint (x: t) = - Printf.sprintf "SetPixelFormat (bpp=%d depth=%d)" + let prettyprint (x: t) = + Printf.sprintf "SetPixelFormat (bpp=%d depth=%d)" x.PixelFormat.bpp x.PixelFormat.depth end module SetEncodings = struct type t = UInt32.t list - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = ignore(really_read s 1); (* padding *) let num = UInt16.unmarshal (really_read s 2) in let encodings = ref [] in @@ -254,16 +254,16 @@ module SetEncodings = struct done; List.rev !encodings - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "SetEncodings (num=%d)" (List.length x) end module FramebufferUpdateRequest = struct type t = { incremental: bool; - x: int; y: int; - width: int; height: int } + x: int; y: int; + width: int; height: int } - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let buf = really_read s 9 in { incremental = buf.[0] <> '\000'; x = UInt16.unmarshal (String.sub buf 1 2); @@ -271,7 +271,7 @@ module FramebufferUpdateRequest = struct width = UInt16.unmarshal (String.sub buf 5 2); height = UInt16.unmarshal (String.sub buf 7 2); } - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "FrameBufferUpdateRequest (incr=%b x=%d y=%d width=%d height=%d)" x.incremental x.x x.y x.width x.height end @@ -281,24 +281,24 @@ module FramebufferUpdate = struct type t = { buffer: string } let sizeof (x: t) 
= String.length x.buffer let marshal (x: t) = x.buffer - let marshal_at (buf: string) (off: int) (x: t) = - let length = sizeof x in - blit x.buffer 0 buf off length; - off + length + let marshal_at (buf: string) (off: int) (x: t) = + let length = sizeof x in + blit x.buffer 0 buf off length; + off + length end module CopyRect = struct type t = { x: int; y: int } let sizeof (x: t) = 2 + 2 - let marshal (x: t) = + let marshal (x: t) = UInt16.marshal x.x ^ (UInt16.marshal x.y) - let marshal_at (buf: string) (off: int) (x: t) = + let marshal_at (buf: string) (off: int) (x: t) = let off = UInt16.marshal_at buf off x.x in - UInt16.marshal_at buf off x.y - let prettyprint (x: t) = + UInt16.marshal_at buf off x.y + let prettyprint (x: t) = Printf.sprintf "{ x = %d; y = %d }" x.x x.y end module Encoding = struct - type t = + type t = | Raw of Raw.t | CopyRect of CopyRect.t | DesktopSize @@ -311,13 +311,13 @@ module FramebufferUpdate = struct | CopyRect x -> UInt32.marshal 1l ^ (CopyRect.marshal x) | DesktopSize -> UInt32.marshal (-223l) let marshal_at (buf: string) (off: int) (x: t) = match x with - | Raw x -> + | Raw x -> let off = UInt32.marshal_at buf off 0l in Raw.marshal_at buf off x - | CopyRect x -> + | CopyRect x -> let off = UInt32.marshal_at buf off 1l in CopyRect.marshal_at buf off x - | DesktopSize -> + | DesktopSize -> UInt32.marshal_at buf off (-223l) let prettyprint = function | Raw _ -> "Raw" @@ -325,21 +325,21 @@ module FramebufferUpdate = struct | DesktopSize -> "DesktopSize" end type t = { x: int; y: int; w: int; h: int; encoding: Encoding.t } - let sizeof (xs: t list) = + let sizeof (xs: t list) = let one (one: t) = 2 + 2 + 2 + 2 + (Encoding.sizeof one.encoding) in 2 (* \000\000 *) + 2 (* length *) + (List.fold_left (+) 0 (List.map one xs)) - let marshal_at (buf: string) (off: int) (xs: t list) = + let marshal_at (buf: string) (off: int) (xs: t list) = let off = UInt16.marshal_at buf off 0 in let off = UInt16.marshal_at buf off (List.length xs) in - let 
update (buf: string) (off: int) (one: t) = - let off = UInt16.marshal_at buf off one.x in - let off = UInt16.marshal_at buf off one.y in - let off = UInt16.marshal_at buf off one.w in - let off = UInt16.marshal_at buf off one.h in - Encoding.marshal_at buf off one.encoding in + let update (buf: string) (off: int) (one: t) = + let off = UInt16.marshal_at buf off one.x in + let off = UInt16.marshal_at buf off one.y in + let off = UInt16.marshal_at buf off one.w in + let off = UInt16.marshal_at buf off one.h in + Encoding.marshal_at buf off one.encoding in List.fold_left (fun off x -> update buf off x) off xs - let marshal (xs: t list) = - let update (one: t) = + let marshal (xs: t list) = + let update (one: t) = let x = UInt16.marshal one.x and y = UInt16.marshal one.y in let w = UInt16.marshal one.w and h = UInt16.marshal one.h in x ^ y ^ w ^ h ^ (Encoding.marshal one.encoding) in @@ -348,25 +348,25 @@ module FramebufferUpdate = struct end module SetColourMapEntries = struct - type t = { first_colour: int; - map: (int * int * int) list } - let marshal (x: t) = + type t = { first_colour: int; + map: (int * int * int) list } + let marshal (x: t) = let first_colour = UInt16.marshal x.first_colour in let length = UInt16.marshal (List.length x.map) in - let colour (r, g, b) = + let colour (r, g, b) = UInt16.marshal r ^ (UInt16.marshal g) ^ (UInt16.marshal b) in - "\001\000" ^ first_colour ^ length ^ - (String.concat "" (List.map colour x.map)) + "\001\000" ^ first_colour ^ length ^ + (String.concat "" (List.map colour x.map)) end module KeyEvent = struct type t = { down: bool; key: UInt32.t } - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let buf = really_read s 7 in { down = buf.[0] <> '\000'; key = UInt32.unmarshal (String.sub buf 3 4) } - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "KeyEvent { down = %b; key = %s }" x.down (Int32.to_string x.key) end @@ -374,13 +374,13 @@ end module PointerEvent = struct type t = { 
mask: int; x: int; y: int } - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let buf = really_read s 5 in { mask = int_of_char buf.[0]; x = UInt16.unmarshal (String.sub buf 1 2); y = UInt16.unmarshal (String.sub buf 3 2); } - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "PointerEvent { mask = %d; x = %d; y = %d }" x.mask x.x x.y end @@ -388,18 +388,18 @@ end module ClientCutText = struct type t = string - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = let buf = really_read s 7 in let length = UInt32.unmarshal (String.sub buf 3 4) in really_read s (Int32.to_int length) - let prettyprint (x: t) = + let prettyprint (x: t) = Printf.sprintf "ClientCutText { %s }" x end module Request = struct - type t = + type t = | SetPixelFormat of SetPixelFormat.t - | SetEncodings of SetEncodings.t + | SetEncodings of SetEncodings.t | FrameBufferUpdateRequest of FramebufferUpdateRequest.t | KeyEvent of KeyEvent.t | PointerEvent of PointerEvent.t @@ -413,22 +413,22 @@ module Request = struct | PointerEvent x -> PointerEvent.prettyprint x | ClientCutText x -> ClientCutText.prettyprint x - let unmarshal (s: Unix.file_descr) = + let unmarshal (s: Unix.file_descr) = match int_of_char (really_read s 1).[0] with | 0 -> - SetPixelFormat (SetPixelFormat.unmarshal s) + SetPixelFormat (SetPixelFormat.unmarshal s) | 2 -> - SetEncodings (SetEncodings.unmarshal s) + SetEncodings (SetEncodings.unmarshal s) | 3 -> - FrameBufferUpdateRequest (FramebufferUpdateRequest.unmarshal s) + FrameBufferUpdateRequest (FramebufferUpdateRequest.unmarshal s) | 4 -> - KeyEvent (KeyEvent.unmarshal s) + KeyEvent (KeyEvent.unmarshal s) | 5 -> - PointerEvent (PointerEvent.unmarshal s) + PointerEvent (PointerEvent.unmarshal s) | 6 -> - ClientCutText (ClientCutText.unmarshal s) + ClientCutText (ClientCutText.unmarshal s) | x -> - failwith (Printf.sprintf "Unknown message type: %d" x) + failwith (Printf.sprintf "Unknown message type: %d" x) 
end let white = (255, 255, 255) @@ -444,7 +444,7 @@ let handshake w h (s: Unix.file_descr) = if ci then print_endline "Client requests a shared display" else print_endline "Client requests a non-shared display"; let si = { ServerInit.name = "dave's desktop"; - width = w; height = h; - pixelformat = PixelFormat.true_colour_default false } in + width = w; height = h; + pixelformat = PixelFormat.true_colour_default false } in really_write s (ServerInit.marshal si) diff --git a/ocaml/rfb/rfb_randomtest.ml b/ocaml/rfb/rfb_randomtest.ml index c22f80be971..eeda1873eb2 100644 --- a/ocaml/rfb/rfb_randomtest.ml +++ b/ocaml/rfb/rfb_randomtest.ml @@ -16,7 +16,7 @@ open Rfb let w = 640 let h = 480 -let server (s: Unix.file_descr) = +let server (s: Unix.file_descr) = handshake w h s; let started = ref false in @@ -27,30 +27,30 @@ let server (s: Unix.file_descr) = print_endline (Request.prettyprint req); match req with | Request.SetPixelFormat pf -> - bpp := pf.PixelFormat.bpp; + bpp := pf.PixelFormat.bpp; | Request.FrameBufferUpdateRequest _ -> - if not(!started) then begin - (* Update the whole thing *) - let buffer = String.create (w * h * !bpp / 8) in - for i = 0 to String.length buffer - 1 do - buffer.[i] <- char_of_int (Random.int 255) - done; - let raw = { FramebufferUpdate.Raw.buffer = buffer } in - let update = { FramebufferUpdate.x = 0; y = 0; w = w; h = h; - encoding = FramebufferUpdate.Encoding.Raw raw } in - really_write s (FramebufferUpdate.marshal [ update ]); - started := true; - end else begin - (* send a copyrect *) - let w' = Random.int w and h' = Random.int h in - let x' = Random.int (w - w') and y' = Random.int (h - h') in - let x'' = Random.int (w - w') and y'' = Random.int (h - h') in - let cr = { FramebufferUpdate.CopyRect.x = x''; y = y'' } in - let update = { FramebufferUpdate.x = x'; y = y'; w = w'; h = h'; - encoding = FramebufferUpdate.Encoding.CopyRect cr } in - really_write s (FramebufferUpdate.marshal [ update ]) - end + if not(!started) then begin 
+ (* Update the whole thing *) + let buffer = String.create (w * h * !bpp / 8) in + for i = 0 to String.length buffer - 1 do + buffer.[i] <- char_of_int (Random.int 255) + done; + let raw = { FramebufferUpdate.Raw.buffer = buffer } in + let update = { FramebufferUpdate.x = 0; y = 0; w = w; h = h; + encoding = FramebufferUpdate.Encoding.Raw raw } in + really_write s (FramebufferUpdate.marshal [ update ]); + started := true; + end else begin + (* send a copyrect *) + let w' = Random.int w and h' = Random.int h in + let x' = Random.int (w - w') and y' = Random.int (h - h') in + let x'' = Random.int (w - w') and y'' = Random.int (h - h') in + let cr = { FramebufferUpdate.CopyRect.x = x''; y = y'' } in + let update = { FramebufferUpdate.x = x'; y = y'; w = w'; h = h'; + encoding = FramebufferUpdate.Encoding.CopyRect cr } in + really_write s (FramebufferUpdate.marshal [ update ]) + end | _ -> () done - + diff --git a/ocaml/rfb/rfb_randomtest_main.ml b/ocaml/rfb/rfb_randomtest_main.ml index de27c0bd814..a912cdbeffe 100644 --- a/ocaml/rfb/rfb_randomtest_main.ml +++ b/ocaml/rfb/rfb_randomtest_main.ml @@ -14,7 +14,7 @@ open Rfb open Rfb_randomtest -let _ = +let _ = let port = 5902 in let s = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 in Unix.handle_unix_error (Unix.setsockopt s Unix.SO_REUSEADDR) true; diff --git a/ocaml/test/mock.ml b/ocaml/test/mock.ml index b1bad6b4134..7f838b61a9a 100644 --- a/ocaml/test/mock.ml +++ b/ocaml/test/mock.ml @@ -14,30 +14,30 @@ module Database = struct - let _schema = Datamodel_schema.of_datamodel () ;; + let _schema = Datamodel_schema.of_datamodel () ;; - let conn = [ Parse_db_conf.make "./xapi-db.xml" ] ;; + let conn = [ Parse_db_conf.make "./xapi-db.xml" ] ;; - let flush ?(conn=conn) __context = - Db_cache_impl.sync conn (Db_ref.get_database (Context.database_of __context)) - ;; + let flush ?(conn=conn) __context = + Db_cache_impl.sync conn (Db_ref.get_database (Context.database_of __context)) + ;; - let make_global ~conn ~reuse () = - 
Db_backend.__test_set_master_database - (Db_cache_types.Database.make Schema.empty); - let db = Db_backend.make () in - Db_cache_impl.make - db (if reuse then conn else []) (Datamodel_schema.of_datamodel ()); - Db_cache_impl.sync conn (Db_ref.get_database db); - db - ;; + let make_global ~conn ~reuse () = + Db_backend.__test_set_master_database + (Db_cache_types.Database.make Schema.empty); + let db = Db_backend.make () in + Db_cache_impl.make + db (if reuse then conn else []) (Datamodel_schema.of_datamodel ()); + Db_cache_impl.sync conn (Db_ref.get_database db); + db + ;; end (* Database *) module Context : (module type of Context with type t = Context.t) = struct - include Context + include Context end (* Context *) let make_context_with_new_db ?(conn=Database.conn) ?(reuse=false) task_name = - Database.make_global ~conn ~reuse () |> ignore; - Context.make task_name + Database.make_global ~conn ~reuse () |> ignore; + Context.make task_name diff --git a/ocaml/test/suite.ml b/ocaml/test/suite.ml index 7095467e70c..bc8f203d930 100644 --- a/ocaml/test/suite.ml +++ b/ocaml/test/suite.ml @@ -15,76 +15,76 @@ open OUnit let base_suite = - "base_suite" >::: - [ - Test_basic.test; - Test_agility.test; - Test_helpers.test; - Test_datamodel_utils.test; - Test_db_lowlevel.test; - Test_daemon_manager.test; - Test_http.test; - Test_pool_db_backup.test; - Test_xapi_db_upgrade.test; - Test_ca91480.test; - Test_vdi_allowed_operations.test; - Test_ha_vm_failover.test; - Test_map_check.test; - Test_pool_apply_edition.test; - Test_pool_license.test; - Test_features.test; - Test_pool_restore_database.test; - Test_platformdata.test; - Test_sm_features.test; - Test_gpu_group.test; - Test_pci_helpers.test; - Test_vgpu_type.test; - Test_pgpu.test; - Test_pgpu_helpers.test; - Test_storage_migrate_state.test; - Test_vm_helpers.test; - Test_vm_migrate.test; - Test_xenopsd_metadata.test; - Test_workload_balancing.test; - Test_cpuid_helpers.test; - Test_pool_cpuinfo.test; - (* 
Test_ca121350.test; *) - Test_daily_license_check.test; - Test_dbsync_master.test; - Test_xapi_xenops.test; - Test_no_migrate.test; - ] + "base_suite" >::: + [ + Test_basic.test; + Test_agility.test; + Test_helpers.test; + Test_datamodel_utils.test; + Test_db_lowlevel.test; + Test_daemon_manager.test; + Test_http.test; + Test_pool_db_backup.test; + Test_xapi_db_upgrade.test; + Test_ca91480.test; + Test_vdi_allowed_operations.test; + Test_ha_vm_failover.test; + Test_map_check.test; + Test_pool_apply_edition.test; + Test_pool_license.test; + Test_features.test; + Test_pool_restore_database.test; + Test_platformdata.test; + Test_sm_features.test; + Test_gpu_group.test; + Test_pci_helpers.test; + Test_vgpu_type.test; + Test_pgpu.test; + Test_pgpu_helpers.test; + Test_storage_migrate_state.test; + Test_vm_helpers.test; + Test_vm_migrate.test; + Test_xenopsd_metadata.test; + Test_workload_balancing.test; + Test_cpuid_helpers.test; + Test_pool_cpuinfo.test; + (* Test_ca121350.test; *) + Test_daily_license_check.test; + Test_dbsync_master.test; + Test_xapi_xenops.test; + Test_no_migrate.test; + ] let handlers = [ - "get_services", Http_svr.FdIO Xapi_services.get_handler; - "post_services", Http_svr.FdIO Xapi_services.post_handler; - "put_services", Http_svr.FdIO Xapi_services.put_handler; - "post_root", Http_svr.BufIO (Api_server.callback false); - "post_json", Http_svr.BufIO (Api_server.callback true); - "post_jsonrpc", Http_svr.BufIO Api_server.jsoncallback; - "post_remote_db_access", - Http_svr.BufIO Db_remote_cache_access_v1.handler; - "post_remote_db_access_v2", - Http_svr.BufIO Db_remote_cache_access_v2.handler; + "get_services", Http_svr.FdIO Xapi_services.get_handler; + "post_services", Http_svr.FdIO Xapi_services.post_handler; + "put_services", Http_svr.FdIO Xapi_services.put_handler; + "post_root", Http_svr.BufIO (Api_server.callback false); + "post_json", Http_svr.BufIO (Api_server.callback true); + "post_jsonrpc", Http_svr.BufIO Api_server.jsoncallback; + 
"post_remote_db_access", + Http_svr.BufIO Db_remote_cache_access_v1.handler; + "post_remote_db_access_v2", + Http_svr.BufIO Db_remote_cache_access_v2.handler; ] let start_server handlers = - List.iter Xapi_http.add_handler handlers; - Xapi.listen_unix_socket "/tmp/xapi-test/xapi-unit-test-socket" + List.iter Xapi_http.add_handler handlers; + Xapi.listen_unix_socket "/tmp/xapi-test/xapi-unit-test-socket" let harness_init () = - Printexc.record_backtrace true; - Xcp_client.use_switch := false; - Pool_role.set_pool_role_for_test (); - Xapi.register_callback_fns (); - start_server handlers + Printexc.record_backtrace true; + Xcp_client.use_switch := false; + Pool_role.set_pool_role_for_test (); + Xapi.register_callback_fns (); + start_server handlers let harness_destroy () = () let () = - Printexc.record_backtrace true; - Inventory.inventory_filename := - Filename.concat Test_common.working_area "xcp-inventory"; - harness_init (); - ounit2_of_ounit1 base_suite |> OUnit2.run_test_tt_main; - harness_destroy (); + Printexc.record_backtrace true; + Inventory.inventory_filename := + Filename.concat Test_common.working_area "xcp-inventory"; + harness_init (); + ounit2_of_ounit1 base_suite |> OUnit2.run_test_tt_main; + harness_destroy (); diff --git a/ocaml/test/test_agility.ml b/ocaml/test/test_agility.ml index 8fe49d5eaaf..64f702eaac7 100644 --- a/ocaml/test/test_agility.ml +++ b/ocaml/test/test_agility.ml @@ -16,19 +16,19 @@ open OUnit open Test_common let test_vm_agility_with_vgpu () = - let __context = make_test_database () in - let vm = make_vm ~__context () in - (* VM has no VIFs, VBDs or VGPUs, so should be agile. *) - Agility.vm_assert_agile ~__context ~self:vm; - (* Create a VGPU - VM should no longer be agile. 
*) - let (_: API.ref_VGPU) = make_vgpu ~__context ~vM:vm () in - assert_raises_api_error - ~args:[Ref.string_of vm] - Api_errors.vm_has_vgpu - (fun () -> Agility.vm_assert_agile ~__context ~self:vm) + let __context = make_test_database () in + let vm = make_vm ~__context () in + (* VM has no VIFs, VBDs or VGPUs, so should be agile. *) + Agility.vm_assert_agile ~__context ~self:vm; + (* Create a VGPU - VM should no longer be agile. *) + let (_: API.ref_VGPU) = make_vgpu ~__context ~vM:vm () in + assert_raises_api_error + ~args:[Ref.string_of vm] + Api_errors.vm_has_vgpu + (fun () -> Agility.vm_assert_agile ~__context ~self:vm) let test = - "test_agility" >::: - [ - "test_vm_agility_with_vgpu" >:: test_vm_agility_with_vgpu; - ] + "test_agility" >::: + [ + "test_vm_agility_with_vgpu" >:: test_vm_agility_with_vgpu; + ] diff --git a/ocaml/test/test_basic.ml b/ocaml/test/test_basic.ml index 0677f689af3..7f31db61977 100644 --- a/ocaml/test/test_basic.ml +++ b/ocaml/test/test_basic.ml @@ -19,27 +19,27 @@ let test_always_pass () = assert_equal 1 1 let test_always_fail () = skip "This will fail" ; assert_equal 1 0 let test_mock_db () = - let __context = Mock.make_context_with_new_db "Mock context" in - let blob_ref = Ref.make () in - Db.Blob.create __context blob_ref - (Uuid.to_string (Uuid.make_uuid ())) - "BLOB" "" 5L true (Stdext.Date.of_float 0.0) "" ; - ignore (Db.Blob.get_record ~__context ~self:blob_ref) ; - ignore (Db.VM.get_all_records ~__context) ; - let blob_name = Db.Blob.get_name_label ~__context ~self:blob_ref in - assert_equal blob_name "BLOB" + let __context = Mock.make_context_with_new_db "Mock context" in + let blob_ref = Ref.make () in + Db.Blob.create __context blob_ref + (Uuid.to_string (Uuid.make_uuid ())) + "BLOB" "" 5L true (Stdext.Date.of_float 0.0) "" ; + ignore (Db.Blob.get_record ~__context ~self:blob_ref) ; + ignore (Db.VM.get_all_records ~__context) ; + let blob_name = Db.Blob.get_name_label ~__context ~self:blob_ref in + assert_equal blob_name 
"BLOB" let test_assert_licensed_storage_motion () = skip "TODO" ; - let __context = Mock.make_context_with_new_db "Mock context" in - let licensed = try Xapi_vm_migrate.assert_licensed_storage_motion ~__context; true - with _ -> false in - assert_bool "Not licensed for SXM" licensed + let __context = Mock.make_context_with_new_db "Mock context" in + let licensed = try Xapi_vm_migrate.assert_licensed_storage_motion ~__context; true + with _ -> false in + assert_bool "Not licensed for SXM" licensed let test = - "test_basic" >::: - [ - "test_always_pass" >:: test_always_pass ; - "test_always_fail" >:: test_always_fail ; - "test_mock_db" >:: test_mock_db ; - "test_assert_licensed_storage_motion" >:: test_assert_licensed_storage_motion ; - ] + "test_basic" >::: + [ + "test_always_pass" >:: test_always_pass ; + "test_always_fail" >:: test_always_fail ; + "test_mock_db" >:: test_mock_db ; + "test_assert_licensed_storage_motion" >:: test_assert_licensed_storage_motion ; + ] diff --git a/ocaml/test/test_ca121350.ml b/ocaml/test/test_ca121350.ml index 373a94214c6..7d48eddb5f2 100644 --- a/ocaml/test/test_ca121350.ml +++ b/ocaml/test/test_ca121350.ml @@ -5,61 +5,61 @@ open OUnit open Test_common let setup_fixture () = - let __context = make_test_database () in - let self = make_host ~__context () in - (__context, self) + let __context = make_test_database () in + let self = make_host ~__context () in + (__context, self) let test_invalid_edition () = - debug "*** starting test_invalid_edition"; + debug "*** starting test_invalid_edition"; - let __context, self = setup_fixture () in - let module M = struct - include V6client ;; - let apply_edition ~__context edition _ = (edition, [], []) ;; - let get_editions _ = [ "free", "", "", 0; - "per-socket", "", "", 1; - "xendesktop", "", "", 1; ] ;; - end in - License_init.v6client := (module M); + let __context, self = setup_fixture () in + let module M = struct + include V6client ;; + let apply_edition ~__context edition _ = 
(edition, [], []) ;; + let get_editions _ = [ "free", "", "", 0; + "per-socket", "", "", 1; + "xendesktop", "", "", 1; ] ;; + end in + License_init.v6client := (module M); - Db.Host.set_edition ~__context ~self ~value:"foobar"; + Db.Host.set_edition ~__context ~self ~value:"foobar"; - License_init.initialise ~__context ~host:self; + License_init.initialise ~__context ~host:self; - let edition = Db.Host.get_edition ~__context ~self in - assert_equal edition "free" + let edition = Db.Host.get_edition ~__context ~self in + assert_equal edition "free" let test_xcp_mode () = - debug "*** starting test_xcp_mode"; + debug "*** starting test_xcp_mode"; - let __context, self = setup_fixture () in - let module M = struct - let get_version _ = "" ;; - let apply_edition ~__context edition _ = - raise Api_errors.(Server_error (v6d_failure, [])) ;; - let get_editions _ = - raise Api_errors.(Server_error (v6d_failure, [])) ;; - end in - License_init.v6client := (module M); + let __context, self = setup_fixture () in + let module M = struct + let get_version _ = "" ;; + let apply_edition ~__context edition _ = + raise Api_errors.(Server_error (v6d_failure, [])) ;; + let get_editions _ = + raise Api_errors.(Server_error (v6d_failure, [])) ;; + end in + License_init.v6client := (module M); - try + try - Server_helpers.exec_with_new_task "test_ca121350" - (fun __context -> - License_init.initialise ~__context ~host:self; - let edition = Db.Host.get_edition ~__context ~self in - assert_equal edition "free/libre"); + Server_helpers.exec_with_new_task "test_ca121350" + (fun __context -> + License_init.initialise ~__context ~host:self; + let edition = Db.Host.get_edition ~__context ~self in + assert_equal edition "free/libre"); - Mock.Database.flush __context + Mock.Database.flush __context - with e -> - let bt = Printexc.get_backtrace () in - Printf.printf "Backtrace:\n%s\n" bt; - raise e + with e -> + let bt = Printexc.get_backtrace () in + Printf.printf "Backtrace:\n%s\n" bt; + raise 
e let test = - "test_ca121350" >::: - [ - "test_invalid_edition" >:: test_invalid_edition; - "test_xcp_mode" >:: test_xcp_mode; - ] + "test_ca121350" >::: + [ + "test_invalid_edition" >:: test_invalid_edition; + "test_xcp_mode" >:: test_xcp_mode; + ] diff --git a/ocaml/test/test_ca91480.ml b/ocaml/test/test_ca91480.ml index 8c01c0ff87e..f920c2e4e39 100644 --- a/ocaml/test/test_ca91480.ml +++ b/ocaml/test/test_ca91480.ml @@ -6,27 +6,27 @@ open OUnit open Test_common let setup_fixture () = - let __context = make_test_database () in - let self = make_vm ~__context () in + let __context = make_test_database () in + let self = make_vm ~__context () in - let fake_v f = f ~__context ~self ~value:(Ref.make ()) - and fake_m f = f ~__context ~self ~key:"fake" ~value:(Ref.make ()) - and fake_l f = f ~__context ~self ~value:[(Ref.make ())] in + let fake_v f = f ~__context ~self ~value:(Ref.make ()) + and fake_m f = f ~__context ~self ~key:"fake" ~value:(Ref.make ()) + and fake_l f = f ~__context ~self ~value:[(Ref.make ())] in - fake_m Db.VM.add_to_blobs ; - fake_v Db.VM.set_appliance ; - fake_l Db.VM.set_attached_PCIs ; - fake_v Db.VM.set_metrics ; - fake_v Db.VM.set_guest_metrics ; + fake_m Db.VM.add_to_blobs ; + fake_v Db.VM.set_appliance ; + fake_l Db.VM.set_attached_PCIs ; + fake_v Db.VM.set_metrics ; + fake_v Db.VM.set_guest_metrics ; - __context, self + __context, self let test_vm_destroy () = - let __context, self = setup_fixture () in - Xapi_vm_helpers.destroy ~__context ~self + let __context, self = setup_fixture () in + Xapi_vm_helpers.destroy ~__context ~self let test = - "test_ca91480" >::: - [ - "test_vm_destroy" >:: test_vm_destroy; - ] + "test_ca91480" >::: + [ + "test_vm_destroy" >:: test_vm_destroy; + ] diff --git a/ocaml/test/test_common.ml b/ocaml/test/test_common.ml index 9ef5c74cea3..820399f17ab 100644 --- a/ocaml/test/test_common.ml +++ b/ocaml/test/test_common.ml @@ -27,254 +27,254 @@ let skip str = skip_if true str let make_uuid () = 
Uuid.string_of_uuid (Uuid.make_uuid ()) let assert_raises_api_error (code : string) ?(args : string list option) (f : unit -> 'a) : unit = - try - f (); - assert_failure (Printf.sprintf "Function didn't raise expected API error %s" code) - with Api_errors.Server_error (c, a) -> - assert_equal ~printer:id ~msg:"Function raised unexpected API error" code c; - match args with - | None -> () - | Some args -> - assert_equal ~printer:Test_printers.(list string) ~msg:"Function raised API error with unexpected args" args a + try + f (); + assert_failure (Printf.sprintf "Function didn't raise expected API error %s" code) + with Api_errors.Server_error (c, a) -> + assert_equal ~printer:id ~msg:"Function raised unexpected API error" code c; + match args with + | None -> () + | Some args -> + assert_equal ~printer:Test_printers.(list string) ~msg:"Function raised API error with unexpected args" args a let make_localhost ~__context = - let host_info = { - Create_misc.name_label = "test host"; - xen_verstring = "unknown"; - linux_verstring = "something"; - hostname = "localhost"; - uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid; - dom0_uuid = "dom0-uuid"; - oem_manufacturer = None; - oem_model = None; - oem_build_number = None; - machine_serial_number = None; - machine_serial_name = None; - total_memory_mib = 1024L; - dom0_static_max = XenopsMemory.bytes_of_mib 512L; - ssl_legacy = false; - } in + let host_info = { + Create_misc.name_label = "test host"; + xen_verstring = "unknown"; + linux_verstring = "something"; + hostname = "localhost"; + uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid; + dom0_uuid = "dom0-uuid"; + oem_manufacturer = None; + oem_model = None; + oem_build_number = None; + machine_serial_number = None; + machine_serial_name = None; + total_memory_mib = 1024L; + dom0_static_max = XenopsMemory.bytes_of_mib 512L; + ssl_legacy = false; + } in - Dbsync_slave.create_localhost ~__context host_info; - (* We'd like to be able to call 
refresh_localhost_info, but - create_misc is giving me too many headaches right now. Do the - simple thing first and just set localhost_ref instead. *) - (* Dbsync_slave.refresh_localhost_info ~__context host_info; *) - Xapi_globs.localhost_ref := Helpers.get_localhost ~__context; - Create_misc.ensure_domain_zero_records ~__context ~host:!Xapi_globs.localhost_ref host_info; - Dbsync_master.create_pool_record ~__context + Dbsync_slave.create_localhost ~__context host_info; + (* We'd like to be able to call refresh_localhost_info, but + create_misc is giving me too many headaches right now. Do the + simple thing first and just set localhost_ref instead. *) + (* Dbsync_slave.refresh_localhost_info ~__context host_info; *) + Xapi_globs.localhost_ref := Helpers.get_localhost ~__context; + Create_misc.ensure_domain_zero_records ~__context ~host:!Xapi_globs.localhost_ref host_info; + Dbsync_master.create_pool_record ~__context (** Make a simple in-memory database containing a single host and dom0 VM record. *) let make_test_database ?(conn=Mock.Database.conn) ?(reuse=false) () = - let __context = Mock.make_context_with_new_db ~conn ~reuse "mock" in - make_localhost ~__context; - __context + let __context = Mock.make_context_with_new_db ~conn ~reuse "mock" in + make_localhost ~__context; + __context let make_vm ~__context ?(name_label="name_label") ?(name_description="description") - ?(user_version=1L) ?(is_a_template=false) ?(affinity=Ref.null) - ?(memory_target=500L) ?(memory_static_max=1000L) ?(memory_dynamic_max=500L) - ?(memory_dynamic_min=500L) ?(memory_static_min=0L) ?(vCPUs_params=[]) - ?(vCPUs_max=1L) ?(vCPUs_at_startup=1L) ?(actions_after_shutdown=`destroy) - ?(actions_after_reboot=`restart) ?(actions_after_crash=`destroy) - ?(pV_bootloader="") ?(pV_kernel="") ?(pV_ramdisk="") ?(pV_args="") - ?(pV_bootloader_args="") ?(pV_legacy_args="") ?(hVM_boot_policy="BIOS order") - ?(hVM_boot_params=[]) ?(hVM_shadow_multiplier=1.) 
?(platform=[]) ?(pCI_bus="") - ?(other_config=[]) ?(xenstore_data=[]) ?(recommendations="") ?(ha_always_run=false) - ?(ha_restart_priority="") ?(tags=[]) ?(blocked_operations=[]) ?(protection_policy=Ref.null) - ?(is_snapshot_from_vmpp=false) ?(appliance=Ref.null) ?(start_delay=0L) - ?(shutdown_delay=0L) ?(order=0L) ?(suspend_SR=Ref.null) ?(version=0L) - ?(generation_id="0:0") ?(hardware_platform_version=0L) - ?(has_vendor_device=false) ?(has_vendor_device=false) () = - Xapi_vm.create ~__context ~name_label ~name_description ~user_version ~is_a_template - ~affinity ~memory_target ~memory_static_max ~memory_dynamic_max ~memory_dynamic_min - ~memory_static_min ~vCPUs_params ~vCPUs_max ~vCPUs_at_startup ~actions_after_shutdown - ~actions_after_reboot ~actions_after_crash ~pV_bootloader ~pV_kernel ~pV_ramdisk - ~pV_args ~pV_bootloader_args ~pV_legacy_args ~hVM_boot_policy ~hVM_boot_params - ~hVM_shadow_multiplier ~platform ~pCI_bus ~other_config ~xenstore_data ~recommendations - ~ha_always_run ~ha_restart_priority ~tags ~blocked_operations ~protection_policy - ~is_snapshot_from_vmpp ~appliance ~start_delay ~shutdown_delay ~order ~suspend_SR - ~version ~generation_id ~hardware_platform_version ~has_vendor_device + ?(user_version=1L) ?(is_a_template=false) ?(affinity=Ref.null) + ?(memory_target=500L) ?(memory_static_max=1000L) ?(memory_dynamic_max=500L) + ?(memory_dynamic_min=500L) ?(memory_static_min=0L) ?(vCPUs_params=[]) + ?(vCPUs_max=1L) ?(vCPUs_at_startup=1L) ?(actions_after_shutdown=`destroy) + ?(actions_after_reboot=`restart) ?(actions_after_crash=`destroy) + ?(pV_bootloader="") ?(pV_kernel="") ?(pV_ramdisk="") ?(pV_args="") + ?(pV_bootloader_args="") ?(pV_legacy_args="") ?(hVM_boot_policy="BIOS order") + ?(hVM_boot_params=[]) ?(hVM_shadow_multiplier=1.) 
?(platform=[]) ?(pCI_bus="") + ?(other_config=[]) ?(xenstore_data=[]) ?(recommendations="") ?(ha_always_run=false) + ?(ha_restart_priority="") ?(tags=[]) ?(blocked_operations=[]) ?(protection_policy=Ref.null) + ?(is_snapshot_from_vmpp=false) ?(appliance=Ref.null) ?(start_delay=0L) + ?(shutdown_delay=0L) ?(order=0L) ?(suspend_SR=Ref.null) ?(version=0L) + ?(generation_id="0:0") ?(hardware_platform_version=0L) + ?(has_vendor_device=false) ?(has_vendor_device=false) () = + Xapi_vm.create ~__context ~name_label ~name_description ~user_version ~is_a_template + ~affinity ~memory_target ~memory_static_max ~memory_dynamic_max ~memory_dynamic_min + ~memory_static_min ~vCPUs_params ~vCPUs_max ~vCPUs_at_startup ~actions_after_shutdown + ~actions_after_reboot ~actions_after_crash ~pV_bootloader ~pV_kernel ~pV_ramdisk + ~pV_args ~pV_bootloader_args ~pV_legacy_args ~hVM_boot_policy ~hVM_boot_params + ~hVM_shadow_multiplier ~platform ~pCI_bus ~other_config ~xenstore_data ~recommendations + ~ha_always_run ~ha_restart_priority ~tags ~blocked_operations ~protection_policy + ~is_snapshot_from_vmpp ~appliance ~start_delay ~shutdown_delay ~order ~suspend_SR + ~version ~generation_id ~hardware_platform_version ~has_vendor_device let make_host ~__context ?(uuid=make_uuid ()) ?(name_label="host") - ?(name_description="description") ?(hostname="localhost") ?(address="127.0.0.1") - ?(external_auth_type="") ?(external_auth_service_name="") ?(external_auth_configuration=[]) - ?(license_params=[]) ?(edition="free") ?(license_server=[]) ?(local_cache_sr=Ref.null) ?(chipset_info=[]) ?(ssl_legacy=false) () = + ?(name_description="description") ?(hostname="localhost") ?(address="127.0.0.1") + ?(external_auth_type="") ?(external_auth_service_name="") ?(external_auth_configuration=[]) + ?(license_params=[]) ?(edition="free") ?(license_server=[]) ?(local_cache_sr=Ref.null) ?(chipset_info=[]) ?(ssl_legacy=false) () = - Xapi_host.create ~__context ~uuid ~name_label ~name_description ~hostname ~address 
~external_auth_type ~external_auth_service_name ~external_auth_configuration ~license_params ~edition ~license_server ~local_cache_sr ~chipset_info ~ssl_legacy + Xapi_host.create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration ~license_params ~edition ~license_server ~local_cache_sr ~chipset_info ~ssl_legacy let make_pif ~__context ~network ~host ?(device="eth0") ?(mAC="C0:FF:EE:C0:FF:EE") ?(mTU=1500L) - ?(vLAN=(-1L)) ?(physical=true) ?(ip_configuration_mode=`None) ?(iP="") ?(netmask="") - ?(gateway="") ?(dNS="") ?(bond_slave_of=Ref.null) ?(vLAN_master_of=Ref.null) - ?(management=false) ?(other_config=[]) ?(disallow_unplug=false) - ?(ipv6_configuration_mode=`None) ?(iPv6=[""]) ?(ipv6_gateway="") ?(primary_address_type=`IPv4) ?(managed=true) - ?(properties=["gro", "on"]) () = - Xapi_pif.pool_introduce ~__context - ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode - ~iP ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug - ~ipv6_configuration_mode ~iPv6 ~ipv6_gateway ~primary_address_type ~managed ~properties + ?(vLAN=(-1L)) ?(physical=true) ?(ip_configuration_mode=`None) ?(iP="") ?(netmask="") + ?(gateway="") ?(dNS="") ?(bond_slave_of=Ref.null) ?(vLAN_master_of=Ref.null) + ?(management=false) ?(other_config=[]) ?(disallow_unplug=false) + ?(ipv6_configuration_mode=`None) ?(iPv6=[""]) ?(ipv6_gateway="") ?(primary_address_type=`IPv4) ?(managed=true) + ?(properties=["gro", "on"]) () = + Xapi_pif.pool_introduce ~__context + ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode + ~iP ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug + ~ipv6_configuration_mode ~iPv6 ~ipv6_gateway ~primary_address_type ~managed ~properties let make_network ~__context ?(name_label="net") ?(name_description="description") ?(mTU=1500L) - ?(other_config=[]) 
?(bridge="xenbr0") () = - Xapi_network.pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge + ?(other_config=[]) ?(bridge="xenbr0") () = + Xapi_network.pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge let make_vif ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) - ?(current_operations=[]) ?(allowed_operations=[]) ?(reserved=false) - ?(device="") ?(network=Ref.null) ?(vM=Ref.null) ?(mAC="00:00:00:00:00:00") - ?(mAC_autogenerated=false) ?(mTU=1500L) ?(qos_algorithm_type="") - ?(qos_algorithm_params=[]) ?(qos_supported_algorithms=[]) - ?(currently_attached=false) ?(status_code=0L) ?(status_detail="") - ?(runtime_properties=[]) ?(other_config=[]) ?(metrics=Ref.null) - ?(locking_mode=`unlocked) ?(ipv4_allowed=[]) ?(ipv6_allowed=[]) - ?(ipv4_configuration_mode=`None) ?(ipv4_addresses=[]) ?(ipv4_gateway="") - ?(ipv6_configuration_mode=`None) ?(ipv6_addresses=[]) ?(ipv6_gateway="") () = - Db.VIF.create ~__context ~ref ~uuid ~current_operations ~allowed_operations - ~reserved ~device ~network ~vM ~mAC ~mAC_autogenerated ~mTU - ~qos_algorithm_type ~qos_algorithm_params ~qos_supported_algorithms - ~currently_attached ~status_code ~status_detail ~runtime_properties - ~other_config ~metrics ~locking_mode ~ipv4_allowed ~ipv6_allowed - ~ipv4_configuration_mode ~ipv4_addresses ~ipv4_gateway - ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway; - ref + ?(current_operations=[]) ?(allowed_operations=[]) ?(reserved=false) + ?(device="") ?(network=Ref.null) ?(vM=Ref.null) ?(mAC="00:00:00:00:00:00") + ?(mAC_autogenerated=false) ?(mTU=1500L) ?(qos_algorithm_type="") + ?(qos_algorithm_params=[]) ?(qos_supported_algorithms=[]) + ?(currently_attached=false) ?(status_code=0L) ?(status_detail="") + ?(runtime_properties=[]) ?(other_config=[]) ?(metrics=Ref.null) + ?(locking_mode=`unlocked) ?(ipv4_allowed=[]) ?(ipv6_allowed=[]) + ?(ipv4_configuration_mode=`None) ?(ipv4_addresses=[]) ?(ipv4_gateway="") + 
?(ipv6_configuration_mode=`None) ?(ipv6_addresses=[]) ?(ipv6_gateway="") () = + Db.VIF.create ~__context ~ref ~uuid ~current_operations ~allowed_operations + ~reserved ~device ~network ~vM ~mAC ~mAC_autogenerated ~mTU + ~qos_algorithm_type ~qos_algorithm_params ~qos_supported_algorithms + ~currently_attached ~status_code ~status_detail ~runtime_properties + ~other_config ~metrics ~locking_mode ~ipv4_allowed ~ipv6_allowed + ~ipv4_configuration_mode ~ipv4_addresses ~ipv4_gateway + ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway; + ref let make_pool ~__context ~master ?(name_label="") ?(name_description="") - ?(default_SR=Ref.null) ?(suspend_image_SR=Ref.null) ?(crash_dump_SR=Ref.null) - ?(ha_enabled=false) ?(ha_configuration=[]) ?(ha_statefiles=[]) - ?(ha_host_failures_to_tolerate=0L) ?(ha_plan_exists_for=0L) - ?(ha_allow_overcommit=false) ?(ha_overcommitted=false) ?(blobs=[]) ?(tags=[]) - ?(gui_config=[]) ?(health_check_config=[]) ?(wlb_url="") ?(wlb_username="") ?(wlb_password=Ref.null) - ?(wlb_enabled=false) ?(wlb_verify_cert=false) ?(redo_log_enabled=false) - ?(redo_log_vdi=Ref.null) ?(vswitch_controller="") ?(restrictions=[]) - ?(current_operations=[]) ?(allowed_operations=[]) - ?(other_config=[Xapi_globs.memory_ratio_hvm; Xapi_globs.memory_ratio_pv]) - ?(ha_cluster_stack="xhad") ?(guest_agent_config=[]) ?(cpu_info=[]) ?(policy_no_vendor_device=false) ?(live_patching_disabled=false)() = - let pool_ref = Ref.make () in - Db.Pool.create ~__context ~ref:pool_ref - ~uuid:(make_uuid ()) ~name_label ~name_description - ~master ~default_SR ~suspend_image_SR ~crash_dump_SR ~ha_enabled - ~ha_configuration ~ha_statefiles ~ha_host_failures_to_tolerate - ~ha_plan_exists_for ~ha_allow_overcommit ~ha_overcommitted ~blobs ~tags - ~gui_config ~health_check_config ~wlb_url ~wlb_username ~wlb_password ~wlb_enabled - ~wlb_verify_cert ~redo_log_enabled ~redo_log_vdi ~vswitch_controller - ~current_operations ~allowed_operations - ~restrictions ~other_config ~ha_cluster_stack 
~guest_agent_config ~cpu_info ~policy_no_vendor_device ~live_patching_disabled; - pool_ref + ?(default_SR=Ref.null) ?(suspend_image_SR=Ref.null) ?(crash_dump_SR=Ref.null) + ?(ha_enabled=false) ?(ha_configuration=[]) ?(ha_statefiles=[]) + ?(ha_host_failures_to_tolerate=0L) ?(ha_plan_exists_for=0L) + ?(ha_allow_overcommit=false) ?(ha_overcommitted=false) ?(blobs=[]) ?(tags=[]) + ?(gui_config=[]) ?(health_check_config=[]) ?(wlb_url="") ?(wlb_username="") ?(wlb_password=Ref.null) + ?(wlb_enabled=false) ?(wlb_verify_cert=false) ?(redo_log_enabled=false) + ?(redo_log_vdi=Ref.null) ?(vswitch_controller="") ?(restrictions=[]) + ?(current_operations=[]) ?(allowed_operations=[]) + ?(other_config=[Xapi_globs.memory_ratio_hvm; Xapi_globs.memory_ratio_pv]) + ?(ha_cluster_stack="xhad") ?(guest_agent_config=[]) ?(cpu_info=[]) ?(policy_no_vendor_device=false) ?(live_patching_disabled=false)() = + let pool_ref = Ref.make () in + Db.Pool.create ~__context ~ref:pool_ref + ~uuid:(make_uuid ()) ~name_label ~name_description + ~master ~default_SR ~suspend_image_SR ~crash_dump_SR ~ha_enabled + ~ha_configuration ~ha_statefiles ~ha_host_failures_to_tolerate + ~ha_plan_exists_for ~ha_allow_overcommit ~ha_overcommitted ~blobs ~tags + ~gui_config ~health_check_config ~wlb_url ~wlb_username ~wlb_password ~wlb_enabled + ~wlb_verify_cert ~redo_log_enabled ~redo_log_vdi ~vswitch_controller + ~current_operations ~allowed_operations + ~restrictions ~other_config ~ha_cluster_stack ~guest_agent_config ~cpu_info ~policy_no_vendor_device ~live_patching_disabled; + pool_ref let default_sm_features = [ - "SR_PROBE", 1L; - "SR_UPDATE", 1L; - "VDI_CREATE", 1L; - "VDI_DELETE", 1L; - "VDI_ATTACH", 1L; - "VDI_DETACH", 1L; - "VDI_UPDATE", 1L; - "VDI_CLONE", 1L; - "VDI_SNAPSHOT", 1L; - "VDI_RESIZE", 1L; - "VDI_GENERATE_CONFIG", 1L; - "VDI_RESET_ON_BOOT", 2L; + "SR_PROBE", 1L; + "SR_UPDATE", 1L; + "VDI_CREATE", 1L; + "VDI_DELETE", 1L; + "VDI_ATTACH", 1L; + "VDI_DETACH", 1L; + "VDI_UPDATE", 1L; + "VDI_CLONE", 1L; 
+ "VDI_SNAPSHOT", 1L; + "VDI_RESIZE", 1L; + "VDI_GENERATE_CONFIG", 1L; + "VDI_RESET_ON_BOOT", 2L; ] let make_sm ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(_type="sm") - ?(name_label="") ?(name_description="") ?(vendor="") ?(copyright="") - ?(version="") ?(required_api_version="") ?(capabilities=[]) ?(features=default_sm_features) - ?(configuration=[]) ?(other_config=[]) ?(driver_filename="/dev/null") - ?(required_cluster_stack=[]) () = - Db.SM.create ~__context ~ref:ref ~uuid ~_type ~name_label ~name_description - ~vendor ~copyright ~version ~required_api_version ~capabilities ~features - ~configuration ~other_config ~driver_filename ~required_cluster_stack; - ref + ?(name_label="") ?(name_description="") ?(vendor="") ?(copyright="") + ?(version="") ?(required_api_version="") ?(capabilities=[]) ?(features=default_sm_features) + ?(configuration=[]) ?(other_config=[]) ?(driver_filename="/dev/null") + ?(required_cluster_stack=[]) () = + Db.SM.create ~__context ~ref:ref ~uuid ~_type ~name_label ~name_description + ~vendor ~copyright ~version ~required_api_version ~capabilities ~features + ~configuration ~other_config ~driver_filename ~required_cluster_stack; + ref let make_sr ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(name_label="") ?(name_description="") ?(allowed_operations=[]) - ?(current_operations=[]) ?(virtual_allocation=0L) ?(physical_utilisation=0L) ?(physical_size=0L) ?(_type="sm") - ?(content_type="") ?(shared=true) ?(other_config=[]) ?(tags=[]) ?(default_vdi_visibility=true) - ?(sm_config=[]) ?(blobs=[]) ?(local_cache_enabled=false) ?(introduced_by=Ref.make ()) ?(clustered=false) - ?(is_tools_sr=false)() = - Db.SR.create ~__context ~ref ~uuid ~name_label ~name_description ~allowed_operations - ~current_operations ~virtual_allocation ~physical_utilisation ~physical_size ~_type - ~content_type ~shared ~other_config ~tags ~default_vdi_visibility ~sm_config ~blobs - ~local_cache_enabled ~introduced_by ~clustered ~is_tools_sr; - ref + 
?(current_operations=[]) ?(virtual_allocation=0L) ?(physical_utilisation=0L) ?(physical_size=0L) ?(_type="sm") + ?(content_type="") ?(shared=true) ?(other_config=[]) ?(tags=[]) ?(default_vdi_visibility=true) + ?(sm_config=[]) ?(blobs=[]) ?(local_cache_enabled=false) ?(introduced_by=Ref.make ()) ?(clustered=false) + ?(is_tools_sr=false)() = + Db.SR.create ~__context ~ref ~uuid ~name_label ~name_description ~allowed_operations + ~current_operations ~virtual_allocation ~physical_utilisation ~physical_size ~_type + ~content_type ~shared ~other_config ~tags ~default_vdi_visibility ~sm_config ~blobs + ~local_cache_enabled ~introduced_by ~clustered ~is_tools_sr; + ref let make_pbd ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(host=Ref.make ()) ?(sR=Ref.make ()) - ?(device_config=[]) ?(currently_attached=true) ?(other_config=[]) () = - Db.PBD.create ~__context ~ref ~uuid ~host ~sR ~device_config ~currently_attached ~other_config; - ref + ?(device_config=[]) ?(currently_attached=true) ?(other_config=[]) () = + Db.PBD.create ~__context ~ref ~uuid ~host ~sR ~device_config ~currently_attached ~other_config; + ref let make_vbd ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(allowed_operations=[]) - ?(current_operations=[]) ?(vM=Ref.make ()) ?(vDI=Ref.make ()) ?(device="") - ?(userdevice="") ?(bootable=true) ?(mode=`RW) ?(_type=`Disk) - ?(unpluggable=false) ?(storage_lock=false) ?(empty=false) - ?(reserved=false) ?(other_config=[]) ?(currently_attached=false) - ?(status_code=0L) ?(status_detail="") ?(runtime_properties=[]) - ?(qos_algorithm_type="") ?(qos_algorithm_params=[]) ?(qos_supported_algorithms=[]) - ?(metrics = Ref.make ()) () = - Db.VBD.create ~__context ~ref ~uuid ~allowed_operations ~current_operations ~vM ~vDI ~device - ~userdevice ~bootable ~mode ~_type ~unpluggable ~storage_lock ~empty ~reserved ~other_config - ~currently_attached ~status_code ~status_detail ~runtime_properties ~qos_algorithm_type - ~qos_algorithm_params ~qos_supported_algorithms 
~metrics; - ref + ?(current_operations=[]) ?(vM=Ref.make ()) ?(vDI=Ref.make ()) ?(device="") + ?(userdevice="") ?(bootable=true) ?(mode=`RW) ?(_type=`Disk) + ?(unpluggable=false) ?(storage_lock=false) ?(empty=false) + ?(reserved=false) ?(other_config=[]) ?(currently_attached=false) + ?(status_code=0L) ?(status_detail="") ?(runtime_properties=[]) + ?(qos_algorithm_type="") ?(qos_algorithm_params=[]) ?(qos_supported_algorithms=[]) + ?(metrics = Ref.make ()) () = + Db.VBD.create ~__context ~ref ~uuid ~allowed_operations ~current_operations ~vM ~vDI ~device + ~userdevice ~bootable ~mode ~_type ~unpluggable ~storage_lock ~empty ~reserved ~other_config + ~currently_attached ~status_code ~status_detail ~runtime_properties ~qos_algorithm_type + ~qos_algorithm_params ~qos_supported_algorithms ~metrics; + ref let make_vdi ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(name_label="") - ?(name_description="") ?(allowed_operations=[]) ?(current_operations=[]) ?(sR=Ref.make ()) - ?(virtual_size=0L) ?(physical_utilisation=0L) ?(_type=`user) ?(sharable=false) ?(read_only=false) - ?(other_config=[]) ?(storage_lock=false) ?(location="") ?(managed=false) ?(missing=false) - ?(parent=Ref.null) ?(xenstore_data=[]) ?(sm_config=[]) ?(is_a_snapshot=false) - ?(snapshot_of=Ref.null) ?(snapshot_time=API.Date.never) ?(tags=[]) ?(allow_caching=true) - ?(on_boot=`persist) ?(metadata_of_pool=Ref.make ()) ?(metadata_latest=true) ?(is_tools_iso=false) () = - Db.VDI.create ~__context ~ref ~uuid ~name_label ~name_description ~allowed_operations - ~current_operations ~sR ~virtual_size ~physical_utilisation ~_type ~sharable ~read_only ~other_config - ~storage_lock ~location ~managed ~missing ~parent ~xenstore_data ~sm_config ~is_a_snapshot ~snapshot_of - ~snapshot_time ~tags ~allow_caching ~on_boot ~metadata_of_pool ~metadata_latest ~is_tools_iso; - ref + ?(name_description="") ?(allowed_operations=[]) ?(current_operations=[]) ?(sR=Ref.make ()) + ?(virtual_size=0L) ?(physical_utilisation=0L) 
?(_type=`user) ?(sharable=false) ?(read_only=false) + ?(other_config=[]) ?(storage_lock=false) ?(location="") ?(managed=false) ?(missing=false) + ?(parent=Ref.null) ?(xenstore_data=[]) ?(sm_config=[]) ?(is_a_snapshot=false) + ?(snapshot_of=Ref.null) ?(snapshot_time=API.Date.never) ?(tags=[]) ?(allow_caching=true) + ?(on_boot=`persist) ?(metadata_of_pool=Ref.make ()) ?(metadata_latest=true) ?(is_tools_iso=false) () = + Db.VDI.create ~__context ~ref ~uuid ~name_label ~name_description ~allowed_operations + ~current_operations ~sR ~virtual_size ~physical_utilisation ~_type ~sharable ~read_only ~other_config + ~storage_lock ~location ~managed ~missing ~parent ~xenstore_data ~sm_config ~is_a_snapshot ~snapshot_of + ~snapshot_time ~tags ~allow_caching ~on_boot ~metadata_of_pool ~metadata_latest ~is_tools_iso; + ref let make_pci ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(class_id="") - ?(class_name="") ?(vendor_id="") ?(vendor_name="") ?(device_id="") - ?(device_name="") ?(host=Ref.null) ?(pci_id="0000:00:00.0") ?(functions=0L) - ?(dependencies=[]) ?(other_config=[]) ?(subsystem_vendor_id="") - ?(subsystem_vendor_name="") ?(subsystem_device_id="") - ?(subsystem_device_name="") () = - Db.PCI.create ~__context ~ref ~uuid ~class_id ~class_name ~vendor_id - ~vendor_name ~device_id ~device_name ~host ~pci_id ~functions ~dependencies - ~other_config ~subsystem_vendor_id ~subsystem_vendor_name - ~subsystem_device_id ~subsystem_device_name; - ref + ?(class_name="") ?(vendor_id="") ?(vendor_name="") ?(device_id="") + ?(device_name="") ?(host=Ref.null) ?(pci_id="0000:00:00.0") ?(functions=0L) + ?(dependencies=[]) ?(other_config=[]) ?(subsystem_vendor_id="") + ?(subsystem_vendor_name="") ?(subsystem_device_id="") + ?(subsystem_device_name="") () = + Db.PCI.create ~__context ~ref ~uuid ~class_id ~class_name ~vendor_id + ~vendor_name ~device_id ~device_name ~host ~pci_id ~functions ~dependencies + ~other_config ~subsystem_vendor_id ~subsystem_vendor_name + 
~subsystem_device_id ~subsystem_device_name; + ref let make_pgpu ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(pCI=Ref.null) - ?(gPU_group=Ref.null) ?(host=Ref.null) ?(other_config=[]) - ?(size=Constants.pgpu_default_size) - ?(supported_VGPU_types=[]) ?(enabled_VGPU_types=[]) - ?(supported_VGPU_max_capacities=[]) ?(dom0_access=`enabled) - ?(is_system_display_device=false) () = - Db.PGPU.create ~__context ~ref ~uuid ~pCI ~gPU_group - ~host ~other_config ~size ~supported_VGPU_max_capacities ~dom0_access - ~is_system_display_device; - Db.PGPU.set_supported_VGPU_types ~__context ~self:ref - ~value:supported_VGPU_types; - Db.PGPU.set_enabled_VGPU_types ~__context ~self:ref - ~value:enabled_VGPU_types; - ref + ?(gPU_group=Ref.null) ?(host=Ref.null) ?(other_config=[]) + ?(size=Constants.pgpu_default_size) + ?(supported_VGPU_types=[]) ?(enabled_VGPU_types=[]) + ?(supported_VGPU_max_capacities=[]) ?(dom0_access=`enabled) + ?(is_system_display_device=false) () = + Db.PGPU.create ~__context ~ref ~uuid ~pCI ~gPU_group + ~host ~other_config ~size ~supported_VGPU_max_capacities ~dom0_access + ~is_system_display_device; + Db.PGPU.set_supported_VGPU_types ~__context ~self:ref + ~value:supported_VGPU_types; + Db.PGPU.set_enabled_VGPU_types ~__context ~self:ref + ~value:enabled_VGPU_types; + ref let make_gpu_group ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) - ?(name_label="") ?(name_description="") ?(gPU_types=[]) ?(other_config=[]) - ?(allocation_algorithm=`depth_first) () = - Db.GPU_group.create ~__context ~ref ~uuid ~name_label ~name_description - ~gPU_types ~other_config ~allocation_algorithm; - ref + ?(name_label="") ?(name_description="") ?(gPU_types=[]) ?(other_config=[]) + ?(allocation_algorithm=`depth_first) () = + Db.GPU_group.create ~__context ~ref ~uuid ~name_label ~name_description + ~gPU_types ~other_config ~allocation_algorithm; + ref let make_vgpu ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) ?(vM=Ref.null) - ?(gPU_group=Ref.null) ?(device="0") 
?(currently_attached=false) - ?(other_config=[]) ?(_type=Ref.null) ?(resident_on=Ref.null) - ?(scheduled_to_be_resident_on=Ref.null) () = - Db.VGPU.create ~__context ~ref ~uuid ~vM ~gPU_group ~device ~currently_attached - ~other_config ~_type ~resident_on ~scheduled_to_be_resident_on; - ref + ?(gPU_group=Ref.null) ?(device="0") ?(currently_attached=false) + ?(other_config=[]) ?(_type=Ref.null) ?(resident_on=Ref.null) + ?(scheduled_to_be_resident_on=Ref.null) () = + Db.VGPU.create ~__context ~ref ~uuid ~vM ~gPU_group ~device ~currently_attached + ~other_config ~_type ~resident_on ~scheduled_to_be_resident_on; + ref let make_vgpu_type ~__context ?(ref=Ref.make ()) ?(uuid=make_uuid ()) - ?(vendor_name="") ?(model_name="") ?(framebuffer_size=0L) ?(max_heads=0L) - ?(max_resolution_x=0L) ?(max_resolution_y=0L) ?(size=0L) - ?(internal_config=[]) ?(implementation=`passthrough) - ?(identifier="") ?(experimental=false) () = - Db.VGPU_type.create ~__context ~ref ~uuid ~vendor_name ~model_name - ~framebuffer_size ~max_heads ~max_resolution_x ~max_resolution_y ~size - ~internal_config ~implementation ~identifier ~experimental; - ref + ?(vendor_name="") ?(model_name="") ?(framebuffer_size=0L) ?(max_heads=0L) + ?(max_resolution_x=0L) ?(max_resolution_y=0L) ?(size=0L) + ?(internal_config=[]) ?(implementation=`passthrough) + ?(identifier="") ?(experimental=false) () = + Db.VGPU_type.create ~__context ~ref ~uuid ~vendor_name ~model_name + ~framebuffer_size ~max_heads ~max_resolution_x ~max_resolution_y ~size + ~internal_config ~implementation ~identifier ~experimental; + ref diff --git a/ocaml/test/test_cpuid_helpers.ml b/ocaml/test/test_cpuid_helpers.ml index 26d2e6c34d6..797a94ff61b 100644 --- a/ocaml/test/test_cpuid_helpers.ml +++ b/ocaml/test/test_cpuid_helpers.ml @@ -19,489 +19,489 @@ open Cpuid_helpers module StringOfFeatures = Generic.Make (struct - module Io = struct - type input_t = int64 array - type output_t = string - let string_of_input_t = Test_printers.(array int64) - 
let string_of_output_t = Test_printers.string - end - - let transform = Cpuid_helpers.string_of_features - - let tests = [ - [|0L; 2L; 123L|], "00000000-00000002-0000007b"; - [|0L|], "00000000"; - [||], ""; - ] -end) + module Io = struct + type input_t = int64 array + type output_t = string + let string_of_input_t = Test_printers.(array int64) + let string_of_output_t = Test_printers.string + end + + let transform = Cpuid_helpers.string_of_features + + let tests = [ + [|0L; 2L; 123L|], "00000000-00000002-0000007b"; + [|0L|], "00000000"; + [||], ""; + ] + end) module FeaturesOfString = Generic.Make (struct - module Io = struct - type input_t = string - type output_t = int64 array - let string_of_input_t = Test_printers.string - let string_of_output_t = Test_printers.(array int64) - end - - let transform = Cpuid_helpers.features_of_string - - let tests = [ - "00000000-00000002-0000007b", [|0L; 2L; 123L|]; - "00000000", [|0L|]; - "", [||]; - ] -end) + module Io = struct + type input_t = string + type output_t = int64 array + let string_of_input_t = Test_printers.string + let string_of_output_t = Test_printers.(array int64) + end + + let transform = Cpuid_helpers.features_of_string + + let tests = [ + "00000000-00000002-0000007b", [|0L; 2L; 123L|]; + "00000000", [|0L|]; + "", [||]; + ] + end) module RoundTripFeaturesToFeatures = Generic.Make (struct - module Io = struct - type input_t = int64 array - type output_t = int64 array - let string_of_input_t = Test_printers.(array int64) - let string_of_output_t = Test_printers.(array int64) - end - - let transform = fun x -> x |> Cpuid_helpers.string_of_features |> Cpuid_helpers.features_of_string - - let tests = List.map (fun x -> x, x) [ - [|0L; 1L; 123L|]; - [|1L|]; - [|0L|]; - [||]; - ] -end) + module Io = struct + type input_t = int64 array + type output_t = int64 array + let string_of_input_t = Test_printers.(array int64) + let string_of_output_t = Test_printers.(array int64) + end + + let transform = fun x -> x |> 
Cpuid_helpers.string_of_features |> Cpuid_helpers.features_of_string + + let tests = List.map (fun x -> x, x) [ + [|0L; 1L; 123L|]; + [|1L|]; + [|0L|]; + [||]; + ] + end) module RoundTripStringToString = Generic.Make (struct - module Io = struct - type input_t = string - type output_t = string - let string_of_input_t = Test_printers.string - let string_of_output_t = Test_printers.string - end - - let transform = fun x -> x |> Cpuid_helpers.features_of_string |> Cpuid_helpers.string_of_features - - let tests = List.map (fun x -> x, x) [ - "00000000-00000002-0000007b"; - "00000001"; - "00000000"; - ""; - ] -end) + module Io = struct + type input_t = string + type output_t = string + let string_of_input_t = Test_printers.string + let string_of_output_t = Test_printers.string + end + + let transform = fun x -> x |> Cpuid_helpers.features_of_string |> Cpuid_helpers.string_of_features + + let tests = List.map (fun x -> x, x) [ + "00000000-00000002-0000007b"; + "00000001"; + "00000000"; + ""; + ] + end) module ParseFailure = Generic.Make (struct - module Io = struct - type input_t = string - type output_t = exn - let string_of_input_t = Test_printers.string - let string_of_output_t = Test_printers.exn - end - - exception NoExceptionRaised - let transform = fun x -> - try - ignore (Cpuid_helpers.features_of_string x); - raise NoExceptionRaised - with e -> e - - let tests = List.map (fun x -> x, InvalidFeatureString x) [ - "foo bar baz"; - "fgfg-1234"; - "0123-foo"; - "foo-0123"; - "-1234"; - "1234-"; - ] -end) + module Io = struct + type input_t = string + type output_t = exn + let string_of_input_t = Test_printers.string + let string_of_output_t = Test_printers.exn + end + + exception NoExceptionRaised + let transform = fun x -> + try + ignore (Cpuid_helpers.features_of_string x); + raise NoExceptionRaised + with e -> e + + let tests = List.map (fun x -> x, InvalidFeatureString x) [ + "foo bar baz"; + "fgfg-1234"; + "0123-foo"; + "foo-0123"; + "-1234"; + "1234-"; + ] + 
end) module Extend = Generic.Make (struct - module Io = struct - type input_t = int64 array * int64 array - type output_t = int64 array - let string_of_input_t = Test_printers.(pair (array int64) (array int64)) - let string_of_output_t = Test_printers.(array int64) - end - - let transform = fun (arr0, arr1) -> Cpuid_helpers.extend arr0 arr1 - - let tests = [ - ([| |], [| |]), [| |]; - ([| |], [| 0L; 2L |]), [| 0L; 2L |]; - ([| 1L |], [| |]), [| |]; - ([| 1L |], [| 0L |]), [| 1L |]; - ([| 1L |], [| 0L; 2L |]), [| 1L; 2L |]; - ([| 1L; 0L |], [| 0L; 2L |]), [| 1L; 0L |]; - ([| 1L; 0L |], [| 0L; 2L; 4L; 9L |]), [| 1L; 0L; 4L; 9L |]; - ] -end) + module Io = struct + type input_t = int64 array * int64 array + type output_t = int64 array + let string_of_input_t = Test_printers.(pair (array int64) (array int64)) + let string_of_output_t = Test_printers.(array int64) + end + + let transform = fun (arr0, arr1) -> Cpuid_helpers.extend arr0 arr1 + + let tests = [ + ([| |], [| |]), [| |]; + ([| |], [| 0L; 2L |]), [| 0L; 2L |]; + ([| 1L |], [| |]), [| |]; + ([| 1L |], [| 0L |]), [| 1L |]; + ([| 1L |], [| 0L; 2L |]), [| 1L; 2L |]; + ([| 1L; 0L |], [| 0L; 2L |]), [| 1L; 0L |]; + ([| 1L; 0L |], [| 0L; 2L; 4L; 9L |]), [| 1L; 0L; 4L; 9L |]; + ] + end) module ZeroExtend = Generic.Make (struct - module Io = struct - type input_t = int64 array * int - type output_t = int64 array - let string_of_input_t = Test_printers.(pair (array int64) int) - let string_of_output_t = Test_printers.(array int64) - end - - let transform = fun (arr, len) -> Cpuid_helpers.zero_extend arr len - - let tests = [ - ([| 1L |], 2), [| 1L; 0L |]; - ([| 1L |], 1), [| 1L; |]; - ([| |], 2), [| 0L; 0L |]; - ([| |], 1), [| 0L |]; - ([| |], 0), [| |]; - ([| 1L; 2L |], 0), [| |]; - ([| 1L; 2L |], 1), [| 1L |]; - ([| 1L; 2L |], 2), [| 1L; 2L |]; - ] -end) + module Io = struct + type input_t = int64 array * int + type output_t = int64 array + let string_of_input_t = Test_printers.(pair (array int64) int) + let 
string_of_output_t = Test_printers.(array int64) + end + + let transform = fun (arr, len) -> Cpuid_helpers.zero_extend arr len + + let tests = [ + ([| 1L |], 2), [| 1L; 0L |]; + ([| 1L |], 1), [| 1L; |]; + ([| |], 2), [| 0L; 0L |]; + ([| |], 1), [| 0L |]; + ([| |], 0), [| |]; + ([| 1L; 2L |], 0), [| |]; + ([| 1L; 2L |], 1), [| 1L |]; + ([| 1L; 2L |], 2), [| 1L; 2L |]; + ] + end) module Intersect = Generic.Make (struct - module Io = struct - type input_t = int64 array * int64 array - type output_t = int64 array - let string_of_input_t = Test_printers.(pair (array int64) (array int64)) - let string_of_output_t = Test_printers.(array int64) - end - - let transform = fun (a, b) -> Cpuid_helpers.intersect a b - - let tests = [ - (* Intersect should follow monoid laws - identity and commutativity *) - ([| |], [| |]), [| |]; - ([| 1L; 2L; 3L |], [| |]), [| 1L; 2L; 3L |]; - ([| |], [| 1L; 2L; 3L |]), [| 1L; 2L; 3L |]; - - ([| 7L; 3L |], [| 5L; |]), [| 5L; 0L |]; - ([| 5L; |], [| 7L; 3L |]), [| 5L; 0L |]; - - ([| 1L |], [| 1L |]), [| 1L |]; - ([| 1L |], [| 1L; 0L |]), [| 1L; 0L |]; - - ([| 1L; 2L; 3L |], [| 1L; 1L; 1L |]), [| 1L; 0L; 1L |]; - ([| 1L; 2L; 3L |], [| 0L; 0L; 0L |]), [| 0L; 0L; 0L |]; - - ([| 0b00000000L |], [| 0b11111111L |]), [| 0b00000000L |]; - ([| 0b11111111L |], [| 0b11111111L |]), [| 0b11111111L |]; - ([| 0b01111111L |], [| 0b11111111L |]), [| 0b01111111L |]; - ([| 0b00000111L |], [| 0b00001111L |]), [| 0b00000111L |]; - ([| 0b00011111L |], [| 0b00001111L |]), [| 0b00001111L |]; - - ([| 0b00000000L; 0b11111111L |], [| 0b11111111L; 0b00000000L |]), - [| 0b00000000L; 0b00000000L |]; - ([| 0b11111111L; 0b01010101L |], [| 0b11111111L; 0b01010101L |]), - [| 0b11111111L; 0b01010101L |]; - ([| 0b01111111L; 0b10000000L |], [| 0b11111111L; 0b00000000L |]), - [| 0b01111111L; 0b00000000L |]; - ([| 0b00000111L; 0b11100000L |], [| 0b00001111L; 0b11110000L |]), - [| 0b00000111L; 0b11100000L |]; - ] -end) + module Io = struct + type input_t = int64 array * int64 array 
+ type output_t = int64 array + let string_of_input_t = Test_printers.(pair (array int64) (array int64)) + let string_of_output_t = Test_printers.(array int64) + end + + let transform = fun (a, b) -> Cpuid_helpers.intersect a b + + let tests = [ + (* Intersect should follow monoid laws - identity and commutativity *) + ([| |], [| |]), [| |]; + ([| 1L; 2L; 3L |], [| |]), [| 1L; 2L; 3L |]; + ([| |], [| 1L; 2L; 3L |]), [| 1L; 2L; 3L |]; + + ([| 7L; 3L |], [| 5L; |]), [| 5L; 0L |]; + ([| 5L; |], [| 7L; 3L |]), [| 5L; 0L |]; + + ([| 1L |], [| 1L |]), [| 1L |]; + ([| 1L |], [| 1L; 0L |]), [| 1L; 0L |]; + + ([| 1L; 2L; 3L |], [| 1L; 1L; 1L |]), [| 1L; 0L; 1L |]; + ([| 1L; 2L; 3L |], [| 0L; 0L; 0L |]), [| 0L; 0L; 0L |]; + + ([| 0b00000000L |], [| 0b11111111L |]), [| 0b00000000L |]; + ([| 0b11111111L |], [| 0b11111111L |]), [| 0b11111111L |]; + ([| 0b01111111L |], [| 0b11111111L |]), [| 0b01111111L |]; + ([| 0b00000111L |], [| 0b00001111L |]), [| 0b00000111L |]; + ([| 0b00011111L |], [| 0b00001111L |]), [| 0b00001111L |]; + + ([| 0b00000000L; 0b11111111L |], [| 0b11111111L; 0b00000000L |]), + [| 0b00000000L; 0b00000000L |]; + ([| 0b11111111L; 0b01010101L |], [| 0b11111111L; 0b01010101L |]), + [| 0b11111111L; 0b01010101L |]; + ([| 0b01111111L; 0b10000000L |], [| 0b11111111L; 0b00000000L |]), + [| 0b01111111L; 0b00000000L |]; + ([| 0b00000111L; 0b11100000L |], [| 0b00001111L; 0b11110000L |]), + [| 0b00000111L; 0b11100000L |]; + ] + end) module Comparisons = Generic.Make (struct - module Io = struct - type input_t = int64 array * int64 array - type output_t = (bool * bool) - let string_of_input_t = Test_printers.(pair (array int64) (array int64)) - let string_of_output_t = Test_printers.(pair bool bool) - end - - let transform = fun (a, b) -> - Cpuid_helpers.(is_subset a b, is_strict_subset a b) - - let tests = [ - (* Some of this behaviour is counterintuitive because - feature flags are automatically zero-extended when - compared *) - ([| |], [| |]), (true, false); - ([| 1L; 
2L; 3L |], [| |]), (true, true); - ([| |], [| 1L; 2L; 3L |]), (false, false); - - ([| 7L; 3L |], [| 5L; |]), (false, false); - ([| 5L; |], [| 7L; 3L |]), (false, false); - - ([| 1L |], [| 1L |]), (true, false); - ([| 1L |], [| 1L; 0L |]), (false, false); - ([| 1L; 0L |], [| 1L |]), (true, true); - - (features_of_string "07cbfbff-04082201-20100800-00000001-00000000-00000000-00000000-00000000-00000000", features_of_string "07c9cbf5-80082201-20100800-00000001-00000000-00000000-00000000-00000000-00000000"), (false, false); - - ([| 0b00000000L |], [| 0b11111111L |]), (true, true); - ([| 0b11111111L |], [| 0b11111111L |]), (true, false); - ([| 0b01111111L |], [| 0b11111111L |]), (true, true); - ([| 0b00000111L |], [| 0b00001111L |]), (true, true); - ([| 0b00011111L |], [| 0b00001111L |]), (false, false); - - ([| 0b00000000L; 0b11111111L |], [| 0b11111111L; 0b00000000L |]), - (false, false); - ([| 0b11111111L; 0b01010101L |], [| 0b11111111L; 0b01010101L |]), - (true, false); - ([| 0b01111111L; 0b10000000L |], [| 0b11111111L; 0b00000000L |]), - (false, false); - ([| 0b00000111L; 0b11100000L |], [| 0b00001111L; 0b11110000L |]), - (true, true); - ] -end) + module Io = struct + type input_t = int64 array * int64 array + type output_t = (bool * bool) + let string_of_input_t = Test_printers.(pair (array int64) (array int64)) + let string_of_output_t = Test_printers.(pair bool bool) + end + + let transform = fun (a, b) -> + Cpuid_helpers.(is_subset a b, is_strict_subset a b) + + let tests = [ + (* Some of this behaviour is counterintuitive because + feature flags are automatically zero-extended when + compared *) + ([| |], [| |]), (true, false); + ([| 1L; 2L; 3L |], [| |]), (true, true); + ([| |], [| 1L; 2L; 3L |]), (false, false); + + ([| 7L; 3L |], [| 5L; |]), (false, false); + ([| 5L; |], [| 7L; 3L |]), (false, false); + + ([| 1L |], [| 1L |]), (true, false); + ([| 1L |], [| 1L; 0L |]), (false, false); + ([| 1L; 0L |], [| 1L |]), (true, true); + + (features_of_string 
"07cbfbff-04082201-20100800-00000001-00000000-00000000-00000000-00000000-00000000", features_of_string "07c9cbf5-80082201-20100800-00000001-00000000-00000000-00000000-00000000-00000000"), (false, false); + + ([| 0b00000000L |], [| 0b11111111L |]), (true, true); + ([| 0b11111111L |], [| 0b11111111L |]), (true, false); + ([| 0b01111111L |], [| 0b11111111L |]), (true, true); + ([| 0b00000111L |], [| 0b00001111L |]), (true, true); + ([| 0b00011111L |], [| 0b00001111L |]), (false, false); + + ([| 0b00000000L; 0b11111111L |], [| 0b11111111L; 0b00000000L |]), + (false, false); + ([| 0b11111111L; 0b01010101L |], [| 0b11111111L; 0b01010101L |]), + (true, false); + ([| 0b01111111L; 0b10000000L |], [| 0b11111111L; 0b00000000L |]), + (false, false); + ([| 0b00000111L; 0b11100000L |], [| 0b00001111L; 0b11110000L |]), + (true, true); + ] + end) module Accessors = Generic.Make (struct - module Io = struct - type input_t = (string * string) list - type output_t = string * int * int * int64 array * int64 array - let string_of_input_t = Test_printers.(assoc_list string string) - let string_of_output_t = Test_printers.(tuple5 string int int (array int64) (array int64)) - end - - let transform = fun record -> - let open Map_check in - getf vendor record, - getf socket_count record, - getf cpu_count record, - getf features_pv record, - getf features_hvm record - - let tests = [ - ["vendor", "Intel"; "socket_count", "1"; "cpu_count", "1"; - "features_pv", "00000001-00000002-00000003"; - "features_hvm", "0000000a-0000000b-0000000c"], - ("Intel", 1, 1, [| 1L; 2L; 3L |], [| 0xaL; 0xbL; 0xcL |]); - ["vendor", "Amd"; "socket_count", "6"; "cpu_count", "24"; - "features_pv", "00000001"; - "features_hvm", ""], - ("Amd", 6, 24, [| 1L |], [| |]); - ] -end) + module Io = struct + type input_t = (string * string) list + type output_t = string * int * int * int64 array * int64 array + let string_of_input_t = Test_printers.(assoc_list string string) + let string_of_output_t = Test_printers.(tuple5 
string int int (array int64) (array int64)) + end + + let transform = fun record -> + let open Map_check in + getf vendor record, + getf socket_count record, + getf cpu_count record, + getf features_pv record, + getf features_hvm record + + let tests = [ + ["vendor", "Intel"; "socket_count", "1"; "cpu_count", "1"; + "features_pv", "00000001-00000002-00000003"; + "features_hvm", "0000000a-0000000b-0000000c"], + ("Intel", 1, 1, [| 1L; 2L; 3L |], [| 0xaL; 0xbL; 0xcL |]); + ["vendor", "Amd"; "socket_count", "6"; "cpu_count", "24"; + "features_pv", "00000001"; + "features_hvm", ""], + ("Amd", 6, 24, [| 1L |], [| |]); + ] + end) module Setters = Generic.Make (struct - module Io = struct - type input_t = string * int * int * int64 array * int64 array - type output_t = (string * string) list - let string_of_input_t = Test_printers.(tuple5 string int int (array int64) (array int64)) - let string_of_output_t = Test_printers.(assoc_list string string) - end - - let transform = fun (name, sockets, cpus, pv, hvm) -> - let open Map_check in - [] - |> setf vendor name - |> setf socket_count sockets - |> setf cpu_count cpus - |> setf features_pv pv - |> setf features_hvm hvm - |> List.sort compare - - let tests = [ - ("Intel", 1, 1, [| 1L; 2L; 3L |], [| 0xaL; 0xbL; 0xcL |]), - List.sort compare ["vendor", "Intel"; - "socket_count", "1"; "cpu_count", "1"; - "features_pv", "00000001-00000002-00000003"; - "features_hvm", "0000000a-0000000b-0000000c"]; - - ("Amd", 6, 24, [| 1L |], [| |]), - List.sort compare ["vendor", "Amd"; - "socket_count", "6"; "cpu_count", "24"; - "features_pv", "00000001"; - "features_hvm", ""] - ] -end) + module Io = struct + type input_t = string * int * int * int64 array * int64 array + type output_t = (string * string) list + let string_of_input_t = Test_printers.(tuple5 string int int (array int64) (array int64)) + let string_of_output_t = Test_printers.(assoc_list string string) + end + + let transform = fun (name, sockets, cpus, pv, hvm) -> + let open 
Map_check in + [] + |> setf vendor name + |> setf socket_count sockets + |> setf cpu_count cpus + |> setf features_pv pv + |> setf features_hvm hvm + |> List.sort compare + + let tests = [ + ("Intel", 1, 1, [| 1L; 2L; 3L |], [| 0xaL; 0xbL; 0xcL |]), + List.sort compare ["vendor", "Intel"; + "socket_count", "1"; "cpu_count", "1"; + "features_pv", "00000001-00000002-00000003"; + "features_hvm", "0000000a-0000000b-0000000c"]; + + ("Amd", 6, 24, [| 1L |], [| |]), + List.sort compare ["vendor", "Amd"; + "socket_count", "6"; "cpu_count", "24"; + "features_pv", "00000001"; + "features_hvm", ""] + ] + end) module Modifiers = Generic.Make (struct - module Io = struct - type input_t = (string * string) list - type output_t = (string * string) list - let string_of_input_t = Test_printers.(assoc_list string string) - let string_of_output_t = Test_printers.(assoc_list string string) - end - - let transform = fun record -> - let open Map_check in - record - |> setf vendor (getf vendor record) - |> setf socket_count (getf socket_count record) - |> setf cpu_count (getf cpu_count record) - |> setf features_pv (getf features_pv record) - |> setf features_hvm (getf features_hvm record) - |> List.sort compare - - let tests = [ - ["cpu_count", "1"; - "features_hvm", "0000000a-0000000b-0000000c"; - "features_pv", "00000001-00000002-00000003"; - "socket_count", "1"; - "vendor", "Intel"], - ["cpu_count", "1"; - "features_hvm", "0000000a-0000000b-0000000c"; - "features_pv", "00000001-00000002-00000003"; - "socket_count", "1"; - "vendor", "Intel"]; - ] -end) + module Io = struct + type input_t = (string * string) list + type output_t = (string * string) list + let string_of_input_t = Test_printers.(assoc_list string string) + let string_of_output_t = Test_printers.(assoc_list string string) + end + + let transform = fun record -> + let open Map_check in + record + |> setf vendor (getf vendor record) + |> setf socket_count (getf socket_count record) + |> setf cpu_count (getf cpu_count 
record) + |> setf features_pv (getf features_pv record) + |> setf features_hvm (getf features_hvm record) + |> List.sort compare + + let tests = [ + ["cpu_count", "1"; + "features_hvm", "0000000a-0000000b-0000000c"; + "features_pv", "00000001-00000002-00000003"; + "socket_count", "1"; + "vendor", "Intel"], + ["cpu_count", "1"; + "features_hvm", "0000000a-0000000b-0000000c"; + "features_pv", "00000001-00000002-00000003"; + "socket_count", "1"; + "vendor", "Intel"]; + ] + end) module ResetCPUFlags = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = (string * string) list - type output_t = string list - - let string_of_input_t = Test_printers.(list (pair string string)) - let string_of_output_t = Test_printers.(list string) - end - module State = Test_state.XapiDb - - let features_hvm = "feedface-feedface" - let features_pv = "deadbeef-deadbeef" - - let load_input __context cases = - let cpu_info = [ - "cpu_count", "1"; - "socket_count", "1"; - "vendor", "Abacus"; - "features_pv", features_pv; - "features_hvm", features_hvm; - ] and master = Test_common.make_host ~__context () in - Db.Host.set_cpu_info ~__context ~self:master ~value:cpu_info; - ignore (Test_common.make_pool ~__context ~master ~cpu_info ()); - - let vms = List.map - (fun (name_label, hVM_boot_policy) -> - Test_common.make_vm ~__context ~name_label - ~hVM_boot_policy ()) - cases in - List.iter (fun vm -> Cpuid_helpers.reset_cpu_flags ~__context ~vm) vms - - let extract_output __context vms = - let get_flags (label, _) = - let self = List.hd (Db.VM.get_by_name_label ~__context ~label) in - let flags = Db.VM.get_last_boot_CPU_flags ~__context ~self in - try List.assoc Xapi_globs.cpu_info_features_key flags - with Not_found -> "" - in List.map get_flags vms - - - (* Tuples of ((features_hvm * features_pv) list, (expected last_boot_CPU_flags) *) - let tests = [ - (["a", "BIOS order"], [features_hvm]); - (["a", ""], [features_pv]); - (["a", "BIOS order"; "b", ""], 
[features_hvm; features_pv]); - ] -end)) + module Io = struct + type input_t = (string * string) list + type output_t = string list + + let string_of_input_t = Test_printers.(list (pair string string)) + let string_of_output_t = Test_printers.(list string) + end + module State = Test_state.XapiDb + + let features_hvm = "feedface-feedface" + let features_pv = "deadbeef-deadbeef" + + let load_input __context cases = + let cpu_info = [ + "cpu_count", "1"; + "socket_count", "1"; + "vendor", "Abacus"; + "features_pv", features_pv; + "features_hvm", features_hvm; + ] and master = Test_common.make_host ~__context () in + Db.Host.set_cpu_info ~__context ~self:master ~value:cpu_info; + ignore (Test_common.make_pool ~__context ~master ~cpu_info ()); + + let vms = List.map + (fun (name_label, hVM_boot_policy) -> + Test_common.make_vm ~__context ~name_label + ~hVM_boot_policy ()) + cases in + List.iter (fun vm -> Cpuid_helpers.reset_cpu_flags ~__context ~vm) vms + + let extract_output __context vms = + let get_flags (label, _) = + let self = List.hd (Db.VM.get_by_name_label ~__context ~label) in + let flags = Db.VM.get_last_boot_CPU_flags ~__context ~self in + try List.assoc Xapi_globs.cpu_info_features_key flags + with Not_found -> "" + in List.map get_flags vms + + + (* Tuples of ((features_hvm * features_pv) list, (expected last_boot_CPU_flags) *) + let tests = [ + (["a", "BIOS order"], [features_hvm]); + (["a", ""], [features_pv]); + (["a", "BIOS order"; "b", ""], [features_hvm; features_pv]); + ] + end)) module AssertVMIsCompatible = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = string * string * (string * string) list - type output_t = (exn, unit) Either.t - - let string_of_input_t = - Test_printers.(tuple3 string string (assoc_list string string)) - let string_of_output_t = Test_printers.(either exn unit) - end - module State = Test_state.XapiDb - - let features_hvm = "feedface-feedface" - let features_pv = "deadbeef-deadbeef" - - 
let load_input __context (name_label, hVM_boot_policy, last_boot_flags) = - let cpu_info = [ - "cpu_count", "1"; - "socket_count", "1"; - "vendor", "Abacus"; - "features_pv", features_pv; - "features_hvm", features_hvm; - ] and master = Test_common.make_host ~__context () in - Db.Host.set_cpu_info ~__context ~self:master ~value:cpu_info; - ignore (Test_common.make_pool ~__context ~master ~cpu_info ()); - let self = Test_common.make_vm ~__context ~name_label ~hVM_boot_policy () in - Db.VM.set_last_boot_CPU_flags ~__context ~self ~value:last_boot_flags; - Db.VM.set_power_state ~__context ~self ~value:`Running - - let extract_output __context (label, _, _) = - let host = List.hd @@ Db.Host.get_all ~__context in - let vm = List.hd (Db.VM.get_by_name_label ~__context ~label) in - try Either.Right (Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host ()) - with - (* Filter out opaquerefs which make matching this exception difficult *) - | Api_errors.Server_error (vm_incompatible_with_this_host, data) -> - Either.Left (Api_errors.Server_error (vm_incompatible_with_this_host, List.filter (fun s -> not @@ Xstringext.String.startswith "OpaqueRef:" s) data)) - | e -> Either.Left e - - let tests = [ - (* HVM *) - ("a", "BIOS order", - Xapi_globs.([cpu_info_vendor_key, "Abacus"; - cpu_info_features_key, features_hvm])), - Either.Right (); - - ("a", "BIOS order", - Xapi_globs.([cpu_info_vendor_key, "Abacus"; - cpu_info_features_key, "cafecafe-cafecafe"])), - Either.Left Api_errors.(Server_error - (vm_incompatible_with_this_host, - ["VM last booted on a CPU with features this host's CPU does not have."])); - - ("a", "BIOS order", - Xapi_globs.([cpu_info_vendor_key, "Napier's Bones"; - cpu_info_features_key, features_hvm])), - Either.Left Api_errors.(Server_error - (vm_incompatible_with_this_host, - ["VM last booted on a host which had a CPU from a different vendor."])); - - (* PV *) - ("a", "", - Xapi_globs.([cpu_info_vendor_key, "Abacus"; - cpu_info_features_key, 
features_pv])), - Either.Right (); - - ("a", "", - Xapi_globs.([cpu_info_vendor_key, "Abacus"; - cpu_info_features_key, "cafecafe-cafecafe"])), - Either.Left Api_errors.(Server_error - (vm_incompatible_with_this_host, - ["VM last booted on a CPU with features this host's CPU does not have."])); - - ("a", "", - Xapi_globs.([cpu_info_vendor_key, "Napier's Bones"; - cpu_info_features_key, features_pv])), - Either.Left Api_errors.(Server_error - (vm_incompatible_with_this_host, - ["VM last booted on a host which had a CPU from a different vendor."])); - - - ] -end)) + module Io = struct + type input_t = string * string * (string * string) list + type output_t = (exn, unit) Either.t + + let string_of_input_t = + Test_printers.(tuple3 string string (assoc_list string string)) + let string_of_output_t = Test_printers.(either exn unit) + end + module State = Test_state.XapiDb + + let features_hvm = "feedface-feedface" + let features_pv = "deadbeef-deadbeef" + + let load_input __context (name_label, hVM_boot_policy, last_boot_flags) = + let cpu_info = [ + "cpu_count", "1"; + "socket_count", "1"; + "vendor", "Abacus"; + "features_pv", features_pv; + "features_hvm", features_hvm; + ] and master = Test_common.make_host ~__context () in + Db.Host.set_cpu_info ~__context ~self:master ~value:cpu_info; + ignore (Test_common.make_pool ~__context ~master ~cpu_info ()); + let self = Test_common.make_vm ~__context ~name_label ~hVM_boot_policy () in + Db.VM.set_last_boot_CPU_flags ~__context ~self ~value:last_boot_flags; + Db.VM.set_power_state ~__context ~self ~value:`Running + + let extract_output __context (label, _, _) = + let host = List.hd @@ Db.Host.get_all ~__context in + let vm = List.hd (Db.VM.get_by_name_label ~__context ~label) in + try Either.Right (Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host ()) + with + (* Filter out opaquerefs which make matching this exception difficult *) + | Api_errors.Server_error (vm_incompatible_with_this_host, data) -> + 
Either.Left (Api_errors.Server_error (vm_incompatible_with_this_host, List.filter (fun s -> not @@ Xstringext.String.startswith "OpaqueRef:" s) data)) + | e -> Either.Left e + + let tests = [ + (* HVM *) + ("a", "BIOS order", + Xapi_globs.([cpu_info_vendor_key, "Abacus"; + cpu_info_features_key, features_hvm])), + Either.Right (); + + ("a", "BIOS order", + Xapi_globs.([cpu_info_vendor_key, "Abacus"; + cpu_info_features_key, "cafecafe-cafecafe"])), + Either.Left Api_errors.(Server_error + (vm_incompatible_with_this_host, + ["VM last booted on a CPU with features this host's CPU does not have."])); + + ("a", "BIOS order", + Xapi_globs.([cpu_info_vendor_key, "Napier's Bones"; + cpu_info_features_key, features_hvm])), + Either.Left Api_errors.(Server_error + (vm_incompatible_with_this_host, + ["VM last booted on a host which had a CPU from a different vendor."])); + + (* PV *) + ("a", "", + Xapi_globs.([cpu_info_vendor_key, "Abacus"; + cpu_info_features_key, features_pv])), + Either.Right (); + + ("a", "", + Xapi_globs.([cpu_info_vendor_key, "Abacus"; + cpu_info_features_key, "cafecafe-cafecafe"])), + Either.Left Api_errors.(Server_error + (vm_incompatible_with_this_host, + ["VM last booted on a CPU with features this host's CPU does not have."])); + + ("a", "", + Xapi_globs.([cpu_info_vendor_key, "Napier's Bones"; + cpu_info_features_key, features_pv])), + Either.Left Api_errors.(Server_error + (vm_incompatible_with_this_host, + ["VM last booted on a host which had a CPU from a different vendor."])); + + + ] + end)) let test = - "test_cpuid_helpers" >::: - [ - "test_string_of_features" >::: StringOfFeatures.tests; - "test_features_of_string" >::: FeaturesOfString.tests; - "test_roundtrip_features_to_features" >::: - RoundTripFeaturesToFeatures.tests; - "test_roundtrip_string_to_features" >::: - RoundTripStringToString.tests; - "test_parse_failure" >::: - ParseFailure.tests; - "test_extend" >::: - Extend.tests; - "test_zero_extend" >::: - ZeroExtend.tests; - 
"test_intersect" >::: - Intersect.tests; - "test_comparisons" >::: - Comparisons.tests; - "test_accessors" >::: - Accessors.tests; - "test_setters" >::: - Setters.tests; - "test_modifiers" >::: - Modifiers.tests; - "test_reset_cpu_flags" >::: - ResetCPUFlags.tests; - (* "test_assert_vm_is_compatible" >::: - AssertVMIsCompatible.tests;*) - ] + "test_cpuid_helpers" >::: + [ + "test_string_of_features" >::: StringOfFeatures.tests; + "test_features_of_string" >::: FeaturesOfString.tests; + "test_roundtrip_features_to_features" >::: + RoundTripFeaturesToFeatures.tests; + "test_roundtrip_string_to_features" >::: + RoundTripStringToString.tests; + "test_parse_failure" >::: + ParseFailure.tests; + "test_extend" >::: + Extend.tests; + "test_zero_extend" >::: + ZeroExtend.tests; + "test_intersect" >::: + Intersect.tests; + "test_comparisons" >::: + Comparisons.tests; + "test_accessors" >::: + Accessors.tests; + "test_setters" >::: + Setters.tests; + "test_modifiers" >::: + Modifiers.tests; + "test_reset_cpu_flags" >::: + ResetCPUFlags.tests; + (* "test_assert_vm_is_compatible" >::: + AssertVMIsCompatible.tests;*) + ] diff --git a/ocaml/test/test_daemon_manager.ml b/ocaml/test/test_daemon_manager.ml index 2be83d96c7d..bdf3d228206 100644 --- a/ocaml/test/test_daemon_manager.ml +++ b/ocaml/test/test_daemon_manager.ml @@ -15,50 +15,50 @@ open OUnit type stop_failure = { - error: exn; - (** The exception thrown when trying to stop the daemon. *) - time_until_stopped: float; - (** The mock daemon will be marked as not running [t] seconds after the - exception is thrown. *) + error: exn; + (** The exception thrown when trying to stop the daemon. *) + time_until_stopped: float; + (** The mock daemon will be marked as not running [t] seconds after the + exception is thrown. 
*) } module Mock_daemon = struct - let running = ref true - - let stop_failure : stop_failure option ref = ref None - - let times_called_start = ref 0 - let times_called_stop = ref 0 - - let reset ~is_running = - running := is_running; - stop_failure := None; - times_called_start := 0; - times_called_stop := 0 - - let check = Daemon_manager.Function (fun () -> !running) - - let start () = - incr times_called_start; - running := true - - let stop () = - incr times_called_stop; - match !stop_failure with - | Some {error; time_until_stopped} -> begin - (* Raise the exception after spawning a thread which will set running to - false after a specified time. *) - let (_: Thread.t) = - Thread.create - (fun () -> - Thread.delay time_until_stopped; - running := false) - () - in - raise error - end - | None -> - running := false + let running = ref true + + let stop_failure : stop_failure option ref = ref None + + let times_called_start = ref 0 + let times_called_stop = ref 0 + + let reset ~is_running = + running := is_running; + stop_failure := None; + times_called_start := 0; + times_called_stop := 0 + + let check = Daemon_manager.Function (fun () -> !running) + + let start () = + incr times_called_start; + running := true + + let stop () = + incr times_called_stop; + match !stop_failure with + | Some {error; time_until_stopped} -> begin + (* Raise the exception after spawning a thread which will set running to + false after a specified time. *) + let (_: Thread.t) = + Thread.create + (fun () -> + Thread.delay time_until_stopped; + running := false) + () + in + raise error + end + | None -> + running := false end module Mock_manager = Daemon_manager.Make(Mock_daemon) @@ -66,91 +66,91 @@ module Mock_manager = Daemon_manager.Make(Mock_daemon) (* Test that the daemon is restarted, and that the return value of the function passed to with_daemon_stopped is propagated. 
*) let test_basic_operation () = - Mock_daemon.reset ~is_running:true; - let result = Mock_manager.with_daemon_stopped (fun () -> 123) in - assert_equal result 123; - assert_equal !Mock_daemon.times_called_start 1; - assert_equal !Mock_daemon.times_called_stop 1 + Mock_daemon.reset ~is_running:true; + let result = Mock_manager.with_daemon_stopped (fun () -> 123) in + assert_equal result 123; + assert_equal !Mock_daemon.times_called_start 1; + assert_equal !Mock_daemon.times_called_stop 1 (* Two sequential calls to with_daemon_stopped should restart the daemon twice. *) let test_two_restarts () = - Mock_daemon.reset ~is_running:true; - Mock_manager.with_daemon_stopped (fun () -> ()); - Mock_manager.with_daemon_stopped (fun () -> ()); - assert_equal !Mock_daemon.times_called_start 2; - assert_equal !Mock_daemon.times_called_stop 2 + Mock_daemon.reset ~is_running:true; + Mock_manager.with_daemon_stopped (fun () -> ()); + Mock_manager.with_daemon_stopped (fun () -> ()); + assert_equal !Mock_daemon.times_called_start 2; + assert_equal !Mock_daemon.times_called_stop 2 (* Test that if the daemon is stopped, calling with_daemon_stopped does not attempt to stop or start it. *) let test_already_stopped () = - Mock_daemon.reset ~is_running:false; - let result = Mock_manager.with_daemon_stopped (fun () -> 123) in - assert_equal result 123; - assert_equal !Mock_daemon.times_called_start 0; - assert_equal !Mock_daemon.times_called_stop 0 + Mock_daemon.reset ~is_running:false; + let result = Mock_manager.with_daemon_stopped (fun () -> 123) in + assert_equal result 123; + assert_equal !Mock_daemon.times_called_start 0; + assert_equal !Mock_daemon.times_called_stop 0 (* Test that an exception is propagated by with_daemon_stopped. 
*) let test_exception () = - Mock_daemon.reset ~is_running:true; - assert_raises (Failure "fail") - (fun () -> Mock_manager.with_daemon_stopped (fun () -> failwith "fail")); - assert_equal !Mock_daemon.times_called_start 1; - assert_equal !Mock_daemon.times_called_stop 1 + Mock_daemon.reset ~is_running:true; + assert_raises (Failure "fail") + (fun () -> Mock_manager.with_daemon_stopped (fun () -> failwith "fail")); + assert_equal !Mock_daemon.times_called_start 1; + assert_equal !Mock_daemon.times_called_stop 1 let spawn_threads_and_wait task count = - let rec spawn_threads task count acc = - if count > 0 then begin - let thread = Thread.create task () in - spawn_threads task (count - 1) (thread :: acc) - end - else acc - in - spawn_threads task count [] - |> List.iter Thread.join + let rec spawn_threads task count acc = + if count > 0 then begin + let thread = Thread.create task () in + spawn_threads task (count - 1) (thread :: acc) + end + else acc + in + spawn_threads task count [] + |> List.iter Thread.join (* Run with_daemon_stopped multiple times in parallel. The daemon should only be restarted once. *) let test_threads () = - Mock_daemon.reset ~is_running:true; - let delay_thread () = - Mock_manager.with_daemon_stopped (fun () -> Thread.delay 5.0) - in - spawn_threads_and_wait delay_thread 5; - assert_equal !Mock_daemon.times_called_start 1; - assert_equal !Mock_daemon.times_called_stop 1 + Mock_daemon.reset ~is_running:true; + let delay_thread () = + Mock_manager.with_daemon_stopped (fun () -> Thread.delay 5.0) + in + spawn_threads_and_wait delay_thread 5; + assert_equal !Mock_daemon.times_called_start 1; + assert_equal !Mock_daemon.times_called_stop 1 (* The daemon initially fails to stop, but it stops within the timeout. 
*) let test_timeout_succeed () = - Mock_daemon.reset ~is_running:true; - Mock_daemon.stop_failure := Some { - error = Failure "stop failed"; - time_until_stopped = 2.0; - }; - Mock_manager.with_daemon_stopped ~timeout:5.0 (fun () -> ()); - assert_equal !Mock_daemon.times_called_start 1; - assert_equal !Mock_daemon.times_called_stop 1 + Mock_daemon.reset ~is_running:true; + Mock_daemon.stop_failure := Some { + error = Failure "stop failed"; + time_until_stopped = 2.0; + }; + Mock_manager.with_daemon_stopped ~timeout:5.0 (fun () -> ()); + assert_equal !Mock_daemon.times_called_start 1; + assert_equal !Mock_daemon.times_called_stop 1 (* The daemon does not stop within the timeout, so the exception is raised. *) let test_timeout_fail () = - Mock_daemon.reset ~is_running:true; - Mock_daemon.stop_failure := Some { - error = Failure "stop failed"; - time_until_stopped = 5.0; - }; - assert_raises (Failure "stop failed") - (fun () -> Mock_manager.with_daemon_stopped ~timeout:2.0 (fun () -> ())); - assert_equal !Mock_daemon.times_called_start 0; - assert_equal !Mock_daemon.times_called_stop 1 + Mock_daemon.reset ~is_running:true; + Mock_daemon.stop_failure := Some { + error = Failure "stop failed"; + time_until_stopped = 5.0; + }; + assert_raises (Failure "stop failed") + (fun () -> Mock_manager.with_daemon_stopped ~timeout:2.0 (fun () -> ())); + assert_equal !Mock_daemon.times_called_start 0; + assert_equal !Mock_daemon.times_called_stop 1 let test = - "daemon_manager" >::: - [ - "test_basic_operation" >:: test_basic_operation; - "test_two_restarts" >:: test_two_restarts; - "test_already_stopped" >:: test_already_stopped; - "test_exception" >:: test_exception; - "test_threads" >:: test_threads; - "test_timeout_succeed" >:: test_timeout_succeed; - "test_timeout_fail" >:: test_timeout_fail; - ] + "daemon_manager" >::: + [ + "test_basic_operation" >:: test_basic_operation; + "test_two_restarts" >:: test_two_restarts; + "test_already_stopped" >:: test_already_stopped; + 
"test_exception" >:: test_exception; + "test_threads" >:: test_threads; + "test_timeout_succeed" >:: test_timeout_succeed; + "test_timeout_fail" >:: test_timeout_fail; + ] diff --git a/ocaml/test/test_daily_license_check.ml b/ocaml/test/test_daily_license_check.ml index dae24b38f26..589b3bc717b 100644 --- a/ocaml/test/test_daily_license_check.ml +++ b/ocaml/test/test_daily_license_check.ml @@ -17,64 +17,64 @@ open Test_highlevel open Daily_license_check module Tests = Generic.Make (struct - module Io = struct - type input_t = (string * string) list * (string * (string * string) list) list - type output_t = result - let string_of_input_t = Test_printers.(pair (assoc_list string string) (assoc_list string (assoc_list string string))) - let string_of_output_t = function - | Good -> "Not expired" - | Expiring hosts -> "Expiring soon on hosts: " ^ (Test_printers.(list string) hosts) - | Expired hosts -> "Expired on hosts: " ^ (Test_printers.(list string) hosts) - end - - let now = Stdext.Date.to_float (Stdext.Date.of_string "20160601T04:00:00Z") - - let transform = fun (pool_license_state, all_license_params) -> - check_license now pool_license_state all_license_params - - let tests = [ - (["expiry", "20170101T00:00:00Z"], []), - Good; - - (["expiry", "20160701T04:01:00Z"], []), - Good; - - (["expiry", "20160701T04:00:00Z"], []), - Expiring []; - - (["expiry", "20160616T00:00:00Z"], []), - Expiring []; - - (["expiry", "20160601T04:00:01Z"], []), - Expiring []; - - (["expiry", "20160601T04:00:00Z"], []), - Expired []; - - (["expiry", "20160101T00:00:00Z"], []), - Expired []; - - (["expiry", "20160615T00:00:00Z"], - ["host0", ["expiry", "20160615T00:00:00Z"]; - "host1", ["expiry", "20160615T00:00:00Z"]]), - Expiring ["host1"; "host0"]; - - (["expiry", "20160615T00:00:00Z"], - ["host0", ["expiry", "20160615T00:00:00Z"]; - "host1", ["expiry", "20160715T00:00:00Z"]]), - Expiring ["host0"]; - - (["expiry", "20160101T00:00:00Z"], - ["host0", ["expiry", "20160601T00:00:00Z"]; - 
"host1", ["expiry", "20150601T00:00:00Z"]]), - Expired ["host1"; "host0"]; - - (["expiry", "20160101T00:00:00Z"], - ["host0", ["expiry", "20170601T00:00:00Z"]; - "host1", ["expiry", "20150601T00:00:00Z"]]), - Expired ["host1"]; - ] -end) + module Io = struct + type input_t = (string * string) list * (string * (string * string) list) list + type output_t = result + let string_of_input_t = Test_printers.(pair (assoc_list string string) (assoc_list string (assoc_list string string))) + let string_of_output_t = function + | Good -> "Not expired" + | Expiring hosts -> "Expiring soon on hosts: " ^ (Test_printers.(list string) hosts) + | Expired hosts -> "Expired on hosts: " ^ (Test_printers.(list string) hosts) + end + + let now = Stdext.Date.to_float (Stdext.Date.of_string "20160601T04:00:00Z") + + let transform = fun (pool_license_state, all_license_params) -> + check_license now pool_license_state all_license_params + + let tests = [ + (["expiry", "20170101T00:00:00Z"], []), + Good; + + (["expiry", "20160701T04:01:00Z"], []), + Good; + + (["expiry", "20160701T04:00:00Z"], []), + Expiring []; + + (["expiry", "20160616T00:00:00Z"], []), + Expiring []; + + (["expiry", "20160601T04:00:01Z"], []), + Expiring []; + + (["expiry", "20160601T04:00:00Z"], []), + Expired []; + + (["expiry", "20160101T00:00:00Z"], []), + Expired []; + + (["expiry", "20160615T00:00:00Z"], + ["host0", ["expiry", "20160615T00:00:00Z"]; + "host1", ["expiry", "20160615T00:00:00Z"]]), + Expiring ["host1"; "host0"]; + + (["expiry", "20160615T00:00:00Z"], + ["host0", ["expiry", "20160615T00:00:00Z"]; + "host1", ["expiry", "20160715T00:00:00Z"]]), + Expiring ["host0"]; + + (["expiry", "20160101T00:00:00Z"], + ["host0", ["expiry", "20160601T00:00:00Z"]; + "host1", ["expiry", "20150601T00:00:00Z"]]), + Expired ["host1"; "host0"]; + + (["expiry", "20160101T00:00:00Z"], + ["host0", ["expiry", "20170601T00:00:00Z"]; + "host1", ["expiry", "20150601T00:00:00Z"]]), + Expired ["host1"]; + ] + end) let test = - 
"test_daily_license_check" >::: Tests.tests + "test_daily_license_check" >::: Tests.tests diff --git a/ocaml/test/test_datamodel_utils.ml b/ocaml/test/test_datamodel_utils.ml index 3293c26536c..92357f59322 100644 --- a/ocaml/test/test_datamodel_utils.ml +++ b/ocaml/test/test_datamodel_utils.ml @@ -18,43 +18,43 @@ open OUnit open Test_highlevel module HasBeenRemoved = Generic.Make(struct - module Io = struct - type input_t = Datamodel_types.lifecycle_transition list - type output_t = bool + module Io = struct + type input_t = Datamodel_types.lifecycle_transition list + type output_t = bool - let string_of_input_t input = - List.map - (fun (lifecycle_change, _, _) -> - Datamodel_types.rpc_of_lifecycle_change lifecycle_change - |> Rpc.to_string) - input - |> String.concat "; " + let string_of_input_t input = + List.map + (fun (lifecycle_change, _, _) -> + Datamodel_types.rpc_of_lifecycle_change lifecycle_change + |> Rpc.to_string) + input + |> String.concat "; " - let string_of_output_t = string_of_bool - end + let string_of_output_t = string_of_bool + end - let transform = Datamodel_utils.has_been_removed + let transform = Datamodel_utils.has_been_removed - let tests = Datamodel_types.([ - [], false; - [Published, "release1", ""], false; - [Removed, "release1", ""], true; - [ - Published, "release1", ""; - Deprecated, "release2", ""; - Removed, "release3", ""; - ], true; - [ - Published, "release1", ""; - Deprecated, "release2", ""; - Removed, "release3", ""; - Published, "release4", ""; - ], false; - ]) -end) + let tests = Datamodel_types.([ + [], false; + [Published, "release1", ""], false; + [Removed, "release1", ""], true; + [ + Published, "release1", ""; + Deprecated, "release2", ""; + Removed, "release3", ""; + ], true; + [ + Published, "release1", ""; + Deprecated, "release2", ""; + Removed, "release3", ""; + Published, "release4", ""; + ], false; + ]) + end) - let test = - "datamodel_utils" >::: - [ - "test_has_been_removed" >::: HasBeenRemoved.tests; - ] 
+let test = + "datamodel_utils" >::: + [ + "test_has_been_removed" >::: HasBeenRemoved.tests; + ] diff --git a/ocaml/test/test_db_lowlevel.ml b/ocaml/test/test_db_lowlevel.ml index 112b6b9752c..25e7592343e 100644 --- a/ocaml/test/test_db_lowlevel.ml +++ b/ocaml/test/test_db_lowlevel.ml @@ -20,29 +20,29 @@ open Test_common * an Db_exn.DBCache_NotFound("missing row",...) exception, and the return * value should include the deleted record. *) let test_db_get_all_records_race () = - let __context = make_test_database () in - let (vm_ref: API.ref_VM) = make_vm ~__context () in + let __context = make_test_database () in + let (vm_ref: API.ref_VM) = make_vm ~__context () in - Db_cache_impl.fist_delay_read_records_where := true; + Db_cache_impl.fist_delay_read_records_where := true; - (* Kick off the thread which will destroy a VM. *) - let destroyer_thread = - Thread.create (fun self -> Db.VM.destroy ~__context ~self) vm_ref - in + (* Kick off the thread which will destroy a VM. *) + let destroyer_thread = + Thread.create (fun self -> Db.VM.destroy ~__context ~self) vm_ref + in - (* Call get_all_records *) - let _ = - try Db.VM.get_all_records ~__context - with Db_exn.DBCache_NotFound("missing row", _, _) -> - assert_failure "Race condition present" - in - Thread.join destroyer_thread + (* Call get_all_records *) + let _ = + try Db.VM.get_all_records ~__context + with Db_exn.DBCache_NotFound("missing row", _, _) -> + assert_failure "Race condition present" + in + Thread.join destroyer_thread let tear_down () = - Db_cache_impl.fist_delay_read_records_where := false + Db_cache_impl.fist_delay_read_records_where := false let test = - "test_db_lowlevel" >::: - [ - "test_db_get_all_records_race" >:: (bracket id test_db_get_all_records_race tear_down); - ] + "test_db_lowlevel" >::: + [ + "test_db_get_all_records_race" >:: (bracket id test_db_get_all_records_race tear_down); + ] diff --git a/ocaml/test/test_dbsync_master.ml b/ocaml/test/test_dbsync_master.ml index 
b214a3c90e4..e0e554f9e34 100644 --- a/ocaml/test/test_dbsync_master.ml +++ b/ocaml/test/test_dbsync_master.ml @@ -17,102 +17,102 @@ open Test_highlevel open Dbsync_master module CreateToolsSR = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = (string * string * (string * string) list * bool) list - type output_t = (string * string * (string * string) list) list + module Io = struct + type input_t = (string * string * (string * string) list * bool) list + type output_t = (string * string * (string * string) list) list - let string_of_input_t = Test_printers.(list (tuple4 string string (assoc_list string string) bool)) - let string_of_output_t = Test_printers.(list (tuple3 string string (assoc_list string string))) - end - module State = Test_state.XapiDb - - let name = "Tools" - let description = "Tools ISOs" - let other_config = [ - Xapi_globs.xensource_internal, "true"; - Xapi_globs.tools_sr_tag, "true"; - Xapi_globs.i18n_key, "xenserver-tools"; - (Xapi_globs.i18n_original_value_prefix ^ "name_label"), name; - (Xapi_globs.i18n_original_value_prefix ^ "name_description"), description - ] + let string_of_input_t = Test_printers.(list (tuple4 string string (assoc_list string string) bool)) + let string_of_output_t = Test_printers.(list (tuple3 string string (assoc_list string string))) + end + module State = Test_state.XapiDb - let load_input __context srs = - let sr_introduce ~uuid ~name_label ~name_description ~_type ~content_type ~shared ~sm_config = - Test_common.make_sr ~__context ~uuid ~name_label ~name_description ~_type ~content_type ~shared ~sm_config () in - let maybe_create_pbd sR device_config host = - Test_common.make_pbd ~__context ~sR ~device_config ~host () - in - Test_common.make_localhost __context; - List.iter (fun (name_label, name_description, other_config, is_tools_sr) -> - ignore (Test_common.make_sr ~__context ~name_label ~name_description ~other_config ~is_tools_sr ()) - ) srs; - Dbsync_master.create_tools_sr 
__context name description sr_introduce maybe_create_pbd + let name = "Tools" + let description = "Tools ISOs" + let other_config = [ + Xapi_globs.xensource_internal, "true"; + Xapi_globs.tools_sr_tag, "true"; + Xapi_globs.i18n_key, "xenserver-tools"; + (Xapi_globs.i18n_original_value_prefix ^ "name_label"), name; + (Xapi_globs.i18n_original_value_prefix ^ "name_description"), description + ] - let extract_output __context vms = - List.fold_left (fun acc self -> - if Db.SR.get_is_tools_sr ~__context ~self then ( - Db.SR.get_name_label ~__context ~self, - Db.SR.get_name_description ~__context ~self, - Db.SR.get_other_config ~__context ~self) - :: acc - else - acc - ) [] (Db.SR.get_all ~__context) + let load_input __context srs = + let sr_introduce ~uuid ~name_label ~name_description ~_type ~content_type ~shared ~sm_config = + Test_common.make_sr ~__context ~uuid ~name_label ~name_description ~_type ~content_type ~shared ~sm_config () in + let maybe_create_pbd sR device_config host = + Test_common.make_pbd ~__context ~sR ~device_config ~host () + in + Test_common.make_localhost __context; + List.iter (fun (name_label, name_description, other_config, is_tools_sr) -> + ignore (Test_common.make_sr ~__context ~name_label ~name_description ~other_config ~is_tools_sr ()) + ) srs; + Dbsync_master.create_tools_sr __context name description sr_introduce maybe_create_pbd - (* And other_config key/value pair we use to prove that and existing Tools SR is - * reused rather than destroyed and recreated. 
*) - let extra_oc = "toolz", "true" + let extract_output __context vms = + List.fold_left (fun acc self -> + if Db.SR.get_is_tools_sr ~__context ~self then ( + Db.SR.get_name_label ~__context ~self, + Db.SR.get_name_description ~__context ~self, + Db.SR.get_other_config ~__context ~self) + :: acc + else + acc + ) [] (Db.SR.get_all ~__context) - (* All tests expect the outcome to be one and only one Tools SR with the pre-specified - * name, description and other_config keys *) - let tests = [ - (* No Tools SR yet *) - [], - [name, description, other_config]; - - (* An existing Tools SR *) - [ - "Toolz", "Toolz ISOs", [extra_oc], true; - ], - [name, description, extra_oc :: other_config]; - - (* Two existing Tools SRs (bad state!) *) - [ - "Toolz", "Toolz ISOs", [extra_oc], true; - "Toolz2", "Toolz ISOs2", [extra_oc], true; - ], - [name, description, extra_oc :: other_config]; - - (* An existing Tools SR with an old tag *) - [ - "Toolz", "Toolz ISOs", [Xapi_globs.tools_sr_tag, "true"; extra_oc], false; - ], - [name, description, extra_oc :: other_config]; + (* And other_config key/value pair we use to prove that and existing Tools SR is + * reused rather than destroyed and recreated. 
*) + let extra_oc = "toolz", "true" - (* An existing Tools SR with another old tag *) - [ - "Toolz", "Toolz ISOs", [Xapi_globs.xensource_internal, "true"; extra_oc], false; - ], - [name, description, extra_oc :: other_config]; + (* All tests expect the outcome to be one and only one Tools SR with the pre-specified + * name, description and other_config keys *) + let tests = [ + (* No Tools SR yet *) + [], + [name, description, other_config]; - (* Two existing Tools SRs with different tags; expect to keep the one with is_tools_iso=true *) - [ - "Other", "Other SR", [extra_oc], true; - "Toolz", "Toolz ISOs", [Xapi_globs.xensource_internal, "true"], false; - ], - [name, description, extra_oc :: other_config]; + (* An existing Tools SR *) + [ + "Toolz", "Toolz ISOs", [extra_oc], true; + ], + [name, description, extra_oc :: other_config]; - (* Two existing Tools SRs with different tags; expect to keep the one with is_tools_iso=true *) - [ - "Toolz", "Toolz ISOs", [Xapi_globs.tools_sr_tag, "true"], false; - "Other", "Other SR", [extra_oc], true; - ], - [name, description, extra_oc :: other_config]; - ] -end)) + (* Two existing Tools SRs (bad state!) 
*) + [ + "Toolz", "Toolz ISOs", [extra_oc], true; + "Toolz2", "Toolz ISOs2", [extra_oc], true; + ], + [name, description, extra_oc :: other_config]; + + (* An existing Tools SR with an old tag *) + [ + "Toolz", "Toolz ISOs", [Xapi_globs.tools_sr_tag, "true"; extra_oc], false; + ], + [name, description, extra_oc :: other_config]; + + (* An existing Tools SR with another old tag *) + [ + "Toolz", "Toolz ISOs", [Xapi_globs.xensource_internal, "true"; extra_oc], false; + ], + [name, description, extra_oc :: other_config]; + + (* Two existing Tools SRs with different tags; expect to keep the one with is_tools_iso=true *) + [ + "Other", "Other SR", [extra_oc], true; + "Toolz", "Toolz ISOs", [Xapi_globs.xensource_internal, "true"], false; + ], + [name, description, extra_oc :: other_config]; + + (* Two existing Tools SRs with different tags; expect to keep the one with is_tools_iso=true *) + [ + "Toolz", "Toolz ISOs", [Xapi_globs.tools_sr_tag, "true"], false; + "Other", "Other SR", [extra_oc], true; + ], + [name, description, extra_oc :: other_config]; + ] + end)) let test = - "test_dbsync_master" >::: - [ - "create_tools_sr" >::: CreateToolsSR.tests; - ] + "test_dbsync_master" >::: + [ + "create_tools_sr" >::: CreateToolsSR.tests; + ] diff --git a/ocaml/test/test_features.ml b/ocaml/test/test_features.ml index f2719773c2c..638c05c2149 100644 --- a/ocaml/test/test_features.ml +++ b/ocaml/test/test_features.ml @@ -17,51 +17,51 @@ open Test_highlevel open Features module OfAssocList = Generic.Make(struct - module Io = struct - type input_t = (string * string) list - type output_t = Features.feature list + module Io = struct + type input_t = (string * string) list + type output_t = Features.feature list - let string_of_input_t = Test_printers.(assoc_list string string) - let string_of_output_t = - Test_printers.(fun features -> String.concat "," (List.map name_of_feature features)) - end + let string_of_input_t = Test_printers.(assoc_list string string) + let 
string_of_output_t = + Test_printers.(fun features -> String.concat "," (List.map name_of_feature features)) + end - let transform = of_assoc_list + let transform = of_assoc_list - (* Xen_motion and AD are enabled unless explicitly disabled. All other features - are disabled unless explitly enabled. *) - let tests = [ - [], - [Xen_motion; AD]; - - ["restrict_xen_motion", "true"; - "restrict_ad", "true"], - []; - - ["restrict_xen_motion", "true"], - [AD]; - - ["restrict_xen_motion", "false"], - [Xen_motion; AD]; - - ["restrict_xen_motion", "false"; - "restrict_dmc", "false"], - [DMC; Xen_motion; AD]; - - ["restrict_xen_motion", "false"; - "restrict_ad", "true"; - "restrict_dmc", "false"], - [DMC; Xen_motion]; - - ["enable_xha", "true"; - "restrict_xen_motion", "true"], - [HA; AD]; - ] -end) + (* Xen_motion and AD are enabled unless explicitly disabled. All other features + are disabled unless explitly enabled. *) + let tests = [ + [], + [Xen_motion; AD]; + + ["restrict_xen_motion", "true"; + "restrict_ad", "true"], + []; + + ["restrict_xen_motion", "true"], + [AD]; + + ["restrict_xen_motion", "false"], + [Xen_motion; AD]; + + ["restrict_xen_motion", "false"; + "restrict_dmc", "false"], + [DMC; Xen_motion; AD]; + + ["restrict_xen_motion", "false"; + "restrict_ad", "true"; + "restrict_dmc", "false"], + [DMC; Xen_motion]; + + ["enable_xha", "true"; + "restrict_xen_motion", "true"], + [HA; AD]; + ] + end) let test = - "pool_license" >::: - [ - "test_of_assoc_list" >::: OfAssocList.tests; - ] + "pool_license" >::: + [ + "test_of_assoc_list" >::: OfAssocList.tests; + ] diff --git a/ocaml/test/test_gpu_group.ml b/ocaml/test/test_gpu_group.ml index 359d0cee5e9..ad47471837a 100644 --- a/ocaml/test/test_gpu_group.ml +++ b/ocaml/test/test_gpu_group.ml @@ -16,64 +16,64 @@ open OUnit open Test_vgpu_common let test_supported_enabled_types () = - let __context = Test_common.make_test_database () in - (* Create a GPU group containing a single K2 PGPU. 
*) - let host = List.hd (Db.Host.get_all ~__context) in - let gPU_group = Test_common.make_gpu_group ~__context () in - let pgpu = make_pgpu ~__context ~host ~gPU_group default_k2 in - (* Update the group's enabled and supported types, and check that they - * contain all the types enabled and supported by the PGPU. *) - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:gPU_group; - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:gPU_group; - let vgpu_types_and_refs = - List.map - (fun vgpu_type -> - (vgpu_type, Xapi_vgpu_type.find_or_create ~__context vgpu_type)) - k2_vgpu_types - in - let group_supported_types = - Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group - in - let group_enabled_types = - Db.GPU_group.get_enabled_VGPU_types ~__context ~self:gPU_group - in - List.iter - (fun (vgpu_type, vgpu_type_ref) -> - let msg_supported = - Printf.sprintf - "GPU group does not list %s as supported" - vgpu_type.Xapi_vgpu_type.model_name - in - let msg_enabled = - Printf.sprintf - "GPU group does not list %s as enabled" - vgpu_type.Xapi_vgpu_type.model_name - in - assert_bool msg_supported (List.mem vgpu_type_ref group_supported_types); - assert_bool msg_enabled (List.mem vgpu_type_ref group_enabled_types)) - vgpu_types_and_refs; - (* Invalidate the PGPU's host ref, and run a GC pass; this should destroy the - * pgpu, and clear the group's supported and enabled types. 
*) - Db.PGPU.set_host ~__context ~self:pgpu ~value:Ref.null; - Db_gc.gc_PGPUs ~__context; - let group_supported_types = - Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group - in - let group_enabled_types = - Db.GPU_group.get_enabled_VGPU_types ~__context ~self:gPU_group - in - assert_equal - ~msg:"PGPU has not been destroyed" - (Db.is_valid_ref __context pgpu) false; - assert_equal - ~msg:"GPU group still has supported types after GC" - group_supported_types []; - assert_equal - ~msg:"GPU group still has enabled types after GC" - group_enabled_types [] + let __context = Test_common.make_test_database () in + (* Create a GPU group containing a single K2 PGPU. *) + let host = List.hd (Db.Host.get_all ~__context) in + let gPU_group = Test_common.make_gpu_group ~__context () in + let pgpu = make_pgpu ~__context ~host ~gPU_group default_k2 in + (* Update the group's enabled and supported types, and check that they + * contain all the types enabled and supported by the PGPU. *) + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:gPU_group; + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:gPU_group; + let vgpu_types_and_refs = + List.map + (fun vgpu_type -> + (vgpu_type, Xapi_vgpu_type.find_or_create ~__context vgpu_type)) + k2_vgpu_types + in + let group_supported_types = + Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group + in + let group_enabled_types = + Db.GPU_group.get_enabled_VGPU_types ~__context ~self:gPU_group + in + List.iter + (fun (vgpu_type, vgpu_type_ref) -> + let msg_supported = + Printf.sprintf + "GPU group does not list %s as supported" + vgpu_type.Xapi_vgpu_type.model_name + in + let msg_enabled = + Printf.sprintf + "GPU group does not list %s as enabled" + vgpu_type.Xapi_vgpu_type.model_name + in + assert_bool msg_supported (List.mem vgpu_type_ref group_supported_types); + assert_bool msg_enabled (List.mem vgpu_type_ref group_enabled_types)) + vgpu_types_and_refs; + (* Invalidate the PGPU's host ref, 
and run a GC pass; this should destroy the + * pgpu, and clear the group's supported and enabled types. *) + Db.PGPU.set_host ~__context ~self:pgpu ~value:Ref.null; + Db_gc.gc_PGPUs ~__context; + let group_supported_types = + Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group + in + let group_enabled_types = + Db.GPU_group.get_enabled_VGPU_types ~__context ~self:gPU_group + in + assert_equal + ~msg:"PGPU has not been destroyed" + (Db.is_valid_ref __context pgpu) false; + assert_equal + ~msg:"GPU group still has supported types after GC" + group_supported_types []; + assert_equal + ~msg:"GPU group still has enabled types after GC" + group_enabled_types [] let test = - "test_gpu_group" >::: - [ - "test_supported_enabled_types" >:: test_supported_enabled_types; - ] + "test_gpu_group" >::: + [ + "test_supported_enabled_types" >:: test_supported_enabled_types; + ] diff --git a/ocaml/test/test_ha_vm_failover.ml b/ocaml/test/test_ha_vm_failover.ml index 4e3fc737c57..648e959acd7 100644 --- a/ocaml/test/test_ha_vm_failover.ml +++ b/ocaml/test/test_ha_vm_failover.ml @@ -23,477 +23,477 @@ let mib x = x |> kib |> kib let gib x = x |> kib |> kib |> kib type vbd = { - agile : bool; + agile : bool; } type vif = { - agile : bool; + agile : bool; } type vm = { - ha_always_run : bool; - ha_restart_priority : string; - memory : int64; - name_label : string; - vbds : vbd list; - vifs : vif list; + ha_always_run : bool; + ha_restart_priority : string; + memory : int64; + name_label : string; + vbds : vbd list; + vifs : vif list; } let basic_vm = { - ha_always_run = true; - ha_restart_priority = "restart"; - memory = gib 1L; - name_label = "vm"; - vbds = [{agile = true}]; - vifs = [{agile = true}]; + ha_always_run = true; + ha_restart_priority = "restart"; + memory = gib 1L; + name_label = "vm"; + vbds = [{agile = true}]; + vifs = [{agile = true}]; } type host = { - memory_total : int64; - name_label : string; - vms : vm list; + memory_total : int64; + name_label : string; 
+ vms : vm list; } type pool = { - master: host; - slaves: host list; - ha_host_failures_to_tolerate: int64; + master: host; + slaves: host list; + ha_host_failures_to_tolerate: int64; } let string_of_vm {memory; name_label} = - Printf.sprintf "{memory = %Ld; name_label = %S}" memory name_label + Printf.sprintf "{memory = %Ld; name_label = %S}" memory name_label let string_of_host {memory_total; name_label; vms} = - Printf.sprintf "{memory_total = %Ld; name_label = %S; vms = [%s]}" - memory_total name_label - (Test_printers.list string_of_vm vms) + Printf.sprintf "{memory_total = %Ld; name_label = %S; vms = [%s]}" + memory_total name_label + (Test_printers.list string_of_vm vms) let string_of_pool {master; slaves; ha_host_failures_to_tolerate} = - Printf.sprintf - "{master = %s; slaves = %s; ha_host_failures_to_tolerate = %Ld}" - (string_of_host master) - (Test_printers.list string_of_host slaves) - ha_host_failures_to_tolerate + Printf.sprintf + "{master = %s; slaves = %s; ha_host_failures_to_tolerate = %Ld}" + (string_of_host master) + (Test_printers.list string_of_host slaves) + ha_host_failures_to_tolerate let load_vm ~__context ~(vm:vm) ~local_sr ~shared_sr ~local_net ~shared_net = - let vm_ref = make_vm ~__context - ~ha_always_run:vm.ha_always_run - ~ha_restart_priority:vm.ha_restart_priority - ~memory_static_min:vm.memory - ~memory_dynamic_min:vm.memory - ~memory_dynamic_max:vm.memory - ~memory_static_max:vm.memory - ~name_label:vm.name_label () - in - let (_ : API.ref_VIF list) = - List.mapi - (fun index (vif:vif) -> - make_vif ~__context ~device:(string_of_int index) ~vM:vm_ref - ~network:(if vif.agile then shared_net else local_net) ()) - vm.vifs - in - let (_ : API.ref_VBD list) = - List.mapi - (fun index (vbd:vbd) -> - let vdi_ref = - make_vdi ~__context ~sR:(if vbd.agile then shared_sr else local_sr) () - in - make_vbd ~__context ~device:(string_of_int index) ~vM:vm_ref - ~vDI:vdi_ref ()) - vm.vbds - in - vm_ref + let vm_ref = make_vm ~__context + 
~ha_always_run:vm.ha_always_run + ~ha_restart_priority:vm.ha_restart_priority + ~memory_static_min:vm.memory + ~memory_dynamic_min:vm.memory + ~memory_dynamic_max:vm.memory + ~memory_static_max:vm.memory + ~name_label:vm.name_label () + in + let (_ : API.ref_VIF list) = + List.mapi + (fun index (vif:vif) -> + make_vif ~__context ~device:(string_of_int index) ~vM:vm_ref + ~network:(if vif.agile then shared_net else local_net) ()) + vm.vifs + in + let (_ : API.ref_VBD list) = + List.mapi + (fun index (vbd:vbd) -> + let vdi_ref = + make_vdi ~__context ~sR:(if vbd.agile then shared_sr else local_sr) () + in + make_vbd ~__context ~device:(string_of_int index) ~vM:vm_ref + ~vDI:vdi_ref ()) + vm.vbds + in + vm_ref let load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net = - let host_ref = make_host ~__context ~name_label:host.name_label () in - Db.Host.set_enabled ~__context ~self:host_ref ~value:true; - let metrics = Db.Host.get_metrics ~__context ~self:host_ref in - Db.Host_metrics.set_live ~__context ~self:metrics ~value:true; - Db.Host_metrics.set_memory_total ~__context - ~self:metrics ~value:host.memory_total; - - let (_ : API.ref_VM list) = - List.map - (fun vm -> - load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net) - host.vms - in - host_ref + let host_ref = make_host ~__context ~name_label:host.name_label () in + Db.Host.set_enabled ~__context ~self:host_ref ~value:true; + let metrics = Db.Host.get_metrics ~__context ~self:host_ref in + Db.Host_metrics.set_live ~__context ~self:metrics ~value:true; + Db.Host_metrics.set_memory_total ~__context + ~self:metrics ~value:host.memory_total; + + let (_ : API.ref_VM list) = + List.map + (fun vm -> + load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net) + host.vms + in + host_ref let setup ~__context {master; slaves; ha_host_failures_to_tolerate} = - let shared_sr = make_sr ~__context ~shared:true () in - let shared_net = make_network ~__context ~bridge:"xenbr0" () in + let 
shared_sr = make_sr ~__context ~shared:true () in + let shared_net = make_network ~__context ~bridge:"xenbr0" () in - let load_host_and_local_resources host = - let local_sr = make_sr ~__context ~shared:false () in - let local_net = make_network ~__context ~bridge:"xapi0" () in + let load_host_and_local_resources host = + let local_sr = make_sr ~__context ~shared:false () in + let local_net = make_network ~__context ~bridge:"xapi0" () in - let host_ref = - load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net in + let host_ref = + load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net in - let (_ : API.ref_PBD) = - make_pbd ~__context ~host:host_ref ~sR:local_sr () in - let (_ : API.ref_PBD) = - make_pbd ~__context ~host:host_ref ~sR:shared_sr () in + let (_ : API.ref_PBD) = + make_pbd ~__context ~host:host_ref ~sR:local_sr () in + let (_ : API.ref_PBD) = + make_pbd ~__context ~host:host_ref ~sR:shared_sr () in - let (_ : API.ref_PIF) = - make_pif ~__context ~host:host_ref ~network:local_net () in - let (_ : API.ref_PIF) = - make_pif ~__context ~host:host_ref ~network:shared_net () in - host_ref - in + let (_ : API.ref_PIF) = + make_pif ~__context ~host:host_ref ~network:local_net () in + let (_ : API.ref_PIF) = + make_pif ~__context ~host:host_ref ~network:shared_net () in + host_ref + in - let master_ref = load_host_and_local_resources master in - let (_ : API.ref_host list) = List.map load_host_and_local_resources slaves in + let master_ref = load_host_and_local_resources master in + let (_ : API.ref_host list) = List.map load_host_and_local_resources slaves in - let (_ : API.ref_pool) = make_pool ~__context - ~ha_enabled:true ~master:master_ref ~ha_host_failures_to_tolerate - ~ha_plan_exists_for:ha_host_failures_to_tolerate () in - () + let (_ : API.ref_pool) = make_pool ~__context + ~ha_enabled:true ~master:master_ref ~ha_host_failures_to_tolerate + ~ha_plan_exists_for:ha_host_failures_to_tolerate () in + () module 
AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = pool - type output_t = string list - - let string_of_input_t = string_of_pool - let string_of_output_t = Test_printers.(list string) - end - - module State = Test_state.XapiDb - - let load_input __context input = setup ~__context input - - let extract_output __context _ = - Xapi_ha_vm_failover.all_protected_vms ~__context - |> List.map (fun (_, vm_rec) -> vm_rec.API.vM_name_label) - |> List.sort compare - - let tests = [ - (* No VMs and a single host. *) - { - master = {memory_total = gib 256L; name_label = "master"; vms = []}; - slaves = []; - ha_host_failures_to_tolerate = 0L; - }, - []; - (* One unprotected VM. *) - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [{basic_vm with - ha_always_run = false; - ha_restart_priority = ""; - }]; - }; - slaves = []; - ha_host_failures_to_tolerate = 0L; - }, - []; - (* One VM which would be protected if it was running. *) - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [{basic_vm with ha_always_run = false}]; - }; - slaves = []; - ha_host_failures_to_tolerate = 0L; - }, - []; - (* One protected VM. *) - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [basic_vm]; - }; - slaves = []; - ha_host_failures_to_tolerate = 0L; - }, - ["vm"]; - (* One protected VM and one unprotected VM. 
*) - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with name_label = "vm1"}; - {basic_vm with - ha_always_run = false; - ha_restart_priority = ""; - name_label = "vm2" - } - ]; - }; - slaves = []; - ha_host_failures_to_tolerate = 0L; - }, - ["vm1"]; - ] -end)) + module Io = struct + type input_t = pool + type output_t = string list + + let string_of_input_t = string_of_pool + let string_of_output_t = Test_printers.(list string) + end + + module State = Test_state.XapiDb + + let load_input __context input = setup ~__context input + + let extract_output __context _ = + Xapi_ha_vm_failover.all_protected_vms ~__context + |> List.map (fun (_, vm_rec) -> vm_rec.API.vM_name_label) + |> List.sort compare + + let tests = [ + (* No VMs and a single host. *) + { + master = {memory_total = gib 256L; name_label = "master"; vms = []}; + slaves = []; + ha_host_failures_to_tolerate = 0L; + }, + []; + (* One unprotected VM. *) + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [{basic_vm with + ha_always_run = false; + ha_restart_priority = ""; + }]; + }; + slaves = []; + ha_host_failures_to_tolerate = 0L; + }, + []; + (* One VM which would be protected if it was running. *) + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [{basic_vm with ha_always_run = false}]; + }; + slaves = []; + ha_host_failures_to_tolerate = 0L; + }, + []; + (* One protected VM. *) + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [basic_vm]; + }; + slaves = []; + ha_host_failures_to_tolerate = 0L; + }, + ["vm"]; + (* One protected VM and one unprotected VM. 
*) + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with name_label = "vm1"}; + {basic_vm with + ha_always_run = false; + ha_restart_priority = ""; + name_label = "vm2" + } + ]; + }; + slaves = []; + ha_host_failures_to_tolerate = 0L; + }, + ["vm1"]; + ] + end)) module PlanForNFailures = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - open Xapi_ha_vm_failover - - type input_t = pool - type output_t = result - - let string_of_input_t = string_of_pool - let string_of_output_t = function - | Plan_exists_for_all_VMs -> "Plan_exists_for_all_VMs" - | Plan_exists_excluding_non_agile_VMs -> "Plan_exists_excluding_non_agile_VMs" - | No_plan_exists -> "No_plan_exists" - end - - module State = Test_state.XapiDb - - let load_input __context = setup ~__context - - let extract_output __context pool = - let all_protected_vms = Xapi_ha_vm_failover.all_protected_vms ~__context in - Xapi_ha_vm_failover.plan_for_n_failures ~__context - ~all_protected_vms (Int64.to_int pool.ha_host_failures_to_tolerate) - - (* TODO: Add a test which causes plan_for_n_failures to return - * Plan_exists_excluding_non_agile_VMs. *) - let tests = [ - (* Two host pool with no VMs. *) - ( - { - master = {memory_total = gib 256L; name_label = "master"; vms = []}; - slaves = [ - {memory_total = gib 256L; name_label = "slave"; vms = []} - ]; - ha_host_failures_to_tolerate = 1L; - }, - Xapi_ha_vm_failover.Plan_exists_for_all_VMs - ); - (* Two host pool, with one VM taking up just under half of one host's - * memory. *) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [{basic_vm with - memory = gib 120L; - name_label = "vm1"; - }]; - }; - slaves = [ - {memory_total = gib 256L; name_label = "slave"; vms = []} - ]; - ha_host_failures_to_tolerate = 1L; - }, - Xapi_ha_vm_failover.Plan_exists_for_all_VMs - ); - (* Two host pool, with two VMs taking up almost all of one host's memory. 
*) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - {basic_vm with - memory = gib 120L; - name_label = "vm2"; - }; - ]; - }; - slaves = [ - {memory_total = gib 256L; name_label = "slave"; vms = []} - ]; - ha_host_failures_to_tolerate = 1L; - }, - Xapi_ha_vm_failover.Plan_exists_for_all_VMs - ); - (* Two host pool, overcommitted. *) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - {basic_vm with - memory = gib 120L; - name_label = "vm2"; - }; - ]; - }; - slaves = [ - { - memory_total = gib 256L; name_label = "slave"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm3"; - }; - {basic_vm with - memory = gib 120L; - name_label = "vm4"; - }; - ]; - } - ]; - ha_host_failures_to_tolerate = 1L; - }, - Xapi_ha_vm_failover.No_plan_exists - ); - ] -end)) + module Io = struct + open Xapi_ha_vm_failover + + type input_t = pool + type output_t = result + + let string_of_input_t = string_of_pool + let string_of_output_t = function + | Plan_exists_for_all_VMs -> "Plan_exists_for_all_VMs" + | Plan_exists_excluding_non_agile_VMs -> "Plan_exists_excluding_non_agile_VMs" + | No_plan_exists -> "No_plan_exists" + end + + module State = Test_state.XapiDb + + let load_input __context = setup ~__context + + let extract_output __context pool = + let all_protected_vms = Xapi_ha_vm_failover.all_protected_vms ~__context in + Xapi_ha_vm_failover.plan_for_n_failures ~__context + ~all_protected_vms (Int64.to_int pool.ha_host_failures_to_tolerate) + + (* TODO: Add a test which causes plan_for_n_failures to return + * Plan_exists_excluding_non_agile_VMs. *) + let tests = [ + (* Two host pool with no VMs. 
*) + ( + { + master = {memory_total = gib 256L; name_label = "master"; vms = []}; + slaves = [ + {memory_total = gib 256L; name_label = "slave"; vms = []} + ]; + ha_host_failures_to_tolerate = 1L; + }, + Xapi_ha_vm_failover.Plan_exists_for_all_VMs + ); + (* Two host pool, with one VM taking up just under half of one host's + * memory. *) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [{basic_vm with + memory = gib 120L; + name_label = "vm1"; + }]; + }; + slaves = [ + {memory_total = gib 256L; name_label = "slave"; vms = []} + ]; + ha_host_failures_to_tolerate = 1L; + }, + Xapi_ha_vm_failover.Plan_exists_for_all_VMs + ); + (* Two host pool, with two VMs taking up almost all of one host's memory. *) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + {basic_vm with + memory = gib 120L; + name_label = "vm2"; + }; + ]; + }; + slaves = [ + {memory_total = gib 256L; name_label = "slave"; vms = []} + ]; + ha_host_failures_to_tolerate = 1L; + }, + Xapi_ha_vm_failover.Plan_exists_for_all_VMs + ); + (* Two host pool, overcommitted. 
*) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + {basic_vm with + memory = gib 120L; + name_label = "vm2"; + }; + ]; + }; + slaves = [ + { + memory_total = gib 256L; name_label = "slave"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm3"; + }; + {basic_vm with + memory = gib 120L; + name_label = "vm4"; + }; + ]; + } + ]; + ha_host_failures_to_tolerate = 1L; + }, + Xapi_ha_vm_failover.No_plan_exists + ); + ] + end)) module AssertNewVMPreservesHAPlan = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - open Xapi_ha_vm_failover - - type input_t = (pool * vm) - type output_t = (exn, unit) Either.t - - let string_of_input_t = Test_printers.pair string_of_pool string_of_vm - let string_of_output_t = Test_printers.(either Printexc.to_string unit) - end - - module State = Test_state.XapiDb - - let load_input __context (pool, _) = setup ~__context pool - - let extract_output __context (pool, vm) = - let open Db_filter_types in - let local_sr = - Db.SR.get_refs_where ~__context - ~expr:(Eq (Field "shared", Literal "false")) - |> List.hd - in - let shared_sr = - Db.SR.get_refs_where ~__context - ~expr:(Eq (Field "shared", Literal "true")) - |> List.hd - in - let local_net = - Db.Network.get_refs_where ~__context - ~expr:(Eq (Field "bridge", Literal "xapi0")) - |> List.hd - in - let shared_net = - Db.Network.get_refs_where ~__context - ~expr:(Eq (Field "bridge", Literal "xenbr0")) - |> List.hd - in - let vm_ref = - load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net in - try Either.Right - (Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context vm_ref) - with e -> Either.Left e - - (* n.b. incoming VMs have ha_always_run = false; otherwise they will be - * included when computing the plan for the already-running VMs. 
*) - let tests = [ - (* 2 host pool, one VM using just under half of one host's memory; - * test that another VM can be added. *) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - ]; - }; - slaves = [ - {memory_total = gib 256L; name_label = "slave"; vms = []} - ]; - ha_host_failures_to_tolerate = 1L; - }, - {basic_vm with - ha_always_run = false; - ha_restart_priority = "restart"; - memory = gib 120L; - name_label = "vm2"; - } - ), - Either.Right (); - (* 2 host pool, two VMs using almost all of one host's memory; - * test that another VM cannot be added. *) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - {basic_vm with - memory = gib 120L; - name_label = "vm2"; - }; - ]; - }; - slaves = [ - {memory_total = gib 256L; name_label = "slave"; vms = []} - ]; - ha_host_failures_to_tolerate = 1L; - }, - {basic_vm with - ha_always_run = false; - ha_restart_priority = "restart"; - memory = gib 120L; - name_label = "vm2"; - } - ), - Either.Left (Api_errors.(Server_error (ha_operation_would_break_failover_plan, []))); - (* 2 host pool which is already overcommitted. Attempting to add another VM - * should not throw an exception. 
*) - ( - { - master = { - memory_total = gib 256L; name_label = "master"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - {basic_vm with - memory = gib 120L; - name_label = "vm2"; - }; - ]; - }; - slaves = [ - { - memory_total = gib 256L; name_label = "slave"; - vms = [ - {basic_vm with - memory = gib 120L; - name_label = "vm1"; - }; - ] - }; - ]; - ha_host_failures_to_tolerate = 1L; - }, - {basic_vm with - ha_always_run = false; - ha_restart_priority = "restart"; - memory = gib 120L; - name_label = "vm2"; - } - ), - Either.Right (); - ] -end)) + module Io = struct + open Xapi_ha_vm_failover + + type input_t = (pool * vm) + type output_t = (exn, unit) Either.t + + let string_of_input_t = Test_printers.pair string_of_pool string_of_vm + let string_of_output_t = Test_printers.(either Printexc.to_string unit) + end + + module State = Test_state.XapiDb + + let load_input __context (pool, _) = setup ~__context pool + + let extract_output __context (pool, vm) = + let open Db_filter_types in + let local_sr = + Db.SR.get_refs_where ~__context + ~expr:(Eq (Field "shared", Literal "false")) + |> List.hd + in + let shared_sr = + Db.SR.get_refs_where ~__context + ~expr:(Eq (Field "shared", Literal "true")) + |> List.hd + in + let local_net = + Db.Network.get_refs_where ~__context + ~expr:(Eq (Field "bridge", Literal "xapi0")) + |> List.hd + in + let shared_net = + Db.Network.get_refs_where ~__context + ~expr:(Eq (Field "bridge", Literal "xenbr0")) + |> List.hd + in + let vm_ref = + load_vm ~__context ~vm ~local_sr ~shared_sr ~local_net ~shared_net in + try Either.Right + (Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context vm_ref) + with e -> Either.Left e + + (* n.b. incoming VMs have ha_always_run = false; otherwise they will be + * included when computing the plan for the already-running VMs. *) + let tests = [ + (* 2 host pool, one VM using just under half of one host's memory; + * test that another VM can be added. 
*) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + ]; + }; + slaves = [ + {memory_total = gib 256L; name_label = "slave"; vms = []} + ]; + ha_host_failures_to_tolerate = 1L; + }, + {basic_vm with + ha_always_run = false; + ha_restart_priority = "restart"; + memory = gib 120L; + name_label = "vm2"; + } + ), + Either.Right (); + (* 2 host pool, two VMs using almost all of one host's memory; + * test that another VM cannot be added. *) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + {basic_vm with + memory = gib 120L; + name_label = "vm2"; + }; + ]; + }; + slaves = [ + {memory_total = gib 256L; name_label = "slave"; vms = []} + ]; + ha_host_failures_to_tolerate = 1L; + }, + {basic_vm with + ha_always_run = false; + ha_restart_priority = "restart"; + memory = gib 120L; + name_label = "vm2"; + } + ), + Either.Left (Api_errors.(Server_error (ha_operation_would_break_failover_plan, []))); + (* 2 host pool which is already overcommitted. Attempting to add another VM + * should not throw an exception. 
*) + ( + { + master = { + memory_total = gib 256L; name_label = "master"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + {basic_vm with + memory = gib 120L; + name_label = "vm2"; + }; + ]; + }; + slaves = [ + { + memory_total = gib 256L; name_label = "slave"; + vms = [ + {basic_vm with + memory = gib 120L; + name_label = "vm1"; + }; + ] + }; + ]; + ha_host_failures_to_tolerate = 1L; + }, + {basic_vm with + ha_always_run = false; + ha_restart_priority = "restart"; + memory = gib 120L; + name_label = "vm2"; + } + ), + Either.Right (); + ] + end)) let test = - "test_ha_vm_failover" >::: - [ - "test_all_protected_vms" >::: AllProtectedVms.tests; - "test_plan_for_n_failures" >::: PlanForNFailures.tests; - "test_assert_new_vm_preserves_ha_plan" >::: - AssertNewVMPreservesHAPlan.tests; - ] + "test_ha_vm_failover" >::: + [ + "test_all_protected_vms" >::: AllProtectedVms.tests; + "test_plan_for_n_failures" >::: PlanForNFailures.tests; + "test_assert_new_vm_preserves_ha_plan" >::: + AssertNewVMPreservesHAPlan.tests; + ] diff --git a/ocaml/test/test_helpers.ml b/ocaml/test/test_helpers.ml index a67ec4139bc..c50c74975f0 100644 --- a/ocaml/test/test_helpers.ml +++ b/ocaml/test/test_helpers.ml @@ -20,188 +20,188 @@ open Stdext type pif = {device: string; management: bool; other_config: (string * string) list} module DetermineGateway = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - (* The type of inputs to a system being tested. *) - type input_t = pif list * string option - (* The type of outputs from a system being tested. *) - type output_t = string option * string option - - (* Helper functions for printing error messages on test failure. 
*) - let string_of_pif pif = - Printf.sprintf "[device = %s; management = %b; other_config = %s]" - pif.device pif.management - (Test_printers.(assoc_list string string) pif.other_config) - - let string_of_input_t = - Test_printers.(assoc_pair - (list string_of_pif) - (option string)) - - let string_of_output_t = - Test_printers.(assoc_pair - (option string) - (option string)) - end - module State = Test_state.XapiDb - - let load_input __context (pifs, _) = - make_localhost ~__context; - List.iter (fun pif -> - let network = make_network ~__context () in - let _ = make_pif ~__context ~network ~host:!Xapi_globs.localhost_ref - ~ip_configuration_mode:`DHCP ~device:pif.device - ~management:pif.management ~other_config:pif.other_config () in - () - ) pifs - - let extract_output __context (_, mgmt) = - let management_interface = Stdext.Opt.map (fun device -> - let open Db_filter_types in - let pifs = Db.PIF.get_refs_where ~__context ~expr:(Eq (Field "device", Literal device)) in - List.hd pifs - ) mgmt in - let gateway, dns = Helpers.determine_gateway_and_dns_ifs ~__context ?management_interface () in - let get_device = Stdext.Opt.map (fun (self, _) -> Db.PIF.get_device ~__context ~self) in - get_device gateway, - get_device dns - - let tests = [ - ([ - {device="eth0"; management=true; other_config=[]}; - {device="eth1"; management=false; other_config=[]}], - None - ), - (Some "eth0", Some "eth0"); - - ([ - {device="eth0"; management=true; other_config=[]}; - {device="eth1"; management=false; other_config=[]}], - Some "eth1" - ), - (Some "eth1", Some "eth1"); - - ([ - {device="eth0"; management=true; other_config=[]}; - {device="eth1"; management=false; other_config=["defaultroute","true"]}], - None - ), - (Some "eth1", Some "eth0"); - - ([ - {device="eth0"; management=true; other_config=[]}; - {device="eth1"; management=false; other_config=["peerdns","true"]}], - None - ), - (Some "eth0", Some "eth1"); - - ([ - {device="eth0"; management=false; other_config=[]}; - 
{device="eth1"; management=false; other_config=["defaultroute","true"]}], - Some "eth0" - ), - (Some "eth1", Some "eth0"); - - ([ - {device="eth0"; management=false; other_config=[]}; - {device="eth1"; management=false; other_config=["peerdns","true"]}], - Some "eth0" - ), - (Some "eth0", Some "eth1"); - ] -end)) + module Io = struct + (* The type of inputs to a system being tested. *) + type input_t = pif list * string option + (* The type of outputs from a system being tested. *) + type output_t = string option * string option + + (* Helper functions for printing error messages on test failure. *) + let string_of_pif pif = + Printf.sprintf "[device = %s; management = %b; other_config = %s]" + pif.device pif.management + (Test_printers.(assoc_list string string) pif.other_config) + + let string_of_input_t = + Test_printers.(assoc_pair + (list string_of_pif) + (option string)) + + let string_of_output_t = + Test_printers.(assoc_pair + (option string) + (option string)) + end + module State = Test_state.XapiDb + + let load_input __context (pifs, _) = + make_localhost ~__context; + List.iter (fun pif -> + let network = make_network ~__context () in + let _ = make_pif ~__context ~network ~host:!Xapi_globs.localhost_ref + ~ip_configuration_mode:`DHCP ~device:pif.device + ~management:pif.management ~other_config:pif.other_config () in + () + ) pifs + + let extract_output __context (_, mgmt) = + let management_interface = Stdext.Opt.map (fun device -> + let open Db_filter_types in + let pifs = Db.PIF.get_refs_where ~__context ~expr:(Eq (Field "device", Literal device)) in + List.hd pifs + ) mgmt in + let gateway, dns = Helpers.determine_gateway_and_dns_ifs ~__context ?management_interface () in + let get_device = Stdext.Opt.map (fun (self, _) -> Db.PIF.get_device ~__context ~self) in + get_device gateway, + get_device dns + + let tests = [ + ([ + {device="eth0"; management=true; other_config=[]}; + {device="eth1"; management=false; other_config=[]}], + None + ), + (Some 
"eth0", Some "eth0"); + + ([ + {device="eth0"; management=true; other_config=[]}; + {device="eth1"; management=false; other_config=[]}], + Some "eth1" + ), + (Some "eth1", Some "eth1"); + + ([ + {device="eth0"; management=true; other_config=[]}; + {device="eth1"; management=false; other_config=["defaultroute","true"]}], + None + ), + (Some "eth1", Some "eth0"); + + ([ + {device="eth0"; management=true; other_config=[]}; + {device="eth1"; management=false; other_config=["peerdns","true"]}], + None + ), + (Some "eth0", Some "eth1"); + + ([ + {device="eth0"; management=false; other_config=[]}; + {device="eth1"; management=false; other_config=["defaultroute","true"]}], + Some "eth0" + ), + (Some "eth1", Some "eth0"); + + ([ + {device="eth0"; management=false; other_config=[]}; + {device="eth1"; management=false; other_config=["peerdns","true"]}], + Some "eth0" + ), + (Some "eth0", Some "eth1"); + ] + end)) module IPCheckers = Generic.Make (struct - module Io = struct - type input_t = [`ipv4 | `ipv6] * string * string - type output_t = (exn, unit) Either.t - - let string_of_input_t = - let open Test_printers in - let kind : [`ipv4 | `ipv6] printer = function `ipv4 -> "IPv4" | `ipv6 -> "IPv6" in - tuple3 kind string string - - let string_of_output_t = Test_printers.(either exn unit) - end - - open Either - open Api_errors - - let transform (kind, field, address) = - try - Right (Helpers.assert_is_valid_ip kind field address) - with e -> - Left e - - let tests = [ - (`ipv4, "address", "192.168.0.1"), (Right ()); - (`ipv4, "address", "255.255.255.0"), (Right ()); - (`ipv4, "address1", ""), (Left (Server_error(invalid_ip_address_specified, ["address1"]))); - (`ipv4, "address2", "192.168.0.300"), (Left (Server_error(invalid_ip_address_specified, ["address2"]))); - (`ipv4, "address3", "192.168.0"), (Left (Server_error(invalid_ip_address_specified, ["address3"]))); - (`ipv4, "address4", "bad-address"), (Left (Server_error(invalid_ip_address_specified, ["address4"]))); - 
(`ipv6, "address5", "192.168.0.1"), (Left (Server_error(invalid_ip_address_specified, ["address5"]))); - - (`ipv6, "address", "fe80::bae8:56ff:fe29:894a"), (Right ()); - (`ipv6, "address", "fe80:0000:0000:0000:bae8:56ff:fe29:894a"), (Right ()); - (`ipv6, "address", "::1"), (Right ()); - (`ipv6, "address1", ""), (Left (Server_error(invalid_ip_address_specified, ["address1"]))); - (`ipv6, "address2", "fe80:0000:0000:0000:bae8:56ff:fe29:894a:0000"), (Left (Server_error(invalid_ip_address_specified, ["address2"]))); - (`ipv6, "address3", "bad-address"), (Left (Server_error(invalid_ip_address_specified, ["address3"]))); - (`ipv4, "address4", "fe80::bae8:56ff:fe29:894a"), (Left (Server_error(invalid_ip_address_specified, ["address4"]))); - (`ipv6, "address5", "ze80::bae8:56ff:fe29:894a"), (Left (Server_error(invalid_ip_address_specified, ["address5"]))); - ] -end) + module Io = struct + type input_t = [`ipv4 | `ipv6] * string * string + type output_t = (exn, unit) Either.t + + let string_of_input_t = + let open Test_printers in + let kind : [`ipv4 | `ipv6] printer = function `ipv4 -> "IPv4" | `ipv6 -> "IPv6" in + tuple3 kind string string + + let string_of_output_t = Test_printers.(either exn unit) + end + + open Either + open Api_errors + + let transform (kind, field, address) = + try + Right (Helpers.assert_is_valid_ip kind field address) + with e -> + Left e + + let tests = [ + (`ipv4, "address", "192.168.0.1"), (Right ()); + (`ipv4, "address", "255.255.255.0"), (Right ()); + (`ipv4, "address1", ""), (Left (Server_error(invalid_ip_address_specified, ["address1"]))); + (`ipv4, "address2", "192.168.0.300"), (Left (Server_error(invalid_ip_address_specified, ["address2"]))); + (`ipv4, "address3", "192.168.0"), (Left (Server_error(invalid_ip_address_specified, ["address3"]))); + (`ipv4, "address4", "bad-address"), (Left (Server_error(invalid_ip_address_specified, ["address4"]))); + (`ipv6, "address5", "192.168.0.1"), (Left (Server_error(invalid_ip_address_specified, 
["address5"]))); + + (`ipv6, "address", "fe80::bae8:56ff:fe29:894a"), (Right ()); + (`ipv6, "address", "fe80:0000:0000:0000:bae8:56ff:fe29:894a"), (Right ()); + (`ipv6, "address", "::1"), (Right ()); + (`ipv6, "address1", ""), (Left (Server_error(invalid_ip_address_specified, ["address1"]))); + (`ipv6, "address2", "fe80:0000:0000:0000:bae8:56ff:fe29:894a:0000"), (Left (Server_error(invalid_ip_address_specified, ["address2"]))); + (`ipv6, "address3", "bad-address"), (Left (Server_error(invalid_ip_address_specified, ["address3"]))); + (`ipv4, "address4", "fe80::bae8:56ff:fe29:894a"), (Left (Server_error(invalid_ip_address_specified, ["address4"]))); + (`ipv6, "address5", "ze80::bae8:56ff:fe29:894a"), (Left (Server_error(invalid_ip_address_specified, ["address5"]))); + ] + end) module CIDRCheckers = Generic.Make (struct - module Io = struct - type input_t = [`ipv4 | `ipv6] * string * string - type output_t = (exn, unit) Either.t - - let string_of_input_t = - let open Test_printers in - let kind : [`ipv4 | `ipv6] printer = function `ipv4 -> "IPv4" | `ipv6 -> "IPv6" in - tuple3 kind string string - - let string_of_output_t = Test_printers.(either exn unit) - end - - open Either - open Api_errors - - let transform (kind, field, cidr) = - try - Right (Helpers.assert_is_valid_cidr kind field cidr) - with e -> - Left e - - let tests = [ - (`ipv4, "address", "192.168.0.1/24"), (Right ()); - (`ipv4, "address", "255.255.255.0/32"), (Right ()); - (`ipv4, "address1", ""), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); - (`ipv4, "address1", "192.168.0.2"), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); - (`ipv4, "address1", "192.168.0.2/33"), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); - (`ipv4, "address1", "192.168.0.2/x"), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); - (`ipv4, "address2", "192.168.0.300/10"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); - (`ipv4, 
"address3", "192.168.0/20"), (Left (Server_error(invalid_cidr_address_specified, ["address3"]))); - (`ipv4, "address4", "bad-address/24"), (Left (Server_error(invalid_cidr_address_specified, ["address4"]))); - (`ipv6, "address5", "192.168.0.1/24"), (Left (Server_error(invalid_cidr_address_specified, ["address5"]))); - - (`ipv6, "address", "fe80::bae8:56ff:fe29:894a/64"), (Right ()); - (`ipv6, "address", "fe80:0000:0000:0000:bae8:56ff:fe29:894a/80"), (Right ()); - (`ipv6, "address", "::1/128"), (Right ()); - (`ipv6, "address1", ""), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); - (`ipv6, "address2", "fe80::bae8:56ff:fe29:894a:0000/129"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); - (`ipv6, "address2", "fe80::bae8:56ff:fe29:894a:0000"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); - (`ipv6, "address3", "bad-address/64"), (Left (Server_error(invalid_cidr_address_specified, ["address3"]))); - (`ipv4, "address4", "fe80::bae8:56ff:fe29:894a/64"), (Left (Server_error(invalid_cidr_address_specified, ["address4"]))); - (`ipv6, "address5", "ze80::bae8:56ff:fe29:894a/64"), (Left (Server_error(invalid_cidr_address_specified, ["address5"]))); - ] -end) + module Io = struct + type input_t = [`ipv4 | `ipv6] * string * string + type output_t = (exn, unit) Either.t + + let string_of_input_t = + let open Test_printers in + let kind : [`ipv4 | `ipv6] printer = function `ipv4 -> "IPv4" | `ipv6 -> "IPv6" in + tuple3 kind string string + + let string_of_output_t = Test_printers.(either exn unit) + end + + open Either + open Api_errors + + let transform (kind, field, cidr) = + try + Right (Helpers.assert_is_valid_cidr kind field cidr) + with e -> + Left e + + let tests = [ + (`ipv4, "address", "192.168.0.1/24"), (Right ()); + (`ipv4, "address", "255.255.255.0/32"), (Right ()); + (`ipv4, "address1", ""), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); + (`ipv4, "address1", "192.168.0.2"), (Left 
(Server_error(invalid_cidr_address_specified, ["address1"]))); + (`ipv4, "address1", "192.168.0.2/33"), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); + (`ipv4, "address1", "192.168.0.2/x"), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); + (`ipv4, "address2", "192.168.0.300/10"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); + (`ipv4, "address3", "192.168.0/20"), (Left (Server_error(invalid_cidr_address_specified, ["address3"]))); + (`ipv4, "address4", "bad-address/24"), (Left (Server_error(invalid_cidr_address_specified, ["address4"]))); + (`ipv6, "address5", "192.168.0.1/24"), (Left (Server_error(invalid_cidr_address_specified, ["address5"]))); + + (`ipv6, "address", "fe80::bae8:56ff:fe29:894a/64"), (Right ()); + (`ipv6, "address", "fe80:0000:0000:0000:bae8:56ff:fe29:894a/80"), (Right ()); + (`ipv6, "address", "::1/128"), (Right ()); + (`ipv6, "address1", ""), (Left (Server_error(invalid_cidr_address_specified, ["address1"]))); + (`ipv6, "address2", "fe80::bae8:56ff:fe29:894a:0000/129"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); + (`ipv6, "address2", "fe80::bae8:56ff:fe29:894a:0000"), (Left (Server_error(invalid_cidr_address_specified, ["address2"]))); + (`ipv6, "address3", "bad-address/64"), (Left (Server_error(invalid_cidr_address_specified, ["address3"]))); + (`ipv4, "address4", "fe80::bae8:56ff:fe29:894a/64"), (Left (Server_error(invalid_cidr_address_specified, ["address4"]))); + (`ipv6, "address5", "ze80::bae8:56ff:fe29:894a/64"), (Left (Server_error(invalid_cidr_address_specified, ["address5"]))); + ] + end) let test = - "test_helpers" >::: - [ - "test_determine_gateway" >::: DetermineGateway.tests; - "test_assert_is_valid_ip" >::: IPCheckers.tests; - "test_assert_is_valid_cidr" >::: CIDRCheckers.tests; - ] + "test_helpers" >::: + [ + "test_determine_gateway" >::: DetermineGateway.tests; + "test_assert_is_valid_ip" >::: IPCheckers.tests; + 
"test_assert_is_valid_cidr" >::: CIDRCheckers.tests; + ] diff --git a/ocaml/test/test_http.ml b/ocaml/test/test_http.ml index 65464a03ddf..f88b9c00879 100644 --- a/ocaml/test/test_http.ml +++ b/ocaml/test/test_http.ml @@ -16,40 +16,40 @@ open OUnit open Test_highlevel module FixCookie = Generic.Make(struct - module Io = struct - type input_t = (string * string) list - type output_t = (string * string) list + module Io = struct + type input_t = (string * string) list + type output_t = (string * string) list - let string_of_input_t = Test_printers.(assoc_list string string) - let string_of_output_t = Test_printers.(assoc_list string string) - end + let string_of_input_t = Test_printers.(assoc_list string string) + let string_of_output_t = Test_printers.(assoc_list string string) + end - let transform = Xapi_services.fix_cookie + let transform = Xapi_services.fix_cookie - let tests = [ - (* These cookies should be unchanged. *) - [], []; - ["foo", "bar"], ["foo", "bar"]; - (* Any pairs where the key starts with '$' should be filtered out. *) - ["$PATH", "baz"], []; - ["$PATH", "baz"; "foo", "bar"], ["foo", "bar"]; - (* These cookies have got a bit mangled, and should get unmangled. *) - ["foo=x, bar", "y"], ["foo", "x"; "bar", "y"]; - ["foo=x,\tbar", "y"], ["foo", "x"; "bar", "y"]; - ["foo=x; bar", "y"], ["foo", "x"; "bar", "y"]; - ["foo=x;\tbar", "y"], ["foo", "x"; "bar", "y"]; - ["foo", "x, bar=y"], ["foo", "x"; "bar", "y"]; - ["foo", "x,\tbar=y"], ["foo", "x"; "bar", "y"]; - ["foo", "x; bar=y"], ["foo", "x"; "bar", "y"]; - ["foo", "x;\tbar=y"], ["foo", "x"; "bar", "y"]; - (* These cookies need unmangling and filtering. *) - ["foo=x,\tbar", "y"; "$Stuff", "whatever"], ["foo", "x"; "bar", "y"]; - ["$Stuff", "whatever"; "foo=x,\tbar", "y"], ["foo", "x"; "bar", "y"]; - ] -end) + let tests = [ + (* These cookies should be unchanged. *) + [], []; + ["foo", "bar"], ["foo", "bar"]; + (* Any pairs where the key starts with '$' should be filtered out. 
*) + ["$PATH", "baz"], []; + ["$PATH", "baz"; "foo", "bar"], ["foo", "bar"]; + (* These cookies have got a bit mangled, and should get unmangled. *) + ["foo=x, bar", "y"], ["foo", "x"; "bar", "y"]; + ["foo=x,\tbar", "y"], ["foo", "x"; "bar", "y"]; + ["foo=x; bar", "y"], ["foo", "x"; "bar", "y"]; + ["foo=x;\tbar", "y"], ["foo", "x"; "bar", "y"]; + ["foo", "x, bar=y"], ["foo", "x"; "bar", "y"]; + ["foo", "x,\tbar=y"], ["foo", "x"; "bar", "y"]; + ["foo", "x; bar=y"], ["foo", "x"; "bar", "y"]; + ["foo", "x;\tbar=y"], ["foo", "x"; "bar", "y"]; + (* These cookies need unmangling and filtering. *) + ["foo=x,\tbar", "y"; "$Stuff", "whatever"], ["foo", "x"; "bar", "y"]; + ["$Stuff", "whatever"; "foo=x,\tbar", "y"], ["foo", "x"; "bar", "y"]; + ] + end) let test = - "test_http" >::: - [ - "test_fix_cookie" >::: FixCookie.tests; - ] + "test_http" >::: + [ + "test_fix_cookie" >::: FixCookie.tests; + ] diff --git a/ocaml/test/test_map_check.ml b/ocaml/test/test_map_check.ml index 9f71c299f23..21d6472e8d0 100644 --- a/ocaml/test/test_map_check.ml +++ b/ocaml/test/test_map_check.ml @@ -19,108 +19,108 @@ open Test_common open Test_highlevel let string_of_requirement requirement = - Printf.sprintf "{key = \"%s\"; default_value = \"%s\"}" - requirement.key (Test_printers.(option string) requirement.default_value) + Printf.sprintf "{key = \"%s\"; default_value = \"%s\"}" + requirement.key (Test_printers.(option string) requirement.default_value) let true_fun = (fun _ -> true) let false_fun = (fun _ -> false) module AddDefaults = Generic.Make(struct - module Io = struct - type input_t = (requirement list) * ((string * string) list) - type output_t = (string * string) list - - let string_of_input_t = - Test_printers.(assoc_pair - (list string_of_requirement) - (assoc_list string string)) - let string_of_output_t = Test_printers.(assoc_list string string) - end - - let transform (requirements, old_map) = add_defaults requirements old_map - - let tests = [ - (* If default value is None, 
no value should be added. *) - ( - [{key = "abc"; default_value = None; is_valid_value = true_fun}], - [] - ), - []; - (* If default value is Some _, the default should be added. *) - ( - [{key = "abc"; default_value = Some "def"; is_valid_value = true_fun}], - [] - ), - ["abc", "def"]; - (* If default value is None, an existing value should not be overwritten. *) - ( - [{key = "abc"; default_value = None; is_valid_value = true_fun}], - ["abc", "ghi"] - ), - ["abc", "ghi"]; - (* If default value is Some _, an existing value should not be overwritten. *) - ( - [{key = "abc"; default_value = Some "def"; is_valid_value = true_fun}], - ["abc", "ghi"] - ), - ["abc", "ghi"]; - ] -end) + module Io = struct + type input_t = (requirement list) * ((string * string) list) + type output_t = (string * string) list + + let string_of_input_t = + Test_printers.(assoc_pair + (list string_of_requirement) + (assoc_list string string)) + let string_of_output_t = Test_printers.(assoc_list string string) + end + + let transform (requirements, old_map) = add_defaults requirements old_map + + let tests = [ + (* If default value is None, no value should be added. *) + ( + [{key = "abc"; default_value = None; is_valid_value = true_fun}], + [] + ), + []; + (* If default value is Some _, the default should be added. *) + ( + [{key = "abc"; default_value = Some "def"; is_valid_value = true_fun}], + [] + ), + ["abc", "def"]; + (* If default value is None, an existing value should not be overwritten. *) + ( + [{key = "abc"; default_value = None; is_valid_value = true_fun}], + ["abc", "ghi"] + ), + ["abc", "ghi"]; + (* If default value is Some _, an existing value should not be overwritten. 
*) + ( + [{key = "abc"; default_value = Some "def"; is_valid_value = true_fun}], + ["abc", "ghi"] + ), + ["abc", "ghi"]; + ] + end) module ValidateKVPair = Generic.Make(struct - module Io = struct - type input_t = requirement list * string * string - type output_t = (exn, unit) Either.t - - let string_of_input_t (requirements, key, value) = - Printf.sprintf "%s, %s, %s" - ((Test_printers.list string_of_requirement) requirements) key value - let string_of_output_t = Test_printers.(either exn unit) - end - - let transform (requirements, key, value) = - try Either.Right (validate_kvpair "test_field" requirements (key, value)) - with e -> Either.Left e - - let tests = [ - (* If all values are valid, the exception should not be thrown. *) - ( - [{key = "abc"; default_value = None; is_valid_value = true_fun}], - "abc", "def" - ), - Either.Right (); - (* If there is no valid value, the exception should always be thrown. *) - ( - [{key = "abc"; default_value = None; is_valid_value = false_fun}], - "abc", "def" - ), - Either.Left (Api_errors.(Server_error - (invalid_value, ["test_field"; "abc = def"]))); - ] -end) + module Io = struct + type input_t = requirement list * string * string + type output_t = (exn, unit) Either.t + + let string_of_input_t (requirements, key, value) = + Printf.sprintf "%s, %s, %s" + ((Test_printers.list string_of_requirement) requirements) key value + let string_of_output_t = Test_printers.(either exn unit) + end + + let transform (requirements, key, value) = + try Either.Right (validate_kvpair "test_field" requirements (key, value)) + with e -> Either.Left e + + let tests = [ + (* If all values are valid, the exception should not be thrown. *) + ( + [{key = "abc"; default_value = None; is_valid_value = true_fun}], + "abc", "def" + ), + Either.Right (); + (* If there is no valid value, the exception should always be thrown. 
*) + ( + [{key = "abc"; default_value = None; is_valid_value = false_fun}], + "abc", "def" + ), + Either.Left (Api_errors.(Server_error + (invalid_value, ["test_field"; "abc = def"]))); + ] + end) module Accessors = Generic.Make(struct - module Io = struct - type input_t = string * (string * string) list - type output_t = int + module Io = struct + type input_t = string * (string * string) list + type output_t = int - let string_of_input_t = Test_printers.(pair string (list (pair string string))) - let string_of_output_t = Test_printers.int - end + let string_of_input_t = Test_printers.(pair string (list (pair string string))) + let string_of_output_t = Test_printers.int + end - let transform (key, map) = - getf (field key int) map + let transform (key, map) = + getf (field key int) map - let tests = [ - ("a", ["a", "1"]), 1; - ] -end) + let tests = [ + ("a", ["a", "1"]), 1; + ] + end) let test = - "test_map_check" >::: - [ - "test_add_defaults" >::: AddDefaults.tests; - "test_validate_kvpair" >::: ValidateKVPair.tests; - "test_accessors" >::: Accessors.tests; - ] + "test_map_check" >::: + [ + "test_add_defaults" >::: AddDefaults.tests; + "test_validate_kvpair" >::: ValidateKVPair.tests; + "test_accessors" >::: Accessors.tests; + ] diff --git a/ocaml/test/test_no_migrate.ml b/ocaml/test/test_no_migrate.ml index 74ba4b41dc4..66866dad3ae 100644 --- a/ocaml/test/test_no_migrate.ml +++ b/ocaml/test/test_no_migrate.ml @@ -18,54 +18,54 @@ open Test_common module LC = Xapi_vm_lifecycle let ops = - [ `suspend - ; `checkpoint - ; `pool_migrate - ; `migrate_send - ] + [ `suspend + ; `checkpoint + ; `pool_migrate + ; `migrate_send + ] let op_string = function - | `suspend -> "suspend" - | `checkpoint -> "checkpoint" - | `pool_migrate -> "pool_migrate" - | `migrate_send -> "migrate_send" - | _ -> "other" + | `suspend -> "suspend" + | `checkpoint -> "checkpoint" + | `pool_migrate -> "pool_migrate" + | `migrate_send -> "migrate_send" + | _ -> "other" let testcases = - (*nest , 
nomig, force, permitted *) - [ false, false, false, true - ; false, false, true , true - ; false, true , false, false - ; false, true , true , true - ; true , false, false, false - ; true , false, true , true - ; true , true , false, false - ; true , true , true , true - ] + (*nest , nomig, force, permitted *) + [ false, false, false, true + ; false, false, true , true + ; false, true , false, false + ; false, true , true , true + ; true , false, false, false + ; true , false, true , true + ; true , true , false, false + ; true , true , true , true + ] let test (nv, nm, force, permitted) op = - let __context = make_test_database () in - let vm = make_vm ~__context ~hVM_boot_policy:"" () in - let metrics = Db.VM.get_metrics ~__context ~self:vm in - let strict = not force in - ( Db.VM.set_power_state ~__context ~self:vm ~value:`Running - ; Db.VM_metrics.set_nested_virt ~__context ~self:metrics ~value:nv - ; Db.VM_metrics.set_nomigrate ~__context ~self:metrics ~value:nm - ; LC.get_operation_error ~__context ~self:vm ~op ~strict - |> function - | None when permitted -> assert_bool "success" true - | None -> assert_failure (op_string op) - | Some (x,xs) when not permitted -> assert_bool "success" true - | Some (x,xs) -> assert_failure (op_string op) - ) + let __context = make_test_database () in + let vm = make_vm ~__context ~hVM_boot_policy:"" () in + let metrics = Db.VM.get_metrics ~__context ~self:vm in + let strict = not force in + ( Db.VM.set_power_state ~__context ~self:vm ~value:`Running + ; Db.VM_metrics.set_nested_virt ~__context ~self:metrics ~value:nv + ; Db.VM_metrics.set_nomigrate ~__context ~self:metrics ~value:nm + ; LC.get_operation_error ~__context ~self:vm ~op ~strict + |> function + | None when permitted -> assert_bool "success" true + | None -> assert_failure (op_string op) + | Some (x,xs) when not permitted -> assert_bool "success" true + | Some (x,xs) -> assert_failure (op_string op) + ) let test' op = - testcases |> List.iter (fun t -> test t op) 
+ testcases |> List.iter (fun t -> test t op) let test = "test_no_migrate" >::: - [ "test_no_migrate_00" >:: (fun () -> test' `suspend) - ; "test_no_migrate_01" >:: (fun () -> test' `checkpoint) - ; "test_no_migrate_02" >:: (fun () -> test' `pool_migrate) - ; "test_no_migrate_03" >:: (fun () -> test' `migrate_send) - ] + [ "test_no_migrate_00" >:: (fun () -> test' `suspend) + ; "test_no_migrate_01" >:: (fun () -> test' `checkpoint) + ; "test_no_migrate_02" >:: (fun () -> test' `pool_migrate) + ; "test_no_migrate_03" >:: (fun () -> test' `migrate_send) + ] diff --git a/ocaml/test/test_pci_helpers.ml b/ocaml/test/test_pci_helpers.ml index a19f8faab2a..a829cd9711c 100644 --- a/ocaml/test/test_pci_helpers.ml +++ b/ocaml/test/test_pci_helpers.ml @@ -19,32 +19,32 @@ open Xapi_pci_helpers let skip = true let print_host_pcis () = - skip_if skip "Generates lots of text..."; - try - print_string "===== Host PCIs =====\n\n"; - let pcis = get_host_pcis () in - List.iter - (fun p -> - let x_to_str = Printf.sprintf "%04x" in - Printf.printf "%s " (String.concat " " - [ - p.address; - x_to_str p.vendor.id; - p.vendor.name; - x_to_str p.device.id; - p.device.name; - x_to_str p.pci_class.id; - p.pci_class.name - ]); - List.iter (fun s -> print_string (s ^ ", ")) p.related; - print_newline ()) - pcis - with e -> - print_string (Printexc.to_string e); - assert_equal 0 1 + skip_if skip "Generates lots of text..."; + try + print_string "===== Host PCIs =====\n\n"; + let pcis = get_host_pcis () in + List.iter + (fun p -> + let x_to_str = Printf.sprintf "%04x" in + Printf.printf "%s " (String.concat " " + [ + p.address; + x_to_str p.vendor.id; + p.vendor.name; + x_to_str p.device.id; + p.device.name; + x_to_str p.pci_class.id; + p.pci_class.name + ]); + List.iter (fun s -> print_string (s ^ ", ")) p.related; + print_newline ()) + pcis + with e -> + print_string (Printexc.to_string e); + assert_equal 0 1 let test = - "test_pci_helpers" >::: - [ - "print_host_pcis" >:: print_host_pcis; - ] 
+ "test_pci_helpers" >::: + [ + "print_host_pcis" >:: print_host_pcis; + ] diff --git a/ocaml/test/test_pgpu.ml b/ocaml/test/test_pgpu.ml index c0198d6bd94..fa128d632f6 100644 --- a/ocaml/test/test_pgpu.ml +++ b/ocaml/test/test_pgpu.ml @@ -19,143 +19,143 @@ open Test_vgpu_common (*--- Helper functions ---*) let on_host_with_k2 (f : Context.t -> API.ref_PGPU -> 'a) = - let __context = make_test_database () in - let p = make_pgpu ~__context default_k2 in - f __context p + let __context = make_test_database () in + let p = make_pgpu ~__context default_k2 in + f __context p (*--- Xapi_pgpu.assert_can_run_VGPU tests ---*) let test_can_run_VGPU_succeeds_empty_pgpu () = - on_host_with_k2 (fun __context p -> - let vgpu = make_vgpu ~__context k260q in - Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) + on_host_with_k2 (fun __context p -> + let vgpu = make_vgpu ~__context k260q in + Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) let test_can_run_VGPU_succeeds_enabled_types () = - on_host_with_k2 (fun __context p -> - let vgpus = List.map (make_vgpu ~__context) [k200; k240q; k260q] in - ignore (List.map (fun vgpu -> - Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) - vgpus)) + on_host_with_k2 (fun __context p -> + let vgpus = List.map (make_vgpu ~__context) [k200; k240q; k260q] in + ignore (List.map (fun vgpu -> + Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) + vgpus)) let test_can_run_VGPU_succeeds_same_type () = - on_host_with_k2 (fun __context p -> - let (_:API.ref_VGPU) = make_vgpu ~__context ~resident_on:p k260q in - let vgpu = make_vgpu ~__context k260q in - Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) + on_host_with_k2 (fun __context p -> + let (_:API.ref_VGPU) = make_vgpu ~__context ~resident_on:p k260q in + let vgpu = make_vgpu ~__context k260q in + Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu) let test_can_run_VGPU_fails_unsupported_types () = - on_host_with_k2 (fun __context p -> - let vgpus = List.map 
(make_vgpu ~__context) [k100; k140q] in - ignore (List.map (fun vgpu -> - assert_raises_api_error Api_errors.vgpu_type_not_supported - (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) - vgpus)) + on_host_with_k2 (fun __context p -> + let vgpus = List.map (make_vgpu ~__context) [k100; k140q] in + ignore (List.map (fun vgpu -> + assert_raises_api_error Api_errors.vgpu_type_not_supported + (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) + vgpus)) let test_can_run_VGPU_fails_disabled_type () = - on_host_with_k2 (fun __context p -> - let vgpu = make_vgpu ~__context k200 in - let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in - Db.PGPU.remove_enabled_VGPU_types ~__context ~self:p ~value:vgpu_type; - assert_raises_api_error Api_errors.vgpu_type_not_enabled - (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) + on_host_with_k2 (fun __context p -> + let vgpu = make_vgpu ~__context k200 in + let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in + Db.PGPU.remove_enabled_VGPU_types ~__context ~self:p ~value:vgpu_type; + assert_raises_api_error Api_errors.vgpu_type_not_enabled + (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) let test_can_run_VGPU_fails_different_type () = - on_host_with_k2 (fun __context p -> - let (_:API.ref_VGPU) = make_vgpu ~__context ~resident_on:p k260q in - let vgpu = make_vgpu ~__context k240q in - assert_raises_api_error - Api_errors.vgpu_type_not_compatible_with_running_type - (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) + on_host_with_k2 (fun __context p -> + let (_:API.ref_VGPU) = make_vgpu ~__context ~resident_on:p k260q in + let vgpu = make_vgpu ~__context k240q in + assert_raises_api_error + Api_errors.vgpu_type_not_compatible_with_running_type + (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) let test_can_run_VGPU_fails_no_capacity () = - on_host_with_k2 (fun __context p -> - (* Fill up the pGPU with 2 x K260Q *) - 
let (_:API.ref_VGPU list) = - List.map (make_vgpu ~__context ~resident_on:p) [k260q; k260q] in - (* Should fail to put another one on *) - let vgpu = make_vgpu ~__context k260q in - assert_raises_api_error - Api_errors.pgpu_insufficient_capacity_for_vgpu - (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) + on_host_with_k2 (fun __context p -> + (* Fill up the pGPU with 2 x K260Q *) + let (_:API.ref_VGPU list) = + List.map (make_vgpu ~__context ~resident_on:p) [k260q; k260q] in + (* Should fail to put another one on *) + let vgpu = make_vgpu ~__context k260q in + assert_raises_api_error + Api_errors.pgpu_insufficient_capacity_for_vgpu + (fun () -> Xapi_pgpu.assert_can_run_VGPU ~__context ~self:p ~vgpu)) (*--- Xapi_pgpu.get_remaining_capacity tests ---*) let check_capacity_is ~__context expected_capacity pgpu vgpu_type = - let vgpu_type = Xapi_vgpu_type.find_or_create ~__context vgpu_type in - assert_equal ~printer:Int64.to_string expected_capacity - (Xapi_pgpu.get_remaining_capacity ~__context ~self:pgpu ~vgpu_type) + let vgpu_type = Xapi_vgpu_type.find_or_create ~__context vgpu_type in + assert_equal ~printer:Int64.to_string expected_capacity + (Xapi_pgpu.get_remaining_capacity ~__context ~self:pgpu ~vgpu_type) let expected_capacities = [(k200, 8L); (k240q, 4L); (k260q, 2L)] let test_remaining_capacity_unsupported_types () = - on_host_with_k2 (fun __context p -> - ignore (List.map (check_capacity_is ~__context 0L p) [k100; k140q])) + on_host_with_k2 (fun __context p -> + ignore (List.map (check_capacity_is ~__context 0L p) [k100; k140q])) let test_remaining_capacity_supported_types () = - on_host_with_k2 (fun __context p -> - ignore (List.map - (fun (v, c) -> check_capacity_is ~__context c p v) expected_capacities)) + on_host_with_k2 (fun __context p -> + ignore (List.map + (fun (v, c) -> check_capacity_is ~__context c p v) expected_capacities)) let test_remaining_capacity_decreases () = - on_host_with_k2 (fun __context _ -> - let rec 
check_remaining_capacity_and_fill p c vgpu_type = - check_capacity_is ~__context c p vgpu_type; - if c > 0L then begin - ignore (make_vgpu ~__context ~resident_on:p vgpu_type); - check_remaining_capacity_and_fill p (Int64.sub c 1L) vgpu_type - end - in - ignore (List.map - (fun (vgpu_type, capacity) -> - let p = make_pgpu ~__context default_k2 in - check_remaining_capacity_and_fill p capacity vgpu_type) - expected_capacities)) + on_host_with_k2 (fun __context _ -> + let rec check_remaining_capacity_and_fill p c vgpu_type = + check_capacity_is ~__context c p vgpu_type; + if c > 0L then begin + ignore (make_vgpu ~__context ~resident_on:p vgpu_type); + check_remaining_capacity_and_fill p (Int64.sub c 1L) vgpu_type + end + in + ignore (List.map + (fun (vgpu_type, capacity) -> + let p = make_pgpu ~__context default_k2 in + check_remaining_capacity_and_fill p capacity vgpu_type) + expected_capacities)) (*--- Xapi_pgpu.set_GPU_group ---*) let test_set_GPU_group_succeeds_empty_pgpu () = - on_host_with_k2 (fun __context p -> - let group_ref = make_gpu_group ~__context () in - Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group_ref) + on_host_with_k2 (fun __context p -> + let group_ref = make_gpu_group ~__context () in + Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group_ref) let test_set_GPU_group_succeeds_orphan_vgpu () = - (* This is OK since vGPUs can be created on empty GPU groups *) - on_host_with_k2 (fun __context p -> - let group, group' = - (make_gpu_group ~__context (), make_gpu_group ~__context ()) - in - Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group; - let (_: API.ref_VGPU) = - Test_common.make_vgpu ~__context ~gPU_group:group () - in - Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group') + (* This is OK since vGPUs can be created on empty GPU groups *) + on_host_with_k2 (fun __context p -> + let group, group' = + (make_gpu_group ~__context (), make_gpu_group ~__context ()) + in + Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group; + 
let (_: API.ref_VGPU) = + Test_common.make_vgpu ~__context ~gPU_group:group () + in + Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group') let test_set_GPU_group_fails_resident_vgpu () = - on_host_with_k2 (fun __context p -> - let group, group' = - (make_gpu_group ~__context (), make_gpu_group ~__context ()) - in - Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group; - ignore (make_vgpu ~__context ~resident_on:p k200); - assert_raises_api_error Api_errors.pgpu_in_use_by_vm (fun () -> - Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group')) + on_host_with_k2 (fun __context p -> + let group, group' = + (make_gpu_group ~__context (), make_gpu_group ~__context ()) + in + Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group; + ignore (make_vgpu ~__context ~resident_on:p k200); + assert_raises_api_error Api_errors.pgpu_in_use_by_vm (fun () -> + Xapi_pgpu.set_GPU_group ~__context ~self:p ~value:group')) let test = - "test_pgpu" >::: - [ - "test_can_run_VGPU_succeeds_empty_pgpu" >:: test_can_run_VGPU_succeeds_empty_pgpu; - "test_can_run_VGPU_succeeds_enabled_types" >:: test_can_run_VGPU_succeeds_enabled_types; - "test_can_run_VGPU_succeeds_same_type" >:: test_can_run_VGPU_succeeds_same_type; - "test_can_run_VGPU_fails_unsupported_types" >:: test_can_run_VGPU_fails_unsupported_types; - "test_can_run_VGPU_fails_disabled_type" >:: test_can_run_VGPU_fails_disabled_type; - "test_can_run_VGPU_fails_different_type" >:: test_can_run_VGPU_fails_different_type; - "test_can_run_VGPU_fails_no_capacity" >:: test_can_run_VGPU_fails_no_capacity; - - "test_remaining_capacity_unsupported_types" >:: test_remaining_capacity_unsupported_types; - "test_remaining_capacity_supported_types" >:: test_remaining_capacity_supported_types; - "test_remaining_capacity_decreases" >:: test_remaining_capacity_decreases; - - "test_set_GPU_group_succeeds_empty_pgpu" >:: test_set_GPU_group_succeeds_empty_pgpu; - "test_set_GPU_group_succeeds_orphan_vgpu" >:: 
test_set_GPU_group_succeeds_orphan_vgpu; - "test_set_GPU_group_fails_resident_vgpu" >:: test_set_GPU_group_fails_resident_vgpu; - ] + "test_pgpu" >::: + [ + "test_can_run_VGPU_succeeds_empty_pgpu" >:: test_can_run_VGPU_succeeds_empty_pgpu; + "test_can_run_VGPU_succeeds_enabled_types" >:: test_can_run_VGPU_succeeds_enabled_types; + "test_can_run_VGPU_succeeds_same_type" >:: test_can_run_VGPU_succeeds_same_type; + "test_can_run_VGPU_fails_unsupported_types" >:: test_can_run_VGPU_fails_unsupported_types; + "test_can_run_VGPU_fails_disabled_type" >:: test_can_run_VGPU_fails_disabled_type; + "test_can_run_VGPU_fails_different_type" >:: test_can_run_VGPU_fails_different_type; + "test_can_run_VGPU_fails_no_capacity" >:: test_can_run_VGPU_fails_no_capacity; + + "test_remaining_capacity_unsupported_types" >:: test_remaining_capacity_unsupported_types; + "test_remaining_capacity_supported_types" >:: test_remaining_capacity_supported_types; + "test_remaining_capacity_decreases" >:: test_remaining_capacity_decreases; + + "test_set_GPU_group_succeeds_empty_pgpu" >:: test_set_GPU_group_succeeds_empty_pgpu; + "test_set_GPU_group_succeeds_orphan_vgpu" >:: test_set_GPU_group_succeeds_orphan_vgpu; + "test_set_GPU_group_fails_resident_vgpu" >:: test_set_GPU_group_fails_resident_vgpu; + ] diff --git a/ocaml/test/test_pgpu_helpers.ml b/ocaml/test/test_pgpu_helpers.ml index 2eff3eef5c6..1b6e357f35a 100644 --- a/ocaml/test/test_pgpu_helpers.ml +++ b/ocaml/test/test_pgpu_helpers.ml @@ -21,78 +21,78 @@ open Test_vgpu_common open Xapi_vgpu_type module GetRemainingCapacity = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = (pgpu_state * vgpu_type) - type output_t = int64 + module Io = struct + type input_t = (pgpu_state * vgpu_type) + type output_t = int64 - let string_of_input_t = - Test_printers.(assoc_pair string_of_pgpu_state string_of_vgpu_type) + let string_of_input_t = + Test_printers.(assoc_pair string_of_pgpu_state string_of_vgpu_type) - let 
string_of_output_t = Test_printers.int64 - end + let string_of_output_t = Test_printers.int64 + end - module State = Test_state.XapiDb + module State = Test_state.XapiDb - let load_input __context (pgpu, _) = - let (_: API.ref_PGPU) = make_pgpu ~__context pgpu in () + let load_input __context (pgpu, _) = + let (_: API.ref_PGPU) = make_pgpu ~__context pgpu in () - let extract_output __context (_, vgpu_type) = - let pgpu_ref = List.hd (Db.PGPU.get_all ~__context) in - let vgpu_type_ref = find_or_create ~__context vgpu_type in - Xapi_pgpu_helpers.get_remaining_capacity - ~__context ~self:pgpu_ref ~vgpu_type:vgpu_type_ref + let extract_output __context (_, vgpu_type) = + let pgpu_ref = List.hd (Db.PGPU.get_all ~__context) in + let vgpu_type_ref = find_or_create ~__context vgpu_type in + Xapi_pgpu_helpers.get_remaining_capacity + ~__context ~self:pgpu_ref ~vgpu_type:vgpu_type_ref - let tests = [ - (* Test that empty PGPUs have the correct capacity for each virtual - * GPU type. *) - (default_k1, k100), 8L; - (default_k1, k140q), 4L; - (default_k1, passthrough_gpu), 1L; - (default_k2, k200), 8L; - (default_k2, k240q), 4L; - (default_k2, k260q), 2L; - (default_k2, passthrough_gpu), 1L; - (* Test that we can't mix VGPU types. *) - ({default_k1 with resident_VGPU_types = [passthrough_gpu]}, k100), 0L; - ({default_k1 with resident_VGPU_types = [k100]}, k140q), 0L; - ({default_k2 with resident_VGPU_types = [passthrough_gpu]}, k200), 0L; - ({default_k2 with resident_VGPU_types = [k260q]}, k200), 0L; - (* Test that remaining capacity values in other situations are correct. 
*) - ({default_k1 with resident_VGPU_types = [k100; k100]}, k100), 6L; - ({default_k1 with resident_VGPU_types = [k140q; k140q]}, k140q), 2L; - ({default_k1 with resident_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; - ({default_k2 with resident_VGPU_types = [k200]}, k200), 7L; - ({default_k2 with resident_VGPU_types = [k240q; k240q; k240q]}, k240q), 1L; - ({default_k2 with resident_VGPU_types = [k260q]}, k260q), 1L; - ({default_k2 with resident_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; - (* Test that scheduled vGPUs also affect the capacity calculations. *) - ({default_k1 with scheduled_VGPU_types = [k100]}, k100), 7L; - ({default_k1 with scheduled_VGPU_types = [k100]}, passthrough_gpu), 0L; - ({default_k1 with scheduled_VGPU_types = [passthrough_gpu]}, k100), 0L; - ({default_k1 with scheduled_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; - ({default_k2 with scheduled_VGPU_types = [k200; k200]}, k200), 6L; - ({default_k2 with scheduled_VGPU_types = [k200]}, passthrough_gpu), 0L; - ({default_k2 with scheduled_VGPU_types = [passthrough_gpu]}, k200), 0L; - ({default_k2 with scheduled_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; - (* Test that the capacity calculations work with combinations of scheduled - * and resident VGPUs. *) - ( - {default_k1 with - resident_VGPU_types = [k100; k100]; - scheduled_VGPU_types = [k100]}, - k100 - ), 5L; - ( - {default_k2 with - resident_VGPU_types = [k240q; k240q]; - scheduled_VGPU_types = [k240q]}, - k240q - ), 1L; - ] -end)) + let tests = [ + (* Test that empty PGPUs have the correct capacity for each virtual + * GPU type. *) + (default_k1, k100), 8L; + (default_k1, k140q), 4L; + (default_k1, passthrough_gpu), 1L; + (default_k2, k200), 8L; + (default_k2, k240q), 4L; + (default_k2, k260q), 2L; + (default_k2, passthrough_gpu), 1L; + (* Test that we can't mix VGPU types. 
*) + ({default_k1 with resident_VGPU_types = [passthrough_gpu]}, k100), 0L; + ({default_k1 with resident_VGPU_types = [k100]}, k140q), 0L; + ({default_k2 with resident_VGPU_types = [passthrough_gpu]}, k200), 0L; + ({default_k2 with resident_VGPU_types = [k260q]}, k200), 0L; + (* Test that remaining capacity values in other situations are correct. *) + ({default_k1 with resident_VGPU_types = [k100; k100]}, k100), 6L; + ({default_k1 with resident_VGPU_types = [k140q; k140q]}, k140q), 2L; + ({default_k1 with resident_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; + ({default_k2 with resident_VGPU_types = [k200]}, k200), 7L; + ({default_k2 with resident_VGPU_types = [k240q; k240q; k240q]}, k240q), 1L; + ({default_k2 with resident_VGPU_types = [k260q]}, k260q), 1L; + ({default_k2 with resident_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; + (* Test that scheduled vGPUs also affect the capacity calculations. *) + ({default_k1 with scheduled_VGPU_types = [k100]}, k100), 7L; + ({default_k1 with scheduled_VGPU_types = [k100]}, passthrough_gpu), 0L; + ({default_k1 with scheduled_VGPU_types = [passthrough_gpu]}, k100), 0L; + ({default_k1 with scheduled_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; + ({default_k2 with scheduled_VGPU_types = [k200; k200]}, k200), 6L; + ({default_k2 with scheduled_VGPU_types = [k200]}, passthrough_gpu), 0L; + ({default_k2 with scheduled_VGPU_types = [passthrough_gpu]}, k200), 0L; + ({default_k2 with scheduled_VGPU_types = [passthrough_gpu]}, passthrough_gpu), 0L; + (* Test that the capacity calculations work with combinations of scheduled + * and resident VGPUs. 
*) + ( + {default_k1 with + resident_VGPU_types = [k100; k100]; + scheduled_VGPU_types = [k100]}, + k100 + ), 5L; + ( + {default_k2 with + resident_VGPU_types = [k240q; k240q]; + scheduled_VGPU_types = [k240q]}, + k240q + ), 1L; + ] + end)) let test = - "test_pgpu_helpers" >::: - [ - "test_get_remaining_capacity" >::: GetRemainingCapacity.tests; - ] + "test_pgpu_helpers" >::: + [ + "test_get_remaining_capacity" >::: GetRemainingCapacity.tests; + ] diff --git a/ocaml/test/test_platformdata.ml b/ocaml/test/test_platformdata.ml index 02a3622fb77..0dfa3f4d554 100644 --- a/ocaml/test/test_platformdata.ml +++ b/ocaml/test/test_platformdata.ml @@ -17,145 +17,145 @@ open OUnit open Test_highlevel module SanityCheck = Generic.Make(struct - module Io = struct - type input_t = ((string * string) list * bool * int64 * int64 * bool) - type output_t = (exn, (string * string) list) Either.t + module Io = struct + type input_t = ((string * string) list * bool * int64 * int64 * bool) + type output_t = (exn, (string * string) list) Either.t - let string_of_input_t (platformdata, filter, vcpu_max, vcpu_startup, hvm) = - Printf.sprintf "(platformdata = %s, filter_out_unknowns = %b, vcpu_max = %Ld, + let string_of_input_t (platformdata, filter, vcpu_max, vcpu_startup, hvm) = + Printf.sprintf "(platformdata = %s, filter_out_unknowns = %b, vcpu_max = %Ld, vcpu_at_startup = %Ld, hvm = %b)" - (platformdata |> Test_printers.(assoc_list string string)) - (filter) (vcpu_max) (vcpu_startup) (hvm) + (platformdata |> Test_printers.(assoc_list string string)) + (filter) (vcpu_max) (vcpu_startup) (hvm) - let string_of_output_t = Test_printers.(either exn (assoc_list string string)) - end + let string_of_output_t = Test_printers.(either exn (assoc_list string string)) + end - let transform (platformdata, filter_out_unknowns, vcpu_max, vcpu_at_startup, hvm) = - try Either.Right (Vm_platform.sanity_check ~platformdata ~vcpu_max ~vcpu_at_startup ~hvm ~filter_out_unknowns) - with e -> Either.Left e + 
let transform (platformdata, filter_out_unknowns, vcpu_max, vcpu_at_startup, hvm) = + try Either.Right (Vm_platform.sanity_check ~platformdata ~vcpu_max ~vcpu_at_startup ~hvm ~filter_out_unknowns) + with e -> Either.Left e - let tests = - let usb_defaults = [ - "usb", "true"; - "usb_tablet", "true"; - ] in - [ - (* Check that we can filter out unknown platform flags. *) - (([ - "nonsense", "abc"; - "pae", "true"; - "whatever", "def"; - "viridian", "true"; - ], true, 0L, 0L, false), - Either.Right (usb_defaults @ - [ - "pae", "true"; - "viridian", "true"; - ])); - (* Check that usb and usb_tablet are turned on by default. *) - (([], false, 0L, 0L, false), - Either.Right (usb_defaults)); - (* Check that an invalid tsc_mode gets filtered out. *) - ((["tsc_mode", "17";], false, 0L, 0L, false), - Either.right (usb_defaults)); - (* Check that an invalid parallel port gets filtered out. *) - ((["parallel", "/dev/random"], false, 0L, 0L, false), - Either.Right (usb_defaults)); - (* Check that we can't set usb_tablet to true if usb is false. *) - (([ - "usb", "false"; - "usb_tablet", "true"; - ], false, 0L, 0L, false), - Either.Right ([ - "usb", "false"; - "usb_tablet", "false"; - ])); - (* Check that we can fully disable usb. *) - (([ - "usb", "false"; - "usb_tablet", "false"; - ], false, 0L, 0L, false), - Either.Right ([ - "usb", "false"; - "usb_tablet", "false"; - ])); - (* Check that we can disable the parallel port. *) - ((["parallel", "none"], false, 0L, 0L, false), - Either.Right (usb_defaults @ - ["parallel", "none"])); - (* Check that a set of valid fields is unchanged (apart from - * the ordering, which changes due to the implementation of - * List.update_assoc). 
*) - (([ - "parallel", "/dev/parport2"; - "pae", "true"; - "usb_tablet", "false"; - "tsc_mode", "2"; - "viridian", "true"; - "usb", "true"; - ], false, 0L, 0L, false), - Either.Right ([ - "usb", "true"; - "usb_tablet", "false"; - "parallel", "/dev/parport2"; - "pae", "true"; - "tsc_mode", "2"; - "viridian", "true"; - ])); - (* Check that combination of valid and invalid fields is dealt with - * correctly. *) - (([ - "pae", "true"; - "parallel", "/dev/parport0"; - "tsc_mode", "blah"; - ], false, 0L, 0L, false), - Either.Right (usb_defaults @ - [ - "pae", "true"; - "parallel", "/dev/parport0"; - ])); - (* Check VCPUs configuration - hvm success scenario*) - (([ - "cores-per-socket", "3"; - ], false, 6L, 6L, true), - Either.Right (usb_defaults @ - [ - "cores-per-socket", "3"; - ])); - (* Check VCPUs configuration - pvm success scenario*) - (([ - "cores-per-socket", "3"; - ], false, 0L, 0L, false), - Either.Right (usb_defaults @ - [ - "cores-per-socket", "3"; - ])); - (* Check VCPUs configuration - hvm failure scenario*) - (([ - "cores-per-socket", "3"; - ], false, 6L, 5L, true), - Either.Left (Api_errors.Server_error(Api_errors.invalid_value, - ["platform:cores-per-socket"; - "VCPUs_max/VCPUs_at_startup must be a multiple of this field"]))); - (* Check VCPUs configuration - hvm failure scenario*) - (([ - "cores-per-socket", "4"; - ], false, 6L, 6L, true), - Either.Left (Api_errors.Server_error(Api_errors.invalid_value, - ["platform:cores-per-socket"; - "VCPUs_max/VCPUs_at_startup must be a multiple of this field"]))); - (* Check VCPUs configuration - hvm failure scenario*) - (([ - "cores-per-socket", "abc"; - ], false, 6L, 5L, true), - Either.Left(Api_errors.Server_error(Api_errors.invalid_value, - ["platform:cores-per-socket"; - "value = abc is not a valid int"]))); - ] -end) + let tests = + let usb_defaults = [ + "usb", "true"; + "usb_tablet", "true"; + ] in + [ + (* Check that we can filter out unknown platform flags. 
*) + (([ + "nonsense", "abc"; + "pae", "true"; + "whatever", "def"; + "viridian", "true"; + ], true, 0L, 0L, false), + Either.Right (usb_defaults @ + [ + "pae", "true"; + "viridian", "true"; + ])); + (* Check that usb and usb_tablet are turned on by default. *) + (([], false, 0L, 0L, false), + Either.Right (usb_defaults)); + (* Check that an invalid tsc_mode gets filtered out. *) + ((["tsc_mode", "17";], false, 0L, 0L, false), + Either.right (usb_defaults)); + (* Check that an invalid parallel port gets filtered out. *) + ((["parallel", "/dev/random"], false, 0L, 0L, false), + Either.Right (usb_defaults)); + (* Check that we can't set usb_tablet to true if usb is false. *) + (([ + "usb", "false"; + "usb_tablet", "true"; + ], false, 0L, 0L, false), + Either.Right ([ + "usb", "false"; + "usb_tablet", "false"; + ])); + (* Check that we can fully disable usb. *) + (([ + "usb", "false"; + "usb_tablet", "false"; + ], false, 0L, 0L, false), + Either.Right ([ + "usb", "false"; + "usb_tablet", "false"; + ])); + (* Check that we can disable the parallel port. *) + ((["parallel", "none"], false, 0L, 0L, false), + Either.Right (usb_defaults @ + ["parallel", "none"])); + (* Check that a set of valid fields is unchanged (apart from + * the ordering, which changes due to the implementation of + * List.update_assoc). *) + (([ + "parallel", "/dev/parport2"; + "pae", "true"; + "usb_tablet", "false"; + "tsc_mode", "2"; + "viridian", "true"; + "usb", "true"; + ], false, 0L, 0L, false), + Either.Right ([ + "usb", "true"; + "usb_tablet", "false"; + "parallel", "/dev/parport2"; + "pae", "true"; + "tsc_mode", "2"; + "viridian", "true"; + ])); + (* Check that combination of valid and invalid fields is dealt with + * correctly. 
*) + (([ + "pae", "true"; + "parallel", "/dev/parport0"; + "tsc_mode", "blah"; + ], false, 0L, 0L, false), + Either.Right (usb_defaults @ + [ + "pae", "true"; + "parallel", "/dev/parport0"; + ])); + (* Check VCPUs configuration - hvm success scenario*) + (([ + "cores-per-socket", "3"; + ], false, 6L, 6L, true), + Either.Right (usb_defaults @ + [ + "cores-per-socket", "3"; + ])); + (* Check VCPUs configuration - pvm success scenario*) + (([ + "cores-per-socket", "3"; + ], false, 0L, 0L, false), + Either.Right (usb_defaults @ + [ + "cores-per-socket", "3"; + ])); + (* Check VCPUs configuration - hvm failure scenario*) + (([ + "cores-per-socket", "3"; + ], false, 6L, 5L, true), + Either.Left (Api_errors.Server_error(Api_errors.invalid_value, + ["platform:cores-per-socket"; + "VCPUs_max/VCPUs_at_startup must be a multiple of this field"]))); + (* Check VCPUs configuration - hvm failure scenario*) + (([ + "cores-per-socket", "4"; + ], false, 6L, 6L, true), + Either.Left (Api_errors.Server_error(Api_errors.invalid_value, + ["platform:cores-per-socket"; + "VCPUs_max/VCPUs_at_startup must be a multiple of this field"]))); + (* Check VCPUs configuration - hvm failure scenario*) + (([ + "cores-per-socket", "abc"; + ], false, 6L, 5L, true), + Either.Left(Api_errors.Server_error(Api_errors.invalid_value, + ["platform:cores-per-socket"; + "value = abc is not a valid int"]))); + ] + end) let test = - "platformdata" >::: - [ - "test_platform_sanity_check" >::: SanityCheck.tests - ] + "platformdata" >::: + [ + "test_platform_sanity_check" >::: SanityCheck.tests + ] diff --git a/ocaml/test/test_pool_apply_edition.ml b/ocaml/test/test_pool_apply_edition.ml index a37a981197b..581e019ee80 100644 --- a/ocaml/test/test_pool_apply_edition.ml +++ b/ocaml/test/test_pool_apply_edition.ml @@ -15,73 +15,73 @@ open OUnit let apply_edition_succeed ~__context ~host ~edition = - Db.Host.set_edition ~__context ~self:host ~value:edition + Db.Host.set_edition ~__context ~self:host ~value:edition let 
apply_edition_fail_host_offline ~__context ~host ~edition = - raise (Api_errors.Server_error - (Api_errors.host_offline, [Ref.string_of host])) + raise (Api_errors.Server_error + (Api_errors.host_offline, [Ref.string_of host])) let setup ~__context ~host_count ~edition = - let hosts = ref [] in - for n = 1 to host_count do - hosts := (Test_common.make_host ~__context ~edition ()) :: !hosts - done; - let (_: API.ref_pool) = - Test_common.make_pool ~__context ~master:(List.hd !hosts) () in () + let hosts = ref [] in + for n = 1 to host_count do + hosts := (Test_common.make_host ~__context ~edition ()) :: !hosts + done; + let (_: API.ref_pool) = + Test_common.make_pool ~__context ~master:(List.hd !hosts) () in () (* Test that apply_edition_with_rollback calls apply_fn for each host, * assuming no exceptions are thrown. *) let test_basic_operation () = - let __context = Mock.make_context_with_new_db "test context" in - setup ~__context ~host_count:8 ~edition:"free"; - let hosts = Db.Host.get_all ~__context in - Xapi_pool_license.apply_edition_with_rollback - ~__context ~hosts ~edition:"per-socket" - ~apply_fn:apply_edition_succeed; - List.iter - (fun host -> - let new_edition = Db.Host.get_edition ~__context ~self:host in - assert_equal - ~msg:(Printf.sprintf - "Testing that host %s has had the new license applied" - (Ref.string_of host)) - "per-socket" - new_edition) - hosts + let __context = Mock.make_context_with_new_db "test context" in + setup ~__context ~host_count:8 ~edition:"free"; + let hosts = Db.Host.get_all ~__context in + Xapi_pool_license.apply_edition_with_rollback + ~__context ~hosts ~edition:"per-socket" + ~apply_fn:apply_edition_succeed; + List.iter + (fun host -> + let new_edition = Db.Host.get_edition ~__context ~self:host in + assert_equal + ~msg:(Printf.sprintf + "Testing that host %s has had the new license applied" + (Ref.string_of host)) + "per-socket" + new_edition) + hosts (* Check that if a host is offline, apply_edition_with_rollback rolls 
all hosts * back to the edition they had to start off with. *) let test_rollback_logic () = - let __context = Mock.make_context_with_new_db "test context" in - setup ~__context ~host_count:8 ~edition:"free"; - let hosts = Db.Host.get_all ~__context in - (* Fourth host will fail to apply_edition with HOST_OFFLINE. *) - let offline_host = List.nth hosts 4 in - let apply_fn ~__context ~host ~edition = - if host = offline_host - then apply_edition_fail_host_offline ~__context ~host ~edition - else apply_edition_succeed ~__context ~host ~edition - in - assert_raises ~msg:"Testing that HOST_OFFLINE is successfully propagated" - (Api_errors.Server_error - (Api_errors.host_offline, [Ref.string_of offline_host])) - (fun () -> - Xapi_pool_license.apply_edition_with_rollback - ~__context ~hosts ~edition:"per-socket" ~apply_fn); - List.iter - (fun host -> - let new_edition = Db.Host.get_edition ~__context ~self:host in - assert_equal - ~msg:(Printf.sprintf - "Testing that host %s has been rolled back to free edition" - (Ref.string_of host)) - "free" - new_edition) - hosts + let __context = Mock.make_context_with_new_db "test context" in + setup ~__context ~host_count:8 ~edition:"free"; + let hosts = Db.Host.get_all ~__context in + (* Fourth host will fail to apply_edition with HOST_OFFLINE. 
*) + let offline_host = List.nth hosts 4 in + let apply_fn ~__context ~host ~edition = + if host = offline_host + then apply_edition_fail_host_offline ~__context ~host ~edition + else apply_edition_succeed ~__context ~host ~edition + in + assert_raises ~msg:"Testing that HOST_OFFLINE is successfully propagated" + (Api_errors.Server_error + (Api_errors.host_offline, [Ref.string_of offline_host])) + (fun () -> + Xapi_pool_license.apply_edition_with_rollback + ~__context ~hosts ~edition:"per-socket" ~apply_fn); + List.iter + (fun host -> + let new_edition = Db.Host.get_edition ~__context ~self:host in + assert_equal + ~msg:(Printf.sprintf + "Testing that host %s has been rolled back to free edition" + (Ref.string_of host)) + "free" + new_edition) + hosts let test = - "pool_apply_edition" >::: - [ - "test_basic_operation" >:: test_basic_operation; - "test_rollback_logic" >:: test_rollback_logic; - ] + "pool_apply_edition" >::: + [ + "test_basic_operation" >:: test_basic_operation; + "test_rollback_logic" >:: test_rollback_logic; + ] diff --git a/ocaml/test/test_pool_cpuinfo.ml b/ocaml/test/test_pool_cpuinfo.ml index 29783564cbd..a07d4f085a7 100644 --- a/ocaml/test/test_pool_cpuinfo.ml +++ b/ocaml/test/test_pool_cpuinfo.ml @@ -17,75 +17,75 @@ open Fun open OUnit open Test_highlevel module PoolCpuinfo = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = (string * string) list list - type output_t = (string * string) list + module Io = struct + type input_t = (string * string) list list + type output_t = (string * string) list - let string_of_input_t = Test_printers.(list (assoc_list string string)) - let string_of_output_t = Test_printers.(assoc_list string string) - end - module State = Test_state.XapiDb + let string_of_input_t = Test_printers.(list (assoc_list string string)) + let string_of_output_t = Test_printers.(assoc_list string string) + end + module State = Test_state.XapiDb - (* Create a host for each edition in the list. 
*) - let load_input __context inputs = - List.iter - (fun cpu_info -> - let host = Test_common.make_host ~__context () in - Db.Host.set_cpu_info ~__context ~self:host ~value:cpu_info) - inputs; - ignore (Test_common.make_pool ~__context - ~master:(List.hd (Db.Host.get_all ~__context)) ()); - Create_misc.create_pool_cpuinfo ~__context - + (* Create a host for each edition in the list. *) + let load_input __context inputs = + List.iter + (fun cpu_info -> + let host = Test_common.make_host ~__context () in + Db.Host.set_cpu_info ~__context ~self:host ~value:cpu_info) + inputs; + ignore (Test_common.make_pool ~__context + ~master:(List.hd (Db.Host.get_all ~__context)) ()); + Create_misc.create_pool_cpuinfo ~__context - let extract_output __context _ = - let pool = Helpers.get_pool ~__context in - List.sort compare (Db.Pool.get_cpu_info ~__context ~self:pool) - let cpu_info ~vendor ~cpu_count ~socket_count ~features_hvm ~features_pv = - let cpu_info = - ["vendor", vendor; - "cpu_count", cpu_count; - "socket_count", socket_count; - "features_hvm", features_pv; - "features_pv", features_hvm] in + let extract_output __context _ = + let pool = Helpers.get_pool ~__context in + List.sort compare (Db.Pool.get_cpu_info ~__context ~self:pool) - (* Sort the associaton list so the test framework's comparisons work *) - List.sort compare cpu_info + let cpu_info ~vendor ~cpu_count ~socket_count ~features_hvm ~features_pv = + let cpu_info = + ["vendor", vendor; + "cpu_count", cpu_count; + "socket_count", socket_count; + "features_hvm", features_pv; + "features_pv", features_hvm] in - let tests = [ - ([cpu_info "Abacus" "1" "1" "0000000a" "0000000a"], - cpu_info "Abacus" "1" "1" "0000000a" "0000000a"); + (* Sort the associaton list so the test framework's comparisons work *) + List.sort compare cpu_info - ([cpu_info "Abacus" "2" "4" "0000000a" "0000000a"; - cpu_info "Abacus" "1" "1" "0000000a" "0000000a"], - cpu_info "Abacus" "3" "5" "0000000a" "0000000a"); + let tests = [ + 
([cpu_info "Abacus" "1" "1" "0000000a" "0000000a"], + cpu_info "Abacus" "1" "1" "0000000a" "0000000a"); - ([cpu_info "Abacus" "8" "2" "0000000a" "00000002"; - cpu_info "Abacus" "4" "1" "0000000f" "00000001"], - cpu_info "Abacus" "12" "3" "0000000a" "00000000"); + ([cpu_info "Abacus" "2" "4" "0000000a" "0000000a"; + cpu_info "Abacus" "1" "1" "0000000a" "0000000a"], + cpu_info "Abacus" "3" "5" "0000000a" "0000000a"); - ([cpu_info "Abacus" "24" "1" "ffffffff-ffffffff" "ffffffff-ffffffff"; - cpu_info "Abacus" "24" "24" "ffffffff-ffffffff" "ffffffff-ffffffff"], - cpu_info "Abacus" "48" "25" "ffffffff-ffffffff" "ffffffff-ffffffff"); + ([cpu_info "Abacus" "8" "2" "0000000a" "00000002"; + cpu_info "Abacus" "4" "1" "0000000f" "00000001"], + cpu_info "Abacus" "12" "3" "0000000a" "00000000"); - ([cpu_info "Abacus" "1" "1" "ffffffff" "ffffffff-ffffffff-ffffffff"; - cpu_info "Abacus" "1" "1" "ffffffff-ffffffff" "ffffffff-ffffffff"], - cpu_info "Abacus" "2" "2" "ffffffff-00000000" "ffffffff-ffffffff-00000000"); + ([cpu_info "Abacus" "24" "1" "ffffffff-ffffffff" "ffffffff-ffffffff"; + cpu_info "Abacus" "24" "24" "ffffffff-ffffffff" "ffffffff-ffffffff"], + cpu_info "Abacus" "48" "25" "ffffffff-ffffffff" "ffffffff-ffffffff"); - ([cpu_info "Abacus" "10" "1" "01230123-5a5a5a5a" "00000002"; - cpu_info "Abacus" "1" "10" "ffff1111-a5a56666" "00004242"], - cpu_info "Abacus" "11" "11" "01230101-00004242" "00000002"); + ([cpu_info "Abacus" "1" "1" "ffffffff" "ffffffff-ffffffff-ffffffff"; + cpu_info "Abacus" "1" "1" "ffffffff-ffffffff" "ffffffff-ffffffff"], + cpu_info "Abacus" "2" "2" "ffffffff-00000000" "ffffffff-ffffffff-00000000"); - (* CA-188665: Test a pool containing an old host which doesn't have the new feature keys *) - ([cpu_info "Abacus" "1" "1" "01230123-5a5a5a5a" "00000002"; - ["cpu_count", "1"; "features", "ffff1111-a5a56666"; "socket_count", "1"; "vendor", "Abacus"]], - cpu_info "Abacus" "1" "1" "01230123-5a5a5a5a" "00000002"); - ] -end)) + ([cpu_info "Abacus" "10" "1" 
"01230123-5a5a5a5a" "00000002"; + cpu_info "Abacus" "1" "10" "ffff1111-a5a56666" "00004242"], + cpu_info "Abacus" "11" "11" "01230101-00004242" "00000002"); + + (* CA-188665: Test a pool containing an old host which doesn't have the new feature keys *) + ([cpu_info "Abacus" "1" "1" "01230123-5a5a5a5a" "00000002"; + ["cpu_count", "1"; "features", "ffff1111-a5a56666"; "socket_count", "1"; "vendor", "Abacus"]], + cpu_info "Abacus" "1" "1" "01230123-5a5a5a5a" "00000002"); + ] + end)) let test = - "pool_cpuinfo" >::: - [ - "test_pool_cpuinfo" >::: PoolCpuinfo.tests; - ] + "pool_cpuinfo" >::: + [ + "test_pool_cpuinfo" >::: PoolCpuinfo.tests; + ] diff --git a/ocaml/test/test_pool_db_backup.ml b/ocaml/test/test_pool_db_backup.ml index f8914bc7dd3..10516cf5aee 100644 --- a/ocaml/test/test_pool_db_backup.ml +++ b/ocaml/test/test_pool_db_backup.ml @@ -15,42 +15,42 @@ open OUnit open Test_common -let test_prepare_restore () = - let make_context mac1 mac2 host_uuid dom0_uuid = - let __context = make_test_database () in - let master = List.hd (Db.Host.get_all ~__context) in - Db.Host.set_uuid ~__context ~self:master ~value:host_uuid; - List.iter - (fun self -> Db.VM.set_uuid ~__context ~self ~value:dom0_uuid) - (Db.Host.get_resident_VMs ~__context ~self:master); - let slave = make_host ~__context ~name_label:"slave" () in - let management_net = make_network ~__context ~name_label:"management network" () in - let (_: API.ref_PIF) = make_pif ~__context ~network:management_net ~device:"eth0" ~host:master ~management:true ~mAC:mac1 () in - let (_: API.ref_PIF) = make_pif ~__context ~network:management_net ~device:"eth0" ~host:slave ~management:true ~mAC:mac2 () in - __context in - let my_installation_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in - let my_control_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in - let old_context = make_context "1" "2" my_installation_uuid my_control_uuid in - let new_context = make_context "a" "b" "host-uuid" 
"dom0-uuid" in - Pool_db_backup.prepare_database_for_restore ~old_context ~new_context; - let all_hosts = Db.Host.get_all ~__context:new_context in - (* new_context should have exactly 1 host: the master *) - assert_equal ~msg:"test_prepare_restore: should only be 1 host" (List.length all_hosts) 1; - let master = List.hd all_hosts in - (* new_context master host should have PIF with MAC "a" *) - let pif = List.hd (Db.Host.get_PIFs ~__context:new_context ~self:master) in - let mac = Db.PIF.get_MAC ~__context:new_context ~self:pif in - assert_equal ~msg:"test_prepare_restore: PIF should have MAC a" mac "a"; - (* new_context should have correct master host uuid *) - let host_uuid = Db.Host.get_uuid ~__context:new_context ~self:master in - assert_equal ~msg:"test_prepare_restore: master uuid wrong" host_uuid my_installation_uuid; - (* new_context should have correct master dom0 uuid *) - let dom0 = List.hd (Db.Host.get_resident_VMs ~__context:new_context ~self:master) in - let dom0_uuid = Db.VM.get_uuid ~__context:new_context ~self:dom0 in - assert_equal ~msg:"test_prepare_restore: master dom0 uuid wrong" dom0_uuid my_control_uuid +let test_prepare_restore () = + let make_context mac1 mac2 host_uuid dom0_uuid = + let __context = make_test_database () in + let master = List.hd (Db.Host.get_all ~__context) in + Db.Host.set_uuid ~__context ~self:master ~value:host_uuid; + List.iter + (fun self -> Db.VM.set_uuid ~__context ~self ~value:dom0_uuid) + (Db.Host.get_resident_VMs ~__context ~self:master); + let slave = make_host ~__context ~name_label:"slave" () in + let management_net = make_network ~__context ~name_label:"management network" () in + let (_: API.ref_PIF) = make_pif ~__context ~network:management_net ~device:"eth0" ~host:master ~management:true ~mAC:mac1 () in + let (_: API.ref_PIF) = make_pif ~__context ~network:management_net ~device:"eth0" ~host:slave ~management:true ~mAC:mac2 () in + __context in + let my_installation_uuid = Xapi_inventory.lookup 
Xapi_inventory._installation_uuid in + let my_control_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in + let old_context = make_context "1" "2" my_installation_uuid my_control_uuid in + let new_context = make_context "a" "b" "host-uuid" "dom0-uuid" in + Pool_db_backup.prepare_database_for_restore ~old_context ~new_context; + let all_hosts = Db.Host.get_all ~__context:new_context in + (* new_context should have exactly 1 host: the master *) + assert_equal ~msg:"test_prepare_restore: should only be 1 host" (List.length all_hosts) 1; + let master = List.hd all_hosts in + (* new_context master host should have PIF with MAC "a" *) + let pif = List.hd (Db.Host.get_PIFs ~__context:new_context ~self:master) in + let mac = Db.PIF.get_MAC ~__context:new_context ~self:pif in + assert_equal ~msg:"test_prepare_restore: PIF should have MAC a" mac "a"; + (* new_context should have correct master host uuid *) + let host_uuid = Db.Host.get_uuid ~__context:new_context ~self:master in + assert_equal ~msg:"test_prepare_restore: master uuid wrong" host_uuid my_installation_uuid; + (* new_context should have correct master dom0 uuid *) + let dom0 = List.hd (Db.Host.get_resident_VMs ~__context:new_context ~self:master) in + let dom0_uuid = Db.VM.get_uuid ~__context:new_context ~self:dom0 in + assert_equal ~msg:"test_prepare_restore: master dom0 uuid wrong" dom0_uuid my_control_uuid let test = - "test_db_backup" >::: - [ - "test_prepare_restore" >:: test_prepare_restore; - ] + "test_db_backup" >::: + [ + "test_prepare_restore" >:: test_prepare_restore; + ] diff --git a/ocaml/test/test_pool_license.ml b/ocaml/test/test_pool_license.ml index 3ea48cbfada..b08f4dd471e 100644 --- a/ocaml/test/test_pool_license.ml +++ b/ocaml/test/test_pool_license.ml @@ -18,18 +18,18 @@ open OUnit open Test_highlevel type host_license_state = { - license_params: (string * string) list; - edition: string; + license_params: (string * string) list; + edition: string; } let 
string_of_host_license_state state = - Printf.sprintf "{license_params = %s; edition = %s}" - (Test_printers.(assoc_list string string) state.license_params) - state.edition + Printf.sprintf "{license_params = %s; edition = %s}" + (Test_printers.(assoc_list string string) state.license_params) + state.edition let string_of_date_opt = function - | None -> "None" - | Some date -> Printf.sprintf "Some %s" (Date.to_string date) + | None -> "None" + | Some date -> Printf.sprintf "Some %s" (Date.to_string date) let f2d = Date.of_float let f2d2s = Date.to_string ++ Date.of_float @@ -37,169 +37,169 @@ let f2d2s = Date.to_string ++ Date.of_float let edition_to_int = ["edition1", 1; "edition2", 2; "edition3", 3] module CompareDates = Generic.Make(struct - module Io = struct - type input_t = (Date.iso8601 option * Date.iso8601 option) - type output_t = int + module Io = struct + type input_t = (Date.iso8601 option * Date.iso8601 option) + type output_t = int - let string_of_input_t = - Test_printers.(assoc_pair (option Date.to_string) (option Date.to_string)) + let string_of_input_t = + Test_printers.(assoc_pair (option Date.to_string) (option Date.to_string)) - let string_of_output_t = Test_printers.int - end + let string_of_output_t = Test_printers.int + end - let transform (date1, date2) = Xapi_pool_license.compare_dates date1 date2 + let transform (date1, date2) = Xapi_pool_license.compare_dates date1 date2 - (* Tuples of ((value 1, value 2), expected result from comparing values) *) - let tests = [ - ((None, None), 0); - ((None, Some (f2d 5.0)), 1); - ((Some (f2d 10.0), Some (f2d 5.0)), 1); - ((Some (f2d 15.0), None), -1); - ((Some (f2d 20.0), Some (f2d 30.0)), -1); - ((Some (f2d 150.0), Some (f2d 150.0)), 0); - ] -end) + (* Tuples of ((value 1, value 2), expected result from comparing values) *) + let tests = [ + ((None, None), 0); + ((None, Some (f2d 5.0)), 1); + ((Some (f2d 10.0), Some (f2d 5.0)), 1); + ((Some (f2d 15.0), None), -1); + ((Some (f2d 20.0), Some (f2d 
30.0)), -1); + ((Some (f2d 150.0), Some (f2d 150.0)), 0); + ] + end) module PoolExpiryDate = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = Date.iso8601 option list - type output_t = Date.iso8601 option - - let string_of_input_t = - Test_printers.(list (option Date.to_string)) - - let string_of_output_t = Test_printers.option Date.to_string - end - module State = Test_state.XapiDb - - (* Create a host in the database for each expiry date in the list. *) - let load_input __context expiry_dates = - List.iter - (fun expiry_date -> - let license_params = match expiry_date with - | None -> [] - | Some date -> ["expiry", (Date.to_string date)] - in - let (_: API.ref_host) = Test_common.make_host ~__context ~edition:"edition1" ~license_params () in ()) - expiry_dates - - let extract_output __context _ = - let hosts = Db.Host.get_all ~__context in - snd (Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int) - - (* Tuples of ((host expiry date) list, expected pool expiry date) *) - let tests = [ - ([None; None; Some (f2d 500.0); None], Some (f2d 500.0)); - ([None; None; None; None], None); - ([Some (f2d 100.0)], Some (f2d 100.0)); - ([Some (f2d 300.0); Some (f2d 150.0); Some (f2d 450.0)], Some (f2d 150.0)); - ([None; Some (f2d 650.0); None; Some (f2d 350.0)], Some (f2d 350.0)); - ] -end)) + module Io = struct + type input_t = Date.iso8601 option list + type output_t = Date.iso8601 option + + let string_of_input_t = + Test_printers.(list (option Date.to_string)) + + let string_of_output_t = Test_printers.option Date.to_string + end + module State = Test_state.XapiDb + + (* Create a host in the database for each expiry date in the list. 
*) + let load_input __context expiry_dates = + List.iter + (fun expiry_date -> + let license_params = match expiry_date with + | None -> [] + | Some date -> ["expiry", (Date.to_string date)] + in + let (_: API.ref_host) = Test_common.make_host ~__context ~edition:"edition1" ~license_params () in ()) + expiry_dates + + let extract_output __context _ = + let hosts = Db.Host.get_all ~__context in + snd (Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int) + + (* Tuples of ((host expiry date) list, expected pool expiry date) *) + let tests = [ + ([None; None; Some (f2d 500.0); None], Some (f2d 500.0)); + ([None; None; None; None], None); + ([Some (f2d 100.0)], Some (f2d 100.0)); + ([Some (f2d 300.0); Some (f2d 150.0); Some (f2d 450.0)], Some (f2d 150.0)); + ([None; Some (f2d 650.0); None; Some (f2d 350.0)], Some (f2d 350.0)); + ] + end)) module PoolEdition = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = string list - type output_t = string - - let string_of_input_t = Test_printers.(list string) - let string_of_output_t = Test_printers.string - end - module State = Test_state.XapiDb - - (* Create a host for each edition in the list. 
*) - let load_input __context editions = - List.iter - (fun edition -> - let (_: API.ref_host) = Test_common.make_host ~__context ~edition () in ()) - editions - - let extract_output __context _ = - let hosts = Db.Host.get_all ~__context in - fst (Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int) - - (* Tuples of ((host edition) list, expected pool edition) *) - let tests = [ - (["edition1"], "edition1"); - (["edition1"; "edition2"; "edition1"; "edition3"], "edition1"); - (["edition2"; "edition2"; "edition2"; "edition2"], "edition2"); - (["edition3"; "edition3"; "edition3"], "edition3"); - (["edition2"; "edition2"; "edition1"; "edition1"], "edition1"); - ] -end)) + module Io = struct + type input_t = string list + type output_t = string + + let string_of_input_t = Test_printers.(list string) + let string_of_output_t = Test_printers.string + end + module State = Test_state.XapiDb + + (* Create a host for each edition in the list. *) + let load_input __context editions = + List.iter + (fun edition -> + let (_: API.ref_host) = Test_common.make_host ~__context ~edition () in ()) + editions + + let extract_output __context _ = + let hosts = Db.Host.get_all ~__context in + fst (Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int) + + (* Tuples of ((host edition) list, expected pool edition) *) + let tests = [ + (["edition1"], "edition1"); + (["edition1"; "edition2"; "edition1"; "edition3"], "edition1"); + (["edition2"; "edition2"; "edition2"; "edition2"], "edition2"); + (["edition3"; "edition3"; "edition3"], "edition3"); + (["edition2"; "edition2"; "edition1"; "edition1"], "edition1"); + ] + end)) module PoolLicenseState = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = host_license_state list - type output_t = string * string - - let string_of_input_t = Test_printers.(list string_of_host_license_state) - let string_of_output_t = Test_printers.(pair string string) - end - 
module State = Test_state.XapiDb - - (* For each (license_params, edition) pair, create a host. - * Also create a pool object. *) - let load_input __context hosts = - List.iter - (fun host -> - let (_: API.ref_host) = - Test_common.make_host ~__context - ~edition:host.edition - ~license_params:host.license_params () in ()) - hosts; - let (_: API.ref_pool) = - Test_common.make_pool ~__context - ~master:(List.hd (Db.Host.get_all ~__context)) () in () - - let extract_output __context _ = - let hosts = Db.Host.get_all ~__context in - let pool_edition, expiry = Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int in - let pool_expiry = - match expiry with - | None -> "never" - | Some date -> if date = Date.of_float License_check.never then "never" else Date.to_string date - in - pool_edition, pool_expiry - - (* Tuples of (host_license_state list, expected pool license state) *) - let tests = [ - (* A pool of edition1 hosts, none of which has an expiry date. *) - [ - {license_params = []; edition = "edition1"}; - {license_params = []; edition = "edition1"}; - {license_params = []; edition = "edition1"}; - ], - ("edition1", "never"); - (* A pool of edition2 hosts, of which two have expiry dates. *) - [ - {license_params = []; edition = "edition2"}; - {license_params = ["expiry", f2d2s 500.0]; edition = "edition2"}; - {license_params = ["expiry", f2d2s 350.0]; edition = "edition2"}; - ], - ("edition2", f2d2s 350.0); - (* A pool of edition2 hosts, of which none have expiry dates. *) - [ - {license_params = []; edition = "edition2"}; - {license_params = []; edition = "edition2"}; - {license_params = []; edition = "edition2"}; - ], - ("edition2", "never"); - (* A pool of hosts, some edition2 (with different expiry dates) and some edition1 (no expiry). 
*) - [ - {license_params = ["expiry", f2d2s 5000.0]; edition = "edition2"}; - {license_params = []; edition = "edition1"}; - {license_params = ["expiry", f2d2s 6000.0]; edition = "edition2"}; - ], - ("edition1", "never"); - ] -end)) + module Io = struct + type input_t = host_license_state list + type output_t = string * string + + let string_of_input_t = Test_printers.(list string_of_host_license_state) + let string_of_output_t = Test_printers.(pair string string) + end + module State = Test_state.XapiDb + + (* For each (license_params, edition) pair, create a host. + * Also create a pool object. *) + let load_input __context hosts = + List.iter + (fun host -> + let (_: API.ref_host) = + Test_common.make_host ~__context + ~edition:host.edition + ~license_params:host.license_params () in ()) + hosts; + let (_: API.ref_pool) = + Test_common.make_pool ~__context + ~master:(List.hd (Db.Host.get_all ~__context)) () in () + + let extract_output __context _ = + let hosts = Db.Host.get_all ~__context in + let pool_edition, expiry = Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int in + let pool_expiry = + match expiry with + | None -> "never" + | Some date -> if date = Date.of_float License_check.never then "never" else Date.to_string date + in + pool_edition, pool_expiry + + (* Tuples of (host_license_state list, expected pool license state) *) + let tests = [ + (* A pool of edition1 hosts, none of which has an expiry date. *) + [ + {license_params = []; edition = "edition1"}; + {license_params = []; edition = "edition1"}; + {license_params = []; edition = "edition1"}; + ], + ("edition1", "never"); + (* A pool of edition2 hosts, of which two have expiry dates. 
*) + [ + {license_params = []; edition = "edition2"}; + {license_params = ["expiry", f2d2s 500.0]; edition = "edition2"}; + {license_params = ["expiry", f2d2s 350.0]; edition = "edition2"}; + ], + ("edition2", f2d2s 350.0); + (* A pool of edition2 hosts, of which none have expiry dates. *) + [ + {license_params = []; edition = "edition2"}; + {license_params = []; edition = "edition2"}; + {license_params = []; edition = "edition2"}; + ], + ("edition2", "never"); + (* A pool of hosts, some edition2 (with different expiry dates) and some edition1 (no expiry). *) + [ + {license_params = ["expiry", f2d2s 5000.0]; edition = "edition2"}; + {license_params = []; edition = "edition1"}; + {license_params = ["expiry", f2d2s 6000.0]; edition = "edition2"}; + ], + ("edition1", "never"); + ] + end)) let test = - "pool_license" >::: - [ - "test_compare_dates" >::: CompareDates.tests; - "test_pool_expiry_date" >::: PoolExpiryDate.tests; - "test_pool_edition" >::: PoolEdition.tests; - "test_pool_license_state" >::: PoolLicenseState.tests; - ] + "pool_license" >::: + [ + "test_compare_dates" >::: CompareDates.tests; + "test_pool_expiry_date" >::: PoolExpiryDate.tests; + "test_pool_edition" >::: PoolEdition.tests; + "test_pool_license_state" >::: PoolLicenseState.tests; + ] diff --git a/ocaml/test/test_pool_restore_database.ml b/ocaml/test/test_pool_restore_database.ml index 8bf17a9519b..568f8d9e3e3 100644 --- a/ocaml/test/test_pool_restore_database.ml +++ b/ocaml/test/test_pool_restore_database.ml @@ -43,6 +43,6 @@ let test_reset_vms_on_missing_host () = let test = "pool_restore_database" >::: - [ - "test_reset_vms_on_missing_host" >:: test_reset_vms_on_missing_host; - ] + [ + "test_reset_vms_on_missing_host" >:: test_reset_vms_on_missing_host; + ] diff --git a/ocaml/test/test_pr1510.ml b/ocaml/test/test_pr1510.ml index 9fd49b927e0..118e99aef25 100644 --- a/ocaml/test/test_pr1510.ml +++ b/ocaml/test/test_pr1510.ml @@ -7,108 +7,108 @@ open Network_utils (* Example of using OUnitDiff 
with a String Set *) module StringDiff = struct - type t = string - let compare = String.compare - let pp_printer = Format.pp_print_string - let pp_print_sep = OUnitDiff.pp_comma_separator + type t = string + let compare = String.compare + let pp_printer = Format.pp_print_string + let pp_print_sep = OUnitDiff.pp_comma_separator end module OSSet = OUnitDiff.SetMake(StringDiff) let run_bond_prop_test props c_props c_per_iface = - let props, per_iface_props = - Ovs.make_bond_properties "bond_test" props in + let props, per_iface_props = + Ovs.make_bond_properties "bond_test" props in - let propset = OSSet.of_list props in - let correctset = OSSet.of_list c_props in - OSSet.assert_equal correctset propset ; + let propset = OSSet.of_list props in + let correctset = OSSet.of_list c_props in + OSSet.assert_equal correctset propset ; - let propset = OSSet.of_list per_iface_props in - let correctset = OSSet.of_list c_per_iface in - OSSet.assert_equal correctset propset + let propset = OSSet.of_list per_iface_props in + let correctset = OSSet.of_list c_per_iface in + OSSet.assert_equal correctset propset let test_lacp_timeout_prop arg () = - let props = [ "mode", "lacp" ; "lacp-time", arg ; ] - and correct_props = - [ "lacp=active"; - "bond_mode=balance-tcp"; - Printf.sprintf "other-config:lacp-time=\"%s\"" arg ] - and correct_iface_props = [ ] in + let props = [ "mode", "lacp" ; "lacp-time", arg ; ] + and correct_props = + [ "lacp=active"; + "bond_mode=balance-tcp"; + Printf.sprintf "other-config:lacp-time=\"%s\"" arg ] + and correct_iface_props = [ ] in - run_bond_prop_test props correct_props correct_iface_props + run_bond_prop_test props correct_props correct_iface_props let test_lacp_aggregation_key arg () = - let props, per_iface_props = Ovs.make_bond_properties "bond_test" - [ "mode", "lacp" ; "lacp-aggregation-key", arg ] - and correct_props = [ - "lacp=active"; - "bond_mode=balance-tcp"; - ] - and correct_iface_props = [ - Printf.sprintf 
"other-config:lacp-aggregation-key=\"%s\"" arg ; - ] in + let props, per_iface_props = Ovs.make_bond_properties "bond_test" + [ "mode", "lacp" ; "lacp-aggregation-key", arg ] + and correct_props = [ + "lacp=active"; + "bond_mode=balance-tcp"; + ] + and correct_iface_props = [ + Printf.sprintf "other-config:lacp-aggregation-key=\"%s\"" arg ; + ] in - let propset = OSSet.of_list props in - let correctset = OSSet.of_list correct_props in - OSSet.assert_equal correctset propset ; + let propset = OSSet.of_list props in + let correctset = OSSet.of_list correct_props in + OSSet.assert_equal correctset propset ; - let propset = OSSet.of_list per_iface_props in - let correctset = OSSet.of_list correct_iface_props in - OSSet.assert_equal correctset propset + let propset = OSSet.of_list per_iface_props in + let correctset = OSSet.of_list correct_iface_props in + OSSet.assert_equal correctset propset module OVS_Cli_test = struct - include Ovs.Cli - let vsctl_output = ref [] - let vsctl ?(log=true) args = - vsctl_output := args ; - String.concat " " args + include Ovs.Cli + let vsctl_output = ref [] + let vsctl ?(log=true) args = + vsctl_output := args ; + String.concat " " args end (* XXX TODO write this test *) let test_lacp_aggregation_key_vsctl arg () = skip_if true "Unimplemented" ; - let module Ovs = Ovs.Make(OVS_Cli_test) in - let bond = "bond0" - and ifaces = ["eth0"; "eth1"] - and bridge = "xapi1" - and props = [ "mode", "lacp" ; "lacp-aggregation-key", arg ] - (* other-config:lacp-aggregation-key=42 *) - and answer = "other-config:lacp-aggregation-key=" ^ arg - in - Ovs.create_bond bond ifaces bridge props |> ignore ; - List.iter print_endline !OVS_Cli_test.vsctl_output ; - print_endline answer ; - assert_bool "lacp_aggregation_key is passed to ovs-vsctl command" - (List.exists - (fun s -> (Xstringext.String.(strip isspace s) == answer)) - !OVS_Cli_test.vsctl_output) + let module Ovs = Ovs.Make(OVS_Cli_test) in + let bond = "bond0" + and ifaces = ["eth0"; "eth1"] + 
and bridge = "xapi1" + and props = [ "mode", "lacp" ; "lacp-aggregation-key", arg ] + (* other-config:lacp-aggregation-key=42 *) + and answer = "other-config:lacp-aggregation-key=" ^ arg + in + Ovs.create_bond bond ifaces bridge props |> ignore ; + List.iter print_endline !OVS_Cli_test.vsctl_output ; + print_endline answer ; + assert_bool "lacp_aggregation_key is passed to ovs-vsctl command" + (List.exists + (fun s -> (Xstringext.String.(strip isspace s) == answer)) + !OVS_Cli_test.vsctl_output) (* Test case for bond_create with default lacp-{time,aggregation-key} settings. This should not call ovs-vsctl with unfinished key=value arguments. So we shouldn't have somthing like "other-config:lacp-aggregation-key= ". *) let test_lacp_defaults_bond_create () = - let module Ovs = Ovs.Make(OVS_Cli_test) in - let bond = "bond0" - and ifaces = ["eth0"; "eth1"] - and bridge = "xapi1" - and default_props = Xapi_bond.__test_add_lacp_defaults [ "mode", "lacp" ] - in - Ovs.create_bond bond ifaces bridge default_props |> ignore; - (* should not have any strings which contain lacp-aggregation-key *) - (*assert_bool "no default property for lacp_aggregation_key" - List.exists (fun s -> String.*) - List.iter - (fun arg -> - assert_bool "key=value argument pairs can't have missing values" - (let open Xstringext.String in - arg |> strip isspace |> endswith "=" |> not)) - !OVS_Cli_test.vsctl_output + let module Ovs = Ovs.Make(OVS_Cli_test) in + let bond = "bond0" + and ifaces = ["eth0"; "eth1"] + and bridge = "xapi1" + and default_props = Xapi_bond.__test_add_lacp_defaults [ "mode", "lacp" ] + in + Ovs.create_bond bond ifaces bridge default_props |> ignore; + (* should not have any strings which contain lacp-aggregation-key *) + (*assert_bool "no default property for lacp_aggregation_key" + List.exists (fun s -> String.*) + List.iter + (fun arg -> + assert_bool "key=value argument pairs can't have missing values" + (let open Xstringext.String in + arg |> strip isspace |> endswith "=" 
|> not)) + !OVS_Cli_test.vsctl_output let suite = - "pr1510_suite" >::: - [ - "test_lacp_timeout_prop(slow)" >:: test_lacp_timeout_prop "slow"; - "test_lacp_timeout_prop(fast)" >:: test_lacp_timeout_prop "fast"; - "test_lacp_aggregation_key(42)" >:: test_lacp_aggregation_key "42"; - "test_lacp_aggregation_key_vsctl" >:: test_lacp_aggregation_key_vsctl "42"; - "test_lacp_defaults_bond_create" >:: test_lacp_defaults_bond_create; - ] + "pr1510_suite" >::: + [ + "test_lacp_timeout_prop(slow)" >:: test_lacp_timeout_prop "slow"; + "test_lacp_timeout_prop(fast)" >:: test_lacp_timeout_prop "fast"; + "test_lacp_aggregation_key(42)" >:: test_lacp_aggregation_key "42"; + "test_lacp_aggregation_key_vsctl" >:: test_lacp_aggregation_key_vsctl "42"; + "test_lacp_defaults_bond_create" >:: test_lacp_defaults_bond_create; + ] diff --git a/ocaml/test/test_sm_features.ml b/ocaml/test/test_sm_features.ml index e5b8bd98c07..1664f3f8574 100644 --- a/ocaml/test/test_sm_features.ml +++ b/ocaml/test/test_sm_features.ml @@ -19,221 +19,221 @@ open Test_common open Test_highlevel type sm_object = { - capabilities: string list; - features: (string * int64) list; + capabilities: string list; + features: (string * int64) list; } type sm_data_sequence = { - (* Text feature list we get back as part of sr_get_driver_info. *) - raw: string list; - (* SMAPIv1 driver info. *) - smapiv1_features: Smint.feature list; - (* SMAPIv2 driver info. *) - smapiv2_features: string list; - (* SM object created in the database. *) - sm: sm_object; + (* Text feature list we get back as part of sr_get_driver_info. *) + raw: string list; + (* SMAPIv1 driver info. *) + smapiv1_features: Smint.feature list; + (* SMAPIv2 driver info. *) + smapiv2_features: string list; + (* SM object created in the database. 
*) + sm: sm_object; } let string_of_sm_object sm = - Printf.sprintf "{capabilities = %s; features = %s}" - (Test_printers.(list string) sm.capabilities) - (Test_printers.(list string) - (List.map - (fun (capability, version) -> Printf.sprintf "%s/%Ld" capability version) - sm.features)) + Printf.sprintf "{capabilities = %s; features = %s}" + (Test_printers.(list string) sm.capabilities) + (Test_printers.(list string) + (List.map + (fun (capability, version) -> Printf.sprintf "%s/%Ld" capability version) + sm.features)) let test_sequences = - let open Smint in - [ - (* Test NFS driver features as of Clearwater. *) - { - raw = [ - "SR_PROBE"; - "SR_UPDATE"; - "SR_CACHING"; (* xapi ignores this. *) - "VDI_CREATE"; - "VDI_DELETE"; - "VDI_ATTACH"; - "VDI_DETACH"; - "VDI_UPDATE"; - "VDI_CLONE"; - "VDI_SNAPSHOT"; - "VDI_RESIZE"; - "VDI_GENERATE_CONFIG"; - "VDI_RESET_ON_BOOT/2"; - "ATOMIC_PAUSE"; (* xapi ignores this *) - ]; - smapiv1_features = [ - Sr_probe, 1L; - Sr_update, 1L; - Vdi_create, 1L; - Vdi_delete, 1L; - Vdi_attach, 1L; - Vdi_detach, 1L; - Vdi_update, 1L; - Vdi_clone, 1L; - Vdi_snapshot, 1L; - Vdi_resize, 1L; - Vdi_generate_config, 1L; - Vdi_reset_on_boot, 2L; - ]; - smapiv2_features = [ - "SR_PROBE/1"; - "SR_UPDATE/1"; - "VDI_CREATE/1"; - "VDI_DELETE/1"; - "VDI_ATTACH/1"; - "VDI_DETACH/1"; - "VDI_UPDATE/1"; - "VDI_CLONE/1"; - "VDI_SNAPSHOT/1"; - "VDI_RESIZE/1"; - "VDI_GENERATE_CONFIG/1"; - "VDI_RESET_ON_BOOT/2"; - ]; - sm = { - capabilities = [ - "SR_PROBE"; - "SR_UPDATE"; - "VDI_CREATE"; - "VDI_DELETE"; - "VDI_ATTACH"; - "VDI_DETACH"; - "VDI_UPDATE"; - "VDI_CLONE"; - "VDI_SNAPSHOT"; - "VDI_RESIZE"; - "VDI_GENERATE_CONFIG"; - "VDI_RESET_ON_BOOT"; - ]; - features = [ - "SR_PROBE", 1L; - "SR_UPDATE", 1L; - "VDI_CREATE", 1L; - "VDI_DELETE", 1L; - "VDI_ATTACH", 1L; - "VDI_DETACH", 1L; - "VDI_UPDATE", 1L; - "VDI_CLONE", 1L; - "VDI_SNAPSHOT", 1L; - "VDI_RESIZE", 1L; - "VDI_GENERATE_CONFIG", 1L; - "VDI_RESET_ON_BOOT", 2L; - ]; - }; - }; - (* Test that unknown 
features are discarded. *) - { - raw = ["UNKNOWN_FEATURE"; "UNKNOWN_VERSIONED_FEATURE/3"]; - smapiv1_features = []; - smapiv2_features = []; - sm = { - capabilities = []; - features = []; - }; - }; - (* Test that versioned features are parsed as expected. *) - { - raw = ["SR_PROBE/5"]; - smapiv1_features = [Sr_probe, 5L]; - smapiv2_features = ["SR_PROBE/5"]; - sm = { - capabilities = ["SR_PROBE"]; - features = ["SR_PROBE", 5L]; - }; - }; - (* Test that unversioned features are implicitly parsed as version 1. *) - { - raw = ["VDI_RESIZE"]; - smapiv1_features = [Vdi_resize, 1L]; - smapiv2_features = ["VDI_RESIZE/1"]; - sm = { - capabilities = ["VDI_RESIZE"]; - features = ["VDI_RESIZE", 1L]; - }; - }; - ] + let open Smint in + [ + (* Test NFS driver features as of Clearwater. *) + { + raw = [ + "SR_PROBE"; + "SR_UPDATE"; + "SR_CACHING"; (* xapi ignores this. *) + "VDI_CREATE"; + "VDI_DELETE"; + "VDI_ATTACH"; + "VDI_DETACH"; + "VDI_UPDATE"; + "VDI_CLONE"; + "VDI_SNAPSHOT"; + "VDI_RESIZE"; + "VDI_GENERATE_CONFIG"; + "VDI_RESET_ON_BOOT/2"; + "ATOMIC_PAUSE"; (* xapi ignores this *) + ]; + smapiv1_features = [ + Sr_probe, 1L; + Sr_update, 1L; + Vdi_create, 1L; + Vdi_delete, 1L; + Vdi_attach, 1L; + Vdi_detach, 1L; + Vdi_update, 1L; + Vdi_clone, 1L; + Vdi_snapshot, 1L; + Vdi_resize, 1L; + Vdi_generate_config, 1L; + Vdi_reset_on_boot, 2L; + ]; + smapiv2_features = [ + "SR_PROBE/1"; + "SR_UPDATE/1"; + "VDI_CREATE/1"; + "VDI_DELETE/1"; + "VDI_ATTACH/1"; + "VDI_DETACH/1"; + "VDI_UPDATE/1"; + "VDI_CLONE/1"; + "VDI_SNAPSHOT/1"; + "VDI_RESIZE/1"; + "VDI_GENERATE_CONFIG/1"; + "VDI_RESET_ON_BOOT/2"; + ]; + sm = { + capabilities = [ + "SR_PROBE"; + "SR_UPDATE"; + "VDI_CREATE"; + "VDI_DELETE"; + "VDI_ATTACH"; + "VDI_DETACH"; + "VDI_UPDATE"; + "VDI_CLONE"; + "VDI_SNAPSHOT"; + "VDI_RESIZE"; + "VDI_GENERATE_CONFIG"; + "VDI_RESET_ON_BOOT"; + ]; + features = [ + "SR_PROBE", 1L; + "SR_UPDATE", 1L; + "VDI_CREATE", 1L; + "VDI_DELETE", 1L; + "VDI_ATTACH", 1L; + "VDI_DETACH", 1L; + "VDI_UPDATE", 
1L; + "VDI_CLONE", 1L; + "VDI_SNAPSHOT", 1L; + "VDI_RESIZE", 1L; + "VDI_GENERATE_CONFIG", 1L; + "VDI_RESET_ON_BOOT", 2L; + ]; + }; + }; + (* Test that unknown features are discarded. *) + { + raw = ["UNKNOWN_FEATURE"; "UNKNOWN_VERSIONED_FEATURE/3"]; + smapiv1_features = []; + smapiv2_features = []; + sm = { + capabilities = []; + features = []; + }; + }; + (* Test that versioned features are parsed as expected. *) + { + raw = ["SR_PROBE/5"]; + smapiv1_features = [Sr_probe, 5L]; + smapiv2_features = ["SR_PROBE/5"]; + sm = { + capabilities = ["SR_PROBE"]; + features = ["SR_PROBE", 5L]; + }; + }; + (* Test that unversioned features are implicitly parsed as version 1. *) + { + raw = ["VDI_RESIZE"]; + smapiv1_features = [Vdi_resize, 1L]; + smapiv2_features = ["VDI_RESIZE/1"]; + sm = { + capabilities = ["VDI_RESIZE"]; + features = ["VDI_RESIZE", 1L]; + }; + }; + ] module ParseSMAPIv1Features = Generic.Make(struct - module Io = struct - type input_t = string list - type output_t = Smint.feature list + module Io = struct + type input_t = string list + type output_t = Smint.feature list - let string_of_input_t = Test_printers.(list string) - let string_of_output_t = Test_printers.(list Smint.string_of_feature) - end + let string_of_input_t = Test_printers.(list string) + let string_of_output_t = Test_printers.(list Smint.string_of_feature) + end - let transform = Smint.parse_capability_int64_features + let transform = Smint.parse_capability_int64_features - let tests = - List.map - (fun sequence -> (sequence.raw, sequence.smapiv1_features)) - test_sequences -end) + let tests = + List.map + (fun sequence -> (sequence.raw, sequence.smapiv1_features)) + test_sequences + end) module CreateSMAPIv2Features = Generic.Make(struct - module Io = struct - type input_t = Smint.feature list - type output_t = string list + module Io = struct + type input_t = Smint.feature list + type output_t = string list - let string_of_input_t = Test_printers.(list Smint.string_of_feature) - let 
string_of_output_t = Test_printers.(list string) - end + let string_of_input_t = Test_printers.(list Smint.string_of_feature) + let string_of_output_t = Test_printers.(list string) + end - let transform = List.map Smint.string_of_feature + let transform = List.map Smint.string_of_feature - let tests = - List.map - (fun sequence -> (sequence.smapiv1_features, sequence.smapiv2_features)) - test_sequences -end) + let tests = + List.map + (fun sequence -> (sequence.smapiv1_features, sequence.smapiv2_features)) + test_sequences + end) let test_sm_name_label = "__test_sm" module CreateSMObject = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = string list - type output_t = sm_object - - let string_of_input_t = Test_printers.(list string) - let string_of_output_t = string_of_sm_object - end - - module State = Test_state.XapiDb - - let load_input __context features = - Xapi_sm.create_from_query_result ~__context { - Storage_interface.driver = ""; - name = test_sm_name_label; - description = ""; - vendor = ""; - copyright = ""; - version = ""; - required_api_version = ""; - features = features; - configuration = []; - required_cluster_stack = []; - } - - let extract_output __context _ = - let sm = - List.nth (Db.SM.get_by_name_label ~__context ~label:test_sm_name_label) 0 - in - { - capabilities = Db.SM.get_capabilities ~__context ~self:sm; - features = Db.SM.get_features ~__context ~self:sm; - } - - let tests = - List.map - (fun sequence -> (sequence.smapiv2_features, sequence.sm)) - test_sequences -end)) + module Io = struct + type input_t = string list + type output_t = sm_object + + let string_of_input_t = Test_printers.(list string) + let string_of_output_t = string_of_sm_object + end + + module State = Test_state.XapiDb + + let load_input __context features = + Xapi_sm.create_from_query_result ~__context { + Storage_interface.driver = ""; + name = test_sm_name_label; + description = ""; + vendor = ""; + copyright = ""; + version = ""; 
+ required_api_version = ""; + features = features; + configuration = []; + required_cluster_stack = []; + } + + let extract_output __context _ = + let sm = + List.nth (Db.SM.get_by_name_label ~__context ~label:test_sm_name_label) 0 + in + { + capabilities = Db.SM.get_capabilities ~__context ~self:sm; + features = Db.SM.get_features ~__context ~self:sm; + } + + let tests = + List.map + (fun sequence -> (sequence.smapiv2_features, sequence.sm)) + test_sequences + end)) let test = - "test_sm_features" >::: - [ - "test_parse_smapiv1_features" >::: ParseSMAPIv1Features.tests; - "test_create_smapiv2_features" >::: CreateSMAPIv2Features.tests; - "test_create_sm_object" >::: CreateSMObject.tests; - ] + "test_sm_features" >::: + [ + "test_parse_smapiv1_features" >::: ParseSMAPIv1Features.tests; + "test_create_smapiv2_features" >::: CreateSMAPIv2Features.tests; + "test_create_sm_object" >::: CreateSMObject.tests; + ] diff --git a/ocaml/test/test_state.ml b/ocaml/test/test_state.ml index 162da1111b4..94ed00da69a 100644 --- a/ocaml/test/test_state.ml +++ b/ocaml/test/test_state.ml @@ -15,6 +15,6 @@ open Test_highlevel module XapiDb : Generic.STATE with type state_t = Context.t = struct - type state_t = Context.t - let create_default_state () = Mock.make_context_with_new_db "test context" + type state_t = Context.t + let create_default_state () = Mock.make_context_with_new_db "test context" end diff --git a/ocaml/test/test_storage_migrate_state.ml b/ocaml/test/test_storage_migrate_state.ml index 4d8e0cf06a4..8d30ae2188a 100644 --- a/ocaml/test/test_storage_migrate_state.ml +++ b/ocaml/test/test_storage_migrate_state.ml @@ -17,98 +17,98 @@ open OUnit open Test_highlevel module StorageMigrateState = struct - type state_t = unit + type state_t = unit - let create_default_state () = Storage_migrate.State.clear () + let create_default_state () = Storage_migrate.State.clear () end let sample_send_state = Storage_migrate.State.Send_state.({ - url = "url"; - dest_sr = "dest_sr"; - 
remote_dp = "remote_dp"; - local_dp = "local_dp"; - mirror_vdi = "mirror_vdi"; - remote_url = "remote_url"; - tapdev = Tapctl.tapdev_of_rpc - (Rpc.Dict ["minor", Rpc.Int 0L; "tapdisk_pid", Rpc.Int 0L]); - failed = false; - watchdog = None; -}) + url = "url"; + dest_sr = "dest_sr"; + remote_dp = "remote_dp"; + local_dp = "local_dp"; + mirror_vdi = "mirror_vdi"; + remote_url = "remote_url"; + tapdev = Tapctl.tapdev_of_rpc + (Rpc.Dict ["minor", Rpc.Int 0L; "tapdisk_pid", Rpc.Int 0L]); + failed = false; + watchdog = None; + }) let sample_receive_state = Storage_migrate.State.Receive_state.({ - sr = "my_sr"; - dummy_vdi = "dummy_vdi"; - leaf_vdi = "leaf_vdi"; - leaf_dp = "leaf_dp"; - parent_vdi = "parent_vdi"; - remote_vdi = "remote_vdi"; -}) + sr = "my_sr"; + dummy_vdi = "dummy_vdi"; + leaf_vdi = "leaf_vdi"; + leaf_dp = "leaf_dp"; + parent_vdi = "parent_vdi"; + remote_vdi = "remote_vdi"; + }) let sample_copy_state = Storage_migrate.State.Copy_state.({ - base_dp = "base_dp"; - leaf_dp = "leaf_dp"; - remote_dp = "remote_dp"; - dest_sr = "dest_sr"; - copy_vdi = "copy_vdi"; - remote_url = "remote_url"; -}) + base_dp = "base_dp"; + leaf_dp = "leaf_dp"; + remote_dp = "remote_dp"; + dest_sr = "dest_sr"; + copy_vdi = "copy_vdi"; + remote_url = "remote_url"; + }) module MapOf = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - open Storage_migrate.State - - type input_t = - (string * osend operation) option * - (string * orecv operation) option * - (string * ocopy operation) option - type output_t = - (string * Send_state.t) list * - (string * Receive_state.t) list * - (string * Copy_state.t) list - - let string_of_input_t _ = "" - let string_of_output_t _ = "" - end - - module State = StorageMigrateState - - open Storage_migrate.State - - let load_input () (send, recv, copy) = - Opt.iter (fun (id, send) -> add id send) send; - Opt.iter (fun (id, recv) -> add id recv) recv; - Opt.iter (fun (id, copy) -> add id copy) copy - - let extract_output () _ = map_of () 
- - let tests = [ - (* Test that operations don't appear from nowhere. *) - (None, None, None), - ([], [], []); - (* Test that any of the single operations get persisted. *) - (Some ("foo", Send_op sample_send_state), None, None), - (["foo", sample_send_state], [], []); - (None, Some ("bar", Recv_op sample_receive_state), None), - ([], ["bar", sample_receive_state], []); - (None, None, Some ("baz", Copy_op sample_copy_state)), - ([], [], ["baz", sample_copy_state]); - ] -end)) + module Io = struct + open Storage_migrate.State + + type input_t = + (string * osend operation) option * + (string * orecv operation) option * + (string * ocopy operation) option + type output_t = + (string * Send_state.t) list * + (string * Receive_state.t) list * + (string * Copy_state.t) list + + let string_of_input_t _ = "" + let string_of_output_t _ = "" + end + + module State = StorageMigrateState + + open Storage_migrate.State + + let load_input () (send, recv, copy) = + Opt.iter (fun (id, send) -> add id send) send; + Opt.iter (fun (id, recv) -> add id recv) recv; + Opt.iter (fun (id, copy) -> add id copy) copy + + let extract_output () _ = map_of () + + let tests = [ + (* Test that operations don't appear from nowhere. *) + (None, None, None), + ([], [], []); + (* Test that any of the single operations get persisted. 
*) + (Some ("foo", Send_op sample_send_state), None, None), + (["foo", sample_send_state], [], []); + (None, Some ("bar", Recv_op sample_receive_state), None), + ([], ["bar", sample_receive_state], []); + (None, None, Some ("baz", Copy_op sample_copy_state)), + ([], [], ["baz", sample_copy_state]); + ] + end)) let test_clear () = - let open Storage_migrate.State in - clear (); - add "foo" (Send_op sample_send_state); - add "bar" (Recv_op sample_receive_state); - add "baz" (Copy_op sample_copy_state); - clear (); - let state = map_of () in - assert_equal ~msg:"State was not empty after clearing" state ([], [], []) + let open Storage_migrate.State in + clear (); + add "foo" (Send_op sample_send_state); + add "bar" (Recv_op sample_receive_state); + add "baz" (Copy_op sample_copy_state); + clear (); + let state = map_of () in + assert_equal ~msg:"State was not empty after clearing" state ([], [], []) let test = - Storage_migrate.State.persist_root := Test_common.working_area; - "test_storage_migrate_state" >::: - [ - "test_map_of" >::: MapOf.tests; - "test_clear" >:: test_clear; - ] + Storage_migrate.State.persist_root := Test_common.working_area; + "test_storage_migrate_state" >::: + [ + "test_map_of" >::: MapOf.tests; + "test_clear" >:: test_clear; + ] diff --git a/ocaml/test/test_vdi_allowed_operations.ml b/ocaml/test/test_vdi_allowed_operations.ml index dd038ddc446..f023712a60b 100644 --- a/ocaml/test/test_vdi_allowed_operations.ml +++ b/ocaml/test/test_vdi_allowed_operations.ml @@ -18,157 +18,157 @@ open Test_common (* Helpers for testing Xapi_vdi.check_operation_error *) let setup_test ~__context ~vdi_fun = - let _sm_ref = make_sm ~__context () in - let sr_ref = make_sr ~__context () in - let (_: API.ref_PBD) = make_pbd ~__context ~sR:sr_ref () in - let vdi_ref = make_vdi ~__context ~sR:sr_ref () in - vdi_fun vdi_ref; - let vdi_record = Db.VDI.get_record_internal ~__context ~self:vdi_ref in - vdi_ref, vdi_record + let _sm_ref = make_sm ~__context () in + let 
sr_ref = make_sr ~__context () in + let (_: API.ref_PBD) = make_pbd ~__context ~sR:sr_ref () in + let vdi_ref = make_vdi ~__context ~sR:sr_ref () in + vdi_fun vdi_ref; + let vdi_record = Db.VDI.get_record_internal ~__context ~self:vdi_ref in + vdi_ref, vdi_record let my_cmp a b = match a,b with - | Some aa, Some bb -> fst aa = fst bb - | None, None -> a = b - | _ -> false + | Some aa, Some bb -> fst aa = fst bb + | None, None -> a = b + | _ -> false let string_of_api_exn_opt = function - | None -> "None" - | Some (code, args) -> - Printf.sprintf "Some (%s, [%s])" code (String.concat "; " args) + | None -> "None" + | Some (code, args) -> + Printf.sprintf "Some (%s, [%s])" code (String.concat "; " args) let run_assert_equal_with_vdi ~__context ?(cmp = my_cmp) ?(ha_enabled=false) ~vdi_fun op exc = - let vdi_ref, vdi_record = setup_test ~__context ~vdi_fun in - assert_equal - ~cmp - ~printer:string_of_api_exn_opt - exc (Xapi_vdi.check_operation_error ~__context ha_enabled vdi_record vdi_ref op) + let vdi_ref, vdi_record = setup_test ~__context ~vdi_fun in + assert_equal + ~cmp + ~printer:string_of_api_exn_opt + exc (Xapi_vdi.check_operation_error ~__context ha_enabled vdi_record vdi_ref op) (* This is to test Xapi_vdi.check_operation_error against CA-98944 code. 
This DO NOT fully test the aforementionned function *) let test_ca98944 () = - let __context = Mock.make_context_with_new_db "Mock context" in - (* Should raise vdi_in_use *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - make_vbd ~vDI:vdi_ref ~__context - ~reserved:true ~currently_attached:false ~current_operations:["", `attach] ()) - `update (Some (Api_errors.vdi_in_use, [])); - - (* Should raise vdi_in_use *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - make_vbd ~vDI:vdi_ref - ~__context ~reserved:false ~currently_attached:true ~current_operations:["", `attach] ()) - `update (Some (Api_errors.vdi_in_use, [])); - - (* Should raise vdi_in_use *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref - ~__context ~reserved:true ~currently_attached:true ~current_operations:["", `attach] ()) - `update (Some (Api_errors.vdi_in_use, [])); - - (* Should raise other_operation_in_progress *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref - ~__context ~reserved:false ~currently_attached:false ~current_operations:["", `attach] ()) - `update (Some (Api_errors.other_operation_in_progress, [])); - - (* Should pass *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref - ~__context ~reserved:false ~currently_attached:false ~current_operations:[] ()) - `forget None + let __context = Mock.make_context_with_new_db "Mock context" in + (* Should raise vdi_in_use *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + make_vbd ~vDI:vdi_ref ~__context + ~reserved:true ~currently_attached:false ~current_operations:["", `attach] ()) + `update (Some (Api_errors.vdi_in_use, [])); + + (* Should raise vdi_in_use *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + make_vbd ~vDI:vdi_ref + ~__context ~reserved:false ~currently_attached:true ~current_operations:["", `attach] ()) + `update (Some 
(Api_errors.vdi_in_use, [])); + + (* Should raise vdi_in_use *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref + ~__context ~reserved:true ~currently_attached:true ~current_operations:["", `attach] ()) + `update (Some (Api_errors.vdi_in_use, [])); + + (* Should raise other_operation_in_progress *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref + ~__context ~reserved:false ~currently_attached:false ~current_operations:["", `attach] ()) + `update (Some (Api_errors.other_operation_in_progress, [])); + + (* Should pass *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> make_vbd ~vDI:vdi_ref + ~__context ~reserved:false ~currently_attached:false ~current_operations:[] ()) + `forget None (* VDI.copy should be allowed if all attached VBDs are read-only. *) let test_ca101669 () = - let __context = Mock.make_context_with_new_db "Mock context" in - - (* Attempting to copy a RW-attached VDI should fail with VDI_IN_USE. *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RW ()) - `copy (Some (Api_errors.vdi_in_use, [])); - - (* Attempting to copy a RO-attached VDI should pass. *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO ()) - `copy None; - - (* Attempting to copy an unattached VDI should pass. *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> ()) - `copy None; - - (* Attempting to copy RW- and RO-attached VDIs should fail with VDI_IN_USE. 
*) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RW () in - make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO ()) - `copy (Some (Api_errors.vdi_in_use, [])) + let __context = Mock.make_context_with_new_db "Mock context" in + + (* Attempting to copy a RW-attached VDI should fail with VDI_IN_USE. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RW ()) + `copy (Some (Api_errors.vdi_in_use, [])); + + (* Attempting to copy a RO-attached VDI should pass. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO ()) + `copy None; + + (* Attempting to copy an unattached VDI should pass. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> ()) + `copy None; + + (* Attempting to copy RW- and RO-attached VDIs should fail with VDI_IN_USE. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RW () in + make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO ()) + `copy (Some (Api_errors.vdi_in_use, [])) let test_ca125187 () = - let __context = Test_common.make_test_database () in - - (* A VDI being copied can be copied again concurrently. *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in - Db.VDI.set_current_operations ~__context - ~self:vdi_ref - ~value:["mytask", `copy]) - `copy None; - - (* A VBD can be plugged to a VDI which is being copied. This is required as - * the VBD is plugged after the VDI is marked with the copy operation. 
*) - let _, _ = setup_test ~__context - ~vdi_fun:(fun vdi_ref -> - let host_ref = Helpers.get_localhost ~__context in - let vm_ref = Db.Host.get_control_domain ~__context ~self:host_ref in - let vbd_ref = Ref.make () in - let (_: API.ref_VBD) = make_vbd ~__context - ~ref:vbd_ref - ~vDI:vdi_ref - ~vM:vm_ref - ~currently_attached:false - ~mode:`RO () in - Db.VDI.set_current_operations ~__context - ~self:vdi_ref - ~value:["mytask", `copy]; - Db.VDI.set_managed ~__context - ~self:vdi_ref - ~value:true; - Xapi_vbd_helpers.assert_operation_valid ~__context - ~self:vbd_ref - ~op:`plug) - in () + let __context = Test_common.make_test_database () in + + (* A VDI being copied can be copied again concurrently. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in + Db.VDI.set_current_operations ~__context + ~self:vdi_ref + ~value:["mytask", `copy]) + `copy None; + + (* A VBD can be plugged to a VDI which is being copied. This is required as + * the VBD is plugged after the VDI is marked with the copy operation. *) + let _, _ = setup_test ~__context + ~vdi_fun:(fun vdi_ref -> + let host_ref = Helpers.get_localhost ~__context in + let vm_ref = Db.Host.get_control_domain ~__context ~self:host_ref in + let vbd_ref = Ref.make () in + let (_: API.ref_VBD) = make_vbd ~__context + ~ref:vbd_ref + ~vDI:vdi_ref + ~vM:vm_ref + ~currently_attached:false + ~mode:`RO () in + Db.VDI.set_current_operations ~__context + ~self:vdi_ref + ~value:["mytask", `copy]; + Db.VDI.set_managed ~__context + ~self:vdi_ref + ~value:true; + Xapi_vbd_helpers.assert_operation_valid ~__context + ~self:vbd_ref + ~op:`plug) + in () let test_ca126097 () = - let __context = Mock.make_context_with_new_db "Mock context" in - - (* Attempting to clone a VDI being copied should fail with VDI_IN_USE. 
*) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in - Db.VDI.set_current_operations ~__context - ~self:vdi_ref - ~value:["mytask", `copy]) - `clone None; - - (* Attempting to snapshot a VDI being copied should be allowed. *) - run_assert_equal_with_vdi ~__context - ~vdi_fun:(fun vdi_ref -> - let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in - Db.VDI.set_current_operations ~__context - ~self:vdi_ref - ~value:["mytask", `copy]) - `snapshot (Some (Api_errors.operation_not_allowed, [])) + let __context = Mock.make_context_with_new_db "Mock context" in + + (* Attempting to clone a VDI being copied should fail with VDI_IN_USE. *) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in + Db.VDI.set_current_operations ~__context + ~self:vdi_ref + ~value:["mytask", `copy]) + `clone None; + + (* Attempting to snapshot a VDI being copied should be allowed. 
*) + run_assert_equal_with_vdi ~__context + ~vdi_fun:(fun vdi_ref -> + let (_: API.ref_VBD) = make_vbd ~__context ~vDI:vdi_ref ~currently_attached:true ~mode:`RO () in + Db.VDI.set_current_operations ~__context + ~self:vdi_ref + ~value:["mytask", `copy]) + `snapshot (Some (Api_errors.operation_not_allowed, [])) let test = - "test_vdi_allowed_operations" >::: - [ - "test_ca98944" >:: test_ca98944; - "test_ca101669" >:: test_ca101669; - "test_ca125187" >:: test_ca125187; - "test_ca126097" >:: test_ca126097; - ] + "test_vdi_allowed_operations" >::: + [ + "test_ca98944" >:: test_ca98944; + "test_ca101669" >:: test_ca101669; + "test_ca125187" >:: test_ca125187; + "test_ca126097" >:: test_ca126097; + ] diff --git a/ocaml/test/test_vgpu_common.ml b/ocaml/test/test_vgpu_common.ml index 19212d9c2b9..f953d8ff7d6 100644 --- a/ocaml/test/test_vgpu_common.ml +++ b/ocaml/test/test_vgpu_common.ml @@ -15,235 +15,235 @@ open Xapi_vgpu_type let k100 = { - vendor_name = "NVIDIA Corporation"; - model_name = "GRID K100"; - framebuffer_size = 268435456L; - max_heads = 2L; - max_resolution_x = 1920L; - max_resolution_y = 1200L; - size = Int64.div Constants.pgpu_default_size 8L; - internal_config = [ - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k100.conf" - ]; - identifier = Identifier.(Nvidia { - pdev_id = 0x0ff2; - psubdev_id = None; - vdev_id = 0x0fe7; - vsubdev_id = 0x101e; - }); - experimental = false; + vendor_name = "NVIDIA Corporation"; + model_name = "GRID K100"; + framebuffer_size = 268435456L; + max_heads = 2L; + max_resolution_x = 1920L; + max_resolution_y = 1200L; + size = Int64.div Constants.pgpu_default_size 8L; + internal_config = [ + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k100.conf" + ]; + identifier = Identifier.(Nvidia { + pdev_id = 0x0ff2; + psubdev_id = None; + vdev_id = 0x0fe7; + vsubdev_id = 0x101e; + }); + experimental = false; } let k140q = { - vendor_name = "NVIDIA Corporation"; - model_name = "GRID K140Q"; - framebuffer_size = 
1006632960L; - max_heads = 2L; - max_resolution_x = 2560L; - max_resolution_y = 1600L; - size = Int64.div Constants.pgpu_default_size 4L; - internal_config = [ - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k140q.conf" - ]; - identifier = Identifier.(Nvidia { - pdev_id = 0x0ff2; - psubdev_id = None; - vdev_id = 0x0ff7; - vsubdev_id = 0x1037; - }); - experimental = false; + vendor_name = "NVIDIA Corporation"; + model_name = "GRID K140Q"; + framebuffer_size = 1006632960L; + max_heads = 2L; + max_resolution_x = 2560L; + max_resolution_y = 1600L; + size = Int64.div Constants.pgpu_default_size 4L; + internal_config = [ + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k140q.conf" + ]; + identifier = Identifier.(Nvidia { + pdev_id = 0x0ff2; + psubdev_id = None; + vdev_id = 0x0ff7; + vsubdev_id = 0x1037; + }); + experimental = false; } let k200 = { - vendor_name = "NVIDIA Corporation"; - model_name = "GRID K200"; - framebuffer_size = 268435456L; - max_heads = 2L; - max_resolution_x = 1920L; - max_resolution_y = 1200L; - size = Int64.div Constants.pgpu_default_size 8L; - internal_config = [ - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k200.conf" - ]; - identifier = Identifier.(Nvidia { - pdev_id = 0x11bf; - psubdev_id = None; - vdev_id = 0x118d; - vsubdev_id = 0x101d; - }); - experimental = false; + vendor_name = "NVIDIA Corporation"; + model_name = "GRID K200"; + framebuffer_size = 268435456L; + max_heads = 2L; + max_resolution_x = 1920L; + max_resolution_y = 1200L; + size = Int64.div Constants.pgpu_default_size 8L; + internal_config = [ + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k200.conf" + ]; + identifier = Identifier.(Nvidia { + pdev_id = 0x11bf; + psubdev_id = None; + vdev_id = 0x118d; + vsubdev_id = 0x101d; + }); + experimental = false; } let k240q = { - vendor_name = "NVIDIA Corporation"; - model_name = "GRID K240Q"; - framebuffer_size = 1006632960L; - max_heads = 2L; - max_resolution_x = 2560L; - max_resolution_y = 
1600L; - size = Int64.div Constants.pgpu_default_size 4L; - internal_config = [ - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k240q.conf" - ]; - identifier = Identifier.(Nvidia { - pdev_id = 0x11bf; - psubdev_id = None; - vdev_id = 0x11b0; - vsubdev_id = 0x101a; - }); - experimental = false; + vendor_name = "NVIDIA Corporation"; + model_name = "GRID K240Q"; + framebuffer_size = 1006632960L; + max_heads = 2L; + max_resolution_x = 2560L; + max_resolution_y = 1600L; + size = Int64.div Constants.pgpu_default_size 4L; + internal_config = [ + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k240q.conf" + ]; + identifier = Identifier.(Nvidia { + pdev_id = 0x11bf; + psubdev_id = None; + vdev_id = 0x11b0; + vsubdev_id = 0x101a; + }); + experimental = false; } let k260q = { - vendor_name = "NVIDIA Corporation"; - model_name = "GRID K260Q"; - framebuffer_size = 2013265920L; - max_heads = 4L; - max_resolution_x = 2560L; - max_resolution_y = 1600L; - size = Int64.div Constants.pgpu_default_size 2L; - internal_config = [ - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k260q.conf" - ]; - identifier = Identifier.(Nvidia { - pdev_id = 0x11bf; - psubdev_id = None; - vdev_id = 0x11b0; - vsubdev_id = 0x101b; - }); - experimental = false; + vendor_name = "NVIDIA Corporation"; + model_name = "GRID K260Q"; + framebuffer_size = 2013265920L; + max_heads = 4L; + max_resolution_x = 2560L; + max_resolution_y = 1600L; + size = Int64.div Constants.pgpu_default_size 2L; + internal_config = [ + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k260q.conf" + ]; + identifier = Identifier.(Nvidia { + pdev_id = 0x11bf; + psubdev_id = None; + vdev_id = 0x11b0; + vsubdev_id = 0x101b; + }); + experimental = false; } let k1_vgpu_types = [ - k100; - k140q; - passthrough_gpu; + k100; + k140q; + passthrough_gpu; ] let k2_vgpu_types = [ - k200; - k240q; - k260q; - passthrough_gpu; + k200; + k240q; + k260q; + passthrough_gpu; ] let gvt_g_041a = { - vendor_name = "Intel 
Corporation"; - model_name = "Intel GVT-g"; - framebuffer_size = 134217728L; - max_heads = 1L; - max_resolution_x = 1920L; - max_resolution_y = 1080L; - size = Int64.div Constants.pgpu_default_size 7L; - internal_config = [ - Xapi_globs.vgt_low_gm_sz, "128"; - Xapi_globs.vgt_high_gm_sz, "384"; - Xapi_globs.vgt_fence_sz, "4"; - Xapi_globs.vgt_monitor_config_file, "/etc/gvt-g-monitor.conf"; - ]; - identifier = Identifier.(GVT_g { - pdev_id = 0x041a; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/etc/gvt-g-monitor.conf"; - }); - experimental = false; + vendor_name = "Intel Corporation"; + model_name = "Intel GVT-g"; + framebuffer_size = 134217728L; + max_heads = 1L; + max_resolution_x = 1920L; + max_resolution_y = 1080L; + size = Int64.div Constants.pgpu_default_size 7L; + internal_config = [ + Xapi_globs.vgt_low_gm_sz, "128"; + Xapi_globs.vgt_high_gm_sz, "384"; + Xapi_globs.vgt_fence_sz, "4"; + Xapi_globs.vgt_monitor_config_file, "/etc/gvt-g-monitor.conf"; + ]; + identifier = Identifier.(GVT_g { + pdev_id = 0x041a; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/etc/gvt-g-monitor.conf"; + }); + experimental = false; } let intel_041a_vgpu_types = [ - gvt_g_041a; - passthrough_gpu; + gvt_g_041a; + passthrough_gpu; ] (* Represents the state of a PGPU, its supported and enabled VGPU types, and * the types of the VGPUs running and scheduled to run on it. 
*) type pgpu_state = { - supported_VGPU_types: vgpu_type list; - enabled_VGPU_types: vgpu_type list; - resident_VGPU_types: vgpu_type list; - scheduled_VGPU_types: vgpu_type list; + supported_VGPU_types: vgpu_type list; + enabled_VGPU_types: vgpu_type list; + resident_VGPU_types: vgpu_type list; + scheduled_VGPU_types: vgpu_type list; } let default_k1 = { - supported_VGPU_types = k1_vgpu_types; - enabled_VGPU_types = k1_vgpu_types; - resident_VGPU_types = []; - scheduled_VGPU_types = []; + supported_VGPU_types = k1_vgpu_types; + enabled_VGPU_types = k1_vgpu_types; + resident_VGPU_types = []; + scheduled_VGPU_types = []; } let default_k2 = { - supported_VGPU_types = k2_vgpu_types; - enabled_VGPU_types = k2_vgpu_types; - resident_VGPU_types = []; - scheduled_VGPU_types = []; + supported_VGPU_types = k2_vgpu_types; + enabled_VGPU_types = k2_vgpu_types; + resident_VGPU_types = []; + scheduled_VGPU_types = []; } let default_intel_041a = { - supported_VGPU_types = intel_041a_vgpu_types; - enabled_VGPU_types = intel_041a_vgpu_types; - resident_VGPU_types = []; - scheduled_VGPU_types = []; + supported_VGPU_types = intel_041a_vgpu_types; + enabled_VGPU_types = intel_041a_vgpu_types; + resident_VGPU_types = []; + scheduled_VGPU_types = []; } let string_of_vgpu_type vgpu_type = - vgpu_type.model_name + vgpu_type.model_name let string_of_pgpu_state pgpu = - Printf.sprintf "{supported: %s; enabled: %s; resident: %s; scheduled: %s}" - (Test_printers.(list string_of_vgpu_type) pgpu.supported_VGPU_types) - (Test_printers.(list string_of_vgpu_type) pgpu.enabled_VGPU_types) - (Test_printers.(list string_of_vgpu_type) pgpu.resident_VGPU_types) - (Test_printers.(list string_of_vgpu_type) pgpu.scheduled_VGPU_types) + Printf.sprintf "{supported: %s; enabled: %s; resident: %s; scheduled: %s}" + (Test_printers.(list string_of_vgpu_type) pgpu.supported_VGPU_types) + (Test_printers.(list string_of_vgpu_type) pgpu.enabled_VGPU_types) + (Test_printers.(list string_of_vgpu_type) 
pgpu.resident_VGPU_types) + (Test_printers.(list string_of_vgpu_type) pgpu.scheduled_VGPU_types) let make_vgpu ~__context - ?(vm_ref=Ref.null) - ?(resident_on=Ref.null) - ?(scheduled_to_be_resident_on=Ref.null) - vgpu_type = - let vgpu_type_ref = find_or_create ~__context vgpu_type in - (* For the passthrough VGPU type, create a VM and mark it as attached to the - * PGPU's PCI device. *) - let vm_ref = - if Db.is_valid_ref __context vm_ref - then vm_ref - else Test_common.make_vm ~__context () - in - if (Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type_ref) - && (Db.is_valid_ref __context resident_on) - then begin - let pci_ref = Db.PGPU.get_PCI ~__context ~self:resident_on in - Db.PCI.add_attached_VMs ~__context ~self:pci_ref ~value:vm_ref - end; - Test_common.make_vgpu ~__context - ~vM:vm_ref - ~_type:vgpu_type_ref - ~resident_on - ~scheduled_to_be_resident_on () + ?(vm_ref=Ref.null) + ?(resident_on=Ref.null) + ?(scheduled_to_be_resident_on=Ref.null) + vgpu_type = + let vgpu_type_ref = find_or_create ~__context vgpu_type in + (* For the passthrough VGPU type, create a VM and mark it as attached to the + * PGPU's PCI device. 
*) + let vm_ref = + if Db.is_valid_ref __context vm_ref + then vm_ref + else Test_common.make_vm ~__context () + in + if (Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type_ref) + && (Db.is_valid_ref __context resident_on) + then begin + let pci_ref = Db.PGPU.get_PCI ~__context ~self:resident_on in + Db.PCI.add_attached_VMs ~__context ~self:pci_ref ~value:vm_ref + end; + Test_common.make_vgpu ~__context + ~vM:vm_ref + ~_type:vgpu_type_ref + ~resident_on + ~scheduled_to_be_resident_on () let make_pgpu ~__context ?address ?(host=Ref.null) ?(gPU_group=Ref.null) pgpu = - let pCI = Test_common.make_pci ~__context ?pci_id:address ~host ~functions:1L () in - let supported_VGPU_types = - List.map (find_or_create ~__context) pgpu.supported_VGPU_types - in - let enabled_VGPU_types = - List.map (find_or_create ~__context) pgpu.supported_VGPU_types - in - let pgpu_ref = Test_common.make_pgpu ~__context - ~pCI - ~host - ~gPU_group - ~supported_VGPU_types - ~enabled_VGPU_types () in - List.iter - (fun vgpu_type -> - let (_: API.ref_VGPU) = - (make_vgpu ~__context ~resident_on:pgpu_ref vgpu_type) in ()) - pgpu.resident_VGPU_types; - List.iter - (fun vgpu_type -> - let (_: API.ref_VGPU) = - (make_vgpu ~__context ~scheduled_to_be_resident_on:pgpu_ref vgpu_type) - in ()) - pgpu.scheduled_VGPU_types; - pgpu_ref + let pCI = Test_common.make_pci ~__context ?pci_id:address ~host ~functions:1L () in + let supported_VGPU_types = + List.map (find_or_create ~__context) pgpu.supported_VGPU_types + in + let enabled_VGPU_types = + List.map (find_or_create ~__context) pgpu.supported_VGPU_types + in + let pgpu_ref = Test_common.make_pgpu ~__context + ~pCI + ~host + ~gPU_group + ~supported_VGPU_types + ~enabled_VGPU_types () in + List.iter + (fun vgpu_type -> + let (_: API.ref_VGPU) = + (make_vgpu ~__context ~resident_on:pgpu_ref vgpu_type) in ()) + pgpu.resident_VGPU_types; + List.iter + (fun vgpu_type -> + let (_: API.ref_VGPU) = + (make_vgpu ~__context 
~scheduled_to_be_resident_on:pgpu_ref vgpu_type) + in ()) + pgpu.scheduled_VGPU_types; + pgpu_ref diff --git a/ocaml/test/test_vgpu_type.ml b/ocaml/test/test_vgpu_type.ml index 53492ae5db0..26c074e7bbb 100644 --- a/ocaml/test/test_vgpu_type.ml +++ b/ocaml/test/test_vgpu_type.ml @@ -21,326 +21,326 @@ open Xapi_vgpu_type let mib x = List.fold_left Int64.mul x [1024L; 1024L] module NvidiaTest = struct - let string_of_vgpu_conf conf = - let open Identifier in - let open Nvidia in - Printf.sprintf "%04x %s %04x %04x %Ld" - conf.identifier.pdev_id - (match conf.identifier.psubdev_id with - | Some id -> Printf.sprintf "Some %04x" id - | None -> "None") - conf.identifier.vdev_id - conf.identifier.vsubdev_id - conf.framebufferlength + let string_of_vgpu_conf conf = + let open Identifier in + let open Nvidia in + Printf.sprintf "%04x %s %04x %04x %Ld" + conf.identifier.pdev_id + (match conf.identifier.psubdev_id with + | Some id -> Printf.sprintf "Some %04x" id + | None -> "None") + conf.identifier.vdev_id + conf.identifier.vsubdev_id + conf.framebufferlength - let print_vgpu_conf conf = - Printf.printf "%s\n" (string_of_vgpu_conf conf) + let print_vgpu_conf conf = + Printf.printf "%s\n" (string_of_vgpu_conf conf) - module OfConfFile = Generic.Make(struct - module Io = struct - type input_t = string - type output_t = Nvidia.vgpu_conf + module OfConfFile = Generic.Make(struct + module Io = struct + type input_t = string + type output_t = Nvidia.vgpu_conf - let string_of_input_t x = x - let string_of_output_t = string_of_vgpu_conf - end + let string_of_input_t x = x + let string_of_output_t = string_of_vgpu_conf + end - let transform = Nvidia.of_conf_file + let transform = Nvidia.of_conf_file - let tests = [ - "ocaml/test/data/test_vgpu_subdevid.conf", - Nvidia.({ - identifier = Identifier.({ - pdev_id = 0x3333; - psubdev_id = Some 0x4444; - vdev_id = 0x1111; - vsubdev_id = 0x2222; - }); - framebufferlength = 0x10000000L; - num_heads = 2L; - max_instance = 8L; - max_x = 1920L; 
- max_y = 1200L; - file_path = "ocaml/test/data/test_vgpu_subdevid.conf"; - }); - "ocaml/test/data/test_vgpu_nosubdevid.conf", - Nvidia.({ - identifier = Identifier.({ - pdev_id = 0x3333; - psubdev_id = None; - vdev_id = 0x1111; - vsubdev_id = 0x2222; - }); - framebufferlength = 0x10000000L; - num_heads = 2L; - max_instance = 8L; - max_x = 1920L; - max_y = 1200L; - file_path = "ocaml/test/data/test_vgpu_nosubdevid.conf"; - }); - ] - end) + let tests = [ + "ocaml/test/data/test_vgpu_subdevid.conf", + Nvidia.({ + identifier = Identifier.({ + pdev_id = 0x3333; + psubdev_id = Some 0x4444; + vdev_id = 0x1111; + vsubdev_id = 0x2222; + }); + framebufferlength = 0x10000000L; + num_heads = 2L; + max_instance = 8L; + max_x = 1920L; + max_y = 1200L; + file_path = "ocaml/test/data/test_vgpu_subdevid.conf"; + }); + "ocaml/test/data/test_vgpu_nosubdevid.conf", + Nvidia.({ + identifier = Identifier.({ + pdev_id = 0x3333; + psubdev_id = None; + vdev_id = 0x1111; + vsubdev_id = 0x2222; + }); + framebufferlength = 0x10000000L; + num_heads = 2L; + max_instance = 8L; + max_x = 1920L; + max_y = 1200L; + file_path = "ocaml/test/data/test_vgpu_nosubdevid.conf"; + }); + ] + end) - (* This test generates a lot of print --- set skip to false to enable *) - let skip = true + (* This test generates a lot of print --- set skip to false to enable *) + let skip = true - let print_nv_types () = - skip_if skip "Generates print..."; - try - let open Nvidia in - if (Sys.file_exists nvidia_conf_dir - && Sys.is_directory nvidia_conf_dir) then - begin - let vgpu_confs = read_config_dir nvidia_conf_dir in - List.iter print_vgpu_conf vgpu_confs - end else - Printf.printf "No NVIDIA conf files found in %s\n" nvidia_conf_dir - with e -> - print_string (Printf.sprintf "%s\n" (Printexc.to_string e)); - assert false (* fail *) + let print_nv_types () = + skip_if skip "Generates print..."; + try + let open Nvidia in + if (Sys.file_exists nvidia_conf_dir + && Sys.is_directory nvidia_conf_dir) then + begin + let 
vgpu_confs = read_config_dir nvidia_conf_dir in + List.iter print_vgpu_conf vgpu_confs + end else + Printf.printf "No NVIDIA conf files found in %s\n" nvidia_conf_dir + with e -> + print_string (Printf.sprintf "%s\n" (Printexc.to_string e)); + assert false (* fail *) end module IntelTest = struct - let string_of_vgpu_conf conf = - let open Identifier in - let open Intel in - Printf.sprintf "%04x %Ld %Ld %Ld %s %b %s" - conf.identifier.pdev_id - conf.identifier.low_gm_sz - conf.identifier.high_gm_sz - conf.identifier.fence_sz - (Test_printers.(option string) conf.identifier.monitor_config_file) - conf.experimental - conf.model_name + let string_of_vgpu_conf conf = + let open Identifier in + let open Intel in + Printf.sprintf "%04x %Ld %Ld %Ld %s %b %s" + conf.identifier.pdev_id + conf.identifier.low_gm_sz + conf.identifier.high_gm_sz + conf.identifier.fence_sz + (Test_printers.(option string) conf.identifier.monitor_config_file) + conf.experimental + conf.model_name - module ReadWhitelistLine = Generic.Make(struct - module Io = struct - type input_t = string - type output_t = Intel.vgpu_conf option + module ReadWhitelistLine = Generic.Make(struct + module Io = struct + type input_t = string + type output_t = Intel.vgpu_conf option - let string_of_input_t x = x - let string_of_output_t = Test_printers.option string_of_vgpu_conf - end + let string_of_input_t x = x + let string_of_output_t = Test_printers.option string_of_vgpu_conf + end - let transform line = Intel.read_whitelist_line ~line + let transform line = Intel.read_whitelist_line ~line - let tests = [ - (* Test some failure cases. *) - "", None; - "nonsense123", None; - (* Test some success cases. 
*) - "1234 experimental=0 name='myvgpu' low_gm_sz=128 high_gm_sz=384 fence_sz=4 framebuffer_sz=128 max_heads=1 resolution=1920x1080 monitor_config_file=/my/file", - Some { - Intel.identifier = Identifier.({ - pdev_id = 0x1234; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/my/file"; - }); - experimental = false; - model_name = "myvgpu"; - framebufferlength = mib 128L; - num_heads = 1L; - max_x = 1920L; - max_y = 1080L; - }; - "1234 experimental=1 name='myvgpu' low_gm_sz=128 high_gm_sz=384 fence_sz=4 framebuffer_sz=128 max_heads=1 resolution=1920x1080 monitor_config_file=/my/file", - Some { - Intel.identifier = Identifier.({ - pdev_id = 0x1234; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/my/file"; - }); - experimental = true; - model_name = "myvgpu"; - framebufferlength = mib 128L; - num_heads = 1L; - max_x = 1920L; - max_y = 1080L; - }; - ] - end) + let tests = [ + (* Test some failure cases. *) + "", None; + "nonsense123", None; + (* Test some success cases. 
*) + "1234 experimental=0 name='myvgpu' low_gm_sz=128 high_gm_sz=384 fence_sz=4 framebuffer_sz=128 max_heads=1 resolution=1920x1080 monitor_config_file=/my/file", + Some { + Intel.identifier = Identifier.({ + pdev_id = 0x1234; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/my/file"; + }); + experimental = false; + model_name = "myvgpu"; + framebufferlength = mib 128L; + num_heads = 1L; + max_x = 1920L; + max_y = 1080L; + }; + "1234 experimental=1 name='myvgpu' low_gm_sz=128 high_gm_sz=384 fence_sz=4 framebuffer_sz=128 max_heads=1 resolution=1920x1080 monitor_config_file=/my/file", + Some { + Intel.identifier = Identifier.({ + pdev_id = 0x1234; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/my/file"; + }); + experimental = true; + model_name = "myvgpu"; + framebufferlength = mib 128L; + num_heads = 1L; + max_x = 1920L; + max_y = 1080L; + }; + ] + end) - module ReadWhitelist = Generic.Make(struct - module Io = struct - type input_t = (string * int) (* whitelist * device_id *) - type output_t = Intel.vgpu_conf list + module ReadWhitelist = Generic.Make(struct + module Io = struct + type input_t = (string * int) (* whitelist * device_id *) + type output_t = Intel.vgpu_conf list - let string_of_input_t (whitelist, device_id) = - Printf.sprintf "(%s, %04x)" whitelist device_id - let string_of_output_t = - Test_printers.list string_of_vgpu_conf - end + let string_of_input_t (whitelist, device_id) = + Printf.sprintf "(%s, %04x)" whitelist device_id + let string_of_output_t = + Test_printers.list string_of_vgpu_conf + end - let transform (whitelist, device_id) = - Intel.read_whitelist ~whitelist ~device_id |> List.rev + let transform (whitelist, device_id) = + Intel.read_whitelist ~whitelist ~device_id |> List.rev - let tests = [ - ("ocaml/test/data/gvt-g-whitelist-empty", 0x1234), []; - ("ocaml/test/data/gvt-g-whitelist-missing", 0x1234), []; - ("ocaml/test/data/gvt-g-whitelist-1234", 
0x1234), - [ - Intel.({ - identifier = Identifier.({ - pdev_id = 0x1234; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/path/to/file1"; - }); - experimental = false; - model_name = "GVT-g on 1234"; - framebufferlength = mib 128L; - num_heads = 1L; - max_x = 1920L; - max_y = 1080L; - }); - Intel.({ - identifier = Identifier.({ - pdev_id = 0x1234; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/path/to/file2"; - }); - experimental = true; - model_name = "GVT-g on 1234 (experimental)"; - framebufferlength = mib 128L; - num_heads = 1L; - max_x = 1920L; - max_y = 1080L; - }); - ]; - ("ocaml/test/data/gvt-g-whitelist-1234", 0x5678), []; - ("ocaml/test/data/gvt-g-whitelist-mixed", 0x1234), - [ - Intel.({ - identifier = Identifier.({ - pdev_id = 0x1234; - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/path/to/file1"; - }); - experimental = false; - model_name = "GVT-g on 1234"; - framebufferlength = mib 128L; - num_heads = 1L; - max_x = 1920L; - max_y = 1080L; - }); - ]; - ] - end) + let tests = [ + ("ocaml/test/data/gvt-g-whitelist-empty", 0x1234), []; + ("ocaml/test/data/gvt-g-whitelist-missing", 0x1234), []; + ("ocaml/test/data/gvt-g-whitelist-1234", 0x1234), + [ + Intel.({ + identifier = Identifier.({ + pdev_id = 0x1234; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/path/to/file1"; + }); + experimental = false; + model_name = "GVT-g on 1234"; + framebufferlength = mib 128L; + num_heads = 1L; + max_x = 1920L; + max_y = 1080L; + }); + Intel.({ + identifier = Identifier.({ + pdev_id = 0x1234; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/path/to/file2"; + }); + experimental = true; + model_name = "GVT-g on 1234 (experimental)"; + framebufferlength = mib 128L; + num_heads = 1L; + max_x = 1920L; + max_y = 1080L; + }); + ]; + ("ocaml/test/data/gvt-g-whitelist-1234", 
0x5678), []; + ("ocaml/test/data/gvt-g-whitelist-mixed", 0x1234), + [ + Intel.({ + identifier = Identifier.({ + pdev_id = 0x1234; + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/path/to/file1"; + }); + experimental = false; + model_name = "GVT-g on 1234"; + framebufferlength = mib 128L; + num_heads = 1L; + max_x = 1920L; + max_y = 1080L; + }); + ]; + ] + end) end let test_find_or_create () = - let __context = make_test_database () in - let k100_ref_1 = find_or_create ~__context k100 in - (* Check the VGPU type created in the DB has the expected fields. *) - assert_equal - ~msg:"k100 framebuffer_size is incorrect" - k100.framebuffer_size - (Db.VGPU_type.get_framebuffer_size ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 max_heads is incorrect" - k100.max_heads - (Db.VGPU_type.get_max_heads ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 size is incorrect" - k100.size - (Db.VGPU_type.get_size ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 experimental flag is incorrect" - k100.experimental - (Db.VGPU_type.get_experimental ~__context ~self:k100_ref_1); - (* Simulate an update of framebuffer_size, max_heads, size and the - * experimental flag, as if the config file had been updated. *) - let new_k100 = { - k100 with - framebuffer_size = (Int64.mul k100.framebuffer_size 2L); - max_heads = (Int64.mul k100.max_heads 2L); - size = (Int64.mul k100.size 2L); - experimental = not k100.experimental; - } in - (* We can ignore the result as it should be the same as the VGPU_type ref - * obtained earlier. *) - let k100_ref_2 = find_or_create ~__context new_k100 in - (* Make sure the new ref is the same as the old ref, i.e. no new VGPU_type has - * been created. *) - assert_equal - ~msg:"New k100 type was created erroneously" - k100_ref_1 k100_ref_2; - (* Make sure the existing VGPU type object in the database - * has been updated. 
*) - assert_equal - ~msg:"k100 framebuffer_size was not updated" - new_k100.framebuffer_size - (Db.VGPU_type.get_framebuffer_size ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 max_heads was not updated" - new_k100.max_heads - (Db.VGPU_type.get_max_heads ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 size was not updated" - new_k100.size - (Db.VGPU_type.get_size ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 was not marked experimental" - new_k100.experimental - (Db.VGPU_type.get_experimental ~__context ~self:k100_ref_1) + let __context = make_test_database () in + let k100_ref_1 = find_or_create ~__context k100 in + (* Check the VGPU type created in the DB has the expected fields. *) + assert_equal + ~msg:"k100 framebuffer_size is incorrect" + k100.framebuffer_size + (Db.VGPU_type.get_framebuffer_size ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 max_heads is incorrect" + k100.max_heads + (Db.VGPU_type.get_max_heads ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 size is incorrect" + k100.size + (Db.VGPU_type.get_size ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 experimental flag is incorrect" + k100.experimental + (Db.VGPU_type.get_experimental ~__context ~self:k100_ref_1); + (* Simulate an update of framebuffer_size, max_heads, size and the + * experimental flag, as if the config file had been updated. *) + let new_k100 = { + k100 with + framebuffer_size = (Int64.mul k100.framebuffer_size 2L); + max_heads = (Int64.mul k100.max_heads 2L); + size = (Int64.mul k100.size 2L); + experimental = not k100.experimental; + } in + (* We can ignore the result as it should be the same as the VGPU_type ref + * obtained earlier. *) + let k100_ref_2 = find_or_create ~__context new_k100 in + (* Make sure the new ref is the same as the old ref, i.e. no new VGPU_type has + * been created. 
*) + assert_equal + ~msg:"New k100 type was created erroneously" + k100_ref_1 k100_ref_2; + (* Make sure the existing VGPU type object in the database + * has been updated. *) + assert_equal + ~msg:"k100 framebuffer_size was not updated" + new_k100.framebuffer_size + (Db.VGPU_type.get_framebuffer_size ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 max_heads was not updated" + new_k100.max_heads + (Db.VGPU_type.get_max_heads ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 size was not updated" + new_k100.size + (Db.VGPU_type.get_size ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 was not marked experimental" + new_k100.experimental + (Db.VGPU_type.get_experimental ~__context ~self:k100_ref_1) let test_identifier_lookup () = - let test_vendor_name = "test_vendor_name" in - let test_model_name = "test_model_name" in - let __context = make_test_database () in - let k100_ref_1 = find_or_create ~__context k100 in - let k100_ref_2 = find_or_create ~__context - {k100 with vendor_name = test_vendor_name; model_name = test_model_name} in - (* Make sure the new ref is the same as the old ref, i.e. no new VGPU_type has - * been created. *) - assert_equal - ~msg:"New k100 type was created erroneously" - k100_ref_1 k100_ref_2; - (* Make sure the VGPU_type's vendor and model names have been updated. *) - assert_equal - ~msg:"k100 vendor_name was not updated" - test_vendor_name - (Db.VGPU_type.get_vendor_name ~__context ~self:k100_ref_1); - assert_equal - ~msg:"k100 model_name was not updated" - test_model_name - (Db.VGPU_type.get_model_name ~__context ~self:k100_ref_1) + let test_vendor_name = "test_vendor_name" in + let test_model_name = "test_model_name" in + let __context = make_test_database () in + let k100_ref_1 = find_or_create ~__context k100 in + let k100_ref_2 = find_or_create ~__context + {k100 with vendor_name = test_vendor_name; model_name = test_model_name} in + (* Make sure the new ref is the same as the old ref, i.e. 
no new VGPU_type has + * been created. *) + assert_equal + ~msg:"New k100 type was created erroneously" + k100_ref_1 k100_ref_2; + (* Make sure the VGPU_type's vendor and model names have been updated. *) + assert_equal + ~msg:"k100 vendor_name was not updated" + test_vendor_name + (Db.VGPU_type.get_vendor_name ~__context ~self:k100_ref_1); + assert_equal + ~msg:"k100 model_name was not updated" + test_model_name + (Db.VGPU_type.get_model_name ~__context ~self:k100_ref_1) let test_vendor_model_lookup () = - let __context = make_test_database () in - let k100_ref_1 = find_or_create ~__context k100 in - (* Set the identifier to the empty string, as if we have upgraded from an old - * version that did not have the identifier field. *) - Db.VGPU_type.set_identifier ~__context ~self:k100_ref_1 ~value:""; - let k100_ref_2 = find_or_create ~__context k100 in - (* Make sure the new ref is the same as the old ref, i.e. no new VGPU_type has - * been created. *) - assert_equal - ~msg:"New k100 type was created erroneously" - k100_ref_1 k100_ref_2; - (* Make sure the identifier field has been updated. *) - assert_equal - ~msg:"k100 identifier was not updated." - (Identifier.to_string k100.identifier) - (Db.VGPU_type.get_identifier ~__context ~self:k100_ref_1) + let __context = make_test_database () in + let k100_ref_1 = find_or_create ~__context k100 in + (* Set the identifier to the empty string, as if we have upgraded from an old + * version that did not have the identifier field. *) + Db.VGPU_type.set_identifier ~__context ~self:k100_ref_1 ~value:""; + let k100_ref_2 = find_or_create ~__context k100 in + (* Make sure the new ref is the same as the old ref, i.e. no new VGPU_type has + * been created. *) + assert_equal + ~msg:"New k100 type was created erroneously" + k100_ref_1 k100_ref_2; + (* Make sure the identifier field has been updated. *) + assert_equal + ~msg:"k100 identifier was not updated." 
+ (Identifier.to_string k100.identifier) + (Db.VGPU_type.get_identifier ~__context ~self:k100_ref_1) let test = - "test_vgpu_type" >::: - [ - "test_of_conf_file" >::: NvidiaTest.OfConfFile.tests; - "print_nv_types" >:: NvidiaTest.print_nv_types; - "read_whitelist_line" >::: IntelTest.ReadWhitelistLine.tests; - "read_whitelist" >::: IntelTest.ReadWhitelist.tests; - "test_find_or_create" >:: test_find_or_create; - "test_identifier_lookup" >:: test_identifier_lookup; - "test_vendor_model_lookup" >:: test_vendor_model_lookup; - ] + "test_vgpu_type" >::: + [ + "test_of_conf_file" >::: NvidiaTest.OfConfFile.tests; + "print_nv_types" >:: NvidiaTest.print_nv_types; + "read_whitelist_line" >::: IntelTest.ReadWhitelistLine.tests; + "read_whitelist" >::: IntelTest.ReadWhitelist.tests; + "test_find_or_create" >:: test_find_or_create; + "test_identifier_lookup" >:: test_identifier_lookup; + "test_vendor_model_lookup" >:: test_vendor_model_lookup; + ] diff --git a/ocaml/test/test_vm_helpers.ml b/ocaml/test/test_vm_helpers.ml index 4c59f900eff..28cdf5adb08 100644 --- a/ocaml/test/test_vm_helpers.ml +++ b/ocaml/test/test_vm_helpers.ml @@ -20,203 +20,203 @@ open Xapi_vm_helpers (*--- Helper functions ---*) let on_pool_of_k1s (f : Context.t -> API.ref_host -> API.ref_host -> API.ref_host -> 'a) = - (* Note: f c h h' h'' applied to hosts with the same number of k1s as 's - * +----------+ +----------+ +----------+ - * | | | +--+ | |+--+ +--+| - * | | | |K1| | ||K1| |K1|| - * | | | +--+ | |+--+ +--+| - * +----------+ +----------+ +----------+ - * h h' h'' - *) - let __context = make_test_database () in - let h = List.hd (Db.Host.get_all ~__context) in - (* Make two more hosts *) - match (make_host ~__context (), make_host ~__context ()) with - | (h', h'') -> - let gPU_group = make_gpu_group ~__context () in - let rec make_k1s_on (host, num) = - if num > 0 then - let _ = make_pgpu ~__context ~host ~gPU_group default_k1 in - make_k1s_on (host, (num - 1)) - in - List.iter make_k1s_on [(h, 
0); (h', 1); (h'', 2)]; - f __context h h' h'' + (* Note: f c h h' h'' applied to hosts with the same number of k1s as 's + * +----------+ +----------+ +----------+ + * | | | +--+ | |+--+ +--+| + * | | | |K1| | ||K1| |K1|| + * | | | +--+ | |+--+ +--+| + * +----------+ +----------+ +----------+ + * h h' h'' + *) + let __context = make_test_database () in + let h = List.hd (Db.Host.get_all ~__context) in + (* Make two more hosts *) + match (make_host ~__context (), make_host ~__context ()) with + | (h', h'') -> + let gPU_group = make_gpu_group ~__context () in + let rec make_k1s_on (host, num) = + if num > 0 then + let _ = make_pgpu ~__context ~host ~gPU_group default_k1 in + make_k1s_on (host, (num - 1)) + in + List.iter make_k1s_on [(h, 0); (h', 1); (h'', 2)]; + f __context h h' h'' let make_vm_with_vgpu_in_group ~__context vgpu_type gpu_group_ref = - let vgpu_ref = make_vgpu ~__context ~resident_on:Ref.null vgpu_type - and vm_ref = make_vm ~__context () in - Db.VGPU.set_GPU_group ~__context ~self:vgpu_ref ~value:gpu_group_ref; - Db.VGPU.set_VM ~__context ~self:vgpu_ref ~value:vm_ref; - vm_ref + let vgpu_ref = make_vgpu ~__context ~resident_on:Ref.null vgpu_type + and vm_ref = make_vm ~__context () in + Db.VGPU.set_GPU_group ~__context ~self:vgpu_ref ~value:gpu_group_ref; + Db.VGPU.set_VM ~__context ~self:vgpu_ref ~value:vm_ref; + vm_ref (*--- Xapi_vm_helpers.assert_gpus_available ---*) let test_gpus_available_succeeds () = - on_pool_of_k1s (fun __context _ h' _ -> - let group = List.hd (Db.GPU_group.get_all ~__context) in - let vm = make_vm_with_vgpu_in_group ~__context k100 group in - assert_gpus_available ~__context ~self:vm ~host:h') + on_pool_of_k1s (fun __context _ h' _ -> + let group = List.hd (Db.GPU_group.get_all ~__context) in + let vm = make_vm_with_vgpu_in_group ~__context k100 group in + assert_gpus_available ~__context ~self:vm ~host:h') let test_gpus_available_fails_no_pgpu () = - on_pool_of_k1s (fun __context h _ _ -> - let group = List.hd 
(Db.GPU_group.get_all ~__context) in - let vm = make_vm_with_vgpu_in_group ~__context k100 group in - assert_raises_api_error Api_errors.vm_requires_gpu (fun () -> - assert_gpus_available ~__context ~self:vm ~host:h)) + on_pool_of_k1s (fun __context h _ _ -> + let group = List.hd (Db.GPU_group.get_all ~__context) in + let vm = make_vm_with_vgpu_in_group ~__context k100 group in + assert_raises_api_error Api_errors.vm_requires_gpu (fun () -> + assert_gpus_available ~__context ~self:vm ~host:h)) let test_gpus_available_fails_disabled_type () = - on_pool_of_k1s (fun __context _ h' _ -> - let group = List.hd (Db.GPU_group.get_all ~__context) in - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in - List.iter (fun p -> - Db.PGPU.set_enabled_VGPU_types ~__context ~self:p ~value:[]) - pgpus; - let vm = make_vm_with_vgpu_in_group ~__context k100 group in - assert_raises_api_error Api_errors.vm_requires_gpu (fun () -> - assert_gpus_available ~__context ~self:vm ~host:h')) + on_pool_of_k1s (fun __context _ h' _ -> + let group = List.hd (Db.GPU_group.get_all ~__context) in + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in + List.iter (fun p -> + Db.PGPU.set_enabled_VGPU_types ~__context ~self:p ~value:[]) + pgpus; + let vm = make_vm_with_vgpu_in_group ~__context k100 group in + assert_raises_api_error Api_errors.vm_requires_gpu (fun () -> + assert_gpus_available ~__context ~self:vm ~host:h')) let test_gpus_available_fails_no_capacity () = - on_pool_of_k1s (fun __context _ h' _ -> - let group = List.hd (Db.GPU_group.get_all ~__context) in - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in - (* Fill up all the PGPUs *) - List.iter (fun p -> - ignore (make_vgpu ~__context ~resident_on:p Xapi_vgpu_type.passthrough_gpu)) - pgpus; - let vm = make_vm_with_vgpu_in_group ~__context k100 group in - assert_raises_api_error Api_errors.vm_requires_gpu - (fun () -> assert_gpus_available ~__context ~self:vm ~host:h')) + on_pool_of_k1s (fun __context _ h' _ 
-> + let group = List.hd (Db.GPU_group.get_all ~__context) in + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in + (* Fill up all the PGPUs *) + List.iter (fun p -> + ignore (make_vgpu ~__context ~resident_on:p Xapi_vgpu_type.passthrough_gpu)) + pgpus; + let vm = make_vm_with_vgpu_in_group ~__context k100 group in + assert_raises_api_error Api_errors.vm_requires_gpu + (fun () -> assert_gpus_available ~__context ~self:vm ~host:h')) (*--- Xapi_vm_helpers.group_hosts_by_best_pgpu ---*) let assert_list_is_set l = - let rec inner ac = function - | [] -> () - | x :: xs -> - if (List.mem x ac) then assert_failure "List is not set" - else inner (x :: ac) xs - in - inner [] l + let rec inner ac = function + | [] -> () + | x :: xs -> + if (List.mem x ac) then assert_failure "List is not set" + else inner (x :: ac) xs + in + inner [] l let assert_host_group_coherent g = - match g with - | [] -> assert_failure "Empty host group" - | (h, c) :: _ -> - assert_list_is_set (List.map fst g); - assert_bool "Score not same for all hosts in group" - (List.for_all (fun x -> snd x = c) g) + match g with + | [] -> assert_failure "Empty host group" + | (h, c) :: _ -> + assert_list_is_set (List.map fst g); + assert_bool "Score not same for all hosts in group" + (List.for_all (fun x -> snd x = c) g) let assert_host_groups_equal g g' = - let extract_host_strings g = - let hosts = List.map fst g in - List.sort String.compare (List.map Ref.string_of hosts) - in - assert_equal (extract_host_strings g) (extract_host_strings g'); - let score_of g = snd (List.hd g) in - assert_equal (score_of g) (score_of g') + let extract_host_strings g = + let hosts = List.map fst g in + List.sort String.compare (List.map Ref.string_of hosts) + in + assert_equal (extract_host_strings g) (extract_host_strings g'); + let score_of g = snd (List.hd g) in + assert_equal (score_of g) (score_of g') let rec assert_equivalent expected_grouping actual_grouping = - match (expected_grouping, actual_grouping) with 
- | [], [] -> () - | [], xx -> assert_failure (Printf.sprintf "%d more groups than expected." (List.length xx)) - | xx, [] -> assert_failure (Printf.sprintf "%d less groups than expected." (List.length xx)) - | e :: es, g :: gs -> - assert_host_group_coherent g; - assert_host_groups_equal e g; - assert_equivalent es gs + match (expected_grouping, actual_grouping) with + | [], [] -> () + | [], xx -> assert_failure (Printf.sprintf "%d more groups than expected." (List.length xx)) + | xx, [] -> assert_failure (Printf.sprintf "%d less groups than expected." (List.length xx)) + | e :: es, g :: gs -> + assert_host_group_coherent g; + assert_host_groups_equal e g; + assert_equivalent es gs let assert_grouping ~__context gpu_group vgpu_type g = - let vgpu_type_ref = Xapi_vgpu_type.find_or_create ~__context vgpu_type in - let host_lists = group_hosts_by_best_pgpu_in_group ~__context gpu_group vgpu_type_ref in - try assert_equivalent g host_lists - with e -> - let item_to_string (h, c) = Printf.sprintf "(%s, %Ld)" (Ref.string_of h) c in - let group_to_string g = Printf.sprintf "[ %s ]" - (String.concat "; " (List.map item_to_string g)) in - let groups_to_string gs = Printf.sprintf "[ %s ]" - (String.concat "; " (List.map group_to_string gs)) in - let diff_string = Printf.sprintf "Expected: %s\nActual: %s\n" - (groups_to_string g) (groups_to_string host_lists) in - assert_failure (diff_string ^ Printexc.to_string e) + let vgpu_type_ref = Xapi_vgpu_type.find_or_create ~__context vgpu_type in + let host_lists = group_hosts_by_best_pgpu_in_group ~__context gpu_group vgpu_type_ref in + try assert_equivalent g host_lists + with e -> + let item_to_string (h, c) = Printf.sprintf "(%s, %Ld)" (Ref.string_of h) c in + let group_to_string g = Printf.sprintf "[ %s ]" + (String.concat "; " (List.map item_to_string g)) in + let groups_to_string gs = Printf.sprintf "[ %s ]" + (String.concat "; " (List.map group_to_string gs)) in + let diff_string = Printf.sprintf "Expected: %s\nActual: 
%s\n" + (groups_to_string g) (groups_to_string host_lists) in + assert_failure (diff_string ^ Printexc.to_string e) let rec assert_expectations ~__context gpu_group = function - | [] -> () - | (vgpu_type, expected_grouping) :: remaining -> - assert_grouping ~__context gpu_group vgpu_type expected_grouping; - assert_expectations ~__context gpu_group remaining + | [] -> () + | (vgpu_type, expected_grouping) :: remaining -> + assert_grouping ~__context gpu_group vgpu_type expected_grouping; + assert_expectations ~__context gpu_group remaining let test_group_hosts_bf () = - on_pool_of_k1s (fun __context h h' h'' -> - let gpu_group = List.hd (Db.GPU_group.get_all ~__context) in - Db.GPU_group.set_allocation_algorithm ~__context ~self:gpu_group ~value:`breadth_first; - match Db.Host.get_PGPUs ~__context ~self:h' @ Db.Host.get_PGPUs ~__context ~self:h'' with - | [h'_p; h''_p; h''_p'] -> - assert_expectations ~__context gpu_group [ - k100, [ [(h',8L);(h'',8L)] ]; - k140q, [ [(h',4L);(h'',4L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h''_p k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h',8L);(h'',8L)] ]; - k140q, [ [(h',4L);(h'',4L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h''_p' k140q); - assert_expectations ~__context gpu_group [ - k100, [ [(h',8L)]; [(h'',7L)] ]; - k140q, [ [(h',4L)]; [(h'',3L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h'_p k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h',7L);(h'',7L)] ]; - k140q, [ [(h'',3L)]; ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h'_p k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h'',7L)]; [(h',6L)] ]; - k140q, [ [(h'',3L)]; ]; - ]; - | _ -> failwith "Test-failure: Unexpected number of pgpus in test setup" - ) + on_pool_of_k1s (fun __context h h' h'' -> + let gpu_group = List.hd (Db.GPU_group.get_all ~__context) in + Db.GPU_group.set_allocation_algorithm ~__context ~self:gpu_group ~value:`breadth_first; + match Db.Host.get_PGPUs 
~__context ~self:h' @ Db.Host.get_PGPUs ~__context ~self:h'' with + | [h'_p; h''_p; h''_p'] -> + assert_expectations ~__context gpu_group [ + k100, [ [(h',8L);(h'',8L)] ]; + k140q, [ [(h',4L);(h'',4L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h''_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h',8L);(h'',8L)] ]; + k140q, [ [(h',4L);(h'',4L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h''_p' k140q); + assert_expectations ~__context gpu_group [ + k100, [ [(h',8L)]; [(h'',7L)] ]; + k140q, [ [(h',4L)]; [(h'',3L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h'_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h',7L);(h'',7L)] ]; + k140q, [ [(h'',3L)]; ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h'_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h'',7L)]; [(h',6L)] ]; + k140q, [ [(h'',3L)]; ]; + ]; + | _ -> failwith "Test-failure: Unexpected number of pgpus in test setup" + ) let test_group_hosts_df () = - on_pool_of_k1s (fun __context h h' h'' -> - let gpu_group = List.hd (Db.GPU_group.get_all ~__context) in - Db.GPU_group.set_allocation_algorithm ~__context ~self:gpu_group ~value:`depth_first; - match Db.Host.get_PGPUs ~__context ~self:h' @ Db.Host.get_PGPUs ~__context ~self:h'' with - | [h'_p; h''_p; h''_p'] -> - assert_expectations ~__context gpu_group [ - k100, [ [(h',8L);(h'',8L)] ]; - k140q, [ [(h',4L);(h'',4L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h''_p k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h'',7L)]; [(h',8L)] ]; - k140q, [ [(h',4L);(h'',4L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h''_p' k140q); - assert_expectations ~__context gpu_group [ - k100, [ [(h'',7L)]; [(h',8L)] ]; - k140q, [ [(h'',3L)]; [(h',4L)] ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h'_p k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h',7L);(h'',7L)] ]; - k140q, [ [(h'',3L)]; ]; - ]; - ignore (make_vgpu ~__context ~resident_on:h'_p 
k100); - assert_expectations ~__context gpu_group [ - k100, [ [(h',6L)]; [(h'',7L)] ]; - k140q, [ [(h'',3L)]; ]; - ]; - | _ -> failwith "Test-failure: Unexpected number of pgpus in test setup" - ) + on_pool_of_k1s (fun __context h h' h'' -> + let gpu_group = List.hd (Db.GPU_group.get_all ~__context) in + Db.GPU_group.set_allocation_algorithm ~__context ~self:gpu_group ~value:`depth_first; + match Db.Host.get_PGPUs ~__context ~self:h' @ Db.Host.get_PGPUs ~__context ~self:h'' with + | [h'_p; h''_p; h''_p'] -> + assert_expectations ~__context gpu_group [ + k100, [ [(h',8L);(h'',8L)] ]; + k140q, [ [(h',4L);(h'',4L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h''_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h'',7L)]; [(h',8L)] ]; + k140q, [ [(h',4L);(h'',4L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h''_p' k140q); + assert_expectations ~__context gpu_group [ + k100, [ [(h'',7L)]; [(h',8L)] ]; + k140q, [ [(h'',3L)]; [(h',4L)] ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h'_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h',7L);(h'',7L)] ]; + k140q, [ [(h'',3L)]; ]; + ]; + ignore (make_vgpu ~__context ~resident_on:h'_p k100); + assert_expectations ~__context gpu_group [ + k100, [ [(h',6L)]; [(h'',7L)] ]; + k140q, [ [(h'',3L)]; ]; + ]; + | _ -> failwith "Test-failure: Unexpected number of pgpus in test setup" + ) let test = - "test_vm_helpers" >::: - [ - "test_gpus_available_succeeds" >:: test_gpus_available_succeeds; - "test_gpus_available_fails_no_pgpu" >:: test_gpus_available_fails_no_pgpu; - "test_gpus_available_fails_disabled_type" >:: test_gpus_available_fails_disabled_type; - "test_gpus_available_fails_no_capacity" >:: test_gpus_available_fails_no_capacity; - - "test_group_hosts_bf" >:: test_group_hosts_bf; - "test_group_hosts_df" >:: test_group_hosts_df; - ] + "test_vm_helpers" >::: + [ + "test_gpus_available_succeeds" >:: test_gpus_available_succeeds; + "test_gpus_available_fails_no_pgpu" >:: 
test_gpus_available_fails_no_pgpu; + "test_gpus_available_fails_disabled_type" >:: test_gpus_available_fails_disabled_type; + "test_gpus_available_fails_no_capacity" >:: test_gpus_available_fails_no_capacity; + + "test_group_hosts_bf" >:: test_group_hosts_bf; + "test_group_hosts_df" >:: test_group_hosts_df; + ] diff --git a/ocaml/test/test_vm_migrate.ml b/ocaml/test/test_vm_migrate.ml index b8b331703da..b11ffa2c30e 100644 --- a/ocaml/test/test_vm_migrate.ml +++ b/ocaml/test/test_vm_migrate.ml @@ -19,50 +19,50 @@ let mac1 = "00:00:00:00:00:01" let mac2 = "00:00:00:00:00:02" let test_infer_vif_map_empty () = - let __context = make_test_database () in - assert_equal - (Xapi_vm_migrate.infer_vif_map ~__context [] []) - [] + let __context = make_test_database () in + assert_equal + (Xapi_vm_migrate.infer_vif_map ~__context [] []) + [] let test_infer_vif_map () = - let __context = make_test_database () in - let vm_vif1 = make_vif ~__context ~mAC:mac1 () in - let vm_vif2 = make_vif ~__context ~mAC:mac2 () in - let snap_vif1 = make_vif ~__context ~mAC:mac1 () in - let snap_vif2 = make_vif ~__context ~mAC:mac2 () in - (* In reality this network won't be in the local database, but for our - * purposes it is a meaningless UUID so it's OK for it to be in the local - * database. *) - let network1 = make_network ~__context () in - (* Check that a map with a single VIF -> network pair is unchanged. *) - assert_equal - (Xapi_vm_migrate.infer_vif_map ~__context [vm_vif1] [vm_vif1, network1]) - [vm_vif1, network1]; - (* Check that a missing VIF is caught. *) - assert_raises - Api_errors.(Server_error (vif_not_in_map, [Ref.string_of vm_vif2])) - (fun () -> - Xapi_vm_migrate.infer_vif_map ~__context - [vm_vif1; vm_vif2] - [vm_vif1, network1]); - (* Check that a snapshot VIF is mapped implicitly. 
*) - let inferred_map = - Xapi_vm_migrate.infer_vif_map ~__context - [vm_vif1; snap_vif1] - [vm_vif1, network1] - in - assert_equal (List.assoc snap_vif1 inferred_map) network1; - (* Check that an orphaned, unmapped snapshot VIF is caught. *) - assert_raises - Api_errors.(Server_error (vif_not_in_map, [Ref.string_of snap_vif2])) - (fun () -> - Xapi_vm_migrate.infer_vif_map ~__context - [vm_vif1; snap_vif1; snap_vif2] - [vm_vif1, network1]) + let __context = make_test_database () in + let vm_vif1 = make_vif ~__context ~mAC:mac1 () in + let vm_vif2 = make_vif ~__context ~mAC:mac2 () in + let snap_vif1 = make_vif ~__context ~mAC:mac1 () in + let snap_vif2 = make_vif ~__context ~mAC:mac2 () in + (* In reality this network won't be in the local database, but for our + * purposes it is a meaningless UUID so it's OK for it to be in the local + * database. *) + let network1 = make_network ~__context () in + (* Check that a map with a single VIF -> network pair is unchanged. *) + assert_equal + (Xapi_vm_migrate.infer_vif_map ~__context [vm_vif1] [vm_vif1, network1]) + [vm_vif1, network1]; + (* Check that a missing VIF is caught. *) + assert_raises + Api_errors.(Server_error (vif_not_in_map, [Ref.string_of vm_vif2])) + (fun () -> + Xapi_vm_migrate.infer_vif_map ~__context + [vm_vif1; vm_vif2] + [vm_vif1, network1]); + (* Check that a snapshot VIF is mapped implicitly. *) + let inferred_map = + Xapi_vm_migrate.infer_vif_map ~__context + [vm_vif1; snap_vif1] + [vm_vif1, network1] + in + assert_equal (List.assoc snap_vif1 inferred_map) network1; + (* Check that an orphaned, unmapped snapshot VIF is caught. 
*) + assert_raises + Api_errors.(Server_error (vif_not_in_map, [Ref.string_of snap_vif2])) + (fun () -> + Xapi_vm_migrate.infer_vif_map ~__context + [vm_vif1; snap_vif1; snap_vif2] + [vm_vif1, network1]) let test = - "test_vm_migrate" >::: - [ - "test_infer_vif_map_empty" >:: test_infer_vif_map_empty; - "test_infer_vif_map" >:: test_infer_vif_map; - ] + "test_vm_migrate" >::: + [ + "test_infer_vif_map_empty" >:: test_infer_vif_map_empty; + "test_infer_vif_map" >:: test_infer_vif_map; + ] diff --git a/ocaml/test/test_workload_balancing.ml b/ocaml/test/test_workload_balancing.ml index 81e7545ea65..7134ad8b2ed 100644 --- a/ocaml/test/test_workload_balancing.ml +++ b/ocaml/test/test_workload_balancing.ml @@ -16,38 +16,38 @@ open OUnit open Test_common let test_split_host_port () = - let split = Workload_balancing.split_host_port in - let assert_succeed url host port = - assert_equal (split url) (host, port) - in - let assert_raise_url_invalid url = - assert_raises_api_error Api_errors.wlb_url_invalid ~args:[url] (fun () -> split url) - in - - (* succeed cases *) - assert_succeed "192.168.0.1:80" "192.168.0.1" 80; - assert_succeed "hostname.com:80" "hostname.com" 80; - assert_succeed "[fe80::a085:31cf:b31a:6a]:80" "fe80::a085:31cf:b31a:6a" 80; - - (* missing port number *) - assert_raise_url_invalid "192.168.0.1"; - assert_raise_url_invalid "hostname.noport.com"; - assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]"; - - (* non-integer port *) - assert_raise_url_invalid "192.168.0.1:http"; - assert_raise_url_invalid "hostname.com:port"; - assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]:ipv6"; - - (* malformed IPv6 host port peers *) - assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]80"; - assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a:80"; - - (* others *) - assert_raise_url_invalid "http://example.com:80/" + let split = Workload_balancing.split_host_port in + let assert_succeed url host port = + assert_equal (split url) (host, port) + in + let 
assert_raise_url_invalid url = + assert_raises_api_error Api_errors.wlb_url_invalid ~args:[url] (fun () -> split url) + in + + (* succeed cases *) + assert_succeed "192.168.0.1:80" "192.168.0.1" 80; + assert_succeed "hostname.com:80" "hostname.com" 80; + assert_succeed "[fe80::a085:31cf:b31a:6a]:80" "fe80::a085:31cf:b31a:6a" 80; + + (* missing port number *) + assert_raise_url_invalid "192.168.0.1"; + assert_raise_url_invalid "hostname.noport.com"; + assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]"; + + (* non-integer port *) + assert_raise_url_invalid "192.168.0.1:http"; + assert_raise_url_invalid "hostname.com:port"; + assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]:ipv6"; + + (* malformed IPv6 host port peers *) + assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a]80"; + assert_raise_url_invalid "[fe80::a085:31cf:b31a:6a:80"; + + (* others *) + assert_raise_url_invalid "http://example.com:80/" let test = - "test_workload_balancing" >::: - [ - "test_split_host_port" >:: test_split_host_port; - ] + "test_workload_balancing" >::: + [ + "test_split_host_port" >:: test_split_host_port; + ] diff --git a/ocaml/test/test_xapi_db_upgrade.ml b/ocaml/test/test_xapi_db_upgrade.ml index 94071d4295f..9fcf5c6d882 100644 --- a/ocaml/test/test_xapi_db_upgrade.ml +++ b/ocaml/test/test_xapi_db_upgrade.ml @@ -17,113 +17,113 @@ open Test_common open Xapi_db_upgrade open Stdext -let upgrade_vm_memory_for_dmc () = - let __context = make_test_database () in - - let self = List.hd (Db.VM.get_all ~__context) in - - (* Set control domain's dynamic_min <> dynamic_max <> target *) - Db.VM.set_memory_dynamic_min ~__context ~self ~value:1L; - Db.VM.set_memory_target ~__context ~self ~value:2L; - Db.VM.set_memory_dynamic_max ~__context ~self ~value:3L; - (* Apply the upgrade rule *) - upgrade_vm_memory_for_dmc.fn ~__context; - let r = Db.VM.get_record ~__context ~self in - assert_equal ~msg:"upgrade_vm_memory_for_dmc: control domain memory_dynamic_min <> memory_target" - 
r.API.vM_memory_dynamic_min r.API.vM_memory_target; - assert_equal ~msg:"upgrade_vm_memory_for_dmc: control domain memory_dynamic_max <> memory_target" - r.API.vM_memory_dynamic_max r.API.vM_memory_target; - - (* Make this a non-control domain and change all memory fields *) - Db.VM.set_is_control_domain ~__context ~self ~value:false; - Db.VM.set_memory_static_min ~__context ~self ~value:5L; - Db.VM.set_memory_dynamic_min ~__context ~self ~value:1L; - Db.VM.set_memory_target ~__context ~self ~value:2L; - Db.VM.set_memory_dynamic_max ~__context ~self ~value:3L; - Db.VM.set_memory_static_max ~__context ~self ~value:4L; - (* Apply the upgrade rule *) - upgrade_vm_memory_for_dmc.fn ~__context; - let r = Db.VM.get_record ~__context ~self in - assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_dynamic_max <> memory_static_max" - r.API.vM_memory_dynamic_max r.API.vM_memory_static_max; - assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_target <> memory_static_max" - r.API.vM_memory_target r.API.vM_memory_static_max; - assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_dynamic_min <> memory_static_max" - r.API.vM_memory_dynamic_min r.API.vM_memory_static_max; - assert_bool "upgrade_vm_memory_for_dmc: memory_static_min > memory_static_max" - (r.API.vM_memory_static_min <= r.API.vM_memory_static_max) - -let upgrade_bios () = - - let check inventory bios_strings = - Unixext.mkdir_safe "/var/tmp" 0o755; - Unixext.write_string_to_file "/var/tmp/.previousInventory" inventory; - let __context = make_test_database () in - upgrade_bios_strings.fn ~__context; - let _, vm_r = List.hd (Db.VM.get_all_records ~__context) in - assert_equal ~msg:"bios strings upgrade" - vm_r.API.vM_bios_strings bios_strings - in - check "OEM_MANUFACTURER=Dell" Xapi_globs.old_dell_bios_strings; - check "OEM_MANUFACTURER=HP" Xapi_globs.old_hp_bios_strings; - check "" Xapi_globs.generic_bios_strings; - Unixext.unlink_safe "/var/tmp/.previousInventory" - -let update_snapshots () = - let __context = 
make_test_database () in - let a = make_vm ~__context ~name_label:"a" () in - let a_snap = make_vm ~__context ~name_label:"a snap" () in - Db.VM.set_snapshot_of ~__context ~self:a_snap ~value:a; - Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_float 1.); - - let b = make_vm ~__context ~name_label:"b" () in - let b_snap = make_vm ~__context ~name_label:"b snap" () in - Db.VM.set_snapshot_of ~__context ~self:b_snap ~value:b; - Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_float 1.); - let b_snap2 = make_vm ~__context ~name_label:"b snap2" () in - Db.VM.set_snapshot_of ~__context ~self:b_snap2 ~value:b; - Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_float 2.); - - update_snapshots.fn ~__context; - - (* a.parent = a_snap *) - assert_equal ~msg:"a.parent <> a_snap" - (Db.VM.get_parent ~__context ~self:a) a_snap; - - (* b.parent = b_snap2 *) - assert_equal ~msg:"b.parent <> b_snap2" - (Db.VM.get_parent ~__context ~self:b) b_snap2; - - (* b_snap2.parent = b_snap *) - assert_equal ~msg:"b_snap2.parent <> b_snap" - (Db.VM.get_parent ~__context ~self:b_snap2)b_snap +let upgrade_vm_memory_for_dmc () = + let __context = make_test_database () in + + let self = List.hd (Db.VM.get_all ~__context) in + + (* Set control domain's dynamic_min <> dynamic_max <> target *) + Db.VM.set_memory_dynamic_min ~__context ~self ~value:1L; + Db.VM.set_memory_target ~__context ~self ~value:2L; + Db.VM.set_memory_dynamic_max ~__context ~self ~value:3L; + (* Apply the upgrade rule *) + upgrade_vm_memory_for_dmc.fn ~__context; + let r = Db.VM.get_record ~__context ~self in + assert_equal ~msg:"upgrade_vm_memory_for_dmc: control domain memory_dynamic_min <> memory_target" + r.API.vM_memory_dynamic_min r.API.vM_memory_target; + assert_equal ~msg:"upgrade_vm_memory_for_dmc: control domain memory_dynamic_max <> memory_target" + r.API.vM_memory_dynamic_max r.API.vM_memory_target; + + (* Make this a non-control domain and change all memory fields *) + 
Db.VM.set_is_control_domain ~__context ~self ~value:false; + Db.VM.set_memory_static_min ~__context ~self ~value:5L; + Db.VM.set_memory_dynamic_min ~__context ~self ~value:1L; + Db.VM.set_memory_target ~__context ~self ~value:2L; + Db.VM.set_memory_dynamic_max ~__context ~self ~value:3L; + Db.VM.set_memory_static_max ~__context ~self ~value:4L; + (* Apply the upgrade rule *) + upgrade_vm_memory_for_dmc.fn ~__context; + let r = Db.VM.get_record ~__context ~self in + assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_dynamic_max <> memory_static_max" + r.API.vM_memory_dynamic_max r.API.vM_memory_static_max; + assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_target <> memory_static_max" + r.API.vM_memory_target r.API.vM_memory_static_max; + assert_equal ~msg:"upgrade_vm_memory_for_dmc: memory_dynamic_min <> memory_static_max" + r.API.vM_memory_dynamic_min r.API.vM_memory_static_max; + assert_bool "upgrade_vm_memory_for_dmc: memory_static_min > memory_static_max" + (r.API.vM_memory_static_min <= r.API.vM_memory_static_max) + +let upgrade_bios () = + + let check inventory bios_strings = + Unixext.mkdir_safe "/var/tmp" 0o755; + Unixext.write_string_to_file "/var/tmp/.previousInventory" inventory; + let __context = make_test_database () in + upgrade_bios_strings.fn ~__context; + let _, vm_r = List.hd (Db.VM.get_all_records ~__context) in + assert_equal ~msg:"bios strings upgrade" + vm_r.API.vM_bios_strings bios_strings + in + check "OEM_MANUFACTURER=Dell" Xapi_globs.old_dell_bios_strings; + check "OEM_MANUFACTURER=HP" Xapi_globs.old_hp_bios_strings; + check "" Xapi_globs.generic_bios_strings; + Unixext.unlink_safe "/var/tmp/.previousInventory" + +let update_snapshots () = + let __context = make_test_database () in + let a = make_vm ~__context ~name_label:"a" () in + let a_snap = make_vm ~__context ~name_label:"a snap" () in + Db.VM.set_snapshot_of ~__context ~self:a_snap ~value:a; + Db.VM.set_snapshot_time ~__context ~self:a_snap ~value:(Date.of_float 1.); + + let b 
= make_vm ~__context ~name_label:"b" () in + let b_snap = make_vm ~__context ~name_label:"b snap" () in + Db.VM.set_snapshot_of ~__context ~self:b_snap ~value:b; + Db.VM.set_snapshot_time ~__context ~self:b_snap ~value:(Date.of_float 1.); + let b_snap2 = make_vm ~__context ~name_label:"b snap2" () in + Db.VM.set_snapshot_of ~__context ~self:b_snap2 ~value:b; + Db.VM.set_snapshot_time ~__context ~self:b_snap2 ~value:(Date.of_float 2.); + + update_snapshots.fn ~__context; + + (* a.parent = a_snap *) + assert_equal ~msg:"a.parent <> a_snap" + (Db.VM.get_parent ~__context ~self:a) a_snap; + + (* b.parent = b_snap2 *) + assert_equal ~msg:"b.parent <> b_snap2" + (Db.VM.get_parent ~__context ~self:b) b_snap2; + + (* b_snap2.parent = b_snap *) + assert_equal ~msg:"b_snap2.parent <> b_snap" + (Db.VM.get_parent ~__context ~self:b_snap2)b_snap let remove_restricted_pbd_keys () = - let restricted_keys = ["SRmaster"] in - let other_keys = ["foo"; "bar"] in (* to check we don't remove too much *) - let device_config = List.map (fun k -> (k, "some_value")) (restricted_keys @ other_keys) in - let __context = make_test_database () in - let pbd = make_pbd ~__context ~device_config () in - - remove_restricted_pbd_keys.fn ~__context; - - let device_config' = Db.PBD.get_device_config ~__context ~self:pbd in - - List.iter (fun k -> - assert_bool (Printf.sprintf "Restricted key, %s, not removed from PBD.device_config" k) - (not (List.mem_assoc k device_config')) - ) restricted_keys; - - List.iter (fun k -> - assert_bool (Printf.sprintf "Non-restricted key, %s, removed from PBD.device_config" k) - (List.mem_assoc k device_config') - ) other_keys + let restricted_keys = ["SRmaster"] in + let other_keys = ["foo"; "bar"] in (* to check we don't remove too much *) + let device_config = List.map (fun k -> (k, "some_value")) (restricted_keys @ other_keys) in + let __context = make_test_database () in + let pbd = make_pbd ~__context ~device_config () in + + remove_restricted_pbd_keys.fn 
~__context; + + let device_config' = Db.PBD.get_device_config ~__context ~self:pbd in + + List.iter (fun k -> + assert_bool (Printf.sprintf "Restricted key, %s, not removed from PBD.device_config" k) + (not (List.mem_assoc k device_config')) + ) restricted_keys; + + List.iter (fun k -> + assert_bool (Printf.sprintf "Non-restricted key, %s, removed from PBD.device_config" k) + (List.mem_assoc k device_config') + ) other_keys let test = - "test_db_upgrade" >::: - [ - "upgrade_vm_memory_for_dmc" >:: upgrade_vm_memory_for_dmc; - "upgrade_bios" >:: upgrade_bios; - "update_snapshots" >:: update_snapshots; - "remove_restricted_pbd_keys" >:: remove_restricted_pbd_keys; - ] + "test_db_upgrade" >::: + [ + "upgrade_vm_memory_for_dmc" >:: upgrade_vm_memory_for_dmc; + "upgrade_bios" >:: upgrade_bios; + "update_snapshots" >:: update_snapshots; + "remove_restricted_pbd_keys" >:: remove_restricted_pbd_keys; + ] diff --git a/ocaml/test/test_xapi_xenops.ml b/ocaml/test/test_xapi_xenops.ml index ef91d2ecb38..cb303062877 100644 --- a/ocaml/test/test_xapi_xenops.ml +++ b/ocaml/test/test_xapi_xenops.ml @@ -35,7 +35,7 @@ let test_nested_virt_licensing () = (* Nested_virt is restricted in the default test database *) (* List of plaform keys and whether they should be restricted when 'Nested_virt' is restricted. 
- true -> definitely should be restricted + true -> definitely should be restricted false -> definitely should be unrestricted *) @@ -85,8 +85,8 @@ let test_nested_virt_licensing () = let test = - "test_vm_helpers" >::: - [ - "test_nested_virt_licensing" >:: test_nested_virt_licensing; - "test_enabled_in_xenguest" >:: test_enabled_in_xenguest; - ] + "test_vm_helpers" >::: + [ + "test_nested_virt_licensing" >:: test_nested_virt_licensing; + "test_enabled_in_xenguest" >:: test_enabled_in_xenguest; + ] diff --git a/ocaml/test/test_xenopsd_metadata.ml b/ocaml/test/test_xenopsd_metadata.ml index 9dfcbc5e405..bfc20f53986 100644 --- a/ocaml/test/test_xenopsd_metadata.ml +++ b/ocaml/test/test_xenopsd_metadata.ml @@ -20,359 +20,359 @@ open Xenops_interface let test_vm_name = "__test_vm" type vm_config = { - oc: (string * string) list; - platform: (string * string) list; + oc: (string * string) list; + platform: (string * string) list; } let string_of_vm_config conf = - Printf.sprintf "other_config = %s, platform = %s" - (Test_printers.(assoc_list string string) conf.oc) - (Test_printers.(assoc_list string string) conf.platform) + Printf.sprintf "other_config = %s, platform = %s" + (Test_printers.(assoc_list string string) conf.oc) + (Test_printers.(assoc_list string string) conf.platform) let string_of_vgpu_type {Xapi_vgpu_type.vendor_name; model_name} = - Printf.sprintf "vendor_name = %s, model_name = %s" vendor_name model_name + Printf.sprintf "vendor_name = %s, model_name = %s" vendor_name model_name let load_vm_config __context conf = - let (self: API.ref_VM) = make_vm ~__context - ~name_label:test_vm_name - ~hVM_boot_policy:"BIOS order" - ~other_config:conf.oc - ~platform:conf.platform - () - in - let flags = [ - Xapi_globs.cpu_info_vendor_key, "AuthenticAMD"; - Xapi_globs.cpu_info_features_key, "deadbeef-deadbeef"; - ] in - Db.VM.set_last_boot_CPU_flags ~__context ~self ~value:flags; - self + let (self: API.ref_VM) = make_vm ~__context + ~name_label:test_vm_name + 
~hVM_boot_policy:"BIOS order" + ~other_config:conf.oc + ~platform:conf.platform + () + in + let flags = [ + Xapi_globs.cpu_info_vendor_key, "AuthenticAMD"; + Xapi_globs.cpu_info_features_key, "deadbeef-deadbeef"; + ] in + Db.VM.set_last_boot_CPU_flags ~__context ~self ~value:flags; + self let run_create_metadata ~__context = - let localhost_uuid = Helpers.get_localhost_uuid () in - let host = make_host ~__context ~uuid:localhost_uuid () in - let (_: API.ref_pool) = make_pool ~__context ~master:host () in - let vms = Db.VM.get_by_name_label ~__context ~label:test_vm_name in - let vm = List.nth vms 0 in - Xapi_xenops.create_metadata ~__context ~upgrade:false ~self:vm + let localhost_uuid = Helpers.get_localhost_uuid () in + let host = make_host ~__context ~uuid:localhost_uuid () in + let (_: API.ref_pool) = make_pool ~__context ~master:host () in + let vms = Db.VM.get_by_name_label ~__context ~label:test_vm_name in + let vm = List.nth vms 0 in + Xapi_xenops.create_metadata ~__context ~upgrade:false ~self:vm (* Test the behaviour of the "hvm_serial" other_config/platform key. *) module HVMSerial = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = vm_config - type output_t = string option - - let string_of_input_t = string_of_vm_config - let string_of_output_t = Test_printers.(option string) - end - - module State = Test_state.XapiDb - - let load_input __context conf = - let (_ : API.ref_VM) = load_vm_config __context conf in () - - let extract_output __context _ = - let metadata = run_create_metadata ~__context in - match metadata.Metadata.vm.Vm.ty with - | Vm.HVM {Vm.serial = serial} -> serial - | _ -> failwith "expected HVM metadata" - - let tests = - [ - (* Should default to "pty" if nothing is set. *) - ( - {oc=[]; platform=[]}, - Some "pty" - ); - (* other_config value should override default if no platform value. 
*) - ( - {oc=["hvm_serial", "none"]; platform=[]}, - Some "none" - ); - (* Should be able to disable serial emulation via the platform key. *) - ( - {oc=[]; platform=["hvm_serial", "none"]}, - Some "none" - ); - (* platform value should override other_config value. *) - ( - {oc=["hvm_serial", "none"]; platform=["hvm_serial", "pty"]}, - Some "pty" - ); - (* platform value should override other_config value. *) - ( - {oc=["hvm_serial", "pty"]; platform=["hvm_serial", "none"]}, - Some "none" - ); - (* Windows debugger redirects the serial port to tcp - this should be - * configurable via the other_config key. *) - ( - {oc=["hvm_serial", "tcp:1.2.3.4:7001"]; platform=[]}, - Some "tcp:1.2.3.4:7001" - ); - (* Windows debugger should be configurable via the platform key too. *) - ( - {oc=[]; platform=["hvm_serial", "tcp:1.2.3.4:7001"]}, - Some "tcp:1.2.3.4:7001" - ); - (* Windows debugger setting via the platform key should override anything - * set in other_config. *) - ( - {oc=["hvm_serial", "none"]; platform=["hvm_serial", "tcp:1.2.3.4:7001"]}, - Some "tcp:1.2.3.4:7001" - ); - ] -end)) + module Io = struct + type input_t = vm_config + type output_t = string option + + let string_of_input_t = string_of_vm_config + let string_of_output_t = Test_printers.(option string) + end + + module State = Test_state.XapiDb + + let load_input __context conf = + let (_ : API.ref_VM) = load_vm_config __context conf in () + + let extract_output __context _ = + let metadata = run_create_metadata ~__context in + match metadata.Metadata.vm.Vm.ty with + | Vm.HVM {Vm.serial = serial} -> serial + | _ -> failwith "expected HVM metadata" + + let tests = + [ + (* Should default to "pty" if nothing is set. *) + ( + {oc=[]; platform=[]}, + Some "pty" + ); + (* other_config value should override default if no platform value. *) + ( + {oc=["hvm_serial", "none"]; platform=[]}, + Some "none" + ); + (* Should be able to disable serial emulation via the platform key. 
*) + ( + {oc=[]; platform=["hvm_serial", "none"]}, + Some "none" + ); + (* platform value should override other_config value. *) + ( + {oc=["hvm_serial", "none"]; platform=["hvm_serial", "pty"]}, + Some "pty" + ); + (* platform value should override other_config value. *) + ( + {oc=["hvm_serial", "pty"]; platform=["hvm_serial", "none"]}, + Some "none" + ); + (* Windows debugger redirects the serial port to tcp - this should be + * configurable via the other_config key. *) + ( + {oc=["hvm_serial", "tcp:1.2.3.4:7001"]; platform=[]}, + Some "tcp:1.2.3.4:7001" + ); + (* Windows debugger should be configurable via the platform key too. *) + ( + {oc=[]; platform=["hvm_serial", "tcp:1.2.3.4:7001"]}, + Some "tcp:1.2.3.4:7001" + ); + (* Windows debugger setting via the platform key should override anything + * set in other_config. *) + ( + {oc=["hvm_serial", "none"]; platform=["hvm_serial", "tcp:1.2.3.4:7001"]}, + Some "tcp:1.2.3.4:7001" + ); + ] + end)) let vgpu_manual_setup = Xapi_globs.vgpu_manual_setup_key, "true" let vgpu_pci_id = Xapi_globs.vgpu_pci_key, "0000:0a:00.0" let vgpu_config = - Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k100.conf" + Xapi_globs.vgpu_config_key, "/usr/share/nvidia/vgx/grid_k100.conf" let vgpu_platform_data = [vgpu_manual_setup; vgpu_pci_id; vgpu_config] module VideoMode = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = vm_config - type output_t = Vm.video_card - - let string_of_input_t = string_of_vm_config - let string_of_output_t = function - | Vm.Cirrus -> "Cirrus" - | Vm.Standard_VGA -> "Standard_VGA" - | Vm.Vgpu -> "Vgpu" - | Vm.IGD_passthrough Vm.GVT_d -> "IGD_passthrough" - end - - module State = Test_state.XapiDb - - let load_input __context conf = - let (_ : API.ref_VM) = load_vm_config __context conf in () - - let extract_output __context _ = - let metadata = run_create_metadata ~__context in - match metadata.Metadata.vm.Vm.ty with - | Vm.HVM {Vm.video = video_mode} -> video_mode - | _ 
-> failwith "expected HVM metadata" - - let tests = [ - (* Default video mode should be Cirrus. *) - {oc=[]; platform=[]}, Vm.Cirrus; - (* Unrecognised video mode should default to Cirrus. *) - {oc=[]; platform=["vga", "foo"]}, Vm.Cirrus; - (* Video modes set in the platform map should be respected. *) - {oc=[]; platform=["vga", "cirrus"]}, Vm.Cirrus; - {oc=[]; platform=["vga", "std"]}, Vm.Standard_VGA; - (* The IGD passthrough key should be respected. *) - {oc=[]; platform=["igd_passthrough", "true"]}, Vm.(IGD_passthrough GVT_d); - (* The IGD passthrough should override the "vga" key. *) - { - oc=[]; - platform=["igd_passthrough", "true"; "vga", "cirrus"] - }, Vm.(IGD_passthrough GVT_d); - { - oc=[]; - platform=["igd_passthrough", "true"; "vga", "std"] - }, Vm.(IGD_passthrough GVT_d); - (* We should be able to enable vGPU via the manual setup mode. *) - {oc=[]; platform=vgpu_platform_data}, Vm.Vgpu; - (* vGPU mode should override whatever's set for the "vga" key. *) - {oc=[]; platform=["vga", "cirrus"] @ vgpu_platform_data}, Vm.Vgpu; - {oc=[]; platform=["vga", "std"] @ vgpu_platform_data}, Vm.Vgpu; - (* If somehow only one of the vGPU keys is set, this shouldn't - * trigger vGPU mode. This should only ever happen if a user is - * experimenting with vgpu_manual_setup and has got things wrong. 
*) - {oc=[]; platform=[vgpu_manual_setup; vgpu_pci_id]}, Vm.Cirrus; - { - oc=[]; - platform=["vga", "cirrus"; vgpu_manual_setup; vgpu_pci_id] - }, Vm.Cirrus; - { - oc=[]; - platform=["vga", "std"; vgpu_manual_setup; vgpu_pci_id] - }, Vm.Standard_VGA; - {oc=[]; platform=[vgpu_manual_setup; vgpu_config]}, Vm.Cirrus; - { - oc=[]; - platform=["vga", "cirrus"; vgpu_manual_setup; vgpu_config] - }, Vm.Cirrus; - { - oc=[]; - platform=["vga", "std"; vgpu_manual_setup; vgpu_config] - }, Vm.Standard_VGA; - ] -end)) + module Io = struct + type input_t = vm_config + type output_t = Vm.video_card + + let string_of_input_t = string_of_vm_config + let string_of_output_t = function + | Vm.Cirrus -> "Cirrus" + | Vm.Standard_VGA -> "Standard_VGA" + | Vm.Vgpu -> "Vgpu" + | Vm.IGD_passthrough Vm.GVT_d -> "IGD_passthrough" + end + + module State = Test_state.XapiDb + + let load_input __context conf = + let (_ : API.ref_VM) = load_vm_config __context conf in () + + let extract_output __context _ = + let metadata = run_create_metadata ~__context in + match metadata.Metadata.vm.Vm.ty with + | Vm.HVM {Vm.video = video_mode} -> video_mode + | _ -> failwith "expected HVM metadata" + + let tests = [ + (* Default video mode should be Cirrus. *) + {oc=[]; platform=[]}, Vm.Cirrus; + (* Unrecognised video mode should default to Cirrus. *) + {oc=[]; platform=["vga", "foo"]}, Vm.Cirrus; + (* Video modes set in the platform map should be respected. *) + {oc=[]; platform=["vga", "cirrus"]}, Vm.Cirrus; + {oc=[]; platform=["vga", "std"]}, Vm.Standard_VGA; + (* The IGD passthrough key should be respected. *) + {oc=[]; platform=["igd_passthrough", "true"]}, Vm.(IGD_passthrough GVT_d); + (* The IGD passthrough should override the "vga" key. *) + { + oc=[]; + platform=["igd_passthrough", "true"; "vga", "cirrus"] + }, Vm.(IGD_passthrough GVT_d); + { + oc=[]; + platform=["igd_passthrough", "true"; "vga", "std"] + }, Vm.(IGD_passthrough GVT_d); + (* We should be able to enable vGPU via the manual setup mode. 
*) + {oc=[]; platform=vgpu_platform_data}, Vm.Vgpu; + (* vGPU mode should override whatever's set for the "vga" key. *) + {oc=[]; platform=["vga", "cirrus"] @ vgpu_platform_data}, Vm.Vgpu; + {oc=[]; platform=["vga", "std"] @ vgpu_platform_data}, Vm.Vgpu; + (* If somehow only one of the vGPU keys is set, this shouldn't + * trigger vGPU mode. This should only ever happen if a user is + * experimenting with vgpu_manual_setup and has got things wrong. *) + {oc=[]; platform=[vgpu_manual_setup; vgpu_pci_id]}, Vm.Cirrus; + { + oc=[]; + platform=["vga", "cirrus"; vgpu_manual_setup; vgpu_pci_id] + }, Vm.Cirrus; + { + oc=[]; + platform=["vga", "std"; vgpu_manual_setup; vgpu_pci_id] + }, Vm.Standard_VGA; + {oc=[]; platform=[vgpu_manual_setup; vgpu_config]}, Vm.Cirrus; + { + oc=[]; + platform=["vga", "cirrus"; vgpu_manual_setup; vgpu_config] + }, Vm.Cirrus; + { + oc=[]; + platform=["vga", "std"; vgpu_manual_setup; vgpu_config] + }, Vm.Standard_VGA; + ] + end)) module VideoRam = Generic.Make(Generic.EncapsulateState(struct - module Io = struct - type input_t = vm_config - type output_t = int - - let string_of_input_t = string_of_vm_config - let string_of_output_t = Test_printers.int - end - - module State = Test_state.XapiDb - - let load_input __context conf = - let (_ : API.ref_VM) = load_vm_config __context conf in () - - let extract_output __context _ = - let metadata = run_create_metadata ~__context in - match metadata.Metadata.vm.Vm.ty with - | Vm.HVM {Vm.video_mib = video_mib} -> video_mib - | _ -> failwith "expected HVM metadata" - - let tests = [ - (* Video ram defaults to 4MiB. *) - {oc=[]; platform=[]}, 4; - (* Specifying a different amount of videoram works. *) - {oc=[]; platform=["videoram", "8"]}, 8; - (* Default videoram should be 16MiB for vGPU. *) - {oc = []; platform=vgpu_platform_data}, 16; - (* Insufficient videoram values should be overridden for vGPU. 
*) - {oc = []; platform=vgpu_platform_data @ ["videoram", "8"]}, 16; - (* videoram values larger than the default should be allowed for vGPU. *) - {oc = []; platform=vgpu_platform_data @ ["videoram", "32"]}, 32; - (* Other VGA options shouldn't affect the videoram setting. *) - {oc = []; platform=["vga", "cirrus"]}, 4; - {oc = []; platform=["vga", "cirrus"; "videoram", "8"]}, 8; - ] -end)) + module Io = struct + type input_t = vm_config + type output_t = int + + let string_of_input_t = string_of_vm_config + let string_of_output_t = Test_printers.int + end + + module State = Test_state.XapiDb + + let load_input __context conf = + let (_ : API.ref_VM) = load_vm_config __context conf in () + + let extract_output __context _ = + let metadata = run_create_metadata ~__context in + match metadata.Metadata.vm.Vm.ty with + | Vm.HVM {Vm.video_mib = video_mib} -> video_mib + | _ -> failwith "expected HVM metadata" + + let tests = [ + (* Video ram defaults to 4MiB. *) + {oc=[]; platform=[]}, 4; + (* Specifying a different amount of videoram works. *) + {oc=[]; platform=["videoram", "8"]}, 8; + (* Default videoram should be 16MiB for vGPU. *) + {oc = []; platform=vgpu_platform_data}, 16; + (* Insufficient videoram values should be overridden for vGPU. *) + {oc = []; platform=vgpu_platform_data @ ["videoram", "8"]}, 16; + (* videoram values larger than the default should be allowed for vGPU. *) + {oc = []; platform=vgpu_platform_data @ ["videoram", "32"]}, 32; + (* Other VGA options shouldn't affect the videoram setting. 
*) + {oc = []; platform=["vga", "cirrus"]}, 4; + {oc = []; platform=["vga", "cirrus"; "videoram", "8"]}, 8; + ] + end)) module GenerateVGPUMetadata = Generic.Make(Generic.EncapsulateState(struct - open Test_vgpu_common - - module Io = struct - type input_t = - vm_config * (Test_vgpu_common.pgpu_state * Xapi_vgpu_type.vgpu_type) list - type output_t = Xenops_interface.Vgpu.implementation list - - let string_of_input_t = - Test_printers.(pair - string_of_vm_config - (list (pair Test_vgpu_common.string_of_pgpu_state string_of_vgpu_type))) - let string_of_output_t = - Test_printers.list (fun vgpu -> - Xenops_interface.Vgpu.rpc_of_implementation vgpu |> Rpc.to_string) - end - - module State = Test_state.XapiDb - - let load_input __context (vm_config, pgpus_and_vgpu_types) = - let vm_ref = load_vm_config __context vm_config in - List.iteri - (fun index (pgpu, vgpu_type) -> - let pgpu_ref = make_pgpu ~__context - ~address:(Printf.sprintf "0000:%02d:00.0" index) pgpu - in - let (_ : API.ref_VGPU) = - make_vgpu ~__context - ~vm_ref ~scheduled_to_be_resident_on:pgpu_ref vgpu_type in - ()) - pgpus_and_vgpu_types - - let extract_output __context _ = - let metadata = run_create_metadata ~__context in - List.map - (fun vgpu -> vgpu.Xenops_interface.Vgpu.implementation) - metadata.Metadata.vgpus - - let tests = [ - (* No vGPUs. *) - ( - {oc = []; platform = []}, - [] - ), - []; - (* One passthrough GPU. *) - ( - {oc = []; platform = []}, - [default_k1, Xapi_vgpu_type.passthrough_gpu] - ), - []; - (* One NVIDIA vGPU. *) - ( - {oc = []; platform = []}, - [default_k1, k100] - ), - [ - Xenops_interface.Vgpu.(Nvidia { - physical_pci_address = Xenops_interface.Pci.({ - domain = 0; - bus = 0; - dev = 0; - fn = 0; - }); - config_file = "/usr/share/nvidia/vgx/grid_k100.conf"; - }) - ]; - (* One Intel vGPU. 
*) - ( - {oc = []; platform = []}, - [default_intel_041a, gvt_g_041a] - ), - [ - Xenops_interface.Vgpu.(GVT_g { - physical_pci_address = Xenops_interface.Pci.({ - domain = 0; - bus = 0; - dev = 0; - fn = 0; - }); - low_gm_sz = 128L; - high_gm_sz = 384L; - fence_sz = 4L; - monitor_config_file = Some "/etc/gvt-g-monitor.conf"; - }) - ]; - ] -end)) + open Test_vgpu_common + + module Io = struct + type input_t = + vm_config * (Test_vgpu_common.pgpu_state * Xapi_vgpu_type.vgpu_type) list + type output_t = Xenops_interface.Vgpu.implementation list + + let string_of_input_t = + Test_printers.(pair + string_of_vm_config + (list (pair Test_vgpu_common.string_of_pgpu_state string_of_vgpu_type))) + let string_of_output_t = + Test_printers.list (fun vgpu -> + Xenops_interface.Vgpu.rpc_of_implementation vgpu |> Rpc.to_string) + end + + module State = Test_state.XapiDb + + let load_input __context (vm_config, pgpus_and_vgpu_types) = + let vm_ref = load_vm_config __context vm_config in + List.iteri + (fun index (pgpu, vgpu_type) -> + let pgpu_ref = make_pgpu ~__context + ~address:(Printf.sprintf "0000:%02d:00.0" index) pgpu + in + let (_ : API.ref_VGPU) = + make_vgpu ~__context + ~vm_ref ~scheduled_to_be_resident_on:pgpu_ref vgpu_type in + ()) + pgpus_and_vgpu_types + + let extract_output __context _ = + let metadata = run_create_metadata ~__context in + List.map + (fun vgpu -> vgpu.Xenops_interface.Vgpu.implementation) + metadata.Metadata.vgpus + + let tests = [ + (* No vGPUs. *) + ( + {oc = []; platform = []}, + [] + ), + []; + (* One passthrough GPU. *) + ( + {oc = []; platform = []}, + [default_k1, Xapi_vgpu_type.passthrough_gpu] + ), + []; + (* One NVIDIA vGPU. *) + ( + {oc = []; platform = []}, + [default_k1, k100] + ), + [ + Xenops_interface.Vgpu.(Nvidia { + physical_pci_address = Xenops_interface.Pci.({ + domain = 0; + bus = 0; + dev = 0; + fn = 0; + }); + config_file = "/usr/share/nvidia/vgx/grid_k100.conf"; + }) + ]; + (* One Intel vGPU. 
*) + ( + {oc = []; platform = []}, + [default_intel_041a, gvt_g_041a] + ), + [ + Xenops_interface.Vgpu.(GVT_g { + physical_pci_address = Xenops_interface.Pci.({ + domain = 0; + bus = 0; + dev = 0; + fn = 0; + }); + low_gm_sz = 128L; + high_gm_sz = 384L; + fence_sz = 4L; + monitor_config_file = Some "/etc/gvt-g-monitor.conf"; + }) + ]; + ] + end)) module VgpuExtraArgs = Generic.Make(Generic.EncapsulateState(struct - open Test_vgpu_common - - module Io = struct - type input_t = vm_config - type output_t = string - - let string_of_input_t = string_of_vm_config - let string_of_output_t = Test_printers.string - end - - module State = Test_state.XapiDb - - let load_input __context conf = - let pgpu_ref = make_pgpu ~__context ~address:"0000:07:00.0" default_k1 in - let vm_ref = load_vm_config __context conf in - let (_ : API.ref_VGPU) = - make_vgpu ~__context ~vm_ref ~scheduled_to_be_resident_on:pgpu_ref k100 in - () - - let extract_output __context _ = - let metadata = run_create_metadata ~__context in - match metadata.Metadata.vgpus with - | [{Vgpu.implementation = Vgpu.Nvidia nvidia_vgpu}] -> - nvidia_vgpu.Vgpu.config_file - | _ -> assert_failure "Incorrect vGPU configuration found" - - let tests = [ - (* No vgpu_extra_args. *) - {oc = []; platform = []}, "/usr/share/nvidia/vgx/grid_k100.conf"; - (* One key-value pair in vgpu_extra_args. *) - {oc = []; platform = ["vgpu_extra_args", "foo=bar"]}, - "/usr/share/nvidia/vgx/grid_k100.conf,foo=bar"; - (* Two key-value pairs in vgpu_extra_args. 
*) - {oc = []; platform = ["vgpu_extra_args", "foo=bar,baz=123"]}, - "/usr/share/nvidia/vgx/grid_k100.conf,foo=bar,baz=123"; - ] -end)) + open Test_vgpu_common + + module Io = struct + type input_t = vm_config + type output_t = string + + let string_of_input_t = string_of_vm_config + let string_of_output_t = Test_printers.string + end + + module State = Test_state.XapiDb + + let load_input __context conf = + let pgpu_ref = make_pgpu ~__context ~address:"0000:07:00.0" default_k1 in + let vm_ref = load_vm_config __context conf in + let (_ : API.ref_VGPU) = + make_vgpu ~__context ~vm_ref ~scheduled_to_be_resident_on:pgpu_ref k100 in + () + + let extract_output __context _ = + let metadata = run_create_metadata ~__context in + match metadata.Metadata.vgpus with + | [{Vgpu.implementation = Vgpu.Nvidia nvidia_vgpu}] -> + nvidia_vgpu.Vgpu.config_file + | _ -> assert_failure "Incorrect vGPU configuration found" + + let tests = [ + (* No vgpu_extra_args. *) + {oc = []; platform = []}, "/usr/share/nvidia/vgx/grid_k100.conf"; + (* One key-value pair in vgpu_extra_args. *) + {oc = []; platform = ["vgpu_extra_args", "foo=bar"]}, + "/usr/share/nvidia/vgx/grid_k100.conf,foo=bar"; + (* Two key-value pairs in vgpu_extra_args. 
*) + {oc = []; platform = ["vgpu_extra_args", "foo=bar,baz=123"]}, + "/usr/share/nvidia/vgx/grid_k100.conf,foo=bar,baz=123"; + ] + end)) let test = - "test_xenopsd_metadata" >::: - [ - "test_hvm_serial" >::: HVMSerial.tests; - "test_videomode" >::: VideoMode.tests; - "test_videoram" >::: VideoRam.tests; - "test_generate_vgpu_metadata" >::: GenerateVGPUMetadata.tests; - "test_vgpu_extra_args" >::: VgpuExtraArgs.tests; - ] + "test_xenopsd_metadata" >::: + [ + "test_hvm_serial" >::: HVMSerial.tests; + "test_videomode" >::: VideoMode.tests; + "test_videoram" >::: VideoRam.tests; + "test_generate_vgpu_metadata" >::: GenerateVGPUMetadata.tests; + "test_vgpu_extra_args" >::: VgpuExtraArgs.tests; + ] diff --git a/ocaml/toplevel/testscript.ml b/ocaml/toplevel/testscript.ml index 2cbee6ba430..20c1b8b1e43 100644 --- a/ocaml/toplevel/testscript.ml +++ b/ocaml/toplevel/testscript.ml @@ -15,19 +15,19 @@ open Toplevelhelper let get_vm_records session_id = let allvms = Remote.VM.get_all session_id in - List.map (fun vm->(vm,Remote.VM.get_record session_id vm)) allvms + List.map (fun vm->(vm,Remote.VM.get_record session_id vm)) allvms let get_vm_by_name_or_id session_id name = let vms = get_vm_records session_id in let vms = List.filter (fun (_,x) -> (x.API.vM_name_label = name - || x.API.vM_uuid = name)) vms in + || x.API.vM_uuid = name)) vms in if List.length vms = 0 then raise (Failure ("VM "^name^" not found")); List.nth vms 0 type vmop = Start | Shutdown | Reboot | Resume | Suspend -let vmop_to_string = function +let vmop_to_string = function | Start -> "start" | Shutdown -> "shutdown" | Reboot -> "reboot" @@ -35,20 +35,20 @@ let vmop_to_string = function | Suspend -> "suspend" let change_vm_state session_id vm force st = - Printf.printf "Telling vm to %s\n" (vmop_to_string st); - (match st with - | Start -> Remote.VM.start session_id vm false - | Shutdown -> - if force - then Remote.VM.hard_shutdown session_id vm - else Remote.VM.clean_shutdown session_id vm - | Suspend -> 
Remote.VM.pause session_id vm - | Reboot -> - if force - then Remote.VM.hard_reboot session_id vm - else Remote.VM.clean_shutdown session_id vm - | Resume -> Remote.VM.unpause session_id vm); - Remote.VM.get_power_state session_id vm + Printf.printf "Telling vm to %s\n" (vmop_to_string st); + (match st with + | Start -> Remote.VM.start session_id vm false + | Shutdown -> + if force + then Remote.VM.hard_shutdown session_id vm + else Remote.VM.clean_shutdown session_id vm + | Suspend -> Remote.VM.pause session_id vm + | Reboot -> + if force + then Remote.VM.hard_reboot session_id vm + else Remote.VM.clean_shutdown session_id vm + | Resume -> Remote.VM.unpause session_id vm); + Remote.VM.get_power_state session_id vm let power_state_to_string state = match state with @@ -58,7 +58,7 @@ let power_state_to_string state = | `Suspended -> "Suspended" | `ShuttingDown -> "Shutting down" | `Migrating -> "Migrating" - + let change_vm_state2 session_id vm force state = ignore(change_vm_state session_id vm force state); let newstate = Remote.VM.get_power_state session_id vm in @@ -77,4 +77,4 @@ let _ = change_vm_state2 session_id vm true Shutdown - + diff --git a/ocaml/toplevel/toplevelhelper.ml b/ocaml/toplevel/toplevelhelper.ml index d92f58f4ab6..dea770efb61 100644 --- a/ocaml/toplevel/toplevelhelper.ml +++ b/ocaml/toplevel/toplevelhelper.ml @@ -16,10 +16,10 @@ let host = ref "" let port = ref 0 let rpc xml = - let open Xmlrpcclient in - let transport = SSL(SSL.make(), !host, !port) in - let http = xmlrpc ~version:"1.1" "/" in - XML_protocol.rpc ~transport ~http xml + let open Xmlrpcclient in + let transport = SSL(SSL.make(), !host, !port) in + let http = xmlrpc ~version:"1.1" "/" in + XML_protocol.rpc ~transport ~http xml open Client diff --git a/ocaml/toplevel/vm_start.ml b/ocaml/toplevel/vm_start.ml index 3e74bdefffd..17e45dbc807 100644 --- a/ocaml/toplevel/vm_start.ml +++ b/ocaml/toplevel/vm_start.ml @@ -14,8 +14,8 @@ open Toplevelhelper let _ = - host := "mindanao"; - 
port := 8086; - let s = init_session "root" "xenroot" in - let vm = List.nth (Remote.VM.get_by_name_label s Sys.argv.(1)) 0 in - Remote.VM.start s vm false + host := "mindanao"; + port := 8086; + let s = init_session "root" "xenroot" in + let vm = List.nth (Remote.VM.get_by_name_label s Sys.argv.(1)) 0 in + Remote.VM.start s vm false diff --git a/ocaml/util/file_access_test.ml b/ocaml/util/file_access_test.ml index 13f202058c3..1bc345bda8c 100644 --- a/ocaml/util/file_access_test.ml +++ b/ocaml/util/file_access_test.ml @@ -20,104 +20,104 @@ open Printf type file_evt = { ty : Inotify.type_event; - time : float; } + time : float; } type file_events = file_evt list type stat = { dir : string; - events : (string, file_events) Hashtbl.t } - + events : (string, file_events) Hashtbl.t } -let hashtbl = Hashtbl.create 128 + +let hashtbl = Hashtbl.create 128 let unlink_safe file = - try Unix.unlink file with (* Unix.Unix_error (Unix.ENOENT, _ , _)*) _ -> () + try Unix.unlink file with (* Unix.Unix_error (Unix.ENOENT, _ , _)*) _ -> () let safe_move src dst = unlink_safe dst; Unix.rename src dst - + let _ = - if Array.length Sys.argv < 2 then ( - eprintf "usage: %s \n" Sys.argv.(0); - exit 1 - ); - - let fd = Inotify.init () in - - let ic = open_in Sys.argv.(1) in - let rec read dirs = - try - let dir = input_line ic in - read (dir::dirs) - with - End_of_file -> dirs - in - - let dirs = read [] in - - List.iter (fun dirname -> - let wd = Inotify.add_watch fd dirname [ Inotify.S_Delete; Inotify.S_Create; Inotify.S_Close_write; Inotify.S_Attrib ] in - Hashtbl.add hashtbl wd {dir=dirname; events=Hashtbl.create 10}) dirs; - - let process_event time ev = - let wd,mask,cookie,fname = ev in - let mystat = Hashtbl.find hashtbl wd in - let newevt = {ty = List.hd mask; time=time } in - match fname with - Some fname -> - let fname = - try - let len = String.index fname '\000' in - String.sub fname 0 len - with - _ -> fname - in - if Hashtbl.mem mystat.events fname - then - let fileevents 
= Hashtbl.find mystat.events fname in - Hashtbl.replace mystat.events fname (newevt::fileevents) - else - Hashtbl.replace mystat.events fname [newevt] - | None -> () - in - - let dump_file_events oc fname events = - Printf.fprintf oc "Filename: %s (%d events)\n" fname (List.length events); - List.iter (fun evt -> - Printf.fprintf oc "%f: %s\n" evt.time (Inotify.string_of_event evt.ty)) events; - Printf.fprintf oc "\n" - in - let dump_data () = - let oc = open_out "/var/log/file_write.log.tmp" in - let dump_dir wd st = - if Hashtbl.length st.events > 0 then - begin - Printf.fprintf oc "directory: %s\n" st.dir; - Printf.fprintf oc "---------\n"; - Hashtbl.iter (dump_file_events oc) st.events - end - in - Hashtbl.iter dump_dir hashtbl; - close_out oc; - safe_move "/var/log/file_write.log.tmp" "/var/log/file_write.log" - - in - - let stdin = Unix.descr_of_in_channel stdin in - let nb = ref 0 in - while true - do - let ins, _, _ = Unix.select [ fd ] [] [] (-1.) in - let evs = Inotify.read fd in - let time = Unix.time () in - List.iter (process_event time) evs; - dump_data (); - incr nb - done; - - dump_data (); - - Unix.close fd - - - + if Array.length Sys.argv < 2 then ( + eprintf "usage: %s \n" Sys.argv.(0); + exit 1 + ); + + let fd = Inotify.init () in + + let ic = open_in Sys.argv.(1) in + let rec read dirs = + try + let dir = input_line ic in + read (dir::dirs) + with + End_of_file -> dirs + in + + let dirs = read [] in + + List.iter (fun dirname -> + let wd = Inotify.add_watch fd dirname [ Inotify.S_Delete; Inotify.S_Create; Inotify.S_Close_write; Inotify.S_Attrib ] in + Hashtbl.add hashtbl wd {dir=dirname; events=Hashtbl.create 10}) dirs; + + let process_event time ev = + let wd,mask,cookie,fname = ev in + let mystat = Hashtbl.find hashtbl wd in + let newevt = {ty = List.hd mask; time=time } in + match fname with + Some fname -> + let fname = + try + let len = String.index fname '\000' in + String.sub fname 0 len + with + _ -> fname + in + if Hashtbl.mem 
mystat.events fname + then + let fileevents = Hashtbl.find mystat.events fname in + Hashtbl.replace mystat.events fname (newevt::fileevents) + else + Hashtbl.replace mystat.events fname [newevt] + | None -> () + in + + let dump_file_events oc fname events = + Printf.fprintf oc "Filename: %s (%d events)\n" fname (List.length events); + List.iter (fun evt -> + Printf.fprintf oc "%f: %s\n" evt.time (Inotify.string_of_event evt.ty)) events; + Printf.fprintf oc "\n" + in + let dump_data () = + let oc = open_out "/var/log/file_write.log.tmp" in + let dump_dir wd st = + if Hashtbl.length st.events > 0 then + begin + Printf.fprintf oc "directory: %s\n" st.dir; + Printf.fprintf oc "---------\n"; + Hashtbl.iter (dump_file_events oc) st.events + end + in + Hashtbl.iter dump_dir hashtbl; + close_out oc; + safe_move "/var/log/file_write.log.tmp" "/var/log/file_write.log" + + in + + let stdin = Unix.descr_of_in_channel stdin in + let nb = ref 0 in + while true + do + let ins, _, _ = Unix.select [ fd ] [] [] (-1.) 
in + let evs = Inotify.read fd in + let time = Unix.time () in + List.iter (process_event time) evs; + dump_data (); + incr nb + done; + + dump_data (); + + Unix.close fd + + + diff --git a/ocaml/util/hashdbg.ml b/ocaml/util/hashdbg.ml index 377ccde53f2..020ec94d5ca 100644 --- a/ocaml/util/hashdbg.ml +++ b/ocaml/util/hashdbg.ml @@ -17,32 +17,32 @@ let hashdbg_raise = ref false module Hashtbl = struct - exception Hashtbl_too_fat of int + exception Hashtbl_too_fat of int - type ('a, 'b) t = { t: ('a, 'b) Hashtbl.t; mutable c: int; mutable m: int } + type ('a, 'b) t = { t: ('a, 'b) Hashtbl.t; mutable c: int; mutable m: int } - let create n = - { t = Hashtbl.create n; c = 0; m = 1000; } - - let add x k v = - if x.c >= x.m then ( - let len = Hashtbl.length x.t in - !hashdbg_log (Printf.sprintf "hashdbg--hashtbl went over %d" len); - if !hashdbg_raise then - raise (Hashtbl_too_fat len); - x.m <- x.m * 2 - ); - Hashtbl.add x.t k v; x.c <- x.c + 1 + let create n = + { t = Hashtbl.create n; c = 0; m = 1000; } - let replace x k v = - let z = if Hashtbl.mem x.t k then 0 else 1 in - Hashtbl.replace x.t k v; x.c <- x.c + z + let add x k v = + if x.c >= x.m then ( + let len = Hashtbl.length x.t in + !hashdbg_log (Printf.sprintf "hashdbg--hashtbl went over %d" len); + if !hashdbg_raise then + raise (Hashtbl_too_fat len); + x.m <- x.m * 2 + ); + Hashtbl.add x.t k v; x.c <- x.c + 1 - let remove x k = Hashtbl.remove x.t k; x.c <- x.c - 1 - let iter f x = Hashtbl.iter f x.t - let find x k = Hashtbl.find x.t k - let find_all x k = Hashtbl.find_all x.t k - let mem x k = Hashtbl.mem x.t k - let fold f x c = Hashtbl.fold f x.t c - let length x = Hashtbl.length x.t + let replace x k v = + let z = if Hashtbl.mem x.t k then 0 else 1 in + Hashtbl.replace x.t k v; x.c <- x.c + z + + let remove x k = Hashtbl.remove x.t k; x.c <- x.c - 1 + let iter f x = Hashtbl.iter f x.t + let find x k = Hashtbl.find x.t k + let find_all x k = Hashtbl.find_all x.t k + let mem x k = Hashtbl.mem x.t k + let 
fold f x c = Hashtbl.fold f x.t c + let length x = Hashtbl.length x.t end diff --git a/ocaml/util/ocamltest.ml b/ocaml/util/ocamltest.ml index 75234bcbe4b..8b776e53023 100644 --- a/ocaml/util/ocamltest.ml +++ b/ocaml/util/ocamltest.ml @@ -18,12 +18,12 @@ open Printf (* === Types === *) type test = - | Case of name * description * case - | Suite of name * description * suite - and name = string - and description = string - and case = unit -> unit - and suite = test list + | Case of name * description * case + | Suite of name * description * suite +and name = string +and description = string +and case = unit -> unit +and suite = test list exception Failure_expected exception Fail of string @@ -32,16 +32,16 @@ exception Skip of string (* === Equality assertions === *) let assert_equal ?to_string x y = - if not (x = y) then raise - (Fail - (match to_string with - | None -> - "found different values where equal values were expected." - | Some to_string -> sprintf - "found different values where equal values were expected: %s != %s." - (to_string x) (to_string y) - ) - ) + if not (x = y) then raise + (Fail + (match to_string with + | None -> + "found different values where equal values were expected." + | Some to_string -> sprintf + "found different values where equal values were expected: %s != %s." 
+ (to_string x) (to_string y) + ) + ) let assert_equal_bool = assert_equal ~to_string:string_of_bool let assert_equal_float = assert_equal ~to_string:string_of_float @@ -59,23 +59,23 @@ let assert_true x = assert x let assert_false x = assert (not x) let assert_raises_match exception_match fn = - try - fn (); - raise Failure_expected - with failure -> - if not (exception_match failure) - then raise failure - else () + try + fn (); + raise Failure_expected + with failure -> + if not (exception_match failure) + then raise failure + else () let assert_raises expected = - assert_raises_match (function exn -> exn = expected) + assert_raises_match (function exn -> exn = expected) let assert_raises_any f = - try - f (); - raise Failure_expected - with failure -> - () + try + f (); + raise Failure_expected + with failure -> + () let fail message = raise (Fail (message)) @@ -86,15 +86,15 @@ let skip message = raise (Skip (message)) type style = Reset | Bold | Reverse | Dim | Red | Green | Blue | Yellow | Black let int_of_style = function - | Reset -> 0 - | Bold -> 1 - | Dim -> 2 - | Reverse-> 7 - | Black -> 30 - | Red -> 31 - | Green -> 32 - | Yellow -> 33 - | Blue -> 34 + | Reset -> 0 + | Bold -> 1 + | Dim -> 2 + | Reverse-> 7 + | Black -> 30 + | Red -> 31 + | Green -> 32 + | Yellow -> 33 + | Blue -> 34 let string_of_style value = string_of_int (int_of_style value) @@ -104,45 +104,45 @@ let escape = String.make 1 (char_of_int 0x1b) let ugly = ref false let style values = - if !ugly then "" else - sprintf "%s[%sm" escape (String.concat ";" (List.map (string_of_style) values)) + if !ugly then "" else + sprintf "%s[%sm" escape (String.concat ";" (List.map (string_of_style) values)) (* === Indices === *) let index_of_test = - let rec build prefix = function - | Case (name, description, case) -> - [(prefix ^ name, Case (name, description, case))] - | Suite (name, description, tests) -> - (prefix ^ name, Suite (name, description, tests)) :: - (List.flatten (List.map (build 
(prefix ^ name ^ ".")) tests)) - in - build "" + let rec build prefix = function + | Case (name, description, case) -> + [(prefix ^ name, Case (name, description, case))] + | Suite (name, description, tests) -> + (prefix ^ name, Suite (name, description, tests)) :: + (List.flatten (List.map (build (prefix ^ name ^ ".")) tests)) + in + build "" let string_of_index_entry = function - | (key, Case (_, description, _)) - | (key, Suite (_, description, _)) - -> (style [Bold]) ^ key ^ (style [Reset]) ^ "\n " ^ description + | (key, Case (_, description, _)) + | (key, Suite (_, description, _)) + -> (style [Bold]) ^ key ^ (style [Reset]) ^ "\n " ^ description let string_of_index index = - "\n" ^ (String.concat "\n" (List.map string_of_index_entry index)) ^ "\n" + "\n" ^ (String.concat "\n" (List.map string_of_index_entry index)) ^ "\n" let max x y = if x > y then x else y let longest_key_of_index index = - List.fold_left - (fun longest_key (key, _) -> - max longest_key (String.length key)) - 0 index + List.fold_left + (fun longest_key (key, _) -> + max longest_key (String.length key)) + 0 index (* === Runners === *) type test_result = {passed: int; failed: int; skipped: int} let add_result - {passed = p1 ; failed = f1 ; skipped = s1 } - {passed = p2; failed = f2; skipped = s2} = - {passed = p1 + p2; failed = f1 + f2; skipped = s1 + s2} + {passed = p1 ; failed = f1 ; skipped = s1 } + {passed = p2; failed = f2; skipped = s2} = + {passed = p1 + p2; failed = f1 + f2; skipped = s1 + s2} let singleton_pass = {passed = 1; failed = 0; skipped = 0} let singleton_fail = {passed = 0; failed = 1; skipped = 0} @@ -153,120 +153,120 @@ let singleton_skip = {passed = 0; failed = 0; skipped = 1} let debugging = ref false let start_debugging () = - if not !debugging then - begin - debugging := true; - print_endline "\n" - end + if not !debugging then + begin + debugging := true; + print_endline "\n" + end (** Runs the given test. 
*) let run test = - let longest_key_width = longest_key_of_index (index_of_test test) in - - (** Runs the given test with the given name prefix. *) - let rec run (test : test) (name_prefix : string) : test_result = - match test with - | Case (name, description, fn) -> - run_case (name_prefix ^ name, description, fn) - | Suite (name, description, tests) -> - run_suite (name_prefix ^ name, description, tests) - - (** Runs the given test case. *) - and run_case (name, description, fn) = - - let pre_status_padding = - String.make (longest_key_width - (String.length name)) ' ' in - - let generate_status_string colour result = - sprintf "%s\t[%s%s%s]" pre_status_padding (style [colour; Bold]) result (style [Reset]) - in - - let describe_current_test () = - printf "%stesting %s%s" (style [Bold]) name (style [Reset]); - flush stdout - in - - let display_start_message () = - describe_current_test (); - debugging := false - in - - let display_finish_message colour result = - if !debugging - then - begin - print_endline ""; - describe_current_test (); - end; - print_endline (generate_status_string colour result) - in - - display_start_message (); - try - fn (); - display_finish_message Green "pass"; - singleton_pass - with - | Skip (message) -> - display_finish_message Blue "skip"; - printf "\nskipped: %s\n\n" message; - singleton_skip - | Fail (message) -> - display_finish_message Red "fail"; - printf "\nfailed: %s\n\n" message; - singleton_fail - | failure -> - display_finish_message Red "fail"; - printf "\nfailed: %s\n%s\n" - (Printexc.to_string failure) - (Printexc.get_backtrace ()); - singleton_fail - - (** Runs the given test suite. 
*) - and run_suite (name, description, tests) = - flush stdout; - let result = List.fold_left ( - fun accumulating_result test -> - add_result accumulating_result (run test (name ^ ".")) - ) {passed = 0; failed = 0; skipped = 0} tests in - result - in - - Printexc.record_backtrace true; - printf "\n"; - let {passed = passed; failed = failed; skipped = skipped} = run test "" in - printf "\n"; - printf " tested [%s%i%s]\n" (style [Bold]) (passed + failed + skipped) (style [Reset]); - printf " passed [%s%i%s]\n" (style [Bold]) (passed ) (style [Reset]); - printf " failed [%s%i%s]\n" (style [Bold]) ( failed ) (style [Reset]); - printf "skipped [%s%i%s]\n" (style [Bold]) ( skipped) (style [Reset]); - printf "\n"; - {passed = passed; failed = failed; skipped = skipped} + let longest_key_width = longest_key_of_index (index_of_test test) in + + (** Runs the given test with the given name prefix. *) + let rec run (test : test) (name_prefix : string) : test_result = + match test with + | Case (name, description, fn) -> + run_case (name_prefix ^ name, description, fn) + | Suite (name, description, tests) -> + run_suite (name_prefix ^ name, description, tests) + + (** Runs the given test case. 
*) + and run_case (name, description, fn) = + + let pre_status_padding = + String.make (longest_key_width - (String.length name)) ' ' in + + let generate_status_string colour result = + sprintf "%s\t[%s%s%s]" pre_status_padding (style [colour; Bold]) result (style [Reset]) + in + + let describe_current_test () = + printf "%stesting %s%s" (style [Bold]) name (style [Reset]); + flush stdout + in + + let display_start_message () = + describe_current_test (); + debugging := false + in + + let display_finish_message colour result = + if !debugging + then + begin + print_endline ""; + describe_current_test (); + end; + print_endline (generate_status_string colour result) + in + + display_start_message (); + try + fn (); + display_finish_message Green "pass"; + singleton_pass + with + | Skip (message) -> + display_finish_message Blue "skip"; + printf "\nskipped: %s\n\n" message; + singleton_skip + | Fail (message) -> + display_finish_message Red "fail"; + printf "\nfailed: %s\n\n" message; + singleton_fail + | failure -> + display_finish_message Red "fail"; + printf "\nfailed: %s\n%s\n" + (Printexc.to_string failure) + (Printexc.get_backtrace ()); + singleton_fail + + (** Runs the given test suite. 
*) + and run_suite (name, description, tests) = + flush stdout; + let result = List.fold_left ( + fun accumulating_result test -> + add_result accumulating_result (run test (name ^ ".")) + ) {passed = 0; failed = 0; skipped = 0} tests in + result + in + + Printexc.record_backtrace true; + printf "\n"; + let {passed = passed; failed = failed; skipped = skipped} = run test "" in + printf "\n"; + printf " tested [%s%i%s]\n" (style [Bold]) (passed + failed + skipped) (style [Reset]); + printf " passed [%s%i%s]\n" (style [Bold]) (passed ) (style [Reset]); + printf " failed [%s%i%s]\n" (style [Bold]) ( failed ) (style [Reset]); + printf "skipped [%s%i%s]\n" (style [Bold]) ( skipped) (style [Reset]); + printf "\n"; + {passed = passed; failed = failed; skipped = skipped} let print_endline string = - start_debugging (); - print_endline string; - flush stdout + start_debugging (); + print_endline string; + flush stdout let print_string string = - start_debugging (); - print_string string; - flush stdout + start_debugging (); + print_string string; + flush stdout (* === Factories === *) let make_test_case name description case = - Case (name, description, case) + Case (name, description, case) let make_function_test_case name case = - Case (name, sprintf "Tests the %s function." name, case) + Case (name, sprintf "Tests the %s function." name, case) let make_test_suite name description suite = - Suite (name, description, suite) + Suite (name, description, suite) let make_module_test_suite name suite = - Suite (name, sprintf "Tests the %s module." name, suite) + Suite (name, sprintf "Tests the %s module." name, suite) (* === Command line interface === *) @@ -276,17 +276,17 @@ let name = ref None (** Argument definitions. 
*) let arguments = -[ - "-list", - Arg.Set list, - "lists the tests available in this module"; - "-name", - Arg.String (fun name' -> name := Some name'), - "runs the test with the given name"; - "-ugly", - Arg.Set ugly, - "disables pretty-printing"; -] + [ + "-list", + Arg.Set list, + "lists the tests available in this module"; + "-name", + Arg.String (fun name' -> name := Some name'), + "runs the test with the given name"; + "-ugly", + Arg.Set ugly, + "disables pretty-printing"; + ] (** For now, ignore anonymous arguments. *) let process_anonymous_argument string = () @@ -295,22 +295,22 @@ let process_anonymous_argument string = () let usage = "" let make_command_line_interface test = - (* TODO: Use stderr in appropriate places when presented with failures. *) - Arg.parse arguments process_anonymous_argument usage; - let index = index_of_test test in - if !list - then - begin - print_endline (string_of_index index); - flush stdout - end - else - begin - let {passed = passed; failed = failed; skipped = skipped} = run - (match !name with - | Some name -> (List.assoc name index) - | None -> test) - in - flush stdout; - exit (if failed = 0 then 0 else 1) - end + (* TODO: Use stderr in appropriate places when presented with failures. *) + Arg.parse arguments process_anonymous_argument usage; + let index = index_of_test test in + if !list + then + begin + print_endline (string_of_index index); + flush stdout + end + else + begin + let {passed = passed; failed = failed; skipped = skipped} = run + (match !name with + | Some name -> (List.assoc name index) + | None -> test) + in + flush stdout; + exit (if failed = 0 then 0 else 1) + end diff --git a/ocaml/util/ocamltest.mli b/ocaml/util/ocamltest.mli index 0160389a5a6..f5588ff9cb3 100644 --- a/ocaml/util/ocamltest.mli +++ b/ocaml/util/ocamltest.mli @@ -14,12 +14,12 @@ (* A unit testing framework for OCaml. 
*) type test = - | Case of name * description * case - | Suite of name * description * suite - and name = string - and description = string - and case = unit -> unit - and suite = test list + | Case of name * description * case + | Suite of name * description * suite +and name = string +and description = string +and case = unit -> unit +and suite = test list (** Indicates that the current test should be skipped. *) exception Skip of string diff --git a/ocaml/util/rpc_retry.ml b/ocaml/util/rpc_retry.ml index 39bb38ab995..fa8cda94a46 100644 --- a/ocaml/util/rpc_retry.ml +++ b/ocaml/util/rpc_retry.ml @@ -18,43 +18,43 @@ open D open Xmlrpc_client module type RPC_META = - sig - val client_name : string - val server_name : string - val server_path : string - val should_retry : bool - end +sig + val client_name : string + val server_name : string + val server_path : string + val should_retry : bool +end module Make = functor (Meta : RPC_META) -> - struct - let transport = Unix Meta.server_path +struct + let transport = Unix Meta.server_path - let simple_rpc = - XMLRPC_protocol.rpc ~srcstr:Meta.client_name ~dststr:Meta.server_name - ~transport ~http:(xmlrpc ~version:"1.0" "/") + let simple_rpc = + XMLRPC_protocol.rpc ~srcstr:Meta.client_name ~dststr:Meta.server_name + ~transport ~http:(xmlrpc ~version:"1.0" "/") - let rpc call = - let rec aux ~retrying = - let response' = - try - let response = simple_rpc call in - if retrying then - debug "Successfully communicated with service at %s after retrying!" - Meta.server_path; - Some response - with Unix.Unix_error (code, _, _) as e -> - if code = Unix.ECONNREFUSED || code = Unix.ENOENT then begin - if not retrying then - error "Could not reach the service at %s. Retrying every second..." 
- Meta.server_path; - Thread.delay 1.; - None - end else - raise e - in - match response' with - | Some response -> response - | None -> aux ~retrying:true - in - if Meta.should_retry then aux ~retrying:false else simple_rpc call - end + let rpc call = + let rec aux ~retrying = + let response' = + try + let response = simple_rpc call in + if retrying then + debug "Successfully communicated with service at %s after retrying!" + Meta.server_path; + Some response + with Unix.Unix_error (code, _, _) as e -> + if code = Unix.ECONNREFUSED || code = Unix.ENOENT then begin + if not retrying then + error "Could not reach the service at %s. Retrying every second..." + Meta.server_path; + Thread.delay 1.; + None + end else + raise e + in + match response' with + | Some response -> response + | None -> aux ~retrying:true + in + if Meta.should_retry then aux ~retrying:false else simple_rpc call +end diff --git a/ocaml/util/rpc_retry.mli b/ocaml/util/rpc_retry.mli index 3770f161fc6..e903e42bafd 100644 --- a/ocaml/util/rpc_retry.mli +++ b/ocaml/util/rpc_retry.mli @@ -15,15 +15,15 @@ open Xmlrpc_client module type RPC_META = - sig - val client_name : string - val server_name : string - val server_path : string - val should_retry : bool - end +sig + val client_name : string + val server_name : string + val server_path : string + val should_retry : bool +end module Make : functor (Meta : RPC_META) -> - sig - val transport : Xmlrpc_client.transport - val rpc : Rpc.call -> Rpc.response - end +sig + val transport : Xmlrpc_client.transport + val rpc : Rpc.call -> Rpc.response +end diff --git a/ocaml/util/sanitycheck.ml b/ocaml/util/sanitycheck.ml index 3728d3bc61b..3ac1e50901b 100644 --- a/ocaml/util/sanitycheck.ml +++ b/ocaml/util/sanitycheck.ml @@ -17,14 +17,14 @@ open D let check_for_bad_link () = - (* Look for the exception catching bug caused by dodgy linking (thanks, ocamlfind) *) - try - Unix.access "/etc/xapi.d/doesntexist" [ Unix.F_OK ] - with - | Unix.Unix_error(_, _, _) -> 
debug "Binary appears to be correctly linked" - | e -> - let msg = "This binary is broken: check your link lines (see c/s 4200:694e7dabb159)" in - debug "%s" msg; - failwith msg + (* Look for the exception catching bug caused by dodgy linking (thanks, ocamlfind) *) + try + Unix.access "/etc/xapi.d/doesntexist" [ Unix.F_OK ] + with + | Unix.Unix_error(_, _, _) -> debug "Binary appears to be correctly linked" + | e -> + let msg = "This binary is broken: check your link lines (see c/s 4200:694e7dabb159)" in + debug "%s" msg; + failwith msg diff --git a/ocaml/util/stats.ml b/ocaml/util/stats.ml index 87ebab8df6b..324d59a8e88 100644 --- a/ocaml/util/stats.ml +++ b/ocaml/util/stats.ml @@ -14,27 +14,27 @@ (** Time activities, monitor the mean and standard deviation. Try to help understand how long key operations take under load. *) open Stdext - + module Normal_population = struct (** Stats on a normally-distributed population *) type t = { sigma_x: float; - sigma_xx: float; - n: int } - + sigma_xx: float; + n: int } + let empty = { sigma_x = 0.; sigma_xx = 0.; n = 0 } - - let sample (p: t) (x: float) : t = + + let sample (p: t) (x: float) : t = { sigma_x = p.sigma_x +. x; sigma_xx = p.sigma_xx +. x *. x; n = p.n + 1 } - + exception Unknown - + let mean (p: t) : float = p.sigma_x /. (float_of_int p.n) - let sd (p: t) : float = - if p.n = 0 + let sd (p: t) : float = + if p.n = 0 then raise Unknown - else + else let n = float_of_int p.n in sqrt (n *. p.sigma_xx -. p.sigma_x *. p.sigma_x) /. n end @@ -44,9 +44,9 @@ end number should never be proportional to the number of VMs, VIFs etc! Since these are used for timing data, which is better approximated by a - lognormal distribution than a normal one, we take care to apply the + lognormal distribution than a normal one, we take care to apply the lognormal transformations here. 
- *) +*) module D=Debug.Make(struct let name="stats" end) open D @@ -56,33 +56,33 @@ open Pervasiveext let timings : (string, Normal_population.t) Hashtbl.t = Hashtbl.create 10 let timings_m = Mutex.create () -let mean (p: Normal_population.t) = +let mean (p: Normal_population.t) = let sigma = Normal_population.sd p in let mu = Normal_population.mean p in exp (mu +. sigma *. sigma /. 2.) -let sd (p: Normal_population.t) = +let sd (p: Normal_population.t) = let sigma = Normal_population.sd p in let mu = Normal_population.mean p in let v = (exp(sigma *. sigma) -. 1.) *. (exp (2. *. mu +. sigma *. sigma)) in sqrt v -let string_of (p: Normal_population.t) = +let string_of (p: Normal_population.t) = Printf.sprintf "%f [sd = %f]" (mean p) (sd p) -let sample (name: string) (x: float) : unit = +let sample (name: string) (x: float) : unit = (* Use the lognormal distribution: *) let x' = log x in Mutex.execute timings_m (fun () -> - let p = - if Hashtbl.mem timings name - then Hashtbl.find timings name - else Normal_population.empty in + let p = + if Hashtbl.mem timings name + then Hashtbl.find timings name + else Normal_population.empty in let p' = Normal_population.sample p x' in Hashtbl.replace timings name p'; -(* debug "Population %s time = %f mean = %s" name x (string_of p'); *) - ) + (* debug "Population %s time = %f mean = %s" name x (string_of p'); *) + ) (* (* Check to see if the value is > 3 standard deviations from the mean *) if abs_float (x -. (mean p)) > (sd p *. 3.) @@ -90,18 +90,18 @@ let sample (name: string) (x: float) : unit = *) (** Helper function to time a specific thing *) -let time_this (name: string) f = +let time_this (name: string) f = let start_time = Unix.gettimeofday () in finally f (fun () -> try - let end_time = Unix.gettimeofday () in - sample name (end_time -. start_time) + let end_time = Unix.gettimeofday () in + sample name (end_time -. 
start_time) with e -> - warn "Ignoring exception %s while timing: %s" (Printexc.to_string e) name + warn "Ignoring exception %s while timing: %s" (Printexc.to_string e) name ) - -let summarise () = + +let summarise () = Mutex.execute timings_m (fun () -> Hashtbl.fold (fun k v acc -> (k, string_of v) :: acc) timings [] @@ -123,26 +123,26 @@ let dbstats_threads : (int, (string * dbcallty) list) Hashtbl.t = Hashtbl.create let log_stats = ref false -let log_db_call task_opt dbcall ty = +let log_db_call task_opt dbcall ty = if not !log_stats then () else Mutex.execute dbstats_m (fun () -> - let hashtbl = match ty with - | Read -> dbstats_read_dbcalls - | Write -> dbstats_write_dbcalls - | Create -> dbstats_create_dbcalls - | Drop -> dbstats_drop_dbcalls - in - Hashtbl.replace hashtbl dbcall (1 + (try Hashtbl.find hashtbl dbcall with _ -> 0)); - let threadid = Thread.id (Thread.self ()) in - Hashtbl.replace dbstats_threads threadid ((dbcall,ty)::(try Hashtbl.find dbstats_threads threadid with _ -> [])); - match task_opt with - | Some task -> - Hashtbl.replace dbstats_task task ((dbcall,ty)::(try Hashtbl.find dbstats_task task with _ -> [])) - | None -> () - - - ) - + let hashtbl = match ty with + | Read -> dbstats_read_dbcalls + | Write -> dbstats_write_dbcalls + | Create -> dbstats_create_dbcalls + | Drop -> dbstats_drop_dbcalls + in + Hashtbl.replace hashtbl dbcall (1 + (try Hashtbl.find hashtbl dbcall with _ -> 0)); + let threadid = Thread.id (Thread.self ()) in + Hashtbl.replace dbstats_threads threadid ((dbcall,ty)::(try Hashtbl.find dbstats_threads threadid with _ -> [])); + match task_opt with + | Some task -> + Hashtbl.replace dbstats_task task ((dbcall,ty)::(try Hashtbl.find dbstats_task task with _ -> [])) + | None -> () + + + ) + let summarise_db_calls () = let string_of_ty = function | Read -> "read" | Write -> "write" | Create -> "create" | Drop -> "drop" in let summarise_table hashtbl = @@ -153,16 +153,16 @@ let summarise_db_calls () = in Mutex.execute 
dbstats_m (fun () -> - (summarise_table dbstats_write_dbcalls, - summarise_table dbstats_read_dbcalls, - summarise_table dbstats_create_dbcalls, - summarise_table dbstats_drop_dbcalls, - Hashtbl.fold (fun k v acc -> (k,List.map (fun (dbcall,ty) -> (string_of_ty ty,dbcall)) (List.rev v))::acc) dbstats_task [], - List.sort (fun (a,_) (b,_) -> compare a b) (Hashtbl.fold (fun k v acc -> (k,List.map (fun (dbcall,ty) -> (string_of_ty ty,dbcall)) (List.rev v))::acc) dbstats_threads []))) + (summarise_table dbstats_write_dbcalls, + summarise_table dbstats_read_dbcalls, + summarise_table dbstats_create_dbcalls, + summarise_table dbstats_drop_dbcalls, + Hashtbl.fold (fun k v acc -> (k,List.map (fun (dbcall,ty) -> (string_of_ty ty,dbcall)) (List.rev v))::acc) dbstats_task [], + List.sort (fun (a,_) (b,_) -> compare a b) (Hashtbl.fold (fun k v acc -> (k,List.map (fun (dbcall,ty) -> (string_of_ty ty,dbcall)) (List.rev v))::acc) dbstats_threads []))) + + - - diff --git a/ocaml/util/table.ml b/ocaml/util/table.ml index cc5311d6c25..dda8b4f2301 100644 --- a/ocaml/util/table.ml +++ b/ocaml/util/table.ml @@ -17,12 +17,12 @@ let pad n s before = if String.length s>n then (if String.length s > 2 then - (String.sub s 0 (n-2))^".." + (String.sub s 0 (n-2))^".." 
else - String.sub s 0 n) + String.sub s 0 n) else let padding = String.make (n-(String.length s)) ' ' in - if before then padding^s else s^padding + if before then padding^s else s^padding let left n s = pad n s false let right n s = pad n s true @@ -37,8 +37,8 @@ let compute_col_widths rows = List.map (List.fold_left max 0) cols let print (rows: string list list) = match rows with - | [] -> () - | _ -> - let widths = compute_col_widths rows in - let sll = List.map (List.map2 right widths) rows in - List.iter (fun line -> print_endline (String.concat " | " line)) sll + | [] -> () + | _ -> + let widths = compute_col_widths rows in + let sll = List.map (List.map2 right widths) rows in + List.iter (fun line -> print_endline (String.concat " | " line)) sll diff --git a/ocaml/util/vm_memory_constraints.ml b/ocaml/util/vm_memory_constraints.ml index 91154d16bb7..490ff4ac798 100644 --- a/ocaml/util/vm_memory_constraints.ml +++ b/ocaml/util/vm_memory_constraints.ml @@ -15,69 +15,69 @@ (** Operations for transforming and validating memory constraints. *) module type T = sig - (** Represents a set of memory constraints for a guest. Constraints - * are in valid order if (and only if) they satisfy the following: - * static_min <= dynamic_min <= dynamic_max <= static_max - *) - type t = - { - static_min : Int64.t; - dynamic_min : Int64.t; - target : Int64.t; - dynamic_max : Int64.t; - static_max : Int64.t; - } - - (** Given a set of constraints [c], returns [true] if and only if - [c.dynamic_min] = [c.dynamic_max]. *) - val are_pinned : constraints:t -> bool - - (** Given a set of constraints [c], returns [true] if and only if - [c.dynamic_min] = [c.dynamic_max] = [c.static-max]. *) - val are_pinned_at_static_max : constraints:t -> bool - - (** Given a set of constraints [c], returns [true] if and only if - [c.static_min] ≤ [c.dynamic_min] ≤ [c.dynamic_max] ≤ [c.static_max]. 
*) - val are_valid : constraints:t -> bool - - (** Given a set of constraints [c], returns [true] if and only if - [c.static_min] ≤ [c.dynamic_min] = [c.dynamic_max] = [c.static-max]. *) - val are_valid_and_pinned_at_static_max : constraints:t -> bool - - (** Creates a set of memory constraints from the given tuple whose - * elements appear in order of increasing size. - *) - val create : (int64 * int64 * int64 * int64 * int64) -> t - - (** Transforms the given set of memory constraints into a valid set, if - * possible, or else returns None. Constraints returned by this function - * are guaranteed to be in valid order such that: - * - * static_min <= dynamic_min <= target <= dynamic_max <= static_max - * - * If the given constraints are valid, this function simply returns a copy - * of those constraints. - * - * If the given constraints are invalid, but can be made valid by adjusting - * [(dynamic_min, dynamic_max)] to be in the range defined by [static_min, - * static_max], or by adjusting [target] to be within the range defined by - * [(dynamic_min, dynamic_max)], this function returns such a modified set - * of constraints. - * - * If the given constraints are invalid and they cannot be made valid by - * modifying the dynamic constraints, this function function returns None. - *) - val transform : constraints:t -> t option - - (** Takes the given set of possibly-invalid memory constraints {i s}, and - * returns a new set of valid and unballooned constraints {i t} s.t.: - * {ol - * {- t.dynamic_max := s.static_max} - * {- t.target := s.static_max} - * {- t.dynamic_min := s.static_max} - * {- t.static_min := minimum (s.static_min, s.static_max)}} - *) - val reset_to_safe_defaults : constraints:t -> t + (** Represents a set of memory constraints for a guest. 
Constraints + * are in valid order if (and only if) they satisfy the following: + * static_min <= dynamic_min <= dynamic_max <= static_max + *) + type t = + { + static_min : Int64.t; + dynamic_min : Int64.t; + target : Int64.t; + dynamic_max : Int64.t; + static_max : Int64.t; + } + + (** Given a set of constraints [c], returns [true] if and only if + [c.dynamic_min] = [c.dynamic_max]. *) + val are_pinned : constraints:t -> bool + + (** Given a set of constraints [c], returns [true] if and only if + [c.dynamic_min] = [c.dynamic_max] = [c.static-max]. *) + val are_pinned_at_static_max : constraints:t -> bool + + (** Given a set of constraints [c], returns [true] if and only if + [c.static_min] ≤ [c.dynamic_min] ≤ [c.dynamic_max] ≤ [c.static_max]. *) + val are_valid : constraints:t -> bool + + (** Given a set of constraints [c], returns [true] if and only if + [c.static_min] ≤ [c.dynamic_min] = [c.dynamic_max] = [c.static-max]. *) + val are_valid_and_pinned_at_static_max : constraints:t -> bool + + (** Creates a set of memory constraints from the given tuple whose + * elements appear in order of increasing size. + *) + val create : (int64 * int64 * int64 * int64 * int64) -> t + + (** Transforms the given set of memory constraints into a valid set, if + * possible, or else returns None. Constraints returned by this function + * are guaranteed to be in valid order such that: + * + * static_min <= dynamic_min <= target <= dynamic_max <= static_max + * + * If the given constraints are valid, this function simply returns a copy + * of those constraints. + * + * If the given constraints are invalid, but can be made valid by adjusting + * [(dynamic_min, dynamic_max)] to be in the range defined by [static_min, + * static_max], or by adjusting [target] to be within the range defined by + * [(dynamic_min, dynamic_max)], this function returns such a modified set + * of constraints. 
+ * + * If the given constraints are invalid and they cannot be made valid by + * modifying the dynamic constraints, this function function returns None. + *) + val transform : constraints:t -> t option + + (** Takes the given set of possibly-invalid memory constraints {i s}, and + * returns a new set of valid and unballooned constraints {i t} s.t.: + * {ol + * {- t.dynamic_max := s.static_max} + * {- t.target := s.static_max} + * {- t.dynamic_min := s.static_max} + * {- t.static_min := minimum (s.static_min, s.static_max)}} + *) + val reset_to_safe_defaults : constraints:t -> t end @@ -88,68 +88,68 @@ let ( // ) = Int64.div module Vm_memory_constraints : T = struct - type t = - { - static_min : Int64.t; - dynamic_min : Int64.t; - target : Int64.t; - dynamic_max : Int64.t; - static_max : Int64.t; - } - - let create (static_min, dynamic_min, target, dynamic_max, static_max) = - { - static_min = static_min; - dynamic_min = dynamic_min; - target = target; - dynamic_max = dynamic_max; - static_max = static_max; - } - - let transform ~constraints:c = - (* Constrains a value between two limits. *) - let constrain value minimum maximum = - if value < minimum then minimum else - if value > maximum then maximum else value in - (* Fail if either maximum is less than its corresponding minimum. *) - if c.static_max < c.static_min then None else - if c.dynamic_max < c.dynamic_min then None else - (* Ensure dynamic constraints are within static constraints. *) - let dynamic_min = constrain c.dynamic_min c.static_min c.static_max in - let dynamic_max = constrain c.dynamic_max c.static_min c.static_max in - (* Ensure target is within dynamic constraints. 
*) - let target = constrain c.target dynamic_min dynamic_max in - Some {c with - dynamic_min = dynamic_min; - target = target; - dynamic_max = dynamic_max; - } - - let are_pinned ~constraints = - constraints.dynamic_min = constraints.dynamic_max - - let are_pinned_at_static_max ~constraints = true - && constraints.dynamic_max = constraints.static_max - && are_pinned constraints - - let are_valid ~constraints = true - && constraints.static_min <= constraints.dynamic_min - && constraints.dynamic_min <= constraints.dynamic_max - && constraints.dynamic_max <= constraints.static_max - - let are_valid_and_pinned_at_static_max ~constraints = true - && constraints.static_min <= constraints.dynamic_min - && are_pinned_at_static_max constraints - - let reset_to_safe_defaults ~constraints = - let max = constraints.static_max in - let min = constraints.static_min in - { - static_max = max; - dynamic_max = max; - target = max; - dynamic_min = max; - static_min = if min < max then min else max - } + type t = + { + static_min : Int64.t; + dynamic_min : Int64.t; + target : Int64.t; + dynamic_max : Int64.t; + static_max : Int64.t; + } + + let create (static_min, dynamic_min, target, dynamic_max, static_max) = + { + static_min = static_min; + dynamic_min = dynamic_min; + target = target; + dynamic_max = dynamic_max; + static_max = static_max; + } + + let transform ~constraints:c = + (* Constrains a value between two limits. *) + let constrain value minimum maximum = + if value < minimum then minimum else + if value > maximum then maximum else value in + (* Fail if either maximum is less than its corresponding minimum. *) + if c.static_max < c.static_min then None else + if c.dynamic_max < c.dynamic_min then None else + (* Ensure dynamic constraints are within static constraints. *) + let dynamic_min = constrain c.dynamic_min c.static_min c.static_max in + let dynamic_max = constrain c.dynamic_max c.static_min c.static_max in + (* Ensure target is within dynamic constraints. 
*) + let target = constrain c.target dynamic_min dynamic_max in + Some {c with + dynamic_min = dynamic_min; + target = target; + dynamic_max = dynamic_max; + } + + let are_pinned ~constraints = + constraints.dynamic_min = constraints.dynamic_max + + let are_pinned_at_static_max ~constraints = true + && constraints.dynamic_max = constraints.static_max + && are_pinned constraints + + let are_valid ~constraints = true + && constraints.static_min <= constraints.dynamic_min + && constraints.dynamic_min <= constraints.dynamic_max + && constraints.dynamic_max <= constraints.static_max + + let are_valid_and_pinned_at_static_max ~constraints = true + && constraints.static_min <= constraints.dynamic_min + && are_pinned_at_static_max constraints + + let reset_to_safe_defaults ~constraints = + let max = constraints.static_max in + let min = constraints.static_min in + { + static_max = max; + dynamic_max = max; + target = max; + dynamic_min = max; + static_min = if min < max then min else max + } end diff --git a/ocaml/vncproxy/vncproxy.ml b/ocaml/vncproxy/vncproxy.ml index 19561605dea..9d26d1969bb 100644 --- a/ocaml/vncproxy/vncproxy.ml +++ b/ocaml/vncproxy/vncproxy.ml @@ -56,18 +56,18 @@ let _ = Unix.chdir "/"; ignore (Unix.umask 0); - Stdext.Unixext.close_all_fds_except [ sock ]; + Stdext.Unixext.close_all_fds_except [ sock ]; let s, _ = Unix.accept sock in - let rpc xml = - let open Xmlrpc_client in - let http = xmlrpc ~version:"1.0" "/" in - match !server with - | "" -> XMLRPC_protocol.rpc ~srcstr:"vncproxy" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http xml - | host -> XMLRPC_protocol.rpc ~srcstr:"vncproxy" ~dststr:"xapi" ~transport:(SSL(SSL.make ~use_fork_exec_helper:false (), host, 443)) ~http xml in + let rpc xml = + let open Xmlrpc_client in + let http = xmlrpc ~version:"1.0" "/" in + match !server with + | "" -> XMLRPC_protocol.rpc ~srcstr:"vncproxy" ~dststr:"xapi" ~transport:(Unix (Filename.concat "/var/lib/xcp" "xapi")) ~http xml + | 
host -> XMLRPC_protocol.rpc ~srcstr:"vncproxy" ~dststr:"xapi" ~transport:(SSL(SSL.make ~use_fork_exec_helper:false (), host, 443)) ~http xml in - let find_vm rpc session_id vm = + let find_vm rpc session_id vm = try Client.VM.get_by_uuid rpc session_id vm with _ -> @@ -80,17 +80,17 @@ let _ = let resident_on = Client.VM.get_resident_on rpc session_id vm in let address = Client.Host.get_address rpc session_id resident_on in - let open Xmlrpc_client in - let http = connect - ~session_id:(Ref.string_of session_id) - (Printf.sprintf "%s?ref=%s" Constants.console_uri (Ref.string_of vm)) in - let transport = SSL(SSL.make ~use_fork_exec_helper:false (), address, 443) in - with_transport transport - (with_http http - (fun (response, fd) -> - (* NB this will double-close [fd] *) - Stdext.Unixext.proxy s fd - ) - ) + let open Xmlrpc_client in + let http = connect + ~session_id:(Ref.string_of session_id) + (Printf.sprintf "%s?ref=%s" Constants.console_uri (Ref.string_of vm)) in + let transport = SSL(SSL.make ~use_fork_exec_helper:false (), address, 443) in + with_transport transport + (with_http http + (fun (response, fd) -> + (* NB this will double-close [fd] *) + Stdext.Unixext.proxy s fd + ) + ) ) (fun () -> Client.Session.logout rpc session_id) diff --git a/ocaml/xapi/agility.ml b/ocaml/xapi/agility.ml index a646b7d8527..c5aa2ccb37a 100644 --- a/ocaml/xapi/agility.ml +++ b/ocaml/xapi/agility.ml @@ -20,35 +20,35 @@ open Stdext.Listext (* Only returns true if the SR is marked as shared, all hosts have PBDs and all PBDs are currently_attached. 
Is used to prevent a non-shared disk being added to a protected VM *) let is_sr_properly_shared ~__context ~self = - let shared = Db.SR.get_shared ~__context ~self in - if not shared then begin - false - end else begin - let pbds = Db.SR.get_PBDs ~__context ~self in - let plugged_pbds = List.filter (fun pbd -> Db.PBD.get_currently_attached ~__context ~self:pbd) pbds in - let plugged_hosts = List.setify (List.map (fun pbd -> Db.PBD.get_host ~__context ~self:pbd) plugged_pbds) in - let all_hosts = Db.Host.get_all ~__context in - let enabled_hosts = List.filter (fun host -> Db.Host.get_enabled ~__context ~self:host) all_hosts in - if not(List.subset enabled_hosts plugged_hosts) then begin - warn "SR %s not shared properly: Not all enabled hosts have a currently_attached PBD" (Ref.string_of self); - false - end else true - end + let shared = Db.SR.get_shared ~__context ~self in + if not shared then begin + false + end else begin + let pbds = Db.SR.get_PBDs ~__context ~self in + let plugged_pbds = List.filter (fun pbd -> Db.PBD.get_currently_attached ~__context ~self:pbd) pbds in + let plugged_hosts = List.setify (List.map (fun pbd -> Db.PBD.get_host ~__context ~self:pbd) plugged_pbds) in + let all_hosts = Db.Host.get_all ~__context in + let enabled_hosts = List.filter (fun host -> Db.Host.get_enabled ~__context ~self:host) all_hosts in + if not(List.subset enabled_hosts plugged_hosts) then begin + warn "SR %s not shared properly: Not all enabled hosts have a currently_attached PBD" (Ref.string_of self); + false + end else true + end (* Only returns true if the network is shared properly: all (enabled) hosts in the pool must have a PIF on * the network, and none of these PIFs may be bond slaves. This ensures that a VM with a VIF on this * network can run on (and be migrated to) any (enabled) host in the pool. 
*) let is_network_properly_shared ~__context ~self = - let pifs = Db.Network.get_PIFs ~__context ~self in - let non_slave_pifs = List.filter (fun pif -> - not (Db.is_valid_ref __context (Db.PIF.get_bond_slave_of ~__context ~self:pif))) pifs in - let hosts_with_pif = List.setify (List.map (fun pif -> Db.PIF.get_host ~__context ~self:pif) non_slave_pifs) in - let all_hosts = Db.Host.get_all ~__context in - let enabled_hosts = List.filter (fun host -> Db.Host.get_enabled ~__context ~self:host) all_hosts in - let properly_shared = List.subset enabled_hosts hosts_with_pif in - if not properly_shared then - warn "Network %s not shared properly: Not all hosts have PIFs" (Ref.string_of self); - properly_shared + let pifs = Db.Network.get_PIFs ~__context ~self in + let non_slave_pifs = List.filter (fun pif -> + not (Db.is_valid_ref __context (Db.PIF.get_bond_slave_of ~__context ~self:pif))) pifs in + let hosts_with_pif = List.setify (List.map (fun pif -> Db.PIF.get_host ~__context ~self:pif) non_slave_pifs) in + let all_hosts = Db.Host.get_all ~__context in + let enabled_hosts = List.filter (fun host -> Db.Host.get_enabled ~__context ~self:host) all_hosts in + let properly_shared = List.subset enabled_hosts hosts_with_pif in + if not properly_shared then + warn "Network %s not shared properly: Not all hosts have PIFs" (Ref.string_of self); + properly_shared module SRSet = Set.Make(struct type t = API.ref_SR let compare = compare end) module NetworkSet = Set.Make(struct type t = API.ref_network let compare = compare end) @@ -56,47 +56,47 @@ module NetworkSet = Set.Make(struct type t = API.ref_network let compare = compa let empty_cache = (SRSet.empty, NetworkSet.empty) let caching_vm_t_assert_agile ~__context (ok_srs, ok_networks) vm vm_t = - (* Any kind of vGPU means that the VM is not agile. 
*) - if vm_t.API.vM_VGPUs <> [] then - raise (Api_errors.Server_error - (Api_errors.vm_has_vgpu, [Ref.string_of vm])); - (* All referenced VDIs should be in shared SRs *) - let check_vbd ok_srs vbd = - if Db.VBD.get_empty ~__context ~self:vbd - then ok_srs - else - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if SRSet.mem sr ok_srs - then ok_srs - else - if not (is_sr_properly_shared ~__context ~self:sr) - then raise (Api_errors.Server_error(Api_errors.ha_constraint_violation_sr_not_shared, [Ref.string_of sr])) - else SRSet.add sr ok_srs in - (* All referenced VIFs should be on shared networks *) - let check_vif ok_networks vif = - let network = Db.VIF.get_network ~__context ~self:vif in - if NetworkSet.mem network ok_networks - then ok_networks - else - if not (is_network_properly_shared ~__context ~self:network) - then raise (Api_errors.Server_error(Api_errors.ha_constraint_violation_network_not_shared, [Ref.string_of network])) - else NetworkSet.add network ok_networks in - let ok_srs = List.fold_left check_vbd ok_srs vm_t.API.vM_VBDs in - let ok_networks = List.fold_left check_vif ok_networks vm_t.API.vM_VIFs in - (ok_srs, ok_networks) + (* Any kind of vGPU means that the VM is not agile. 
*) + if vm_t.API.vM_VGPUs <> [] then + raise (Api_errors.Server_error + (Api_errors.vm_has_vgpu, [Ref.string_of vm])); + (* All referenced VDIs should be in shared SRs *) + let check_vbd ok_srs vbd = + if Db.VBD.get_empty ~__context ~self:vbd + then ok_srs + else + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if SRSet.mem sr ok_srs + then ok_srs + else + if not (is_sr_properly_shared ~__context ~self:sr) + then raise (Api_errors.Server_error(Api_errors.ha_constraint_violation_sr_not_shared, [Ref.string_of sr])) + else SRSet.add sr ok_srs in + (* All referenced VIFs should be on shared networks *) + let check_vif ok_networks vif = + let network = Db.VIF.get_network ~__context ~self:vif in + if NetworkSet.mem network ok_networks + then ok_networks + else + if not (is_network_properly_shared ~__context ~self:network) + then raise (Api_errors.Server_error(Api_errors.ha_constraint_violation_network_not_shared, [Ref.string_of network])) + else NetworkSet.add network ok_networks in + let ok_srs = List.fold_left check_vbd ok_srs vm_t.API.vM_VBDs in + let ok_networks = List.fold_left check_vif ok_networks vm_t.API.vM_VIFs in + (ok_srs, ok_networks) let vm_assert_agile ~__context ~self = - let vm_t = Db.VM.get_record ~__context ~self in - let _ = caching_vm_t_assert_agile ~__context empty_cache self vm_t in - () + let vm_t = Db.VM.get_record ~__context ~self in + let _ = caching_vm_t_assert_agile ~__context empty_cache self vm_t in + () let partition_vm_ps_by_agile ~__context vm_ps = - let distinguish_vm (agile_vm_ps, not_agile_vm_ps, cache) ((vm, vm_t) as vm_p) = - try - let cache = caching_vm_t_assert_agile ~__context cache vm vm_t in - (vm_p :: agile_vm_ps, not_agile_vm_ps, cache) - with _ -> - (agile_vm_ps, vm_p :: not_agile_vm_ps, cache) in - let agile_vm_ps, not_agile_vm_ps, _ = List.fold_left distinguish_vm ([], [], empty_cache) vm_ps in - (List.rev agile_vm_ps, List.rev not_agile_vm_ps) + let distinguish_vm 
(agile_vm_ps, not_agile_vm_ps, cache) ((vm, vm_t) as vm_p) = + try + let cache = caching_vm_t_assert_agile ~__context cache vm vm_t in + (vm_p :: agile_vm_ps, not_agile_vm_ps, cache) + with _ -> + (agile_vm_ps, vm_p :: not_agile_vm_ps, cache) in + let agile_vm_ps, not_agile_vm_ps, _ = List.fold_left distinguish_vm ([], [], empty_cache) vm_ps in + (List.rev agile_vm_ps, List.rev not_agile_vm_ps) diff --git a/ocaml/xapi/api_server.ml b/ocaml/xapi/api_server.ml index 190f68a2729..705730c4119 100644 --- a/ocaml/xapi/api_server.ml +++ b/ocaml/xapi/api_server.ml @@ -13,76 +13,76 @@ *) (** The main callback function. * @group API Messaging - *) +*) (** Actions module *) module Actions = struct - (** The DebugVersion throws a NotImplemented exception for everything - by default. The ReleaseVersion is missing all the fields; - so server will not compile unless everything is overridden *) - - module Task = Xapi_task - module Session = Xapi_session - module Auth = Xapi_auth - module Subject = Xapi_subject - module Role = Xapi_role - module Event = Xapi_event - module Alert = Xapi_alert - module VM = struct - include Xapi_vm - include Xapi_vm_migrate - end - module VM_metrics = struct end - module VM_guest_metrics = struct end - module VMPP = Xapi_vmpp - module VM_appliance = Xapi_vm_appliance - module DR_task = Xapi_dr_task - module LVHD = struct end - module Host = Xapi_host - module Host_crashdump = Xapi_host_crashdump - module Pool = Xapi_pool - module Pool_patch = Xapi_pool_patch - module Host_patch = Xapi_host_patch - module Host_metrics = struct end - module Host_cpu = Xapi_host_cpu - module Network = Xapi_network - module VIF = Xapi_vif - module VIF_metrics = struct end - module PIF = Xapi_pif - module PIF_metrics = struct end - module SR = Xapi_sr - module SM = Xapi_sm - module VDI = struct - include Xapi_vdi - let pool_migrate = Xapi_vm_migrate.vdi_pool_migrate - end - module VBD = Xapi_vbd - module VBD_metrics = struct end - module Crashdump = Xapi_crashdump - 
module PBD = Xapi_pbd - module Data_source = struct end - let not_implemented x = raise (Api_errors.Server_error (Api_errors.not_implemented, [ x ])) - - module VTPM = struct - let create ~__context ~vM ~backend = not_implemented "VTPM.create" - let destroy ~__context ~self = not_implemented "VTPM.destroy" - end - module Console = struct - let create ~__context ~other_config = not_implemented "Console.create" - let destroy ~__context ~self = not_implemented "Console.destroy" - end - module Bond = Xapi_bond - module VLAN = Xapi_vlan - module User = Xapi_user - module Blob = Xapi_blob - module Message = Xapi_message - module Secret = Xapi_secret - module Tunnel = Xapi_tunnel - module PCI = Xapi_pci - module PGPU = Xapi_pgpu - module GPU_group = Xapi_gpu_group - module VGPU = Xapi_vgpu - module VGPU_type = Xapi_vgpu_type + (** The DebugVersion throws a NotImplemented exception for everything + by default. The ReleaseVersion is missing all the fields; + so server will not compile unless everything is overridden *) + + module Task = Xapi_task + module Session = Xapi_session + module Auth = Xapi_auth + module Subject = Xapi_subject + module Role = Xapi_role + module Event = Xapi_event + module Alert = Xapi_alert + module VM = struct + include Xapi_vm + include Xapi_vm_migrate + end + module VM_metrics = struct end + module VM_guest_metrics = struct end + module VMPP = Xapi_vmpp + module VM_appliance = Xapi_vm_appliance + module DR_task = Xapi_dr_task + module LVHD = struct end + module Host = Xapi_host + module Host_crashdump = Xapi_host_crashdump + module Pool = Xapi_pool + module Pool_patch = Xapi_pool_patch + module Host_patch = Xapi_host_patch + module Host_metrics = struct end + module Host_cpu = Xapi_host_cpu + module Network = Xapi_network + module VIF = Xapi_vif + module VIF_metrics = struct end + module PIF = Xapi_pif + module PIF_metrics = struct end + module SR = Xapi_sr + module SM = Xapi_sm + module VDI = struct + include Xapi_vdi + let pool_migrate = 
Xapi_vm_migrate.vdi_pool_migrate + end + module VBD = Xapi_vbd + module VBD_metrics = struct end + module Crashdump = Xapi_crashdump + module PBD = Xapi_pbd + module Data_source = struct end + let not_implemented x = raise (Api_errors.Server_error (Api_errors.not_implemented, [ x ])) + + module VTPM = struct + let create ~__context ~vM ~backend = not_implemented "VTPM.create" + let destroy ~__context ~self = not_implemented "VTPM.destroy" + end + module Console = struct + let create ~__context ~other_config = not_implemented "Console.create" + let destroy ~__context ~self = not_implemented "Console.destroy" + end + module Bond = Xapi_bond + module VLAN = Xapi_vlan + module User = Xapi_user + module Blob = Xapi_blob + module Message = Xapi_message + module Secret = Xapi_secret + module Tunnel = Xapi_tunnel + module PCI = Xapi_pci + module PGPU = Xapi_pgpu + module GPU_group = Xapi_gpu_group + module VGPU = Xapi_vgpu + module VGPU_type = Xapi_vgpu_type end (** Use the server functor to make an XML-RPC dispatcher. *) @@ -105,16 +105,16 @@ let forward req body rpc = (* Whitelist of functions that do *not* get forwarded to the master (e.g. session.login_with_password) *) (* !!! Note, this only blocks synchronous calls. As is it happens, all the calls we want to block right now are only synchronous. However, we'd probably want to change this is the list starts getting longer. 
*) -let whitelist = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.whitelist +let whitelist = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.whitelist let emergency_call_list = List.map (fun (obj,msg) -> Datamodel_utils.wire_name ~sync:true obj msg) Datamodel.emergency_calls let is_himn_req req = - match req.Http.Request.host with - | Some h -> - (match !Xapi_mgmt_iface.himn_addr with - | Some himn -> himn = h - | None -> false) - | None -> false + match req.Http.Request.host with + | Some h -> + (match !Xapi_mgmt_iface.himn_addr with + | Some himn -> himn = h + | None -> false) + | None -> false (* This bit is called directly by the fake_rpc callback *) let callback1 is_json req fd body call = @@ -129,22 +129,22 @@ let callback1 is_json req fd body call = if !Xapi_globs.slave_emergency_mode && (not emergency_call) then raise !Xapi_globs.emergency_mode_error; - if is_slave && - ((Context.is_unix_socket fd && not whitelisted) || - (is_himn_req req && not emergency_call)) + if is_slave && + ((Context.is_unix_socket fd && not whitelisted) || + (is_himn_req req && not emergency_call)) then forward req body call else - let response = Server.dispatch_call req fd call in - let translated = - if is_json && response.Rpc.success && call.Rpc.name <> "system.listMethods" then - {response with Rpc.contents = Rpc.rpc_of_string (Jsonrpc.to_string response.Rpc.contents)} - else - response in - translated + let response = Server.dispatch_call req fd call in + let translated = + if is_json && response.Rpc.success && call.Rpc.name <> "system.listMethods" then + {response with Rpc.contents = Rpc.rpc_of_string (Jsonrpc.to_string response.Rpc.contents)} + else + response in + translated - (* debug(fmt "response = %s" response); *) +(* debug(fmt "response = %s" response); *) open Stdext @@ -154,34 +154,34 @@ let callback is_json req bio _ = let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req 
bio in try let rpc = Xmlrpc.call_of_string body in - let response = callback1 is_json req fd (Some body) rpc in - let response_str = - if rpc.Rpc.name = "system.listMethods" - then - let inner = Xmlrpc.to_a - ~empty:Bigbuffer.make - ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) - response.Rpc.contents in - let s = Printf.sprintf "%s" (Bigbuffer.to_string inner) in - let buf = Bigbuffer.make () in - Bigbuffer.append_string buf s; - buf - else - Xmlrpc.a_of_response - ~empty:Bigbuffer.make - ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) - response in + let response = callback1 is_json req fd (Some body) rpc in + let response_str = + if rpc.Rpc.name = "system.listMethods" + then + let inner = Xmlrpc.to_a + ~empty:Bigbuffer.make + ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) + response.Rpc.contents in + let s = Printf.sprintf "%s" (Bigbuffer.to_string inner) in + let buf = Bigbuffer.make () in + Bigbuffer.append_string buf s; + buf + else + Xmlrpc.a_of_response + ~empty:Bigbuffer.make + ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) + response in Http_svr.response_fct req ~hdrs:[ Http.Hdr.content_type, "text/xml"; - "Access-Control-Allow-Origin", "*"; - "Access-Control-Allow-Headers", "X-Requested-With"] fd (Bigbuffer.length response_str) + "Access-Control-Allow-Origin", "*"; + "Access-Control-Allow-Headers", "X-Requested-With"] fd (Bigbuffer.length response_str) (fun fd -> Bigbuffer.to_fct response_str (fun s -> ignore(Unixext.really_write_string fd s))) - with - | (Api_errors.Server_error (err, params)) -> - Http_svr.response_str req ~hdrs:[ Http.Hdr.content_type, "text/xml" ] fd - (Xmlrpc.string_of_response (Rpc.failure (Rpc.Enum (List.map (fun s -> Rpc.String s) (err :: params))))) - | e -> - Backtrace.is_important e; - raise e + with + | (Api_errors.Server_error (err, params)) -> + Http_svr.response_str req ~hdrs:[ Http.Hdr.content_type, 
"text/xml" ] fd + (Xmlrpc.string_of_response (Rpc.failure (Rpc.Enum (List.map (fun s -> Rpc.String s) (err :: params))))) + | e -> + Backtrace.is_important e; + raise e (** HTML callback that dispatches an RPC and returns the response. *) @@ -190,22 +190,22 @@ let jsoncallback req bio _ = let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in debug "Here in jsoncallback"; try - debug "Got the jsonrpc body: %s" body; + debug "Got the jsonrpc body: %s" body; let rpc = Jsonrpc.call_of_string body in - debug "Got the jsonrpc body: %s" body; - let response = Jsonrpc.a_of_response - ~empty:Bigbuffer.make - ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) - (callback1 false req fd (Some body) rpc) in + debug "Got the jsonrpc body: %s" body; + let response = Jsonrpc.a_of_response + ~empty:Bigbuffer.make + ~append:(fun buf s -> Bigbuffer.append_substring buf s 0 (String.length s)) + (callback1 false req fd (Some body) rpc) in Http_svr.response_fct req ~hdrs:[ Http.Hdr.content_type, "application/json"; - "Access-Control-Allow-Origin", "*"; - "Access-Control-Allow-Headers", "X-Requested-With"] fd (Bigbuffer.length response) - (fun fd -> Bigbuffer.to_fct response (fun s -> ignore(Unixext.really_write_string fd s))) - with - | (Api_errors.Server_error (err, params)) -> - Http_svr.response_str req ~hdrs:[ Http.Hdr.content_type, "application/json" ] fd - (Jsonrpc.string_of_response (Rpc.failure (Rpc.Enum (List.map (fun s -> Rpc.String s) (err :: params))))) + "Access-Control-Allow-Origin", "*"; + "Access-Control-Allow-Headers", "X-Requested-With"] fd (Bigbuffer.length response) + (fun fd -> Bigbuffer.to_fct response (fun s -> ignore(Unixext.really_write_string fd s))) + with + | (Api_errors.Server_error (err, params)) -> + Http_svr.response_str req ~hdrs:[ Http.Hdr.content_type, "application/json" ] fd + (Jsonrpc.string_of_response (Rpc.failure (Rpc.Enum (List.map (fun s -> Rpc.String s) (err :: params))))) let 
options_callback req bio _ = - let fd = Buf_io.fd_of bio in - Http_svr.respond_to_options req fd + let fd = Buf_io.fd_of bio in + Http_svr.respond_to_options req fd diff --git a/ocaml/xapi/at_least_once_more.ml b/ocaml/xapi/at_least_once_more.ml index a31b3657268..1456ff88ce1 100644 --- a/ocaml/xapi/at_least_once_more.ml +++ b/ocaml/xapi/at_least_once_more.ml @@ -12,8 +12,8 @@ * GNU Lesser General Public License for more details. *) -(* A common requirement is to execute an idempotent operation in a background thread when 'something' changes but - to minimise the number of times we run the operation i.e. if a large set of changes happen we'd ideally like to +(* A common requirement is to execute an idempotent operation in a background thread when 'something' changes but + to minimise the number of times we run the operation i.e. if a large set of changes happen we'd ideally like to just execute the function once or twice but not once per thing that changed. *) open Stdext @@ -46,27 +46,27 @@ let make name f = { } (** Signal that 'something' has changed and so the operation needs re-executed. *) -let again (x: manager) = +let again (x: manager) = Mutex.execute x.m (fun () -> if x.in_progress then x.needs_doing_again <- true (* existing thread will go around the loop again *) else begin - (* no existing thread so we need to start one off *) - x.in_progress <- true; - x.needs_doing_again <- false; - let (_: Thread.t) = - Thread.create - (fun () -> - (* Always do the operation immediately: thread is only created when work needs doing *) - x.f (); - while Mutex.execute x.m - (fun () -> - if x.needs_doing_again - then (x.needs_doing_again <- false; true) (* another request came in while we were processing *) - else (x.in_progress <- false; false) (* no more requests: thread will shutdown *) - ) do - x. 
f() - done) () in - () + (* no existing thread so we need to start one off *) + x.in_progress <- true; + x.needs_doing_again <- false; + let (_: Thread.t) = + Thread.create + (fun () -> + (* Always do the operation immediately: thread is only created when work needs doing *) + x.f (); + while Mutex.execute x.m + (fun () -> + if x.needs_doing_again + then (x.needs_doing_again <- false; true) (* another request came in while we were processing *) + else (x.in_progress <- false; false) (* no more requests: thread will shutdown *) + ) do + x. f() + done) () in + () end) diff --git a/ocaml/xapi/at_least_once_more_test.ml b/ocaml/xapi/at_least_once_more_test.ml index 2275b29f5b9..ea920e61964 100644 --- a/ocaml/xapi/at_least_once_more_test.ml +++ b/ocaml/xapi/at_least_once_more_test.ml @@ -35,20 +35,20 @@ let keep_changing_inputs_m = Mutex.create () let ( +* ) = Int64.add let num_invocations = make 0L -let update_total () = +let update_total () = set num_invocations (get num_invocations +* 1L); let inputs' = List.map get inputs in set total (List.fold_left Int64.add 0L inputs') let need_to_recompute_total = At_least_once_more.make "recompute total" update_total -let background_thread_changing_input input = +let background_thread_changing_input input = while (Mutex.execute keep_changing_inputs_m (fun () -> !keep_changing_inputs)) do set input (get input +* 1L); At_least_once_more.again need_to_recompute_total; done - -let _ = + +let _ = (* Start background threads *) let threads = List.map (Thread.create background_thread_changing_input) inputs in (* Wait for a while *) diff --git a/ocaml/xapi/attach_helpers.ml b/ocaml/xapi/attach_helpers.ml index 9ad6bc809f4..ea5d4e2878b 100644 --- a/ocaml/xapi/attach_helpers.ml +++ b/ocaml/xapi/attach_helpers.ml @@ -19,15 +19,15 @@ open D let timeout = 300. 
(* 5 minutes, should never take this long *) -(** Attempt an unplug, and if it fails because the device is in use, wait for it to +(** Attempt an unplug, and if it fails because the device is in use, wait for it to detach by polling the currently-attached field. *) -let safe_unplug rpc session_id self = +let safe_unplug rpc session_id self = try Client.VBD.unplug rpc session_id self with - | Api_errors.Server_error(error, _) when error = Api_errors.device_already_detached -> - debug "safe_unplug caught DEVICE_ALREADY_DETACHED: this is safe to ignore" - | Api_errors.Server_error(error, _) as e when error = Api_errors.device_detach_rejected -> + | Api_errors.Server_error(error, _) when error = Api_errors.device_already_detached -> + debug "safe_unplug caught DEVICE_ALREADY_DETACHED: this is safe to ignore" + | Api_errors.Server_error(error, _) as e when error = Api_errors.device_detach_rejected -> debug "safe_unplug caught DEVICE_DETACH_REJECTED: polling the currently_attached flag of the VBD"; let start = Unix.gettimeofday () in let unplugged = ref false in @@ -41,62 +41,62 @@ let safe_unplug rpc session_id self = end (** For a VBD attached to a control domain, it may correspond to a running task - (if so the task will be linked via an other_config key) or it may be a qemu - frontend (if so it will be linked to another frontend) *) + (if so the task will be linked via an other_config key) or it may be a qemu + frontend (if so it will be linked to another frontend) *) let has_vbd_leaked __context vbd = - let other_config = Db.VBD.get_other_config ~__context ~self:vbd in - let device = Db.VBD.get_device ~__context ~self:vbd in - let has_task = List.mem_assoc Xapi_globs.vbd_task_key other_config in - let has_related = List.mem_assoc Xapi_globs.related_to_key other_config in - if not has_task && (not has_related) - then (info "Ignoring orphaned disk attached to control domain (device = %s)" device; false) - else begin - let has_valid_task = has_task && ( - let task_id = 
Ref.of_string (List.assoc Xapi_globs.vbd_task_key other_config) in - (* check if the task record still exists and is pending *) - try - let status = Db.Task.get_status ~__context ~self:task_id in - List.mem status [ `pending; `cancelling ] (* pending and cancelling => not leaked *) - with _ -> false (* task record gone *) - ) in - let has_valid_related = has_related && ( - let related = Ref.of_string (List.assoc Xapi_globs.related_to_key other_config) in - (* check if the VBD still exists and is currently_attached *) - try - Db.VBD.get_currently_attached ~__context ~self:related - with _ -> false (* VBD record gone *) - ) in - (* leaked if neither of the two keys are still valid *) - not has_valid_task && (not has_valid_related) - end + let other_config = Db.VBD.get_other_config ~__context ~self:vbd in + let device = Db.VBD.get_device ~__context ~self:vbd in + let has_task = List.mem_assoc Xapi_globs.vbd_task_key other_config in + let has_related = List.mem_assoc Xapi_globs.related_to_key other_config in + if not has_task && (not has_related) + then (info "Ignoring orphaned disk attached to control domain (device = %s)" device; false) + else begin + let has_valid_task = has_task && ( + let task_id = Ref.of_string (List.assoc Xapi_globs.vbd_task_key other_config) in + (* check if the task record still exists and is pending *) + try + let status = Db.Task.get_status ~__context ~self:task_id in + List.mem status [ `pending; `cancelling ] (* pending and cancelling => not leaked *) + with _ -> false (* task record gone *) + ) in + let has_valid_related = has_related && ( + let related = Ref.of_string (List.assoc Xapi_globs.related_to_key other_config) in + (* check if the VBD still exists and is currently_attached *) + try + Db.VBD.get_currently_attached ~__context ~self:related + with _ -> false (* VBD record gone *) + ) in + (* leaked if neither of the two keys are still valid *) + not has_valid_task && (not has_valid_related) + end (** Execute a function with a list 
of VBDs after attaching a bunch of VDIs to an vm *) -let with_vbds rpc session_id __context vm vdis mode f = +let with_vbds rpc session_id __context vm vdis mode f = let task_id = Context.get_task_id __context in let vbds = ref [] in finally (fun () -> List.iter (fun vdi -> - let vbd = Client.VBD.create ~rpc ~session_id ~vM:vm ~empty:false ~vDI:vdi - ~userdevice:"autodetect" ~bootable:false ~mode ~_type:`Disk ~unpluggable:true - ~qos_algorithm_type:"" ~qos_algorithm_params:[] - ~other_config:[ Xapi_globs.vbd_task_key, Ref.string_of task_id ] in - (* sanity-check *) - if has_vbd_leaked __context vbd - then error "Attach_helpers.with_vbds new VBD has leaked: %s" (Ref.string_of vbd); + let vbd = Client.VBD.create ~rpc ~session_id ~vM:vm ~empty:false ~vDI:vdi + ~userdevice:"autodetect" ~bootable:false ~mode ~_type:`Disk ~unpluggable:true + ~qos_algorithm_type:"" ~qos_algorithm_params:[] + ~other_config:[ Xapi_globs.vbd_task_key, Ref.string_of task_id ] in + (* sanity-check *) + if has_vbd_leaked __context vbd + then error "Attach_helpers.with_vbds new VBD has leaked: %s" (Ref.string_of vbd); - let vbd_uuid = Client.VBD.get_uuid ~rpc ~session_id ~self:vbd in - let uuid = Client.VM.get_uuid ~rpc ~session_id ~self:vm in - debug "created VBD (uuid %s); attempting to hotplug to VM (uuid: %s)" vbd_uuid uuid; - vbds := vbd :: !vbds; - Client.VBD.plug rpc session_id vbd - ) vdis; + let vbd_uuid = Client.VBD.get_uuid ~rpc ~session_id ~self:vbd in + let uuid = Client.VM.get_uuid ~rpc ~session_id ~self:vm in + debug "created VBD (uuid %s); attempting to hotplug to VM (uuid: %s)" vbd_uuid uuid; + vbds := vbd :: !vbds; + Client.VBD.plug rpc session_id vbd + ) vdis; vbds := List.rev !vbds; f !vbds) (fun () -> - (* Use a new session here to cover the case where the session has become invalid *) - Helpers.call_api_functions ~__context (fun rpc session_id -> - List.iter (Helpers.log_exn_continue "unplugging disk from VM" - (fun self -> safe_unplug rpc session_id self)) !vbds; - 
List.iter (Helpers.log_exn_continue "destroying VBD on VM" - (fun self -> Client.VBD.destroy rpc session_id self)) !vbds)) + (* Use a new session here to cover the case where the session has become invalid *) + Helpers.call_api_functions ~__context (fun rpc session_id -> + List.iter (Helpers.log_exn_continue "unplugging disk from VM" + (fun self -> safe_unplug rpc session_id self)) !vbds; + List.iter (Helpers.log_exn_continue "destroying VBD on VM" + (fun self -> Client.VBD.destroy rpc session_id self)) !vbds)) diff --git a/ocaml/xapi/audit_log.ml b/ocaml/xapi/audit_log.ml index 367c3b7f1ae..00e92cde35f 100644 --- a/ocaml/xapi/audit_log.ml +++ b/ocaml/xapi/audit_log.ml @@ -24,106 +24,106 @@ let audit_log_whitelist_prefix = "/var/log/audit.log" let line_timestamp_length = 21 (* the timestamp length at the debug line *) (* location of [ at the beginning of the line timestamp *) -let timestamp_index line = - try ((String.index line '[') + 1) with Not_found -> 0 +let timestamp_index line = + try ((String.index line '[') + 1) with Not_found -> 0 let went_through ?filter line = - match filter with - |None->true - |Some fs-> - List.fold_left - (fun acc f->acc&&(String.has_substr line f)) - true - fs + match filter with + |None->true + |Some fs-> + List.fold_left + (fun acc f->acc&&(String.has_substr line f)) + true + fs let write_line line fd ?filter since = - if String.length line > - (line_timestamp_length + (timestamp_index line)) - then - let line_timestamp = - String.sub line (timestamp_index line) line_timestamp_length - in - if since="" || ((String.compare line_timestamp since) >= 0) - then - if went_through ?filter line + if String.length line > + (line_timestamp_length + (timestamp_index line)) then - let len = String.length line in - ignore(Unix.write fd line 0 len) + let line_timestamp = + String.sub line (timestamp_index line) line_timestamp_length + in + if since="" || ((String.compare line_timestamp since) >= 0) + then + if went_through ?filter line + then + 
let len = String.length line in + ignore(Unix.write fd line 0 len) let transfer_audit_file _path compression fd_out ?filter since : unit = - let path = Unixext.resolve_dot_and_dotdot _path in - let in_whitelist = (String.startswith audit_log_whitelist_prefix path) in - if in_whitelist then - let file_exists = (Unixext.file_exists path) in - if file_exists then - begin - debug "transfer_audit_file path=%s,compression=[%s],since=%s" path compression since; - try - if compression="" (* uncompressed *) - then begin - Unixext.readfile_line - (fun line -> write_line (line^"\n") fd_out ?filter since) - path - end - else if compression="gz" - then ( - Unixext.with_file path [ Unix.O_RDONLY ] 0o0 - (fun gz_fd_in -> - Gzip.decompress_passive gz_fd_in - (fun fd_in -> (*fd_in is closed by gzip module*) - let cin = Unix.in_channel_of_descr fd_in in - try - while true do - let line = input_line cin in - write_line (line^"\n") fd_out ?filter since - done - with End_of_file -> () (* ok, expected *) - ) - ) - ) - else ( - (* nothing to do with an unknown file format *) - debug "unknown compression format %s in audit log file %s" compression path - ) - with e -> begin - debug "error reading audit log file %s: %s" path (ExnHelper.string_of_exn e); - raise e - end - end + let path = Unixext.resolve_dot_and_dotdot _path in + let in_whitelist = (String.startswith audit_log_whitelist_prefix path) in + if in_whitelist then + let file_exists = (Unixext.file_exists path) in + if file_exists then + begin + debug "transfer_audit_file path=%s,compression=[%s],since=%s" path compression since; + try + if compression="" (* uncompressed *) + then begin + Unixext.readfile_line + (fun line -> write_line (line^"\n") fd_out ?filter since) + path + end + else if compression="gz" + then ( + Unixext.with_file path [ Unix.O_RDONLY ] 0o0 + (fun gz_fd_in -> + Gzip.decompress_passive gz_fd_in + (fun fd_in -> (*fd_in is closed by gzip module*) + let cin = Unix.in_channel_of_descr fd_in in + try + while true 
do + let line = input_line cin in + write_line (line^"\n") fd_out ?filter since + done + with End_of_file -> () (* ok, expected *) + ) + ) + ) + else ( + (* nothing to do with an unknown file format *) + debug "unknown compression format %s in audit log file %s" compression path + ) + with e -> begin + debug "error reading audit log file %s: %s" path (ExnHelper.string_of_exn e); + raise e + end + end let transfer_all_audit_files fd_out ?filter since = - let atransfer _infix _suffix = - let infix = if _infix="" then "" else "."^_infix in - let suffix = if _suffix="" then "" else "."^_suffix in - transfer_audit_file - (audit_log_whitelist_prefix^infix^suffix) - _suffix - fd_out - ?filter - since - in - let atransfer_try_gz infix = - ignore_exn (fun ()->atransfer infix "gz");(* try the compressed file *) - ignore_exn (fun ()->atransfer infix "") (* then the uncompressed one *) - in - (* go through audit.log.n->0 first, ascending order of time *) - for i=100 downto 0 do - atransfer_try_gz (string_of_int i) - done; - (* finally transfer /var/log/audit.log (the latest one in time) *) - atransfer_try_gz "" + let atransfer _infix _suffix = + let infix = if _infix="" then "" else "."^_infix in + let suffix = if _suffix="" then "" else "."^_suffix in + transfer_audit_file + (audit_log_whitelist_prefix^infix^suffix) + _suffix + fd_out + ?filter + since + in + let atransfer_try_gz infix = + ignore_exn (fun ()->atransfer infix "gz");(* try the compressed file *) + ignore_exn (fun ()->atransfer infix "") (* then the uncompressed one *) + in + (* go through audit.log.n->0 first, ascending order of time *) + for i=100 downto 0 do + atransfer_try_gz (string_of_int i) + done; + (* finally transfer /var/log/audit.log (the latest one in time) *) + atransfer_try_gz "" (* map the ISO8601 timestamp format into the one in our logs *) let log_timestamp_of_iso8601 iso8601_timestamp = - let step1 = iso8601_timestamp in - let step2 = Xstringext.String.replace "-" "" step1 in - let step3 = 
Xstringext.String.replace "Z" "" step2 in - step3 + let step1 = iso8601_timestamp in + let step2 = Xstringext.String.replace "-" "" step1 in + let step3 = Xstringext.String.replace "Z" "" step2 in + step3 (* Assume that RBAC access for the session_id already verified by xapi_http.ml - + GET /audit_log?session_id=&task_id=& [since=] @@ -136,24 +136,24 @@ let log_timestamp_of_iso8601 iso8601_timestamp = *) let handler (req: Request.t) (bio: Buf_io.t) _ = - let s = Buf_io.fd_of bio in - Buf_io.assert_buffer_empty bio; - req.Request.close <- true; - - Xapi_http.with_context (* makes sure to signal task-completed to cli *) - (Printf.sprintf "audit_log_get request") - req - s - (fun __context -> - - let all = req.Request.cookie @ req.Request.query in - let since_iso8601 = - if List.mem_assoc "since" all then List.assoc "since" all else "" - in - let since = log_timestamp_of_iso8601 since_iso8601 in - (*debug "since=[%s]" since;*) - (* we need to return an http header without content-length *) - Http_svr.headers s (http_200_ok() @ [ Http.Hdr.content_type ^": text/plain"]); - (* then the contents *) - transfer_all_audit_files s since - ) + let s = Buf_io.fd_of bio in + Buf_io.assert_buffer_empty bio; + req.Request.close <- true; + + Xapi_http.with_context (* makes sure to signal task-completed to cli *) + (Printf.sprintf "audit_log_get request") + req + s + (fun __context -> + + let all = req.Request.cookie @ req.Request.query in + let since_iso8601 = + if List.mem_assoc "since" all then List.assoc "since" all else "" + in + let since = log_timestamp_of_iso8601 since_iso8601 in + (*debug "since=[%s]" since;*) + (* we need to return an http header without content-length *) + Http_svr.headers s (http_200_ok() @ [ Http.Hdr.content_type ^": text/plain"]); + (* then the contents *) + transfer_all_audit_files s since + ) diff --git a/ocaml/xapi/balloon.ml b/ocaml/xapi/balloon.ml index d7ff314b755..7104aeb106f 100644 --- a/ocaml/xapi/balloon.ml +++ b/ocaml/xapi/balloon.ml @@ 
-30,41 +30,41 @@ let _high_mem_balloon = "info/high_kb" (** Reads /proc/xen/balloon into a string * int64 option association list *) let parse_proc_xen_balloon () = - let keys = [ - _current_allocation; - _requested_target; - _low_mem_balloon; - _high_mem_balloon] in - List.map (fun key -> - let s = (Unixext.string_of_file (sysfs_stem ^ key)) in - let stripped = Xstringext.String.strip Xstringext.String.isspace s in - (key, Some (Int64.of_string stripped))) keys - + let keys = [ + _current_allocation; + _requested_target; + _low_mem_balloon; + _high_mem_balloon] in + List.map (fun key -> + let s = (Unixext.string_of_file (sysfs_stem ^ key)) in + let stripped = Xstringext.String.strip Xstringext.String.isspace s in + (key, Some (Int64.of_string stripped))) keys + let _proc_meminfo = "/proc/meminfo" - + let parse_meminfo () = - let ic = open_in _proc_meminfo in - finally - (fun () -> - let table = ref [] in - begin - try - while true do - let line = input_line ic in - match Xstringext.String.split ' ' line with - | key :: value :: "kB" :: [] -> - table := (key, Int64.(mul (of_string value) 1024L)) :: !table - | _ -> () - done - with End_of_file -> () - end; - !table - ) (fun () -> close_in ic) + let ic = open_in _proc_meminfo in + finally + (fun () -> + let table = ref [] in + begin + try + while true do + let line = input_line ic in + match Xstringext.String.split ' ' line with + | key :: value :: "kB" :: [] -> + table := (key, Int64.(mul (of_string value) 1024L)) :: !table + | _ -> () + done + with End_of_file -> () + end; + !table + ) (fun () -> close_in ic) let _memtotal = "MemTotal:" let get_memtotal () = - let table = parse_meminfo () in - if List.mem_assoc _memtotal table - then Some (List.assoc _memtotal table) - else None + let table = parse_meminfo () in + if List.mem_assoc _memtotal table + then Some (List.assoc _memtotal table) + else None diff --git a/ocaml/xapi/binpack.ml b/ocaml/xapi/binpack.ml index 314c7928be4..d2d9d0d337c 100644 --- 
a/ocaml/xapi/binpack.ml +++ b/ocaml/xapi/binpack.ml @@ -13,39 +13,39 @@ *) (* Used to sort pairs into descending order of their second component *) -let less_than (_, a) (_, b) = compare b a +let less_than (_, a) (_, b) = compare b a let rec insert compare elt sorted_list = match sorted_list with | [] -> [ elt ] | x :: xs -> if compare elt x <= 0 then elt :: x :: xs else x :: (insert compare elt xs) -let biggest_fit_decreasing (things: ('a * int64) list) (bins: ('b * int64) list) : ('a * 'b) list = +let biggest_fit_decreasing (things: ('a * int64) list) (bins: ('b * int64) list) : ('a * 'b) list = let things' = List.sort less_than things and bins' = List.sort less_than bins in - + (* Walk through the things allocating them to bins. We keep the bins sorted into biggest first. *) let initial = [], bins' in (* no things allocated, all bins full and sorted *) let allocate_one (mapping, bins) (thing_id, thing_size) = match bins with | [] -> mapping, bins (* nowhere to put it *) | (first_bin_id, first_bin_size) :: rest -> - let remaining = Int64.sub first_bin_size thing_size in - if remaining < 0L - then (mapping, bins) (* leave it out *) - else - (* Allocate the thing to this bin, subtract from bin size and resort *) - let bins = insert less_than (first_bin_id, remaining) rest in - (thing_id, first_bin_id) :: mapping, bins in + let remaining = Int64.sub first_bin_size thing_size in + if remaining < 0L + then (mapping, bins) (* leave it out *) + else + (* Allocate the thing to this bin, subtract from bin size and resort *) + let bins = insert less_than (first_bin_id, remaining) rest in + (thing_id, first_bin_id) :: mapping, bins in (* Only return the mapping: we aren't interested in the remaining free space *) fst(List.fold_left allocate_one initial things') (* Simple hashtbl-based function memoiser *) -let memoise f = +let memoise f = let table = Hashtbl.create 10 in - let rec lookup x = + let rec lookup x = if Hashtbl.mem table x then Hashtbl.find table x else let 
result = f lookup x in - Hashtbl.add table x result; - result in + Hashtbl.add table x result; + result in lookup (** Raised when an int64 addition overflows (positive numbers only) *) @@ -54,15 +54,15 @@ let ( +* ) a b = let result = Int64.add a b in if result < 0L then raise Overflo let ( ** ) a b = let result = Int64.mul a b in if result < 0L then raise Overflow else result (** Compute nCr (the binomial coefficient) by dynamic programming. Raises Overflow if the result is too big for an int64 (eg 68 C 34) *) -let binomial n r = - let choose lookup (n, r) = +let binomial n r = + let choose lookup (n, r) = if r = 0 || r = n then 1L else (lookup (n - 1, r - 1)) +* (lookup (n - 1, r)) in memoise choose (n, r) (** Return all sublists of length 'n' from list 'l'. Returns a list of length (binomial (List.length l) n) *) -let choose l n = +let choose l n = let choose' lookup (l, n) = match l, n with | _, 0 -> [ [] ] | [], _ -> [] @@ -70,7 +70,7 @@ let choose l n = memoise choose' (l, n) (** Return all permutations of a list *) -let rec permutations : 'a list -> 'a list list = +let rec permutations : 'a list -> 'a list list = let rotate n xs = let a, b = Stdext.Listext.List.chop n xs in b @ a in let insert_at n x xs = rotate (List.length xs - n + 1) (x :: (rotate n xs)) in let mkints_exclusive n = Stdext.Range.to_list (Stdext.Range.make 0 n) in @@ -90,38 +90,38 @@ type ('a, 'b) configuration = { num_failures: int; (** number of failures to tolerate 'r' *) } -let check_configuration config = +let check_configuration config = (* All hosts and VMs in placement should be in the hosts and vms list *) - List.iter (fun (vm, host) -> - if not(List.mem_assoc vm config.vms) then failwith "VM not found"; - if not(List.mem_assoc host config.hosts) then failwith "Host not found" - ) config.placement; + List.iter (fun (vm, host) -> + if not(List.mem_assoc vm config.vms) then failwith "VM not found"; + if not(List.mem_assoc host config.hosts) then failwith "Host not found" + ) 
config.placement; (* num_failures needs to be <= the total number of hosts *) if config.num_failures > config.total_hosts then failwith "num_failures > total_hosts"; if config.num_failures < 0 then failwith "num_failures < 0" -let string_of_configuration string_of_a string_of_b c = - let semicolon x = String.concat "; " x in - let comma (a, b) = Printf.sprintf "%s, %s" a b in - let map f_a f_b (a, b) = (f_a a, f_b b) in - let int64 = Int64.to_string in - Printf.sprintf "{ total_hosts = %d; num_failures = %d; hosts = [ %s ]; vms = [ %s ]; placement = [ %s ] }" - c.total_hosts c.num_failures - (semicolon (List.map comma (List.map (map string_of_a int64) c.hosts))) - (semicolon (List.map comma (List.map (map string_of_b int64) c.vms))) - (semicolon (List.map comma (List.map (map string_of_b string_of_a) c.placement))) +let string_of_configuration string_of_a string_of_b c = + let semicolon x = String.concat "; " x in + let comma (a, b) = Printf.sprintf "%s, %s" a b in + let map f_a f_b (a, b) = (f_a a, f_b b) in + let int64 = Int64.to_string in + Printf.sprintf "{ total_hosts = %d; num_failures = %d; hosts = [ %s ]; vms = [ %s ]; placement = [ %s ] }" + c.total_hosts c.num_failures + (semicolon (List.map comma (List.map (map string_of_a int64) c.hosts))) + (semicolon (List.map comma (List.map (map string_of_b int64) c.vms))) + (semicolon (List.map comma (List.map (map string_of_b string_of_a) c.placement))) let assoc errmsg x xs = try List.assoc x xs with Not_found -> failwith ("Not_found: " ^ errmsg) (* Allocate the VMs in plan to hosts, return the new host free memory *) -let account hosts vms plan = - let memory_needed_on_host h = +let account hosts vms plan = + let memory_needed_on_host h = let memory_needed = List.map (fun (vm, host) -> if h = host then assoc "memory_needed_on_host" vm vms else 0L) plan in List.fold_left Int64.add 0L memory_needed in - List.map (fun (host, memory) -> host, Int64.sub memory (memory_needed_on_host host)) hosts + List.map (fun 
(host, memory) -> host, Int64.sub memory (memory_needed_on_host host)) hosts (* Given a configuration and a plan, return the new configuration with the plan applied *) -let apply_plan config plan = +let apply_plan config plan = let hosts = account config.hosts config.vms plan in (* compute the VM -> host mappings which are unchanged *) let untouched = List.filter (fun (vm, host) -> not(List.mem_assoc vm plan)) config.placement in @@ -135,32 +135,32 @@ type ('a, 'b) heuristic = { } (** Return a list of failed VMs given a set of dead Hosts *) -let get_failed_vms config dead_hosts = - List.map fst (List.filter (fun (vm, host) -> List.mem host dead_hosts) config.placement) +let get_failed_vms config dead_hosts = + List.map fst (List.filter (fun (vm, host) -> List.mem host dead_hosts) config.placement) (** Given a configuration and a set of failed VMs, return a map of failed VM -> new Host *) -let pack_failed_vms_onto_live_hosts (config: ('a, 'b) configuration) (failed_vms: 'b list) : ('b * 'a) list = +let pack_failed_vms_onto_live_hosts (config: ('a, 'b) configuration) (failed_vms: 'b list) : ('b * 'a) list = (* pack failed VMs ... *) let things = List.map (fun vm -> vm, assoc "pack_failed_vms_onto_live_hosts" vm config.vms) failed_vms in (* ... 
into remaining hosts *) let bins = config.hosts in (* NB plan may omit some VMs if they don't fit anywhere *) biggest_fit_decreasing things bins - + (** Internal exception used to fast-track planning failures *) exception Stop (** A plan is trivially never possible if there aren't enough hosts for future failures, irrespective of VM size *) -let plan_trivially_never_possible config = - let hosts = List.map fst config.hosts in - false (* indent *) - (* If there are fewer hosts than config.num_failures then no plan is ever possible *) - || (List.length hosts < config.num_failures) - (* If there are exactly config.num_failures hosts and any VMs to protect then no plan is ever possible *) - || (List.length hosts = config.num_failures && config.vms <> []) +let plan_trivially_never_possible config = + let hosts = List.map fst config.hosts in + false (* indent *) + (* If there are fewer hosts than config.num_failures then no plan is ever possible *) + || (List.length hosts < config.num_failures) + (* If there are exactly config.num_failures hosts and any VMs to protect then no plan is ever possible *) + || (List.length hosts = config.num_failures && config.vms <> []) (* Return the state of the world after we generate and follow a failover plan for one host *) -let simulate_failure config dead_host = +let simulate_failure config dead_host = let failed_vms = get_failed_vms config [ dead_host ] in let config = { config with hosts = List.filter (fun (h, _) -> h <> dead_host) config.hosts } in let plan = pack_failed_vms_onto_live_hosts config failed_vms in @@ -169,7 +169,7 @@ let simulate_failure config dead_host = (* Return a new configuration with the host memory and VM placement adjusted *) let hosts = account config.hosts config.vms plan in let placement = List.map (fun (vm, host) -> vm, (if List.mem_assoc vm plan then assoc "simulate_failure" vm plan else host)) config.placement in - { config with hosts = hosts; placement = placement; num_failures = config.num_failures - 
1 } + { config with hosts = hosts; placement = placement; num_failures = config.num_failures - 1 } (** For the nCr binpack strategy return true if a plan is always possible *) let plan_always_possible config = @@ -178,22 +178,22 @@ let plan_always_possible config = let hosts = List.map fst config.hosts in (* For every nCr combination of r host failures, check that we can generate a plan for them happening in any order. *) - List.iter + List.iter (fun combination -> - List.iter - (fun permutation -> - let (_: ('a, 'b) configuration) = List.fold_left simulate_failure config permutation in () - ) (permutations combination) + List.iter + (fun permutation -> + let (_: ('a, 'b) configuration) = List.fold_left simulate_failure config permutation in () + ) (permutations combination) ) (choose hosts config.num_failures); true with Stop -> false - + let bin_pack_every_combination = { name = "exhaustively binpack every host failure combination (expensive)"; plan_always_possible = plan_always_possible; - get_specific_plan = + get_specific_plan = (fun config failed_vms -> List.iter (fun vm -> ignore(assoc "bin_pack_every_combination/get_specific_plan" vm config.vms)) failed_vms; (* config.hosts contains only live hosts *) @@ -217,11 +217,11 @@ let rec mkints = function 1. every VM that fails is as big as the biggest protected VM 2. the number of VMs which fail is always the maximum possible (even if these are all very small VMs) 3. the largest hosts fail - If we can find a failover plan then all real failures will be "easier" to deal with; failed hosts and VMs will + If we can find a failover plan then all real failures will be "easier" to deal with; failed hosts and VMs will be smaller (or equal to) in both number and size. 
*) (** Return the maximum number of VMs that could fail due to one host failures *) -let largest_resident_vms config = +let largest_resident_vms config = let num_vms_per_host = List.map (fun (host, _) -> List.length (List.filter (fun (vm, h) -> h = host) config.placement)) config.hosts in List.fold_left max 0 num_vms_per_host @@ -233,7 +233,7 @@ let approximate_config config = (* Return a config which has all these VMs on it *) (* Identify VMs by (host, index) in the abstract simulation *) let vm_ids = List.concat (List.map (fun host -> List.map (fun idx -> host, idx) (mkints number_vms)) (List.map fst config.hosts)) in - { + { hosts = config.hosts; (* host free memory is unapproximated *) vms = List.map (fun vm -> vm, vm_size) vm_ids; placement = List.map (fun ((host, idx) as vm) -> vm, host) vm_ids; @@ -244,25 +244,25 @@ let approximate_config config = let approximate_bin_pack = { name = "bin pack a worst-case scenario with conservative assumptions"; - plan_always_possible = + plan_always_possible = (fun config -> try - if plan_trivially_never_possible config then raise Stop; - (* Return the state of the world after we generate and follow a failover plan for the biggest host that - could fail. Raises 'Stop' if a plan could not be found. *) - let simulate_worst_single_failure config = - (* Assume the biggest host fails *) - let biggest_host = fst (List.hd (List.sort less_than config.hosts)) in - approximate_config (simulate_failure config biggest_host) in - - let initial_config = approximate_config config in - - (* Simulate the n worst failures *) - ignore (List.fold_left (fun config _ -> simulate_worst_single_failure config) initial_config (mkints initial_config.num_failures)); - true + if plan_trivially_never_possible config then raise Stop; + (* Return the state of the world after we generate and follow a failover plan for the biggest host that + could fail. Raises 'Stop' if a plan could not be found. 
*) + let simulate_worst_single_failure config = + (* Assume the biggest host fails *) + let biggest_host = fst (List.hd (List.sort less_than config.hosts)) in + approximate_config (simulate_failure config biggest_host) in + + let initial_config = approximate_config config in + + (* Simulate the n worst failures *) + ignore (List.fold_left (fun config _ -> simulate_worst_single_failure config) initial_config (mkints initial_config.num_failures)); + true with Stop -> false ); - get_specific_plan = + get_specific_plan = (fun config failed_vms -> (* Make sure we know the VM sizes *) List.iter (fun vm -> ignore(assoc "approximate_bin_pack/get_specific_plan" vm config.vms)) failed_vms; @@ -270,31 +270,31 @@ let approximate_bin_pack = { (* Guaranteed to always work if plan_always_possible returned true *) pack_failed_vms_onto_live_hosts config failed_vms ); -} +} -let all_heuristics = [ +let all_heuristics = [ bin_pack_every_combination; approximate_bin_pack; ] -let choose_heuristic config = +let choose_heuristic config = (* If the number of combinations to check is small, perform all possible bin-packings: this will produce good solutions for small pool sizes. For larger pools we switch back to a less expensive heuristic. 
*) let n = config.total_hosts in let r = config.num_failures in - if n > 32 || Xapi_fist.choose_approximate_planner () then approximate_bin_pack + if n > 32 || Xapi_fist.choose_approximate_planner () then approximate_bin_pack else begin try if binomial n r ** (factorial r) < 3500L then bin_pack_every_combination else approximate_bin_pack with Overflow -> approximate_bin_pack end -let plan_for_n_failures config = +let plan_for_n_failures config = let h = choose_heuristic config in Printf.printf "Chosen heuristic: %s\n" h.name; h.plan_always_possible config - + diff --git a/ocaml/xapi/binpack_test.ml b/ocaml/xapi/binpack_test.ml index a9ac361f421..23e29455fa8 100644 --- a/ocaml/xapi/binpack_test.ml +++ b/ocaml/xapi/binpack_test.ml @@ -15,18 +15,18 @@ open Binpack -let time f = +let time f = let start = Unix.gettimeofday () in let result = f () in let time = Unix.gettimeofday () -. start in Printf.printf "result: %Ld time taken: %.2f\n" result time - + (* Return a table of hosts or VMs *) let make_thing base extra n = List.map (fun x -> x, Int64.add base (Random.int64 extra)) (mkints n) - + let choose_one list = List.nth list (Random.int (List.length list)) - -let make_config host_num host_base host_extra vm_num vm_base vm_extra num_failures = + +let make_config host_num host_base host_extra vm_num vm_base vm_extra num_failures = let hosts = make_thing host_base host_extra host_num in let vms = make_thing vm_base vm_extra vm_num in let placement = List.map (fun (vm, _) -> vm, fst (choose_one hosts)) vms in @@ -38,7 +38,7 @@ let make_config host_num host_base host_extra vm_num vm_base vm_extra num_failur 1. no host is overcommitted (free memory after new VMs are subtracted >= 0) 2. 
every VM running on the dead hosts is mentioned in the plan *) (* Return true if the hosts have enough free memory to run the VMs in the plan *) -let check_plan config dead_hosts plan = +let check_plan config dead_hosts plan = let memory_remaining = account config.hosts config.vms plan in (* List.iter (fun mem -> Printf.printf "%Ld\n" mem) free; *) (* No host should be overcommitted: *) @@ -46,14 +46,14 @@ let check_plan config dead_hosts plan = (* All failed VMs should be restarted: *) let failed_vms = get_failed_vms config dead_hosts in if List.length failed_vms > (List.length plan) then failwith "bad plan" - + (* Convince ourselves that a plan is always possible (call if plan_always_possible returns true) by searching for a counterexample. Returns true -- definitely OK (exhaustive search failed to find any bad plans) Returns false -- maybe OK (too many for exhaustive search, didn't find any bad plans) Throws (Failure "bad plan") -- definitely bad - *) -let prove_plan_is_possible_via_counterexample_search (h: (int, int) Binpack.heuristic) config = +*) +let prove_plan_is_possible_via_counterexample_search (h: (int, int) Binpack.heuristic) config = (* If a small number of combinations then try each one. 
Otherwise try a bunch at random *) let limit = 10000L in let num_hosts = List.length config.hosts in @@ -61,25 +61,25 @@ let prove_plan_is_possible_via_counterexample_search (h: (int, int) Binpack.heur let combinations_to_try, exhaustive = if total_combinations < limit then choose (List.map fst config.hosts) config.num_failures, true - else List.map (fun _ -> - let num_failures = Random.int config.num_failures in - (* choose 'num_failures' elements at random *) - let alive, dead = List.fold_left - (fun (remaining, sofar) _ -> - if List.length sofar = num_failures - then (remaining, sofar) - else begin - let host = choose_one remaining in - List.filter (fun x -> x <> host) remaining, host :: sofar - end) - (List.map fst config.hosts, []) (mkints num_failures) in - dead) (mkints (Int64.to_int limit)), false in + else List.map (fun _ -> + let num_failures = Random.int config.num_failures in + (* choose 'num_failures' elements at random *) + let alive, dead = List.fold_left + (fun (remaining, sofar) _ -> + if List.length sofar = num_failures + then (remaining, sofar) + else begin + let host = choose_one remaining in + List.filter (fun x -> x <> host) remaining, host :: sofar + end) + (List.map fst config.hosts, []) (mkints num_failures) in + dead) (mkints (Int64.to_int limit)), false in Printf.printf "Trying %d (out of %Ld) combinations %s\n" (List.length combinations_to_try) total_combinations (if exhaustive then "(EXHAUSTIVE)" else ""); List.iter (fun dead_hosts -> let failed_vms = get_failed_vms config dead_hosts in let config = { config with hosts = List.filter (fun (x, _) -> not(List.mem x dead_hosts)) config.hosts } in -(* +(* Printf.printf "Config = %s\n" (string_of_configuration string_of_int string_of_int config); Printf.printf " Dead hosts = [ %s ]; failed VMs = [ %s ]\n" (String.concat ";" (List.map string_of_int dead_hosts)) (String.concat ";" (List.map string_of_int failed_vms)); *) @@ -92,9 +92,9 @@ let prove_plan_is_possible_via_counterexample_search (h: 
(int, int) Binpack.heur exhaustive (* Negative tests -- make sure the planner fails in obviously impossible situations *) -let try_impossible_cases () = +let try_impossible_cases () = Printf.printf "Trying impossible cases\n"; - + (* Make sure an obviously bad plan is detected by the 'check_plan' fn *) Printf.printf "Making sure an obviously bad plan is detected by the 'check_plan' fn: "; let hosts = [ 0, 1L; 1, 1L ] (* two hosts, 1 unit free each *) @@ -118,18 +118,18 @@ let try_impossible_cases () = let placement = List.combine (List.map fst vms) (List.map fst hosts) in let config = { hosts = hosts; vms = vms; placement = placement; total_hosts = List.length hosts; num_failures = 5 } in List.iter (fun h -> - Printf.printf "Trying heuristic: %s\n" h.name; - Printf.printf "* checking plan_always_possible = false\n"; - if h.plan_always_possible config then failwith "plan_always_possible shouldn't return true"; - try - Printf.printf "* checking 'check_plan_always_possible' agrees\n"; - if prove_plan_is_possible_via_counterexample_search h config - then failwith "prove_plan_is_possible_via_counterexample_search performed exhaustive search and found no counterexample" - else Printf.printf "WARNING: failed to find a counterexample; not sure if plan is ok or not\n" - with Failure "bad plan" -> Printf.printf "Found a counterexample: no plan is possible\n") all_heuristics - + Printf.printf "Trying heuristic: %s\n" h.name; + Printf.printf "* checking plan_always_possible = false\n"; + if h.plan_always_possible config then failwith "plan_always_possible shouldn't return true"; + try + Printf.printf "* checking 'check_plan_always_possible' agrees\n"; + if prove_plan_is_possible_via_counterexample_search h config + then failwith "prove_plan_is_possible_via_counterexample_search performed exhaustive search and found no counterexample" + else Printf.printf "WARNING: failed to find a counterexample; not sure if plan is ok or not\n" + with Failure "bad plan" -> Printf.printf 
"Found a counterexample: no plan is possible\n") all_heuristics + (* Positive test -- make sure the planner succeeds in easy cases *) -let try_possible_cases () = +let try_possible_cases () = Printf.printf "Trying possible cases\n"; let c = make_config 10 500L 1000L 5 256L 1L 3 in let h = choose_heuristic c in @@ -139,10 +139,10 @@ let try_possible_cases () = if prove_plan_is_possible_via_counterexample_search h c then Printf.printf "Proved that plan is always possible\n" else Printf.printf "Failed to prove that plan is always possible -- might be ok still\n" - + let int_of_heuristic h = if h.name = approximate_bin_pack.name then 0 else 1 -let check_planning_performance filename n' r' i = +let check_planning_performance filename n' r' i = let file = open_out filename in (* Printf.printf "Checking performance of planner\n"; *) let successes = Array.make (n' * r') 0 in @@ -154,23 +154,23 @@ let check_planning_performance filename n' r' i = for attempts = 1 to i do for n = 1 to n' do for r = 1 to r' do - if r < n then begin - let c = make_config n 8000L 4000L (16 * n) 500L 250L r in - let h = choose_heuristic c in - let start = Unix.gettimeofday () in - let always = h.plan_always_possible c in - (* If it should always be possible then look for a proof. Don't fail if we can't find one; only fail if we find - a counterexample showing it doesn't work *) - if always then ignore(prove_plan_is_possible_via_counterexample_search h c); - let time = Unix.gettimeofday () -. 
start in - if always then set successes n r (get successes n r + 1); - set max_time n r (max (get max_time n r) time); - (* Assumes heuristic choice is a function of n and r only *) - set heuristic n r (int_of_heuristic h); - - Printf.fprintf stderr "%d %d %d %d %.2f\n" n r (get heuristic n r) (get successes n r) (get max_time n r); flush stderr; - - end + if r < n then begin + let c = make_config n 8000L 4000L (16 * n) 500L 250L r in + let h = choose_heuristic c in + let start = Unix.gettimeofday () in + let always = h.plan_always_possible c in + (* If it should always be possible then look for a proof. Don't fail if we can't find one; only fail if we find + a counterexample showing it doesn't work *) + if always then ignore(prove_plan_is_possible_via_counterexample_search h c); + let time = Unix.gettimeofday () -. start in + if always then set successes n r (get successes n r + 1); + set max_time n r (max (get max_time n r) time); + (* Assumes heuristic choice is a function of n and r only *) + set heuristic n r (int_of_heuristic h); + + Printf.fprintf stderr "%d %d %d %d %.2f\n" n r (get heuristic n r) (get successes n r) (get max_time n r); flush stderr; + + end done done done; @@ -180,18 +180,18 @@ let check_planning_performance filename n' r' i = done done; close_out file - -let _ = + +let _ = let graph = ref "" in let graph_n = ref 64 and graph_r = ref 64 and graph_i = ref 1 in Arg.parse [ "-graph", Arg.Set_string graph, "Run performance tests and write graph output to file specified"; - "-graph_n", Arg.Set_int graph_n, "Set the maximum N value for the performance tests (eg total hosts)"; - "-graph_r", Arg.Set_int graph_r, "Set the maximum R value for the performance tests (eg host failures to simulate)"; - "-graph_i", Arg.Set_int graph_i, "Set the number of iterations to run the performance tests over" ] + "-graph_n", Arg.Set_int graph_n, "Set the maximum N value for the performance tests (eg total hosts)"; + "-graph_r", Arg.Set_int graph_r, "Set the 
maximum R value for the performance tests (eg host failures to simulate)"; + "-graph_i", Arg.Set_int graph_i, "Set the number of iterations to run the performance tests over" ] (fun x -> Printf.fprintf stderr "Skipping unknown argument: %s" x) "Run unit and optional performance tests on the binpacker"; - + try_impossible_cases (); try_possible_cases (); diff --git a/ocaml/xapi/bios_strings.ml b/ocaml/xapi/bios_strings.ml index 3ea9c0f58d6..06abab265c7 100644 --- a/ocaml/xapi/bios_strings.ml +++ b/ocaml/xapi/bios_strings.ml @@ -18,82 +18,82 @@ open Stdext.Xstringext let dmidecode_prog = "/usr/sbin/dmidecode" let remove_invisible str = - let l = String.split '\n' str in - let l = List.filter (fun s -> not (String.startswith "#" s)) l in - let str = String.concat "" l in - String.fold_left (fun s c -> if c >= ' ' && c <= '~' then s ^ (String.of_char c) else s) "" str + let l = String.split '\n' str in + let l = List.filter (fun s -> not (String.startswith "#" s)) l in + let str = String.concat "" l in + String.fold_left (fun s c -> if c >= ' ' && c <= '~' then s ^ (String.of_char c) else s) "" str let trim str = - let l = String.length str in - let rec check_left i = - if i < l && String.isspace str.[i] then - check_left (i+1) - else - i - in - let rec check_right i = - if i > 0 && String.isspace str.[i] then - check_right (i-1) - else - i+1 - in - let a = check_left 0 in - let b = (check_right (l-1)) - a in - try String.sub str a b with Invalid_argument _ -> "" + let l = String.length str in + let rec check_left i = + if i < l && String.isspace str.[i] then + check_left (i+1) + else + i + in + let rec check_right i = + if i > 0 && String.isspace str.[i] then + check_right (i-1) + else + i+1 + in + let a = check_left 0 in + let b = (check_right (l-1)) - a in + try String.sub str a b with Invalid_argument _ -> "" (* obtain the BIOS string with the given name from dmidecode *) let get_bios_string name = - try - let str, _ = Forkhelpers.execute_command_get_output 
dmidecode_prog [dmidecode_prog; "-s"; name] in - let str = trim (remove_invisible str) in - if str = "" || str = "Not Specified" then "" - else str - with _ -> "" + try + let str, _ = Forkhelpers.execute_command_get_output dmidecode_prog [dmidecode_prog; "-s"; name] in + let str = trim (remove_invisible str) in + if str = "" || str = "Not Specified" then "" + else str + with _ -> "" (* Obtain the Type 11 OEM strings from dmidecode, and prepend with the standard ones. *) let get_oem_strings () = - let standard = Xapi_globs.standard_type11_strings in - try - let result, _ = Forkhelpers.execute_command_get_output dmidecode_prog [dmidecode_prog; "-t11"; "-q"] in - let n = List.length standard in - let rec loop index a = - try - let b = String.index_from result a ':' in - let c = String.index_from result b '\n' in - let str = "oem-" ^ (string_of_int index) in - let value = trim (remove_invisible (String.sub result (b+2) (c-b-2))) in - if value <> "" then - (str, value) :: loop (index+1) c - else - loop index c - with _ -> [] - in - standard @ (loop (n+1) 0) - with _ -> standard + let standard = Xapi_globs.standard_type11_strings in + try + let result, _ = Forkhelpers.execute_command_get_output dmidecode_prog [dmidecode_prog; "-t11"; "-q"] in + let n = List.length standard in + let rec loop index a = + try + let b = String.index_from result a ':' in + let c = String.index_from result b '\n' in + let str = "oem-" ^ (string_of_int index) in + let value = trim (remove_invisible (String.sub result (b+2) (c-b-2))) in + if value <> "" then + (str, value) :: loop (index+1) c + else + loop index c + with _ -> [] + in + standard @ (loop (n+1) 0) + with _ -> standard (* Get the HP-specific ROMBIOS OEM string: * 6 bytes from the memory starting at 0xfffea *) let get_hp_rombios () = - let hp_rombios = String.make 6 ' ' in - begin try - let mem = Unix.openfile "/dev/mem" [Unix.O_RDONLY] 0 in - Stdext.Pervasiveext.finally (fun () -> - ignore (Unix.lseek mem 0xfffea Unix.SEEK_SET); - 
ignore (Unix.read mem hp_rombios 0 6)) - (fun () -> Unix.close mem) - with _ -> () - end; - if trim (remove_invisible hp_rombios) = "COMPAQ" then "COMPAQ" else "" + let hp_rombios = String.make 6 ' ' in + begin try + let mem = Unix.openfile "/dev/mem" [Unix.O_RDONLY] 0 in + Stdext.Pervasiveext.finally (fun () -> + ignore (Unix.lseek mem 0xfffea Unix.SEEK_SET); + ignore (Unix.read mem hp_rombios 0 6)) + (fun () -> Unix.close mem) + with _ -> () + end; + if trim (remove_invisible hp_rombios) = "COMPAQ" then "COMPAQ" else "" (* Get host bios strings *) let get_host_bios_strings ~__context = - info "Getting host BIOS strings."; - (* named BIOS strings *) - let dmidecode_strings = ["bios-vendor"; "bios-version"; "system-manufacturer"; - "system-product-name"; "system-version"; "system-serial-number"] in - let named_strings = List.map (fun str -> str, (get_bios_string str)) dmidecode_strings in - (* type 11 OEM strings *) - let oem_strings = get_oem_strings () in - (* HP-specific ROMBIOS OEM string *) - let hp_rombios = ["hp-rombios", get_hp_rombios ()] in - named_strings @ oem_strings @ hp_rombios + info "Getting host BIOS strings."; + (* named BIOS strings *) + let dmidecode_strings = ["bios-vendor"; "bios-version"; "system-manufacturer"; + "system-product-name"; "system-version"; "system-serial-number"] in + let named_strings = List.map (fun str -> str, (get_bios_string str)) dmidecode_strings in + (* type 11 OEM strings *) + let oem_strings = get_oem_strings () in + (* HP-specific ROMBIOS OEM string *) + let hp_rombios = ["hp-rombios", get_hp_rombios ()] in + named_strings @ oem_strings @ hp_rombios diff --git a/ocaml/xapi/bios_strings.mli b/ocaml/xapi/bios_strings.mli index d9153bf8cb5..e3400103bc3 100644 --- a/ocaml/xapi/bios_strings.mli +++ b/ocaml/xapi/bios_strings.mli @@ -15,4 +15,4 @@ (** Obtains the BIOS strings of localhost. 
*) val get_host_bios_strings : - __context:Context.t -> (string * string) list + __context:Context.t -> (string * string) list diff --git a/ocaml/xapi/bootloader_test.ml b/ocaml/xapi/bootloader_test.ml index bbdf220fe94..2cda9087846 100644 --- a/ocaml/xapi/bootloader_test.ml +++ b/ocaml/xapi/bootloader_test.ml @@ -15,15 +15,15 @@ open Debug open Bootloader -let _ = +let _ = let disk = ref "" in - Arg.parse [ + Arg.parse [ "-debug", Arg.Set debug_flag, "enable debug output" ] - (fun x -> + (fun x -> if !disk = "" then disk := x else - warn ("Ignoring unexpected extra argument: " ^ x)) - "Test code for pygrub wrapper"; + warn ("Ignoring unexpected extra argument: " ^ x)) + "Test code for pygrub wrapper"; let disk = !disk in if disk = "" then failwith "You must supply a disk name as an argument"; diff --git a/ocaml/xapi/cancel_tasks.ml b/ocaml/xapi/cancel_tasks.ml index fa4c70e48b7..9b11021793f 100644 --- a/ocaml/xapi/cancel_tasks.ml +++ b/ocaml/xapi/cancel_tasks.ml @@ -18,61 +18,61 @@ open D let safe_wrapper n f x = try f x - with e -> + with e -> debug "Caught exception while cancelling tasks (%s): %s" n (ExnHelper.string_of_exn e); Debug.log_backtrace e (Backtrace.get e) let update_all_allowed_operations ~__context = - let open Stats in - let all_vms = Db.VM.get_all ~__context - and all_vbds = Db.VBD.get_all ~__context - and all_vifs = Db.VIF.get_all ~__context - and all_vdis = Db.VDI.get_all ~__context - and all_srs = Db.SR.get_all ~__context - and all_pbds = Db.PBD.get_all ~__context - and all_hosts = Db.Host.get_all ~__context - and pool = Helpers.get_pool ~__context in - (* VM *) - time_this "Cancel_tasks.update_all_allowed_operations: VM" (fun () -> - debug "Updating allowed operations: VM"; - List.iter (safe_wrapper "allowed_ops - VMs" (fun self -> Xapi_vm_lifecycle.update_allowed_operations ~__context ~self)) all_vms; - debug "Finished updating allowed operations: VM"); - (* VBD *) - time_this "Cancel_tasks.update_all_allowed_operations: VBD" (fun () -> - 
debug "Updating allowed operations: VBD"; - List.iter (safe_wrapper "allowed_ops - VBDs" (fun self -> Xapi_vbd_helpers.update_allowed_operations ~__context ~self)) all_vbds; - debug "Finished updating allowed operations: VBD"); - (* VIF *) - time_this "Cancel_tasks.update_all_allowed_operations: VIF" (fun () -> - debug "Updating allowed operations: VIF"; - List.iter (safe_wrapper "allowed_ops - VIFs" (fun self -> Xapi_vif_helpers.update_allowed_operations ~__context ~self)) all_vifs; - debug "Finished updating allowed operations: VIF"); - (* VDI *) - time_this "Cancel_tasks.update_all_allowed_operations: VDI" (fun () -> - debug "Updating allowed operations: VDI"; - let sr_records = List.map (fun sr -> (sr, Db.SR.get_record_internal ~__context ~self:sr)) all_srs in - let pbd_records = List.map (fun pbd -> (pbd, Db.PBD.get_record ~__context ~self:pbd)) all_pbds in - let vbd_records = List.map (fun vbd -> (vbd, Db.VBD.get_record_internal ~__context ~self:vbd)) all_vbds in - List.iter (safe_wrapper "allowed_ops - VDIs" - (fun self -> Xapi_vdi.update_allowed_operations_internal ~__context ~self ~sr_records ~pbd_records ~vbd_records)) all_vdis; - debug "Finished updating allowed operations: VDI"); - (* SR *) - time_this "Cancel_tasks.update_all_allowed_operations: SR" (fun () -> - debug "Updating allowed operations: SR"; - List.iter (safe_wrapper "allowed_ops" (fun self -> - Db.SR.set_current_operations ~__context ~self ~value:[]; - Xapi_sr_operations.update_allowed_operations ~__context ~self)) all_srs; - debug "Finished updating allowed operations: SR"); - (* Host *) - time_this "Cancel_tasks.update_all_allowed_operations: host" (fun () -> - debug "Updating allowed operations: host"; - List.iter (safe_wrapper "allowed_ops - host" (fun self -> Xapi_host_helpers.update_allowed_operations ~__context ~self)) all_hosts; - debug "Finished updating allowed operations: host"); - (* Pool *) - time_this "Cancel_tasks.update_all_allowed_operations: pool" (fun () -> - debug 
"Updating allowed operations: pool"; - safe_wrapper "allowed_ops - pool" (fun pool -> Xapi_pool_helpers.update_allowed_operations ~__context ~self:pool) pool; - debug "Finished updating allowed operations: pool") + let open Stats in + let all_vms = Db.VM.get_all ~__context + and all_vbds = Db.VBD.get_all ~__context + and all_vifs = Db.VIF.get_all ~__context + and all_vdis = Db.VDI.get_all ~__context + and all_srs = Db.SR.get_all ~__context + and all_pbds = Db.PBD.get_all ~__context + and all_hosts = Db.Host.get_all ~__context + and pool = Helpers.get_pool ~__context in + (* VM *) + time_this "Cancel_tasks.update_all_allowed_operations: VM" (fun () -> + debug "Updating allowed operations: VM"; + List.iter (safe_wrapper "allowed_ops - VMs" (fun self -> Xapi_vm_lifecycle.update_allowed_operations ~__context ~self)) all_vms; + debug "Finished updating allowed operations: VM"); + (* VBD *) + time_this "Cancel_tasks.update_all_allowed_operations: VBD" (fun () -> + debug "Updating allowed operations: VBD"; + List.iter (safe_wrapper "allowed_ops - VBDs" (fun self -> Xapi_vbd_helpers.update_allowed_operations ~__context ~self)) all_vbds; + debug "Finished updating allowed operations: VBD"); + (* VIF *) + time_this "Cancel_tasks.update_all_allowed_operations: VIF" (fun () -> + debug "Updating allowed operations: VIF"; + List.iter (safe_wrapper "allowed_ops - VIFs" (fun self -> Xapi_vif_helpers.update_allowed_operations ~__context ~self)) all_vifs; + debug "Finished updating allowed operations: VIF"); + (* VDI *) + time_this "Cancel_tasks.update_all_allowed_operations: VDI" (fun () -> + debug "Updating allowed operations: VDI"; + let sr_records = List.map (fun sr -> (sr, Db.SR.get_record_internal ~__context ~self:sr)) all_srs in + let pbd_records = List.map (fun pbd -> (pbd, Db.PBD.get_record ~__context ~self:pbd)) all_pbds in + let vbd_records = List.map (fun vbd -> (vbd, Db.VBD.get_record_internal ~__context ~self:vbd)) all_vbds in + List.iter (safe_wrapper "allowed_ops - 
VDIs" + (fun self -> Xapi_vdi.update_allowed_operations_internal ~__context ~self ~sr_records ~pbd_records ~vbd_records)) all_vdis; + debug "Finished updating allowed operations: VDI"); + (* SR *) + time_this "Cancel_tasks.update_all_allowed_operations: SR" (fun () -> + debug "Updating allowed operations: SR"; + List.iter (safe_wrapper "allowed_ops" (fun self -> + Db.SR.set_current_operations ~__context ~self ~value:[]; + Xapi_sr_operations.update_allowed_operations ~__context ~self)) all_srs; + debug "Finished updating allowed operations: SR"); + (* Host *) + time_this "Cancel_tasks.update_all_allowed_operations: host" (fun () -> + debug "Updating allowed operations: host"; + List.iter (safe_wrapper "allowed_ops - host" (fun self -> Xapi_host_helpers.update_allowed_operations ~__context ~self)) all_hosts; + debug "Finished updating allowed operations: host"); + (* Pool *) + time_this "Cancel_tasks.update_all_allowed_operations: pool" (fun () -> + debug "Updating allowed operations: pool"; + safe_wrapper "allowed_ops - pool" (fun pool -> Xapi_pool_helpers.update_allowed_operations ~__context ~self:pool) pool; + debug "Finished updating allowed operations: pool") (* !!! This code was written in a world when tasks, current_operations and allowed_operations were persistent. This is no longer the case (we changed this to reduce writes to flash for OEM case + to simplify xapi logic elsewhere). 
@@ -102,22 +102,22 @@ let cancel_tasks_on_host ~__context ~host_opt = let tasks = Db.Task.get_all ~__context in let this_host_tasks, should_update_all_allowed_operations = - match host_opt with - None -> - debug "cancel_tasks_on_host: master will cancel all tasks"; - tasks, true - | (Some host) -> - debug "cancel_tasks_on_host: host = %s" (Ref.string_of host); - let should_cancel = - if List.mem host !hosts_already_cancelled then true else begin - hosts_already_cancelled := host :: !hosts_already_cancelled; - false - end in - List.filter (fun t -> Db.Task.get_resident_on ~__context ~self:t = host) tasks, should_cancel in + match host_opt with + None -> + debug "cancel_tasks_on_host: master will cancel all tasks"; + tasks, true + | (Some host) -> + debug "cancel_tasks_on_host: host = %s" (Ref.string_of host); + let should_cancel = + if List.mem host !hosts_already_cancelled then true else begin + hosts_already_cancelled := host :: !hosts_already_cancelled; + false + end in + List.filter (fun t -> Db.Task.get_resident_on ~__context ~self:t = host) tasks, should_cancel in let mytask = Context.get_task_id __context in let incomplete_tasks = List.filter (fun t -> - let s = Db.Task.get_status ~__context ~self:t in - t<>mytask && (s=`pending || s=`cancelling)) this_host_tasks in + let s = Db.Task.get_status ~__context ~self:t in + t<>mytask && (s=`pending || s=`cancelling)) this_host_tasks in (* Need to remove any current_operations associated with these tasks *) let all_vms = Db.VM.get_all ~__context @@ -140,9 +140,9 @@ let cancel_tasks_on_host ~__context ~host_opt = let hosts = default (Db.Host.get_all ~__context) (may (fun x -> [x]) host_opt) in List.iter (safe_wrapper "host_helpers - cancel tasks" (fun self -> Xapi_host_helpers.cancel_tasks ~__context ~self ~all_tasks_in_db:tasks ~task_ids)) hosts; List.iter (safe_wrapper "host_helpers - allowed ops" (fun self -> Xapi_host_helpers.update_allowed_operations ~__context ~self)) hosts; - + List.iter (safe_wrapper 
"destroy_tasks" (fun task -> TaskHelper.destroy ~__context task)) incomplete_tasks; - + if should_update_all_allowed_operations then update_all_allowed_operations ~__context ) diff --git a/ocaml/xapi/cancel_tests.ml b/ocaml/xapi/cancel_tests.ml index b46ef8a077f..f3a0243f732 100644 --- a/ocaml/xapi/cancel_tests.ml +++ b/ocaml/xapi/cancel_tests.ml @@ -16,7 +16,7 @@ open Pervasiveext let debug (fmt: ('a , unit, string, unit) format4) = (* Convert calendar time, x, to tm in UTC *) - let of_float x = + let of_float x = let time = Unix.gmtime x in Printf.sprintf "%04d%02d%02dT%02d:%02d:%02dZ" (time.Unix.tm_year+1900) @@ -26,406 +26,406 @@ let debug (fmt: ('a , unit, string, unit) format4) = time.Unix.tm_min time.Unix.tm_sec in - Printf.kprintf (fun s -> Printf.printf "%s [%d] %s\n" (of_float (Unix.gettimeofday ())) (Thread.id (Thread.self ())) s; flush stdout) fmt + Printf.kprintf (fun s -> Printf.printf "%s [%d] %s\n" (of_float (Unix.gettimeofday ())) (Thread.id (Thread.self ())) s; flush stdout) fmt let host = ref "127.0.0.1" let port = ref 80 let username = ref "root" let password = ref "" let vm = ref "" -let make_rpc ?dbg () xml = - let open Xmlrpc_client in - let http = { - (xmlrpc ~version:"1.0" "/") with - Http.Request.additional_headers = Opt.default [] (Opt.map (fun dbg -> [ "X-Http-other-config-dbg", dbg ]) dbg) - } in - XMLRPC_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http xml +let make_rpc ?dbg () xml = + let open Xmlrpc_client in + let http = { + (xmlrpc ~version:"1.0" "/") with + Http.Request.additional_headers = Opt.default [] (Opt.map (fun dbg -> [ "X-Http-other-config-dbg", dbg ]) dbg) + } in + XMLRPC_protocol.rpc ~srcstr:"graph" ~dststr:"xapi" ~transport:(TCP(!host, !port)) ~http xml let wait_for_guest_agent ~rpc ~session_id ~vm = - debug "prepare: waiting for guest agent in VM %s" (Ref.string_of vm); - let classes = [ Printf.sprintf "VM/%s" (Ref.string_of vm) ] in - let timeout = 5.0 in - let rec wait ~token = - let 
open Event_types in - let event_from = Client.Event.from ~rpc ~session_id ~classes ~token ~timeout |> event_from_of_rpc in - let records = List.map Event_helper.record_of_event event_from.events in - let valid = function - | Event_helper.VM (vm, Some vm_rec) -> - vm_rec.API.vM_guest_metrics <> Ref.null - | _ -> false in - if not (List.fold_left (||) false (List.map valid records)) - then wait ~token:event_from.token in - let token = "" in - wait ~token + debug "prepare: waiting for guest agent in VM %s" (Ref.string_of vm); + let classes = [ Printf.sprintf "VM/%s" (Ref.string_of vm) ] in + let timeout = 5.0 in + let rec wait ~token = + let open Event_types in + let event_from = Client.Event.from ~rpc ~session_id ~classes ~token ~timeout |> event_from_of_rpc in + let records = List.map Event_helper.record_of_event event_from.events in + let valid = function + | Event_helper.VM (vm, Some vm_rec) -> + vm_rec.API.vM_guest_metrics <> Ref.null + | _ -> false in + if not (List.fold_left (||) false (List.map valid records)) + then wait ~token:event_from.token in + let token = "" in + wait ~token type operation = - | Start - | Shutdown - | Reboot (* assume hard_ versions are strictly smaller than clean_ versions *) - | Suspend - | Resume - | Pool_migrate - | VBD_plug - | VBD_unplug - | VIF_plug - | VIF_unplug + | Start + | Shutdown + | Reboot (* assume hard_ versions are strictly smaller than clean_ versions *) + | Suspend + | Resume + | Pool_migrate + | VBD_plug + | VBD_unplug + | VIF_plug + | VIF_unplug with rpc let operations = [ - VBD_plug - ; VBD_unplug - ; VIF_plug - ; VIF_unplug - ; Start - ; Shutdown - ; Reboot - ; Suspend - ; Resume + VBD_plug +; VBD_unplug +; VIF_plug +; VIF_unplug +; Start +; Shutdown +; Reboot +; Suspend +; Resume (* ; Pool_migrate *) ] type environment = { - session_id: API.ref_session; - vm: API.ref_VM; - id: string; - net: API.ref_network; - vdi: API.ref_VDI; + session_id: API.ref_session; + vm: API.ref_VM; + id: string; + net: 
API.ref_network; + vdi: API.ref_VDI; } let find_or_create_vif { session_id = session_id; vm = vm; net = net } = - let rpc = make_rpc () in - let vifs = Client.VM.get_VIFs ~rpc ~session_id ~self:vm in - let vif_records = List.map (fun vif -> Client.VIF.get_record ~rpc ~session_id ~self:vif) vifs in - let vif_idx = "4" in (* arbitrary *) - try - let vif, _ = List.find (fun (_, r) -> r.API.vIF_device = vif_idx) (List.combine vifs vif_records) in - vif - with Not_found -> - Client.VIF.create ~rpc ~session_id ~vM:vm ~network:net ~mAC:"" ~device:vif_idx ~mTU:1500L ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[] + let rpc = make_rpc () in + let vifs = Client.VM.get_VIFs ~rpc ~session_id ~self:vm in + let vif_records = List.map (fun vif -> Client.VIF.get_record ~rpc ~session_id ~self:vif) vifs in + let vif_idx = "4" in (* arbitrary *) + try + let vif, _ = List.find (fun (_, r) -> r.API.vIF_device = vif_idx) (List.combine vifs vif_records) in + vif + with Not_found -> + Client.VIF.create ~rpc ~session_id ~vM:vm ~network:net ~mAC:"" ~device:vif_idx ~mTU:1500L ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[] let find_or_create_vbd { session_id = session_id; vm = vm; vdi = vdi } = - let rpc = make_rpc () in - let vbds = Client.VM.get_VBDs ~rpc ~session_id ~self:vm in - let vbd_records = List.map (fun vbd -> Client.VBD.get_record ~rpc ~session_id ~self:vbd) vbds in - let vbd_idx = "4" in (* arbitrary *) - try - let vbd, _ = List.find (fun (_, r) -> r.API.vBD_userdevice = vbd_idx) (List.combine vbds vbd_records) in - vbd - with Not_found -> - Client.VBD.create ~rpc ~session_id ~vM:vm ~vDI:vdi ~userdevice:vbd_idx ~mode:`RO ~_type:`CD ~other_config:[] ~bootable:false ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] + let rpc = make_rpc () in + let vbds = Client.VM.get_VBDs ~rpc ~session_id 
~self:vm in + let vbd_records = List.map (fun vbd -> Client.VBD.get_record ~rpc ~session_id ~self:vbd) vbds in + let vbd_idx = "4" in (* arbitrary *) + try + let vbd, _ = List.find (fun (_, r) -> r.API.vBD_userdevice = vbd_idx) (List.combine vbds vbd_records) in + vbd + with Not_found -> + Client.VBD.create ~rpc ~session_id ~vM:vm ~vDI:vdi ~userdevice:vbd_idx ~mode:`RO ~_type:`CD ~other_config:[] ~bootable:false ~unpluggable:true ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] (* Get the VM into a state for performing a particular op. If this fails it may be because of left-over parallel activity from the previous run; it should suffice to run the function a second time. *) let prepare ({ session_id = session_id; vm = vm } as env) op rpc = - let one () = - let power_state = Client.VM.get_power_state ~rpc ~session_id ~self:vm in - begin match op, power_state with - | Start, `Halted -> () - | Start, `Running -> - wait_for_guest_agent ~rpc ~session_id ~vm; - debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); - Client.VM.clean_shutdown ~rpc ~session_id ~vm - | Start, `Paused -> - debug "prepare: VM.unpause %s" (Ref.string_of vm); - Client.VM.unpause ~rpc ~session_id ~vm; - wait_for_guest_agent ~rpc ~session_id ~vm; - debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); - Client.VM.clean_shutdown ~rpc ~session_id ~vm - | Start, `Suspended -> - debug "prepare: VM.resume %s" (Ref.string_of vm); - Client.VM.resume ~rpc ~session_id ~vm ~start_paused:false ~force:false; - debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); - Client.VM.clean_shutdown ~rpc ~session_id ~vm - | Resume, `Halted -> - debug "prepare: VM.start %s" (Ref.string_of vm); - Client.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false; - wait_for_guest_agent ~rpc ~session_id ~vm; - debug "prepare: VM.suspend %s" (Ref.string_of vm); - Client.VM.suspend ~rpc ~session_id ~vm - | Resume, `Running -> - debug "prepare: VM.suspend %s" (Ref.string_of vm); - 
Client.VM.suspend ~rpc ~session_id ~vm - | Resume, `Paused -> - debug "prepare: VM.unpause %s" (Ref.string_of vm); - Client.VM.unpause ~rpc ~session_id ~vm; - wait_for_guest_agent ~rpc ~session_id ~vm; - debug "prepare: VM.suspend %s" (Ref.string_of vm); - Client.VM.suspend ~rpc ~session_id ~vm - | Resume, `Suspended -> () - | _, `Running -> - wait_for_guest_agent ~rpc ~session_id ~vm - | _, `Halted -> - debug "prepare: VM.start %s" (Ref.string_of vm); - Client.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false; - wait_for_guest_agent ~rpc ~session_id ~vm; - | _, `Paused -> - debug "prepare: VM.unpause %s" (Ref.string_of vm); - Client.VM.unpause ~rpc ~session_id ~vm; - wait_for_guest_agent ~rpc ~session_id ~vm; - | _, `Suspended -> - debug "prepare: VM.resume %s" (Ref.string_of vm); - Client.VM.resume ~rpc ~session_id ~vm ~start_paused:false ~force:false; - end; - begin match op with - | VIF_unplug -> - let vif = find_or_create_vif env in - if Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif - then Client.VIF.unplug ~rpc ~session_id ~self:vif; - Client.VIF.plug ~rpc ~session_id ~self:vif - | VIF_plug -> - let vif = find_or_create_vif env in - if not(Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif) - then Client.VIF.plug ~rpc ~session_id ~self:vif; - Client.VIF.unplug ~rpc ~session_id ~self:vif - | VBD_unplug -> - let vbd = find_or_create_vbd env in - if Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd - then Client.VBD.unplug ~rpc ~session_id ~self:vbd; - Client.VBD.plug ~rpc ~session_id ~self:vbd - | VBD_plug -> - let vbd = find_or_create_vbd env in - if not(Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd) - then Client.VBD.plug ~rpc ~session_id ~self:vbd; - Client.VBD.unplug ~rpc ~session_id ~self:vbd - | _ -> () - end - in - try - one () - with Api_errors.Server_error(code, params) -> - Printf.fprintf stderr "prepare: ignoring one-off error %s %s\n" code (String.concat " " params); - one () (* a 
single second go should be enough *) + let one () = + let power_state = Client.VM.get_power_state ~rpc ~session_id ~self:vm in + begin match op, power_state with + | Start, `Halted -> () + | Start, `Running -> + wait_for_guest_agent ~rpc ~session_id ~vm; + debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); + Client.VM.clean_shutdown ~rpc ~session_id ~vm + | Start, `Paused -> + debug "prepare: VM.unpause %s" (Ref.string_of vm); + Client.VM.unpause ~rpc ~session_id ~vm; + wait_for_guest_agent ~rpc ~session_id ~vm; + debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); + Client.VM.clean_shutdown ~rpc ~session_id ~vm + | Start, `Suspended -> + debug "prepare: VM.resume %s" (Ref.string_of vm); + Client.VM.resume ~rpc ~session_id ~vm ~start_paused:false ~force:false; + debug "prepare: VM.clean_shutdown %s" (Ref.string_of vm); + Client.VM.clean_shutdown ~rpc ~session_id ~vm + | Resume, `Halted -> + debug "prepare: VM.start %s" (Ref.string_of vm); + Client.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false; + wait_for_guest_agent ~rpc ~session_id ~vm; + debug "prepare: VM.suspend %s" (Ref.string_of vm); + Client.VM.suspend ~rpc ~session_id ~vm + | Resume, `Running -> + debug "prepare: VM.suspend %s" (Ref.string_of vm); + Client.VM.suspend ~rpc ~session_id ~vm + | Resume, `Paused -> + debug "prepare: VM.unpause %s" (Ref.string_of vm); + Client.VM.unpause ~rpc ~session_id ~vm; + wait_for_guest_agent ~rpc ~session_id ~vm; + debug "prepare: VM.suspend %s" (Ref.string_of vm); + Client.VM.suspend ~rpc ~session_id ~vm + | Resume, `Suspended -> () + | _, `Running -> + wait_for_guest_agent ~rpc ~session_id ~vm + | _, `Halted -> + debug "prepare: VM.start %s" (Ref.string_of vm); + Client.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false; + wait_for_guest_agent ~rpc ~session_id ~vm; + | _, `Paused -> + debug "prepare: VM.unpause %s" (Ref.string_of vm); + Client.VM.unpause ~rpc ~session_id ~vm; + wait_for_guest_agent ~rpc ~session_id ~vm; + | _, 
`Suspended -> + debug "prepare: VM.resume %s" (Ref.string_of vm); + Client.VM.resume ~rpc ~session_id ~vm ~start_paused:false ~force:false; + end; + begin match op with + | VIF_unplug -> + let vif = find_or_create_vif env in + if Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif + then Client.VIF.unplug ~rpc ~session_id ~self:vif; + Client.VIF.plug ~rpc ~session_id ~self:vif + | VIF_plug -> + let vif = find_or_create_vif env in + if not(Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif) + then Client.VIF.plug ~rpc ~session_id ~self:vif; + Client.VIF.unplug ~rpc ~session_id ~self:vif + | VBD_unplug -> + let vbd = find_or_create_vbd env in + if Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd + then Client.VBD.unplug ~rpc ~session_id ~self:vbd; + Client.VBD.plug ~rpc ~session_id ~self:vbd + | VBD_plug -> + let vbd = find_or_create_vbd env in + if not(Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd) + then Client.VBD.plug ~rpc ~session_id ~self:vbd; + Client.VBD.unplug ~rpc ~session_id ~self:vbd + | _ -> () + end + in + try + one () + with Api_errors.Server_error(code, params) -> + Printf.fprintf stderr "prepare: ignoring one-off error %s %s\n" code (String.concat " " params); + one () (* a single second go should be enough *) let execute ({ session_id = session_id; vm = vm } as env) op rpc = match op with - | Start -> - debug "execute: VM.start %s" (Ref.string_of vm); - Client.Async.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false - | Shutdown -> - debug "execute: VM.clean_shutdown %s" (Ref.string_of vm); - Client.Async.VM.clean_shutdown ~rpc ~session_id ~vm - | Reboot -> - debug "execute: VM.clean_reboot %s" (Ref.string_of vm); - Client.Async.VM.clean_reboot ~rpc ~session_id ~vm - | Suspend -> - debug "execute: VM.suspend %s" (Ref.string_of vm); - Client.Async.VM.suspend ~rpc ~session_id ~vm - | Resume -> - debug "execute: VM.resume %s" (Ref.string_of vm); - Client.Async.VM.resume ~rpc ~session_id ~vm 
~start_paused:false ~force:false - | Pool_migrate -> - debug "execute: VM.pool_migrate %s to localhost" (Ref.string_of vm); - let host = Client.VM.get_resident_on ~rpc ~session_id ~self:vm in - Client.Async.VM.pool_migrate ~rpc ~session_id ~vm ~host ~options:["live", "true"] - | VBD_plug -> - let vbd = find_or_create_vbd env in - Client.Async.VBD.plug ~rpc ~session_id ~self:vbd - | VBD_unplug -> - let vbd = find_or_create_vbd env in - Client.Async.VBD.unplug ~rpc ~session_id ~self:vbd - | VIF_plug -> - let vif = find_or_create_vif env in - Client.Async.VIF.plug ~rpc ~session_id ~self:vif - | VIF_unplug -> - let vif = find_or_create_vif env in - Client.Async.VIF.unplug ~rpc ~session_id ~self:vif + | Start -> + debug "execute: VM.start %s" (Ref.string_of vm); + Client.Async.VM.start ~rpc ~session_id ~vm ~start_paused:false ~force:false + | Shutdown -> + debug "execute: VM.clean_shutdown %s" (Ref.string_of vm); + Client.Async.VM.clean_shutdown ~rpc ~session_id ~vm + | Reboot -> + debug "execute: VM.clean_reboot %s" (Ref.string_of vm); + Client.Async.VM.clean_reboot ~rpc ~session_id ~vm + | Suspend -> + debug "execute: VM.suspend %s" (Ref.string_of vm); + Client.Async.VM.suspend ~rpc ~session_id ~vm + | Resume -> + debug "execute: VM.resume %s" (Ref.string_of vm); + Client.Async.VM.resume ~rpc ~session_id ~vm ~start_paused:false ~force:false + | Pool_migrate -> + debug "execute: VM.pool_migrate %s to localhost" (Ref.string_of vm); + let host = Client.VM.get_resident_on ~rpc ~session_id ~self:vm in + Client.Async.VM.pool_migrate ~rpc ~session_id ~vm ~host ~options:["live", "true"] + | VBD_plug -> + let vbd = find_or_create_vbd env in + Client.Async.VBD.plug ~rpc ~session_id ~self:vbd + | VBD_unplug -> + let vbd = find_or_create_vbd env in + Client.Async.VBD.unplug ~rpc ~session_id ~self:vbd + | VIF_plug -> + let vif = find_or_create_vif env in + Client.Async.VIF.plug ~rpc ~session_id ~self:vif + | VIF_unplug -> + let vif = find_or_create_vif env in + 
Client.Async.VIF.unplug ~rpc ~session_id ~self:vif module OpMap = Map.Make(struct type t = operation let compare = compare end) type tc = operation * int (* operation * cancel point index *) let test ({ session_id = session_id; vm = vm; id = id } as env) (op, n) = - let dbg = "cancel_tests" in - prepare env op (make_rpc ()); - let module XN = Xenops_client.Client in - XN.DEBUG.trigger "cancel_tests" "set-cancel-trigger" [ dbg; string_of_int n ]; - let task = execute env op (make_rpc ~dbg ()) in - let rpc = make_rpc () in - Tasks.wait_for_all ~rpc ~session_id ~tasks:[task]; - begin match Client.Task.get_status ~rpc ~session_id ~self:task with - | `pending -> failwith "task is pending (not cancelled)" - | `success -> failwith "task succeed (not cancelled)" - | `failure -> failwith "task failed (not cancelled)" - | `cancelling -> failwith "task cancelling (not cancelled)" - | `cancelled -> () - end; - Client.Task.destroy ~rpc ~session_id ~self:task; - (* Wait for the states to stabilise *) + let dbg = "cancel_tests" in + prepare env op (make_rpc ()); + let module XN = Xenops_client.Client in + XN.DEBUG.trigger "cancel_tests" "set-cancel-trigger" [ dbg; string_of_int n ]; + let task = execute env op (make_rpc ~dbg ()) in + let rpc = make_rpc () in + Tasks.wait_for_all ~rpc ~session_id ~tasks:[task]; + begin match Client.Task.get_status ~rpc ~session_id ~self:task with + | `pending -> failwith "task is pending (not cancelled)" + | `success -> failwith "task succeed (not cancelled)" + | `failure -> failwith "task failed (not cancelled)" + | `cancelling -> failwith "task cancelling (not cancelled)" + | `cancelled -> () + end; + Client.Task.destroy ~rpc ~session_id ~self:task; + (* Wait for the states to stabilise *) - let suspend_vdi () = Client.VM.get_suspend_VDI ~rpc ~session_id ~self:vm in - let xenopsd f = - try - let _, info = XN.VM.stat dbg id in - f (Some info) - with _ -> f None in - let running_in_xenopsd () = xenopsd (function - | Some info -> 
info.Xenops_interface.Vm.power_state = Xenops_interface.Running - | None -> false) in - let paused_in_xenopsd () = xenopsd (function - | Some info -> info.Xenops_interface.Vm.power_state = Xenops_interface.Paused - | None -> false) in - let missing_in_xenopsd () = xenopsd (function - | Some _ -> false - | None -> true) in - let domain f = - let open Xenstore in - Xenops_helpers.with_xs - (fun xs -> - try - match xs.Xs.directory (Printf.sprintf "/vm/%s/domains" id) with - | [ domid ] -> - let open Xenctrl in - with_intf - (fun xc -> - let di = domain_getinfo xc (int_of_string domid) in - f (Some di) - ) - | _ -> f None - with _ -> f None - ) in - let running_domain () = domain (function - | Some di -> not(di.Xenctrl.paused) && not(di.Xenctrl.shutdown) - | None -> false) in - let paused_domain () = domain (function - | Some di -> di.Xenctrl.paused && not(di.Xenctrl.shutdown) - | None -> false) in - let missing_domain () = domain (function - | Some _ -> false - | None -> true) in + let suspend_vdi () = Client.VM.get_suspend_VDI ~rpc ~session_id ~self:vm in + let xenopsd f = + try + let _, info = XN.VM.stat dbg id in + f (Some info) + with _ -> f None in + let running_in_xenopsd () = xenopsd (function + | Some info -> info.Xenops_interface.Vm.power_state = Xenops_interface.Running + | None -> false) in + let paused_in_xenopsd () = xenopsd (function + | Some info -> info.Xenops_interface.Vm.power_state = Xenops_interface.Paused + | None -> false) in + let missing_in_xenopsd () = xenopsd (function + | Some _ -> false + | None -> true) in + let domain f = + let open Xenstore in + Xenops_helpers.with_xs + (fun xs -> + try + match xs.Xs.directory (Printf.sprintf "/vm/%s/domains" id) with + | [ domid ] -> + let open Xenctrl in + with_intf + (fun xc -> + let di = domain_getinfo xc (int_of_string domid) in + f (Some di) + ) + | _ -> f None + with _ -> f None + ) in + let running_domain () = domain (function + | Some di -> not(di.Xenctrl.paused) && not(di.Xenctrl.shutdown) + | 
None -> false) in + let paused_domain () = domain (function + | Some di -> di.Xenctrl.paused && not(di.Xenctrl.shutdown) + | None -> false) in + let missing_domain () = domain (function + | Some _ -> false + | None -> true) in - let devices_in_sync () = - let vifs_xenops = - List.filter (fun (_, state) -> state.Xenops_interface.Vif.active) - (XN.VIF.list dbg env.id) in - let vifs_xapi = - List.filter (fun vif -> Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif) - (Client.VM.get_VIFs ~rpc ~session_id ~self:env.vm) in + let devices_in_sync () = + let vifs_xenops = + List.filter (fun (_, state) -> state.Xenops_interface.Vif.active) + (XN.VIF.list dbg env.id) in + let vifs_xapi = + List.filter (fun vif -> Client.VIF.get_currently_attached ~rpc ~session_id ~self:vif) + (Client.VM.get_VIFs ~rpc ~session_id ~self:env.vm) in - let vbds_xenops = - List.filter (fun (_, state) -> state.Xenops_interface.Vbd.active) - (XN.VBD.list dbg env.id) in - let vbds_xapi = - List.filter (fun vbd -> Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd) - (Client.VM.get_VBDs ~rpc ~session_id ~self:env.vm) in - List.length vifs_xenops = (List.length vifs_xapi) && (List.length vbds_xenops = (List.length vbds_xapi)) in + let vbds_xenops = + List.filter (fun (_, state) -> state.Xenops_interface.Vbd.active) + (XN.VBD.list dbg env.id) in + let vbds_xapi = + List.filter (fun vbd -> Client.VBD.get_currently_attached ~rpc ~session_id ~self:vbd) + (Client.VM.get_VBDs ~rpc ~session_id ~self:env.vm) in + List.length vifs_xenops = (List.length vifs_xapi) && (List.length vbds_xenops = (List.length vbds_xapi)) in - let finished = ref false in - let start = Unix.gettimeofday () in - let timeout = 30. in - while not(!finished) && (Unix.gettimeofday () -. 
start < timeout) do - finally - (fun () -> - finished := - match Client.VM.get_power_state ~rpc ~session_id ~self:vm with - | `Halted -> - missing_in_xenopsd () && missing_domain () && (suspend_vdi () = Ref.null) - | `Running -> - running_in_xenopsd () && running_domain () && (suspend_vdi () = Ref.null) && devices_in_sync () - | `Suspended -> - missing_in_xenopsd () && missing_domain () && (suspend_vdi () <> Ref.null) - | `Paused -> - paused_in_xenopsd () && paused_domain () && (suspend_vdi () = Ref.null) && devices_in_sync () - ) (fun () -> Thread.delay 1.) - done; - if not !finished then failwith "State never stabilised" + let finished = ref false in + let start = Unix.gettimeofday () in + let timeout = 30. in + while not(!finished) && (Unix.gettimeofday () -. start < timeout) do + finally + (fun () -> + finished := + match Client.VM.get_power_state ~rpc ~session_id ~self:vm with + | `Halted -> + missing_in_xenopsd () && missing_domain () && (suspend_vdi () = Ref.null) + | `Running -> + running_in_xenopsd () && running_domain () && (suspend_vdi () = Ref.null) && devices_in_sync () + | `Suspended -> + missing_in_xenopsd () && missing_domain () && (suspend_vdi () <> Ref.null) + | `Paused -> + paused_in_xenopsd () && paused_domain () && (suspend_vdi () = Ref.null) && devices_in_sync () + ) (fun () -> Thread.delay 1.) 
+ done; + if not !finished then failwith "State never stabilised" let cancel_points_seen = "debug_info:cancel_points_seen" let counter = ref 0 let cancel_points_of session_id f = - incr counter; - let rpc = make_rpc ~dbg:(Printf.sprintf "cancel_points_of:%d" !counter) () in - let task : API.ref_task = f rpc in - let rpc = make_rpc () in - Tasks.wait_for_all ~rpc ~session_id ~tasks:[task]; - let status = Client.Task.get_status ~rpc ~session_id ~self:task in - if status <> `success then begin - let error = Client.Task.get_error_info ~rpc ~session_id ~self:task in - failwith (Printf.sprintf "Failed with %s" (String.concat " " error)) - end; - let other_config = Client.Task.get_other_config ~rpc ~session_id ~self:task in - Client.Task.destroy ~rpc ~session_id ~self:task; - if List.mem_assoc cancel_points_seen other_config - then Some (int_of_string (List.assoc cancel_points_seen other_config)) - else None + incr counter; + let rpc = make_rpc ~dbg:(Printf.sprintf "cancel_points_of:%d" !counter) () in + let task : API.ref_task = f rpc in + let rpc = make_rpc () in + Tasks.wait_for_all ~rpc ~session_id ~tasks:[task]; + let status = Client.Task.get_status ~rpc ~session_id ~self:task in + if status <> `success then begin + let error = Client.Task.get_error_info ~rpc ~session_id ~self:task in + failwith (Printf.sprintf "Failed with %s" (String.concat " " error)) + end; + let other_config = Client.Task.get_other_config ~rpc ~session_id ~self:task in + Client.Task.destroy ~rpc ~session_id ~self:task; + if List.mem_assoc cancel_points_seen other_config + then Some (int_of_string (List.assoc cancel_points_seen other_config)) + else None let probe_tcs env = - debug "probe: computing lists of test cases"; - let operation_to_cancel_points = ref OpMap.empty in - List.iter - (fun operation -> - prepare env operation (make_rpc ()); - match cancel_points_of env.session_id (execute env operation) with - | Some x -> - operation_to_cancel_points := OpMap.add operation x 
!operation_to_cancel_points - | None -> - debug "Seen no cancel points" - ) operations; - OpMap.iter - (fun k v -> - debug "probe: %s has %d cancel points" (k |> rpc_of_operation |> Jsonrpc.to_string) v - ) !operation_to_cancel_points; - OpMap.fold - (fun op num acc -> - (* need to trigger cancel points [1..num] *) - let rec integers first last = - if first > last - then [] - else first :: (integers (first + 1) last) in - List.map (fun i -> op, i) (integers 1 num) @ acc - ) !operation_to_cancel_points [] + debug "probe: computing lists of test cases"; + let operation_to_cancel_points = ref OpMap.empty in + List.iter + (fun operation -> + prepare env operation (make_rpc ()); + match cancel_points_of env.session_id (execute env operation) with + | Some x -> + operation_to_cancel_points := OpMap.add operation x !operation_to_cancel_points + | None -> + debug "Seen no cancel points" + ) operations; + OpMap.iter + (fun k v -> + debug "probe: %s has %d cancel points" (k |> rpc_of_operation |> Jsonrpc.to_string) v + ) !operation_to_cancel_points; + OpMap.fold + (fun op num acc -> + (* need to trigger cancel points [1..num] *) + let rec integers first last = + if first > last + then [] + else first :: (integers (first + 1) last) in + List.map (fun i -> op, i) (integers 1 num) @ acc + ) !operation_to_cancel_points [] let run env = - let all = probe_tcs env in - debug "probe: there are a total of %d tests" (List.length all); - List.iter - (fun (k, v) -> - debug "test: %s cancelling at %d" (k |> rpc_of_operation |> Jsonrpc.to_string) v; - test env (k, v) - ) all; - debug "tests complete" + let all = probe_tcs env in + debug "probe: there are a total of %d tests" (List.length all); + List.iter + (fun (k, v) -> + debug "test: %s cancelling at %d" (k |> rpc_of_operation |> Jsonrpc.to_string) v; + test env (k, v) + ) all; + debug "tests complete" let _ = - Arg.parse [ - "-h", Arg.Set_string host, "hostname to connect to"; - "-p", Arg.Set_int port, "port number to connect to"; - 
"-u", Arg.Set_string username, "username to connect with"; - "-pw", Arg.Set_string password, "password to connect with"; - "-vm", Arg.Set_string vm, "name of VM to manipulate"; - ] + Arg.parse [ + "-h", Arg.Set_string host, "hostname to connect to"; + "-p", Arg.Set_int port, "port number to connect to"; + "-u", Arg.Set_string username, "username to connect with"; + "-pw", Arg.Set_string password, "password to connect with"; + "-vm", Arg.Set_string vm, "name of VM to manipulate"; + ] (fun x -> Printf.fprintf stderr "Ignoring argument: %s\n" x) "Test VM lifecycle cancellation leaves the system in a valid state"; - let rpc = make_rpc () in - let session_id = Client.Session.login_with_password ~rpc ~uname:!username ~pwd:!password ~version:"1.2" ~originator:"cancel_tests" in - finally - (fun () -> - match Client.VM.get_by_name_label ~rpc ~session_id ~label:!vm with - | [] -> - failwith (Printf.sprintf "Failed to find a VM with name: %s" !vm) - | [ v ] -> - let net = Client.Network.get_by_name_label ~rpc ~session_id ~label:"Host internal management network" in - if List.length net = 0 then failwith "Failed to find the host internal management network"; - let vdi = - let tools_iso_filter = "field \"is_tools_iso\"=\"true\"" in - begin match Client.VDI.get_all_records_where !rpc session_id tools_iso_filter with - | (vdi, _)::_ -> vdi - | [] -> failwith "Failed to find the tools ISO"; - end - in - let env = { - session_id = session_id; - vm = v; - id = Client.VM.get_uuid ~rpc ~session_id ~self:v; - net = List.hd net; - vdi; - } in - run env - | _ -> - failwith (Printf.sprintf "Found multiple VMs with name: %s" !vm) - ) (fun () -> - Client.Session.logout ~rpc ~session_id - ) + let rpc = make_rpc () in + let session_id = Client.Session.login_with_password ~rpc ~uname:!username ~pwd:!password ~version:"1.2" ~originator:"cancel_tests" in + finally + (fun () -> + match Client.VM.get_by_name_label ~rpc ~session_id ~label:!vm with + | [] -> + failwith (Printf.sprintf "Failed to 
find a VM with name: %s" !vm) + | [ v ] -> + let net = Client.Network.get_by_name_label ~rpc ~session_id ~label:"Host internal management network" in + if List.length net = 0 then failwith "Failed to find the host internal management network"; + let vdi = + let tools_iso_filter = "field \"is_tools_iso\"=\"true\"" in + begin match Client.VDI.get_all_records_where !rpc session_id tools_iso_filter with + | (vdi, _)::_ -> vdi + | [] -> failwith "Failed to find the tools ISO"; + end + in + let env = { + session_id = session_id; + vm = v; + id = Client.VM.get_uuid ~rpc ~session_id ~self:v; + net = List.hd net; + vdi; + } in + run env + | _ -> + failwith (Printf.sprintf "Found multiple VMs with name: %s" !vm) + ) (fun () -> + Client.Session.logout ~rpc ~session_id + ) diff --git a/ocaml/xapi/certificates.ml b/ocaml/xapi/certificates.ml index 56a21675e45..da0a16d59ac 100644 --- a/ocaml/xapi/certificates.ml +++ b/ocaml/xapi/certificates.ml @@ -50,13 +50,13 @@ let get_type is_cert = let safe_char c = match c with - | 'A'..'Z' - | 'a'..'z' - | '0'..'9' - | '.' | '_' | '-' -> - true - | _ -> - false + | 'A'..'Z' + | 'a'..'z' + | '0'..'9' + | '.' 
| '_' | '-' -> + true + | _ -> + false let not_safe_chars name = let n = String.length name in @@ -93,7 +93,7 @@ let raise_does_not_exist is_cert n = let raise_corrupt is_cert n = raise_server_error n (if is_cert then certificate_corrupt else crl_corrupt) - + let raise_library_corrupt () = raise (Server_error (certificate_library_corrupt, [])) @@ -109,10 +109,10 @@ let local_sync () = try rehash() with - | e -> - warn "Exception rehashing certificates: %s" - (ExnHelper.string_of_exn e); - raise_library_corrupt() + | e -> + warn "Exception rehashing certificates: %s" + (ExnHelper.string_of_exn e); + raise_library_corrupt() let cert_perms is_cert = let stat = Unix.stat (library_path is_cert) in @@ -132,10 +132,10 @@ let host_install is_cert ~name ~cert = Unix.chmod filename (cert_perms is_cert); rehash() with - | e -> - warn "Exception installing %s %s: %s" (get_type is_cert) name - (ExnHelper.string_of_exn e); - raise_library_corrupt() + | e -> + warn "Exception installing %s %s: %s" (get_type is_cert) name + (ExnHelper.string_of_exn e); + raise_library_corrupt() let host_uninstall is_cert ~name = if not_safe is_cert name then @@ -148,10 +148,10 @@ let host_uninstall is_cert ~name = Sys.remove filename; rehash() with - | e -> - warn "Exception uninstalling %s %s: %s" (get_type is_cert) name - (ExnHelper.string_of_exn e); - raise_corrupt is_cert name + | e -> + warn "Exception uninstalling %s %s: %s" (get_type is_cert) name + (ExnHelper.string_of_exn e); + raise_corrupt is_cert name let get_cert is_cert name = if not_safe is_cert name then @@ -160,10 +160,10 @@ let get_cert is_cert name = try string_of_file filename with - | e -> - warn "Exception reading %s %s: %s" (get_type is_cert) name - (ExnHelper.string_of_exn e); - raise_corrupt is_cert name + | e -> + warn "Exception reading %s %s: %s" (get_type is_cert) name + (ExnHelper.string_of_exn e); + raise_corrupt is_cert name let sync_all_hosts ~__context hosts = let exn = ref None in @@ -174,12 +174,12 @@ let 
sync_all_hosts ~__context hosts = try Client.Host.certificate_sync rpc session_id host with - | e -> - exn := Some e) + | e -> + exn := Some e) hosts); match !exn with - | Some e -> raise e - | None -> () + | Some e -> raise e + | None -> () let sync_certs_crls is_cert list_func install_func uninstall_func ~__context master_certs host = @@ -217,19 +217,19 @@ let sync_certs is_cert ~__context master_certs host = Client.Host.crl_uninstall rpc session_id host c) ~__context master_certs host -let sync_certs_all_hosts is_cert ~__context master_certs hosts_but_master = +let sync_certs_all_hosts is_cert ~__context master_certs hosts_but_master = let exn = ref None in List.iter (fun host -> try sync_certs is_cert ~__context master_certs host with - | e -> - exn := Some e) + | e -> + exn := Some e) hosts_but_master; match !exn with - | Some e -> raise e - | None -> () + | Some e -> raise e + | None -> () let pool_sync ~__context = let hosts = Db.Host.get_all ~__context in @@ -247,16 +247,16 @@ let pool_install is_cert ~__context ~name ~cert = try pool_sync ~__context with - | exn -> - begin - try - host_uninstall is_cert ~name - with - | e -> - warn "Exception unwinding install of %s %s: %s" - (get_type is_cert) name (ExnHelper.string_of_exn e) - end; - raise exn + | exn -> + begin + try + host_uninstall is_cert ~name + with + | e -> + warn "Exception unwinding install of %s %s: %s" + (get_type is_cert) name (ExnHelper.string_of_exn e) + end; + raise exn let pool_uninstall is_cert ~__context ~name = host_uninstall is_cert ~name; @@ -264,28 +264,28 @@ let pool_uninstall is_cert ~__context ~name = let rec trim_cert = function | x :: xs -> - if x = pem_certificate_header then - trim_cert' [x] xs - else - trim_cert xs + if x = pem_certificate_header then + trim_cert' [x] xs + else + trim_cert xs | [] -> - [] + [] and trim_cert' acc = function | x :: xs -> - if x = pem_certificate_footer then - List.rev (x :: acc) - else - trim_cert' (x :: acc) xs + if x = pem_certificate_footer 
then + List.rev (x :: acc) + else + trim_cert' (x :: acc) xs | [] -> - [] + [] let get_server_certificate () = try String.concat "\n" (trim_cert (String.split '\n' (string_of_file !Xapi_globs.server_cert_path))) with - | e -> - warn "Exception reading server certificate: %s" - (ExnHelper.string_of_exn e); - raise_library_corrupt() + | e -> + warn "Exception reading server certificate: %s" + (ExnHelper.string_of_exn e); + raise_library_corrupt() diff --git a/ocaml/xapi/cli_cmdtable.ml b/ocaml/xapi/cli_cmdtable.ml index 5af88bc1988..198b4af4cfe 100644 --- a/ocaml/xapi/cli_cmdtable.ml +++ b/ocaml/xapi/cli_cmdtable.ml @@ -11,16 +11,16 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Command-Line Interface (CLI) - *) +*) -type op = - Cli_printer.print_fn -> - (Rpc.call -> Rpc.response) -> - API.ref_session -> ((string*string) list) -> unit +type op = + Cli_printer.print_fn -> + (Rpc.call -> Rpc.response) -> + API.ref_session -> ((string*string) list) -> unit -type imp = +type imp = With_fd of (Unix.file_descr -> op) | With_fd_local_session of (Unix.file_descr -> op) | No_fd of op @@ -29,20 +29,20 @@ type imp = (* FIXME: print warnings to the standard error channel when a user invokes a deprecated command. 
*) (** special options for CLI commands *) -type flag = -| Vm_selectors (** adds a "vm" parameter for the name of a VM (rather than a UUID) *) -| Host_selectors (** a "host" parameter for the name of a host (rather than a UUID) *) -| Sr_selectors (** a "sr" parameter for the name of a SR (rather than a UUID) *) -| Standard (** includes the command in the list of common commands displayed by "xe help" *) -| Neverforward -| Hidden -| Deprecated of string list +type flag = + | Vm_selectors (** adds a "vm" parameter for the name of a VM (rather than a UUID) *) + | Host_selectors (** a "host" parameter for the name of a host (rather than a UUID) *) + | Sr_selectors (** a "sr" parameter for the name of a SR (rather than a UUID) *) + | Standard (** includes the command in the list of common commands displayed by "xe help" *) + | Neverforward + | Hidden + | Deprecated of string list type cmd_spec = - {reqd:string list; - optn:string list; (* optional arguments *) - help:string; - implementation: imp; - flags:flag list} + {reqd:string list; + optn:string list; (* optional arguments *) + help:string; + implementation: imp; + flags:flag list} diff --git a/ocaml/xapi/cli_frontend.ml b/ocaml/xapi/cli_frontend.ml index 16a3fcaf435..a2ce030213f 100644 --- a/ocaml/xapi/cli_frontend.ml +++ b/ocaml/xapi/cli_frontend.ml @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Command-Line Interface (CLI) - *) - +*) + (* ---------------------------------------------------------------------- XE-CLI Front End ---------------------------------------------------------------------- *) @@ -33,41 +33,41 @@ open D let vmselectors = [""] let vmselectorsinfo = " The simplest way to select the VM on which the \ - operation is to be performed is by supplying the argument \ - 'vm='. VMs can also be \ - specified by filtering the full list of VMs on the values of fields. 
For \ - example, specifying 'power-state=halted' will select all VMs whose \ - power-state field is equal to 'halted'. Where multiple VMs are matching, \ - the option '--multiple' must be specified to perform the operation. The \ - full list of fields that can be matched can be obtained by the command \ - 'xe vm-list params=all'. If no parameters to select VMs are given, \ - the operation will be performed on all VMs." + operation is to be performed is by supplying the argument \ + 'vm='. VMs can also be \ + specified by filtering the full list of VMs on the values of fields. For \ + example, specifying 'power-state=halted' will select all VMs whose \ + power-state field is equal to 'halted'. Where multiple VMs are matching, \ + the option '--multiple' must be specified to perform the operation. The \ + full list of fields that can be matched can be obtained by the command \ + 'xe vm-list params=all'. If no parameters to select VMs are given, \ + the operation will be performed on all VMs." let hostselectors = [""] let hostselectorsinfo = " The simplest way to select the host on which the \ - operation is to be performed is by supplying the argument \ - 'host='. Hosts can also be \ - specified by filtering the full list of hosts on the values of fields. For \ - example, specifying 'enabled=true' will select all hosts whose \ - 'enabled' field is equal to 'true'. Where multiple hosts are matching, \ - and the operation can be performed on multiple hosts, the option '--multiple' \ - must be specified to perform the operation. The \ - full list of fields that can be matched can be obtained by the command \ - 'xe host-list params=all'. If no parameters to select hosts are given, \ - the operation will be performed on all hosts." + operation is to be performed is by supplying the argument \ + 'host='. Hosts can also be \ + specified by filtering the full list of hosts on the values of fields. 
For \ + example, specifying 'enabled=true' will select all hosts whose \ + 'enabled' field is equal to 'true'. Where multiple hosts are matching, \ + and the operation can be performed on multiple hosts, the option '--multiple' \ + must be specified to perform the operation. The \ + full list of fields that can be matched can be obtained by the command \ + 'xe host-list params=all'. If no parameters to select hosts are given, \ + the operation will be performed on all hosts." let srselectors = [""] let srselectorsinfo = " The simplest way to select the SR on which the \ - operation is to be performed is by supplying the argument \ - 'sr='. SRs can also be \ - specified by filtering the full list of hosts on the values of fields. For \ - example, specifying 'enabled=true' will select all hosts whose \ - 'enabled' field is equal to 'true'. Where multiple SRs are matching, \ - and the operation can be performed on multiple SRs, the option '--multiple' \ - must be specified to perform the operation. The \ - full list of fields that can be matched can be obtained by the command \ - 'xe sr-list params=all'. If no parameters to select SRs are given, \ - the operation will be performed on all SRs." + operation is to be performed is by supplying the argument \ + 'sr='. SRs can also be \ + specified by filtering the full list of hosts on the values of fields. For \ + example, specifying 'enabled=true' will select all hosts whose \ + 'enabled' field is equal to 'true'. Where multiple SRs are matching, \ + and the operation can be performed on multiple SRs, the option '--multiple' \ + must be specified to perform the operation. The \ + full list of fields that can be matched can be obtained by the command \ + 'xe sr-list params=all'. If no parameters to select SRs are given, \ + the operation will be performed on all SRs." 
let rec cmdtable_data : (string*cmd_spec) list = [ @@ -143,22 +143,22 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "message-destroy", - { - reqd=["uuid"]; - optn=[]; - help="Destroy an existing message."; - implementation=No_fd Cli_operations.message_destroy; - flags=[]; - }; - -(* "host-introduce", + "message-destroy", { - reqd=["name"; "address"; "remote-port"; "remote-username"; "remote-password"]; - optn=["description"]; - help="Introduce a remote host"; - implementation=No_fd Cli_operations.host_introduce - };*) + reqd=["uuid"]; + optn=[]; + help="Destroy an existing message."; + implementation=No_fd Cli_operations.message_destroy; + flags=[]; + }; + + (* "host-introduce", + { + reqd=["name"; "address"; "remote-port"; "remote-username"; "remote-password"]; + optn=["description"]; + help="Introduce a remote host"; + implementation=No_fd Cli_operations.host_introduce + };*) "pool-enable-binary-storage", { @@ -194,7 +194,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_sync_database; flags=[]; }; - "pool-join", + "pool-join", { reqd=["master-address"; "master-username"; "master-password"]; optn=["force"]; @@ -203,7 +203,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-emergency-reset-master", + "pool-emergency-reset-master", { reqd=["master-address"]; optn=[]; @@ -212,7 +212,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Neverforward]; }; - "pool-emergency-transition-to-master", + "pool-emergency-transition-to-master", { reqd=[]; optn=[]; @@ -221,7 +221,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Neverforward]; }; - "pool-recover-slaves", + "pool-recover-slaves", { reqd=[]; optn=[]; @@ -230,7 +230,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-eject", + "pool-eject", { reqd=["host-uuid"]; optn=[]; @@ -239,7 +239,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-dump-database", + 
"pool-dump-database", { reqd=["file-name"]; optn=[]; @@ -248,7 +248,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-restore-database", + "pool-restore-database", { reqd=["file-name"]; optn=["dry-run"]; @@ -257,7 +257,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-enable-external-auth", + "pool-enable-external-auth", { reqd=["auth-type"; "service-name";]; optn=["uuid"; "config:"]; @@ -266,7 +266,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "pool-disable-external-auth", + "pool-disable-external-auth", { reqd=[]; optn=["uuid"; "config:"]; @@ -274,7 +274,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_disable_external_auth; flags=[]; }; - + "pool-initialize-wlb", { reqd=["wlb_url"; "wlb_username"; "wlb_password"; "xenserver_username"; "xenserver_password"]; @@ -283,7 +283,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_initialize_wlb; flags=[]; }; - + "pool-deconfigure-wlb", { reqd=[]; @@ -292,7 +292,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_deconfigure_wlb; flags=[]; }; - + "pool-send-wlb-configuration", { reqd=[]; @@ -301,7 +301,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_send_wlb_configuration; flags=[]; }; - + "pool-retrieve-wlb-configuration", { reqd=[]; @@ -310,7 +310,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_retrieve_wlb_configuration; flags=[]; }; - + "pool-retrieve-wlb-recommendations", { reqd=[]; @@ -328,7 +328,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=With_fd Cli_operations.pool_retrieve_wlb_report; flags=[Neverforward]; }; - + "pool-retrieve-wlb-diagnostics", { reqd=[]; @@ -337,7 +337,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=With_fd Cli_operations.pool_retrieve_wlb_diagnostics; 
flags=[Neverforward]; }; - + "pool-send-test-post", { reqd=["dest-host"; "dest-port"; "body"]; @@ -346,7 +346,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_send_test_post; flags=[]; }; - + "pool-certificate-install", { reqd=["filename"]; @@ -355,7 +355,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=With_fd Cli_operations.pool_certificate_install; flags=[]; }; - + "pool-certificate-uninstall", { reqd=["name"]; @@ -364,7 +364,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_certificate_uninstall; flags=[]; }; - + "pool-certificate-list", { reqd=[]; @@ -373,7 +373,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_certificate_list; flags=[]; }; - + "pool-crl-install", { reqd=["filename"]; @@ -382,7 +382,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=With_fd Cli_operations.pool_crl_install; flags=[]; }; - + "pool-crl-uninstall", { reqd=["name"]; @@ -391,7 +391,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_crl_uninstall; flags=[]; }; - + "pool-crl-list", { reqd=[]; @@ -400,7 +400,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_crl_list; flags=[]; }; - + "pool-certificate-sync", { reqd=[]; @@ -409,35 +409,35 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.pool_certificate_sync; flags=[]; }; - - "pool-set-vswitch-controller", - { + + "pool-set-vswitch-controller", + { reqd=["address"]; optn=[]; help="Set the IP address of the vswitch controller."; implementation=No_fd Cli_operations.pool_set_vswitch_controller; flags=[Hidden]; - }; + }; - "pool-enable-ssl-legacy", - { + "pool-enable-ssl-legacy", + { reqd=[]; optn=["uuid"]; help="Set ssl-legacy to True on each host."; implementation=No_fd Cli_operations.pool_enable_ssl_legacy; flags=[]; - }; + }; - 
"pool-disable-ssl-legacy", - { + "pool-disable-ssl-legacy", + { reqd=[]; optn=["uuid"]; help="Set ssl-legacy to False on each host."; implementation=No_fd Cli_operations.pool_disable_ssl_legacy; flags=[]; - }; - - "host-is-in-emergency-mode", + }; + + "host-is-in-emergency-mode", { reqd=[]; optn=[]; @@ -446,7 +446,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Neverforward]; }; - "host-forget", + "host-forget", { reqd=["uuid"]; optn=[]; @@ -455,7 +455,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-declare-dead", + "host-declare-dead", { reqd=["uuid"]; optn=[]; @@ -464,7 +464,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-disable", + "host-disable", { reqd=[]; optn=[]; @@ -473,7 +473,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-sync-data", + "host-sync-data", { reqd=[]; optn=[]; @@ -482,7 +482,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-enable", + "host-enable", { reqd=[]; optn=[]; @@ -491,52 +491,52 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-enable-local-storage-caching", - { - reqd=["sr-uuid"]; - optn=[]; - help="Enable local storage caching on the specified host"; - implementation=No_fd Cli_operations.host_enable_local_storage_caching; - flags=[Host_selectors]; - }; - - "host-disable-local-storage-caching", - { - reqd=[]; - optn=[]; - help="Disable local storage caching on the specified host"; - implementation=No_fd Cli_operations.host_disable_local_storage_caching; - flags=[Host_selectors]; - }; - - "pool-enable-local-storage-caching", - { - reqd=["uuid"]; - optn=[]; - help="Enable local storage caching across the pool"; - implementation=No_fd Cli_operations.pool_enable_local_storage_caching; - flags=[]; - }; - - "pool-disable-local-storage-caching", - { - reqd=["uuid"]; - optn=[]; - help="Disable local storage caching across the pool"; - implementation=No_fd 
Cli_operations.pool_disable_local_storage_caching; - flags=[]; - }; - - "pool-apply-edition", - { - reqd=["edition"]; - optn=["uuid"; "license-server-address"; "license-server-port"]; - help="Apply an edition across the pool"; - implementation=No_fd Cli_operations.pool_apply_edition; - flags=[]; - }; - - "host-shutdown", + "host-enable-local-storage-caching", + { + reqd=["sr-uuid"]; + optn=[]; + help="Enable local storage caching on the specified host"; + implementation=No_fd Cli_operations.host_enable_local_storage_caching; + flags=[Host_selectors]; + }; + + "host-disable-local-storage-caching", + { + reqd=[]; + optn=[]; + help="Disable local storage caching on the specified host"; + implementation=No_fd Cli_operations.host_disable_local_storage_caching; + flags=[Host_selectors]; + }; + + "pool-enable-local-storage-caching", + { + reqd=["uuid"]; + optn=[]; + help="Enable local storage caching across the pool"; + implementation=No_fd Cli_operations.pool_enable_local_storage_caching; + flags=[]; + }; + + "pool-disable-local-storage-caching", + { + reqd=["uuid"]; + optn=[]; + help="Disable local storage caching across the pool"; + implementation=No_fd Cli_operations.pool_disable_local_storage_caching; + flags=[]; + }; + + "pool-apply-edition", + { + reqd=["edition"]; + optn=["uuid"; "license-server-address"; "license-server-port"]; + help="Apply an edition across the pool"; + implementation=No_fd Cli_operations.pool_apply_edition; + flags=[]; + }; + + "host-shutdown", { reqd=[]; optn=[]; @@ -545,7 +545,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-reboot", + "host-reboot", { reqd=[]; optn=[]; @@ -554,7 +554,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-power-on", + "host-power-on", { reqd=[]; optn=[]; @@ -563,7 +563,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-dmesg", + "host-dmesg", { reqd=[]; optn=[]; @@ -572,7 +572,7 @@ let rec 
cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-crashdump-upload", + "host-crashdump-upload", { reqd=["uuid"]; optn=["url";"http_proxy"]; @@ -581,7 +581,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-crashdump-destroy", + "host-crashdump-destroy", { reqd=["uuid"]; optn=[]; @@ -590,7 +590,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-bugreport-upload", + "host-bugreport-upload", { reqd=[]; optn=["url"; "http_proxy"]; @@ -599,7 +599,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-backup", + "host-backup", { reqd=["file-name"]; optn=[]; @@ -608,7 +608,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-restore", + "host-restore", { reqd=["file-name"]; optn=[]; @@ -617,7 +617,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-logs-download", + "host-logs-download", { reqd=[]; optn=["file-name"]; @@ -626,7 +626,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-signal-networking-change", + "host-signal-networking-change", { reqd=[]; optn=[]; @@ -635,7 +635,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Neverforward; Hidden]; }; - "host-send-debug-keys", + "host-send-debug-keys", { reqd=["host-uuid"; "keys"]; optn=[]; @@ -662,7 +662,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-emergency-management-reconfigure", + "host-emergency-management-reconfigure", { reqd=["interface"]; optn=[]; @@ -671,7 +671,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[ Neverforward ]; }; - "host-emergency-ha-disable", + "host-emergency-ha-disable", { reqd=[]; optn=["force"]; @@ -680,7 +680,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[ Neverforward ]; }; - "host-management-reconfigure", + "host-management-reconfigure", { reqd=["pif-uuid"]; optn=[]; @@ -689,7 +689,7 @@ let rec 
cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-management-disable", + "host-management-disable", { reqd=[]; optn=[]; @@ -698,25 +698,25 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[ Neverforward ]; }; - "host-compute-free-memory", - { - reqd=[]; - optn=[]; - help="Computes the amount of free memory on the host."; - implementation=No_fd Cli_operations.host_compute_free_memory; - flags=[Host_selectors]; - }; + "host-compute-free-memory", + { + reqd=[]; + optn=[]; + help="Computes the amount of free memory on the host."; + implementation=No_fd Cli_operations.host_compute_free_memory; + flags=[Host_selectors]; + }; - "host-compute-memory-overhead", - { - reqd=[]; - optn=[]; - help="Computes the virtualization memory overhead of a host."; - implementation=No_fd Cli_operations.host_compute_memory_overhead; - flags=[Host_selectors]; - }; + "host-compute-memory-overhead", + { + reqd=[]; + optn=[]; + help="Computes the virtualization memory overhead of a host."; + implementation=No_fd Cli_operations.host_compute_memory_overhead; + flags=[Host_selectors]; + }; - "host-get-system-status-capabilities", + "host-get-system-status-capabilities", { reqd=[]; optn=[]; @@ -725,7 +725,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Neverforward; Host_selectors]; }; - "host-get-system-status", + "host-get-system-status", { reqd=["filename"]; optn=["entries"; "output"]; @@ -734,7 +734,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-set-hostname-live", + "host-set-hostname-live", { reqd=["host-uuid"; "host-name"]; optn=[]; @@ -742,8 +742,8 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.host_set_hostname_live; flags=[Host_selectors]; }; - - "host-set-power-on-mode", + + "host-set-power-on-mode", { reqd=["power-on-mode"]; optn=["power-on-config"]; @@ -752,7 +752,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Host_selectors]; }; - "host-call-plugin", + 
"host-call-plugin", { reqd=["host-uuid"; "plugin"; "fn"]; optn=["args:"]; @@ -760,7 +760,7 @@ let rec cmdtable_data : (string*cmd_spec) list = implementation=No_fd Cli_operations.host_call_plugin; flags=[]; }; - + "host-retrieve-wlb-evacuate-recommendations", { reqd=["uuid"]; @@ -770,7 +770,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Hidden]; }; - "host-enable-external-auth", + "host-enable-external-auth", { reqd=["host-uuid"; "auth-type"; "service-name"]; optn=["config:"]; @@ -779,7 +779,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Hidden]; }; - "host-disable-external-auth", + "host-disable-external-auth", { reqd=["host-uuid"]; optn=["config:"]; @@ -788,7 +788,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Hidden]; }; - "host-refresh-pack-info", + "host-refresh-pack-info", { reqd=["host-uuid"]; optn=[]; @@ -797,7 +797,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Hidden]; }; - "host-cpu-info", + "host-cpu-info", { reqd=[]; optn=["uuid"]; @@ -806,7 +806,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-get-cpu-features", + "host-get-cpu-features", { reqd=[]; optn=["uuid"]; @@ -815,7 +815,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-enable-display", + "host-enable-display", { reqd=["uuid"]; optn=[]; @@ -824,7 +824,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "host-disable-display", + "host-disable-display", { reqd=["uuid"]; optn=[]; @@ -833,7 +833,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-upload", + "patch-upload", { reqd=["file-name"]; optn=[]; @@ -842,7 +842,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-destroy", + "patch-destroy", { reqd=["uuid"]; optn=[]; @@ -851,16 +851,16 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "update-upload", + "update-upload", { reqd=["file-name"; "host-uuid"]; optn=[]; help="Stream new update to the server."; 
implementation=With_fd Cli_operations.update_upload; flags=[]; - }; + }; - "patch-precheck", + "patch-precheck", { reqd=["uuid"; "host-uuid"]; optn=[]; @@ -869,7 +869,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-apply", + "patch-apply", { reqd=["uuid"; "host-uuid"]; optn=[]; @@ -878,7 +878,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-pool-apply", + "patch-pool-apply", { reqd=["uuid"]; optn=[]; @@ -887,7 +887,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-clean", + "patch-clean", { reqd=["uuid"]; optn=[]; @@ -896,16 +896,16 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "patch-pool-clean", - { - reqd=["uuid"]; - optn=[]; - help="Delete a previously uploaded patch file on all hosts in the pool."; - implementation=No_fd Cli_operations.patch_pool_clean; - flags=[]; - }; + "patch-pool-clean", + { + reqd=["uuid"]; + optn=[]; + help="Delete a previously uploaded patch file on all hosts in the pool."; + implementation=No_fd Cli_operations.patch_pool_clean; + flags=[]; + }; - "user-password-change", + "user-password-change", { reqd=["new"]; optn=["old"]; @@ -914,124 +914,124 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[]; }; - "vm-compute-memory-overhead", - { - reqd=[]; - optn=[]; - help="Computes the virtualization memory overhead of a VM."; - implementation=No_fd Cli_operations.vm_compute_memory_overhead; - flags=[Vm_selectors]; - }; - - "vm-memory-balloon", - { - reqd=["target"]; - optn=[]; - help="Set the memory target for a running VM. The given value must be within the "^ - "range defined by the VM's memory_dynamic_min and memory_dynamic_max values."; - implementation=No_fd Cli_operations.vm_memory_target_set; - flags=[Deprecated ["vm-memory-dynamic-range-set"]; Vm_selectors; Hidden]; - }; - - "vm-memory-dynamic-range-set", - { - reqd=["min"; "max"]; - optn=[]; - help="Configure the dynamic memory range of a VM. 
The dynamic memory \ - range defines soft lower and upper limits for a VM's memory. It's \ - possible to change these fields when a VM is running or halted. The \ - dynamic range must fit within the static range."; - implementation=No_fd Cli_operations.vm_memory_dynamic_range_set; - flags=[Vm_selectors]; - }; - - "vm-memory-static-range-set", - { - reqd=["min"; "max"]; - optn=[]; - help="Configure the static memory range of a VM. The static memory \ - range defines hard lower and upper limits for a VM's memory. It's \ - possible to change these fields only when a VM is halted. The static \ - range must encompass the dynamic range."; - implementation=No_fd Cli_operations.vm_memory_static_range_set; - flags=[Vm_selectors]; - }; - - "vm-memory-limits-set", - { - reqd=["static-min"; "static-max"; "dynamic-min"; "dynamic-max"]; - optn=[]; - help="Configure the memory limits of a VM."; - implementation=No_fd Cli_operations.vm_memory_limits_set; - flags=[Vm_selectors]; - }; - - "vm-memory-set", - { - reqd=["memory"]; - optn=[]; - help="Configure the memory allocation of a VM."; - implementation=No_fd Cli_operations.vm_memory_set; - flags=[Vm_selectors]; - }; - - "vm-memory-target-set", - { - reqd=["target"]; - optn=[]; - help="Set the memory target for a halted or running VM. 
The given \ - value must be within the range defined by the VM's memory_static_min \ - and memory_static_max values."; - implementation=No_fd Cli_operations.vm_memory_target_set; - flags=[Vm_selectors]; - }; - - "vm-memory-target-wait", - { - reqd=[]; - optn=[]; - help="Wait for a running VM to reach its current memory target."; - implementation=No_fd Cli_operations.vm_memory_target_wait; - flags=[Vm_selectors; Hidden]; - }; - - "vm-data-source-list", - { - reqd=[]; - optn=[]; - help="List the data sources that can be recorded for a VM."; - implementation=No_fd Cli_operations.vm_data_source_list; - flags=[Vm_selectors]; - }; - - "vm-data-source-record", - { - reqd=["data-source"]; - optn=[]; - help="Record the specified data source for a VM."; - implementation=No_fd Cli_operations.vm_data_source_record; - flags=[Vm_selectors]; - }; - - "vm-data-source-query", - { - reqd=["data-source"]; - optn=[]; - help="Query the last value read from a VM data source."; - implementation=No_fd Cli_operations.vm_data_source_query; - flags=[Vm_selectors]; - }; - - "vm-data-source-forget", - { - reqd=["data-source"]; - optn=[]; - help="Stop recording the specified data source for a VM, and forget all of the recorded data."; - implementation=No_fd Cli_operations.vm_data_source_forget; - flags=[Vm_selectors]; - }; - - "vm-memory-shadow-multiplier-set", + "vm-compute-memory-overhead", + { + reqd=[]; + optn=[]; + help="Computes the virtualization memory overhead of a VM."; + implementation=No_fd Cli_operations.vm_compute_memory_overhead; + flags=[Vm_selectors]; + }; + + "vm-memory-balloon", + { + reqd=["target"]; + optn=[]; + help="Set the memory target for a running VM. 
The given value must be within the "^ + "range defined by the VM's memory_dynamic_min and memory_dynamic_max values."; + implementation=No_fd Cli_operations.vm_memory_target_set; + flags=[Deprecated ["vm-memory-dynamic-range-set"]; Vm_selectors; Hidden]; + }; + + "vm-memory-dynamic-range-set", + { + reqd=["min"; "max"]; + optn=[]; + help="Configure the dynamic memory range of a VM. The dynamic memory \ + range defines soft lower and upper limits for a VM's memory. It's \ + possible to change these fields when a VM is running or halted. The \ + dynamic range must fit within the static range."; + implementation=No_fd Cli_operations.vm_memory_dynamic_range_set; + flags=[Vm_selectors]; + }; + + "vm-memory-static-range-set", + { + reqd=["min"; "max"]; + optn=[]; + help="Configure the static memory range of a VM. The static memory \ + range defines hard lower and upper limits for a VM's memory. It's \ + possible to change these fields only when a VM is halted. The static \ + range must encompass the dynamic range."; + implementation=No_fd Cli_operations.vm_memory_static_range_set; + flags=[Vm_selectors]; + }; + + "vm-memory-limits-set", + { + reqd=["static-min"; "static-max"; "dynamic-min"; "dynamic-max"]; + optn=[]; + help="Configure the memory limits of a VM."; + implementation=No_fd Cli_operations.vm_memory_limits_set; + flags=[Vm_selectors]; + }; + + "vm-memory-set", + { + reqd=["memory"]; + optn=[]; + help="Configure the memory allocation of a VM."; + implementation=No_fd Cli_operations.vm_memory_set; + flags=[Vm_selectors]; + }; + + "vm-memory-target-set", + { + reqd=["target"]; + optn=[]; + help="Set the memory target for a halted or running VM. 
The given \ + value must be within the range defined by the VM's memory_static_min \ + and memory_static_max values."; + implementation=No_fd Cli_operations.vm_memory_target_set; + flags=[Vm_selectors]; + }; + + "vm-memory-target-wait", + { + reqd=[]; + optn=[]; + help="Wait for a running VM to reach its current memory target."; + implementation=No_fd Cli_operations.vm_memory_target_wait; + flags=[Vm_selectors; Hidden]; + }; + + "vm-data-source-list", + { + reqd=[]; + optn=[]; + help="List the data sources that can be recorded for a VM."; + implementation=No_fd Cli_operations.vm_data_source_list; + flags=[Vm_selectors]; + }; + + "vm-data-source-record", + { + reqd=["data-source"]; + optn=[]; + help="Record the specified data source for a VM."; + implementation=No_fd Cli_operations.vm_data_source_record; + flags=[Vm_selectors]; + }; + + "vm-data-source-query", + { + reqd=["data-source"]; + optn=[]; + help="Query the last value read from a VM data source."; + implementation=No_fd Cli_operations.vm_data_source_query; + flags=[Vm_selectors]; + }; + + "vm-data-source-forget", + { + reqd=["data-source"]; + optn=[]; + help="Stop recording the specified data source for a VM, and forget all of the recorded data."; + implementation=No_fd Cli_operations.vm_data_source_forget; + flags=[Vm_selectors]; + }; + + "vm-memory-shadow-multiplier-set", { reqd=["multiplier"]; optn=[]; @@ -1040,14 +1040,14 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Vm_selectors]; }; - "vm-clone", - { - reqd=["new-name-label"]; - optn=["new-name-description"]; - help="Clone an existing VM, using storage-level fast disk clone operation where available."; - implementation=No_fd Cli_operations.vm_clone; - flags=[Standard; Vm_selectors]; - }; + "vm-clone", + { + reqd=["new-name-label"]; + optn=["new-name-description"]; + help="Clone an existing VM, using storage-level fast disk clone operation where available."; + implementation=No_fd Cli_operations.vm_clone; + flags=[Standard; Vm_selectors]; 
+ }; "vm-snapshot", { @@ -1067,7 +1067,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Standard; Vm_selectors]; }; - "vm-checkpoint", + "vm-checkpoint", { reqd=["new-name-label"]; optn=["new-name-description"]; @@ -1076,125 +1076,125 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Standard; Vm_selectors]; }; - "vm-copy", - { - reqd=["new-name-label"]; - optn=["new-name-description"; "sr-uuid"]; - help="Copy an existing VM, but without using storage-level fast disk clone operation (even if this is available). The disk images of the copied VM are guaranteed to be 'full images' - i.e. not part of a CoW chain."; - implementation=No_fd Cli_operations.vm_copy; - flags=[Standard; Vm_selectors]; - }; + "vm-copy", + { + reqd=["new-name-label"]; + optn=["new-name-description"; "sr-uuid"]; + help="Copy an existing VM, but without using storage-level fast disk clone operation (even if this is available). The disk images of the copied VM are guaranteed to be 'full images' - i.e. not part of a CoW chain."; + implementation=No_fd Cli_operations.vm_copy; + flags=[Standard; Vm_selectors]; + }; - "snapshot-revert", - { + "snapshot-revert", + { reqd=[]; optn=["uuid"; "snapshot-uuid"]; help="Revert an existing VM to a previous checkpointed or snapshotted state."; implementation=No_fd Cli_operations.snapshot_revert; flags=[Standard]; - }; - - - "vm-install", - { - reqd=["new-name-label"]; - optn=["sr-name-label";"sr-uuid";"template";"copy-bios-strings-from"]; - help="Install a new VM from a template. The template parameter can match either the template name or the uuid."; - implementation=No_fd Cli_operations.vm_install; - flags=[Standard]; - }; - - "vm-uninstall", - { - reqd=[]; - optn=["force"]; - help="Uninstall a VM. This operation will destroy those VDIs that are marked RW and connected to this VM only. 
To simply destroy the VM record, use vm-destroy."; - implementation=With_fd Cli_operations.vm_uninstall; - flags=[Standard;Vm_selectors]; - }; - - "console", - { - reqd=[]; - optn=[]; - help="Attach to a particular console."; - implementation=With_fd Cli_operations.console; - flags=[Vm_selectors]; - }; - - "vm-query-services", - { - reqd=[]; - optn=[]; - help="Query the system services offered by the given VM(s)."; - implementation=No_fd Cli_operations.vm_query_services; - flags=[Standard;Vm_selectors;Hidden]; - }; - - "vm-start", - { - reqd=[]; - optn=["force"; "on"; "paused"]; - help="Start the selected VM(s). Where pooling is enabled, the host on which to start can be specified with the 'on' parameter that takes a uuid. The optional parameter '--force' will bypass any hardware-compatibility warnings."; - implementation=No_fd Cli_operations.vm_start; - flags=[Standard;Vm_selectors]; - }; - - "vm-suspend", - { - reqd=[]; - optn=[]; - help="Suspend the selected VM(s)."; - implementation=No_fd Cli_operations.vm_suspend; - flags=[Standard; Vm_selectors]; - }; - - "vm-resume", - { - reqd=[]; - optn=["force"; "on"]; - help="Resume the selected VM(s)."; - implementation=No_fd Cli_operations.vm_resume; - flags=[Standard; Vm_selectors]; - }; - - "vm-shutdown", - { - reqd=[]; - optn=["force"]; - help="Shutdown the selected VM(s). The optional argument --force will forcibly shut down the VM."; - implementation=No_fd Cli_operations.vm_shutdown; - flags=[Standard; Vm_selectors]; - }; - - "vm-reset-powerstate", - { - reqd=[]; - optn=["force"]; - help="Force the VM powerstate to halted in the management toolstack database only. This command is used to recover a VM that is marked as 'running', but is known to be on a dead slave host that will not recover. 
This is a potentially dangerous operation: you must ensure that the VM you are forcing to 'halted' is definitely not running anywhere."; - implementation=No_fd Cli_operations.vm_reset_powerstate; - flags=[Standard; Vm_selectors]; - }; - - "snapshot-reset-powerstate", - { - reqd=[]; - optn=["uuid"; "snapshot-uuid"; "force"]; - help="Force the VM powerstate to halted in the management toolstack database only. This command is used to recover a snapshot that is marked as 'suspended'. This is a potentially dangerous operation: you must ensure that you do not need the memory image anymore (ie. you will not be able to resume your snapshot anymore)."; - implementation=No_fd Cli_operations.snapshot_reset_powerstate; - flags=[Standard; Vm_selectors]; - }; - - "vm-reboot", - { - reqd=[]; - optn=["force"]; - help="Reboot the selected VM(s)."; - implementation=No_fd Cli_operations.vm_reboot; - flags=[Standard; Vm_selectors]; - }; - - "vm-compute-maximum-memory", + }; + + + "vm-install", + { + reqd=["new-name-label"]; + optn=["sr-name-label";"sr-uuid";"template";"copy-bios-strings-from"]; + help="Install a new VM from a template. The template parameter can match either the template name or the uuid."; + implementation=No_fd Cli_operations.vm_install; + flags=[Standard]; + }; + + "vm-uninstall", + { + reqd=[]; + optn=["force"]; + help="Uninstall a VM. This operation will destroy those VDIs that are marked RW and connected to this VM only. 
To simply destroy the VM record, use vm-destroy."; + implementation=With_fd Cli_operations.vm_uninstall; + flags=[Standard;Vm_selectors]; + }; + + "console", + { + reqd=[]; + optn=[]; + help="Attach to a particular console."; + implementation=With_fd Cli_operations.console; + flags=[Vm_selectors]; + }; + + "vm-query-services", + { + reqd=[]; + optn=[]; + help="Query the system services offered by the given VM(s)."; + implementation=No_fd Cli_operations.vm_query_services; + flags=[Standard;Vm_selectors;Hidden]; + }; + + "vm-start", + { + reqd=[]; + optn=["force"; "on"; "paused"]; + help="Start the selected VM(s). Where pooling is enabled, the host on which to start can be specified with the 'on' parameter that takes a uuid. The optional parameter '--force' will bypass any hardware-compatibility warnings."; + implementation=No_fd Cli_operations.vm_start; + flags=[Standard;Vm_selectors]; + }; + + "vm-suspend", + { + reqd=[]; + optn=[]; + help="Suspend the selected VM(s)."; + implementation=No_fd Cli_operations.vm_suspend; + flags=[Standard; Vm_selectors]; + }; + + "vm-resume", + { + reqd=[]; + optn=["force"; "on"]; + help="Resume the selected VM(s)."; + implementation=No_fd Cli_operations.vm_resume; + flags=[Standard; Vm_selectors]; + }; + + "vm-shutdown", + { + reqd=[]; + optn=["force"]; + help="Shutdown the selected VM(s). The optional argument --force will forcibly shut down the VM."; + implementation=No_fd Cli_operations.vm_shutdown; + flags=[Standard; Vm_selectors]; + }; + + "vm-reset-powerstate", + { + reqd=[]; + optn=["force"]; + help="Force the VM powerstate to halted in the management toolstack database only. This command is used to recover a VM that is marked as 'running', but is known to be on a dead slave host that will not recover. 
This is a potentially dangerous operation: you must ensure that the VM you are forcing to 'halted' is definitely not running anywhere."; + implementation=No_fd Cli_operations.vm_reset_powerstate; + flags=[Standard; Vm_selectors]; + }; + + "snapshot-reset-powerstate", + { + reqd=[]; + optn=["uuid"; "snapshot-uuid"; "force"]; + help="Force the VM powerstate to halted in the management toolstack database only. This command is used to recover a snapshot that is marked as 'suspended'. This is a potentially dangerous operation: you must ensure that you do not need the memory image anymore (ie. you will not be able to resume your snapshot anymore)."; + implementation=No_fd Cli_operations.snapshot_reset_powerstate; + flags=[Standard; Vm_selectors]; + }; + + "vm-reboot", + { + reqd=[]; + optn=["force"]; + help="Reboot the selected VM(s)."; + implementation=No_fd Cli_operations.vm_reboot; + flags=[Standard; Vm_selectors]; + }; + + "vm-compute-maximum-memory", { reqd=["total"]; optn=["approximate"]; @@ -1212,7 +1212,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Vm_selectors]; }; - "vm-migrate", + "vm-migrate", { reqd=[]; optn=["live"; "host"; "host-uuid"; "remote-master"; "remote-username"; "remote-password"; "remote-network"; "force"; "copy"; "vif:"; "vdi:"]; @@ -1221,7 +1221,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Standard; Vm_selectors]; }; - "vm-pause", + "vm-pause", { reqd=[]; optn=[]; @@ -1230,7 +1230,7 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Standard; Vm_selectors]; }; - "vm-unpause", + "vm-unpause", { reqd=[]; optn=[]; @@ -1239,72 +1239,72 @@ let rec cmdtable_data : (string*cmd_spec) list = flags=[Standard; Vm_selectors]; }; - "vm-disk-list", - { - reqd=[]; - optn=["vbd-params";"vdi-params"]; - help="List the disks on the selected VM(s)."; - implementation=No_fd (Cli_operations.vm_disk_list false); - flags=[Standard; Vm_selectors]; - }; - - "vm-crashdump-list", - { - reqd=[]; - optn=[]; - help="List crashdumps 
associated with the selected VM(s)."; - implementation=No_fd Cli_operations.vm_crashdump_list; - flags=[Vm_selectors]; - }; - - "vm-cd-add", - { - reqd=["cd-name";"device"]; - optn=[]; - help="Add a CD to the VM(s). The device field should be selected from the parameter 'allowed-VBD-devices' of the VM."; - implementation=No_fd Cli_operations.vm_cd_add; - flags=[Standard; Vm_selectors]; - }; - - "vm-cd-list", - { - reqd=[]; - optn=["vbd-params";"vdi-params"]; - help="List the CDs currently attached to the VM(s)."; - implementation=No_fd (Cli_operations.vm_disk_list true); - flags=[Standard; Vm_selectors]; - }; - - "vm-cd-remove", - { - optn=[]; - reqd=["cd-name"]; - help="Remove the selected CDs from the VM(s)."; - implementation=No_fd Cli_operations.vm_cd_remove; - flags=[Standard; Vm_selectors]; - }; - - "vm-cd-eject", + "vm-disk-list", + { + reqd=[]; + optn=["vbd-params";"vdi-params"]; + help="List the disks on the selected VM(s)."; + implementation=No_fd (Cli_operations.vm_disk_list false); + flags=[Standard; Vm_selectors]; + }; + + "vm-crashdump-list", + { + reqd=[]; + optn=[]; + help="List crashdumps associated with the selected VM(s)."; + implementation=No_fd Cli_operations.vm_crashdump_list; + flags=[Vm_selectors]; + }; + + "vm-cd-add", + { + reqd=["cd-name";"device"]; + optn=[]; + help="Add a CD to the VM(s). 
The device field should be selected from the parameter 'allowed-VBD-devices' of the VM."; + implementation=No_fd Cli_operations.vm_cd_add; + flags=[Standard; Vm_selectors]; + }; + + "vm-cd-list", + { + reqd=[]; + optn=["vbd-params";"vdi-params"]; + help="List the CDs currently attached to the VM(s)."; + implementation=No_fd (Cli_operations.vm_disk_list true); + flags=[Standard; Vm_selectors]; + }; + + "vm-cd-remove", + { + optn=[]; + reqd=["cd-name"]; + help="Remove the selected CDs from the VM(s)."; + implementation=No_fd Cli_operations.vm_cd_remove; + flags=[Standard; Vm_selectors]; + }; + + "vm-cd-eject", { optn=[]; reqd=[]; help="Eject a CD from the virtual CD drive. This command will only work if there is one and only one CD attached to the VM. When there are two \ -or more CDs, please use the command 'vbd-eject' and specify the uuid of the VBD."; + or more CDs, please use the command 'vbd-eject' and specify the uuid of the VBD."; implementation=No_fd Cli_operations.vm_cd_eject; flags=[Standard; Vm_selectors]; }; - - "vm-cd-insert", + + "vm-cd-insert", { optn=[]; reqd=["cd-name"]; help="Insert a CD into the virtual CD drive. This command will only work if there is one and only one empty CD device attached to the VM. 
When \ -there are two or more empty CD devices, please use the command 'vbd-insert' and specify the uuids of the VBD and of the VDI to insert."; + there are two or more empty CD devices, please use the command 'vbd-insert' and specify the uuids of the VBD and of the VDI to insert."; implementation=No_fd Cli_operations.vm_cd_insert; flags=[Standard; Vm_selectors]; }; - "vm-vcpu-hotplug", + "vm-vcpu-hotplug", { reqd=["new-vcpus"]; optn=[]; @@ -1313,7 +1313,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Vm_selectors]; }; - "cd-list", + "cd-list", { reqd=[]; optn=["params"]; @@ -1322,34 +1322,34 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard]; }; - "vm-disk-add", - { - reqd=["disk-size";"device"]; - optn=["sr-uuid"]; - help="Add a new disk to the selected VM(s). The device field should be selected from the field 'allowed-VBD-devices' of the VM."; - implementation=No_fd Cli_operations.vm_disk_add; - flags=[Standard; Vm_selectors]; - }; - - "vm-disk-remove", - { - reqd=["device"]; - optn=[]; - help="Remove a disk from the selected VM and destroy it."; - implementation=No_fd Cli_operations.vm_disk_remove; - flags=[Standard; Vm_selectors]; - }; - - "vm-import", - { - reqd=[]; - optn=["filename"; "preserve"; "sr-uuid"; "force"; "host-username"; "host-password"; "type"; "remote-config"; "url"; "vdi:"]; - help="Import a VM. If type=ESXServer is given, it will import from a VMWare server and 'host-username', 'host-password' and 'remote-config' are required. Otherwise, it will import from a file, and 'filename' is required. If the option preserve=true is given then as many settings as possible are restored, including VIF MAC addresses. The default is to regenerate VIF MAC addresses. The VDIs will be imported into the Pool's default SR unless an override is provided. If the force option is given then any disk data checksum failures will be ignored. 
If the parameter 'url' is specified, xapi will attempt to import from that URL."; - implementation=With_fd Cli_operations.vm_import; - flags=[Standard]; - }; - - "vm-export", + "vm-disk-add", + { + reqd=["disk-size";"device"]; + optn=["sr-uuid"]; + help="Add a new disk to the selected VM(s). The device field should be selected from the field 'allowed-VBD-devices' of the VM."; + implementation=No_fd Cli_operations.vm_disk_add; + flags=[Standard; Vm_selectors]; + }; + + "vm-disk-remove", + { + reqd=["device"]; + optn=[]; + help="Remove a disk from the selected VM and destroy it."; + implementation=No_fd Cli_operations.vm_disk_remove; + flags=[Standard; Vm_selectors]; + }; + + "vm-import", + { + reqd=[]; + optn=["filename"; "preserve"; "sr-uuid"; "force"; "host-username"; "host-password"; "type"; "remote-config"; "url"; "vdi:"]; + help="Import a VM. If type=ESXServer is given, it will import from a VMWare server and 'host-username', 'host-password' and 'remote-config' are required. Otherwise, it will import from a file, and 'filename' is required. If the option preserve=true is given then as many settings as possible are restored, including VIF MAC addresses. The default is to regenerate VIF MAC addresses. The VDIs will be imported into the Pool's default SR unless an override is provided. If the force option is given then any disk data checksum failures will be ignored. 
If the parameter 'url' is specified, xapi will attempt to import from that URL."; + implementation=With_fd Cli_operations.vm_import; + flags=[Standard]; + }; + + "vm-export", { reqd=["filename"]; optn=["preserve-power-state"; "compress"]; @@ -1358,7 +1358,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard; Vm_selectors]; }; - "vm-copy-bios-strings", + "vm-copy-bios-strings", { reqd=["host-uuid"]; optn=[]; @@ -1367,7 +1367,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Vm_selectors]; }; - "vm-is-bios-customized", + "vm-is-bios-customized", { reqd=[]; optn=[]; @@ -1376,7 +1376,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Vm_selectors]; }; - "vm-call-plugin", + "vm-call-plugin", { reqd=["vm-uuid"; "plugin"; "fn"]; optn=["args:"]; @@ -1385,7 +1385,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "snapshot-export-to-template", + "snapshot-export-to-template", { reqd=["filename"; "snapshot-uuid"]; optn=["preserve-power-state"]; @@ -1394,7 +1394,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard]; }; - "snapshot-clone", + "snapshot-clone", { reqd=["new-name-label"]; optn=["uuid"; "new-name-description"]; @@ -1403,7 +1403,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard]; }; - "snapshot-copy", + "snapshot-copy", { reqd=["new-name-label"]; optn=["uuid"; "new-name-description"; "sr-uuid"]; @@ -1412,16 +1412,16 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard]; }; - "snapshot-uninstall", - { - reqd=[]; - optn=["uuid"; "snapshot-uuid"; "force"]; - help="Uninstall a snapshot. This operation will destroy those VDIs that are marked RW and connected to this snapshot only. 
To simply destroy the VM record, use snapshot-destroy."; - implementation=With_fd Cli_operations.snapshot_uninstall; - flags=[Standard]; - }; + "snapshot-uninstall", + { + reqd=[]; + optn=["uuid"; "snapshot-uuid"; "force"]; + help="Uninstall a snapshot. This operation will destroy those VDIs that are marked RW and connected to this snapshot only. To simply destroy the VM record, use snapshot-destroy."; + implementation=With_fd Cli_operations.snapshot_uninstall; + flags=[Standard]; + }; - "snapshot-destroy", + "snapshot-destroy", { reqd=[]; optn=["uuid"; "snapshot-uuid"]; @@ -1430,16 +1430,16 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "snapshot-disk-list", - { - reqd=[]; - optn=["uuid"; "snapshot-uuid"; "vbd-params"; "vdi-params"]; - help="List the disks on the selected VM(s)."; - implementation=No_fd (Cli_operations.snapshot_disk_list false); - flags=[Standard; Vm_selectors]; - }; + "snapshot-disk-list", + { + reqd=[]; + optn=["uuid"; "snapshot-uuid"; "vbd-params"; "vdi-params"]; + help="List the disks on the selected VM(s)."; + implementation=No_fd (Cli_operations.snapshot_disk_list false); + flags=[Standard; Vm_selectors]; + }; - "template-export", + "template-export", { reqd=["filename"; "template-uuid"]; optn=[]; @@ -1448,16 +1448,16 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard]; }; - "template-uninstall", - { - reqd=["template-uuid"]; - optn=["force"]; - help="Uninstall a custom template. This operation will destroy those VDIs that are marked as 'owned' by this template"; - implementation=With_fd Cli_operations.template_uninstall; + "template-uninstall", + { + reqd=["template-uuid"]; + optn=["force"]; + help="Uninstall a custom template. 
This operation will destroy those VDIs that are marked as 'owned' by this template"; + implementation=With_fd Cli_operations.template_uninstall; flags=[Standard]; - }; + }; - "vm-vif-list", + "vm-vif-list", { reqd=[]; optn=[]; @@ -1466,16 +1466,16 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Standard; Vm_selectors]; }; - "vlan-create", - { - reqd=["pif-uuid"; "vlan"; "network-uuid"]; - optn=[]; - help="Create a new VLAN on a host."; - implementation=No_fd Cli_operations.vlan_create; + "vlan-create", + { + reqd=["pif-uuid"; "vlan"; "network-uuid"]; + optn=[]; + help="Create a new VLAN on a host."; + implementation=No_fd Cli_operations.vlan_create; flags=[]; - }; + }; - "vlan-destroy", + "vlan-destroy", { reqd=["uuid"]; optn=[]; @@ -1483,17 +1483,17 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vlan_destroy; flags=[]; }; - - "tunnel-create", - { - reqd=["pif-uuid"; "network-uuid"]; - optn=[]; - help="Create a new tunnel on a host."; - implementation=No_fd Cli_operations.tunnel_create; - flags=[]; - }; - "tunnel-destroy", + "tunnel-create", + { + reqd=["pif-uuid"; "network-uuid"]; + optn=[]; + help="Create a new tunnel on a host."; + implementation=No_fd Cli_operations.tunnel_create; + flags=[]; + }; + + "tunnel-destroy", { reqd=["uuid"]; optn=[]; @@ -1502,7 +1502,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-unplug", + "pif-unplug", { reqd=["uuid"]; optn=[]; @@ -1511,7 +1511,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[] }; - "pif-plug", + "pif-plug", { reqd=["uuid"]; optn=[]; @@ -1520,7 +1520,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[] }; - "pif-reconfigure-ip", + "pif-reconfigure-ip", { reqd=["uuid"; "mode"]; optn=["IP"; "netmask"; "gateway"; "DNS"]; @@ -1529,7 +1529,7 @@ there are two or more 
empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-reconfigure-ipv6", + "pif-reconfigure-ipv6", { reqd=["uuid"; "mode"]; optn=["IPv6"; "gateway"; "DNS"]; @@ -1538,7 +1538,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-set-primary-address-type", + "pif-set-primary-address-type", { reqd=["uuid"; "primary_address_type"]; optn=[]; @@ -1547,8 +1547,8 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-scan", - { + "pif-scan", + { reqd=["host-uuid"]; optn=[]; help="Scan for new physical interfaces on a host."; @@ -1556,8 +1556,8 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-introduce", - { + "pif-introduce", + { reqd=["host-uuid"; "device"]; optn=["mac"; "managed"]; help="Create a new PIF object representing a physical interface on a host."; @@ -1565,7 +1565,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-forget", + "pif-forget", { reqd=["uuid"]; optn=[]; @@ -1574,7 +1574,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "pif-db-forget", + "pif-db-forget", { reqd=["uuid"]; optn=[]; @@ -1583,7 +1583,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[Hidden]; }; - "bond-create", + "bond-create", { reqd=["network-uuid"; "pif-uuids"]; optn=["mac"; "mode"; "properties:"]; @@ -1592,7 +1592,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "bond-destroy", + "bond-destroy", { reqd=["uuid"]; optn=[]; @@ -1601,7 +1601,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; - "bond-set-mode", + "bond-set-mode", { reqd=["uuid"; "mode"]; optn=[]; @@ -1610,9 +1610,9 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and flags=[]; }; 
-(* Lowlevel non-autogenerated stuff *) + (* Lowlevel non-autogenerated stuff *) - "vbd-create", + "vbd-create", { reqd=["vm-uuid";"device"]; optn=["vdi-uuid"; "bootable"; "type"; "mode"; "unpluggable" ]; @@ -1620,7 +1620,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_create; flags=[]; }; - "vbd-destroy", + "vbd-destroy", { reqd=["uuid"]; optn=[]; @@ -1628,7 +1628,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_destroy; flags=[]; }; - "vbd-insert", + "vbd-insert", { reqd=["uuid"; "vdi-uuid"]; optn=[]; @@ -1636,7 +1636,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_insert; flags=[]; }; - "vbd-eject", + "vbd-eject", { reqd=["uuid"]; optn=[]; @@ -1644,7 +1644,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_eject; flags=[]; }; - "vbd-plug", + "vbd-plug", { reqd=["uuid"]; optn=[]; @@ -1652,7 +1652,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_plug; flags=[]; }; - "vbd-unplug", + "vbd-unplug", { reqd=["uuid"]; optn=["timeout"; "force"]; @@ -1660,23 +1660,23 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vbd_unplug; flags=[]; }; - "vbd-pause", - { - reqd=["uuid"]; - optn=[]; - help="Request that a backend block device pauses itself."; - implementation=No_fd Cli_operations.vbd_pause; - flags=[Hidden]; - }; - "vbd-unpause", - { - reqd=["uuid"; "token"]; - optn=[]; - help="Request that a backend block device unpauses itself."; - implementation=No_fd Cli_operations.vbd_unpause; - flags=[Hidden]; - }; - "sr-create", + "vbd-pause", + { + reqd=["uuid"]; + optn=[]; + help="Request that a backend block device pauses itself."; + 
implementation=No_fd Cli_operations.vbd_pause; + flags=[Hidden]; + }; + "vbd-unpause", + { + reqd=["uuid"; "token"]; + optn=[]; + help="Request that a backend block device unpauses itself."; + implementation=No_fd Cli_operations.vbd_unpause; + flags=[Hidden]; + }; + "sr-create", { reqd=["name-label";"type"]; optn=["host-uuid";"device-config:";"shared";"physical-size";"content-type";"sm-config:"]; @@ -1684,7 +1684,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=With_fd Cli_operations.sr_create; flags=[]; }; - "sr-probe", + "sr-probe", { reqd=["type"]; optn=["host-uuid";"device-config:";"sm-config:"]; @@ -1692,7 +1692,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_probe; flags=[]; }; - "sr-scan", + "sr-scan", { reqd=["uuid"]; optn=[]; @@ -1700,7 +1700,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_scan; flags=[]; }; - "sr-introduce", + "sr-introduce", { reqd=["name-label"; "type"; "uuid"]; optn=["shared"; "content-type"]; @@ -1708,7 +1708,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_introduce; flags=[]; }; - "sr-destroy", + "sr-destroy", { reqd=["uuid"]; optn=[]; @@ -1716,7 +1716,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_destroy; flags=[]; }; - "sr-forget", + "sr-forget", { reqd=["uuid"]; optn=[]; @@ -1724,7 +1724,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_forget; flags=[]; }; - "sr-update", + "sr-update", { reqd=["uuid"]; optn=[]; @@ -1732,58 +1732,58 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.sr_update; flags=[]; }; - "sr-enable-database-replication", 
- { - reqd=["uuid"]; - optn=[]; - help="Enable database replication to the SR."; - implementation=No_fd Cli_operations.sr_enable_database_replication; - flags=[]; - }; - "sr-disable-database-replication", - { - reqd=["uuid"]; - optn=[]; - help="Disable database replication to the SR."; - implementation=No_fd Cli_operations.sr_disable_database_replication; - flags=[]; - }; - "sr-data-source-list", - { - reqd=[]; - optn=[]; - help="List the data sources that can be recorded for a SR."; - implementation=No_fd Cli_operations.sr_data_source_list; - flags=[Sr_selectors]; - }; - "sr-data-source-record", - { - reqd=["data-source"]; - optn=[]; - help="Record the specified data source for a SR."; - implementation=No_fd Cli_operations.sr_data_source_record; - flags=[Sr_selectors]; - }; - - "sr-data-source-query", - { - reqd=["data-source"]; - optn=[]; - help="Query the last value read from a SR data source."; - implementation=No_fd Cli_operations.sr_data_source_query; - flags=[Sr_selectors]; - }; - - "sr-data-source-forget", - { - reqd=["data-source"]; - optn=[]; - help="Stop recording the specified data source for a SR, and forget all of the recorded data."; - implementation=No_fd Cli_operations.sr_data_source_forget; - flags=[Sr_selectors]; - }; - - "vdi-create", + "sr-enable-database-replication", + { + reqd=["uuid"]; + optn=[]; + help="Enable database replication to the SR."; + implementation=No_fd Cli_operations.sr_enable_database_replication; + flags=[]; + }; + "sr-disable-database-replication", + { + reqd=["uuid"]; + optn=[]; + help="Disable database replication to the SR."; + implementation=No_fd Cli_operations.sr_disable_database_replication; + flags=[]; + }; + "sr-data-source-list", + { + reqd=[]; + optn=[]; + help="List the data sources that can be recorded for a SR."; + implementation=No_fd Cli_operations.sr_data_source_list; + flags=[Sr_selectors]; + }; + "sr-data-source-record", + { + reqd=["data-source"]; + optn=[]; + help="Record the specified data source for 
a SR."; + implementation=No_fd Cli_operations.sr_data_source_record; + flags=[Sr_selectors]; + }; + + "sr-data-source-query", + { + reqd=["data-source"]; + optn=[]; + help="Query the last value read from a SR data source."; + implementation=No_fd Cli_operations.sr_data_source_query; + flags=[Sr_selectors]; + }; + + "sr-data-source-forget", + { + reqd=["data-source"]; + optn=[]; + help="Stop recording the specified data source for a SR, and forget all of the recorded data."; + implementation=No_fd Cli_operations.sr_data_source_forget; + flags=[Sr_selectors]; + }; + + "vdi-create", { reqd=["sr-uuid";"name-label";"virtual-size"]; optn=["sm-config:";"sharable"; "tags:";"type"]; @@ -1791,7 +1791,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_create; flags=[]; }; - "vdi-destroy", + "vdi-destroy", { reqd=["uuid"]; optn=[]; @@ -1799,7 +1799,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_destroy; flags=[]; }; - "vdi-forget", + "vdi-forget", { reqd=["uuid"]; optn=[]; @@ -1807,7 +1807,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_forget; flags=[]; }; - "vdi-update", + "vdi-update", { reqd=["uuid"]; optn=[]; @@ -1815,7 +1815,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_update; flags=[]; }; - "vdi-introduce", + "vdi-introduce", { reqd=["uuid"; "sr-uuid";"type";"location"]; optn=["name-description"; "sharable"; "read-only"; "other-config:"; "xenstore-data:"; "sm-config:"; "name-label"]; @@ -1823,23 +1823,23 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_introduce; flags=[]; }; - "vdi-import", - { - reqd=["uuid"; "filename"]; - optn=["format"; "progress"]; - help="Import a raw VDI."; - 
implementation=With_fd Cli_operations.vdi_import; - flags=[]; - }; - "vdi-export", - { - reqd = [ "uuid"; "filename" ]; - optn = [ "format"; "base"; "progress" ]; - help = "Export a VDI."; - implementation=With_fd Cli_operations.vdi_export; - flags = []; - }; - "vdi-resize", + "vdi-import", + { + reqd=["uuid"; "filename"]; + optn=["format"; "progress"]; + help="Import a raw VDI."; + implementation=With_fd Cli_operations.vdi_import; + flags=[]; + }; + "vdi-export", + { + reqd = [ "uuid"; "filename" ]; + optn = [ "format"; "base"; "progress" ]; + help = "Export a VDI."; + implementation=With_fd Cli_operations.vdi_export; + flags = []; + }; + "vdi-resize", { reqd=["uuid"; "disk-size"]; optn=["online"]; @@ -1847,15 +1847,15 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_resize; flags=[]; }; - "vdi-generate-config", - { - reqd=["host-uuid"; "uuid"]; - optn=[]; - help="Generate a static VDI configuration."; - implementation=No_fd Cli_operations.vdi_generate_config; - flags=[Hidden]; - }; - "vdi-copy", + "vdi-generate-config", + { + reqd=["host-uuid"; "uuid"]; + optn=[]; + help="Generate a static VDI configuration."; + implementation=No_fd Cli_operations.vdi_generate_config; + flags=[Hidden]; + }; + "vdi-copy", { reqd=["uuid"]; optn=["sr-uuid"; "base-vdi-uuid"; "into-vdi-uuid"]; @@ -1863,15 +1863,15 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_copy; flags=[]; }; - "vdi-pool-migrate", + "vdi-pool-migrate", { reqd=["uuid"; "sr-uuid"]; - optn=[]; + optn=[]; help="Migrate a VDI to a specified SR, while the VDI is attached to a running guest."; implementation=No_fd Cli_operations.vdi_pool_migrate; flags=[]; }; - "vdi-clone", + "vdi-clone", { reqd=["uuid"]; optn=["driver-params-"; "new-name-label"; "new-name-description"]; @@ -1879,7 +1879,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and 
implementation=No_fd Cli_operations.vdi_clone; flags=[]; }; - "vdi-snapshot", + "vdi-snapshot", { reqd=["uuid"]; optn=["driver-params-"]; @@ -1887,7 +1887,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_snapshot; flags=[]; }; - "vdi-unlock", + "vdi-unlock", { reqd=["uuid"]; optn=["force"]; @@ -1895,7 +1895,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.vdi_unlock; flags=[]; }; - "diagnostic-vdi-status", + "diagnostic-vdi-status", { reqd=["uuid"]; optn=[]; @@ -1903,7 +1903,7 @@ there are two or more empty CD devices, please use the command 'vbd-insert' and implementation=No_fd Cli_operations.diagnostic_vdi_status; flags=[]; }; - "pbd-create", + "pbd-create", { reqd=["host-uuid";"sr-uuid"]; optn=["device-config:"]; @@ -1912,7 +1912,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.pbd_create; flags=[]; }; - "pbd-destroy", + "pbd-destroy", { reqd=["uuid"]; optn=[]; @@ -1920,7 +1920,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.pbd_destroy; flags=[]; }; - "pbd-plug", + "pbd-plug", { reqd=["uuid"]; optn=[]; @@ -1928,7 +1928,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.pbd_plug; flags=[]; }; - "pbd-unplug", + "pbd-unplug", { reqd=["uuid"]; optn=[]; @@ -1936,23 +1936,23 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.pbd_unplug; flags=[]; }; - "network-create", - { - reqd=["name-label"]; - optn=["name-description"; "MTU"]; - help="Create a new network."; - implementation=No_fd Cli_operations.net_create; + "network-create", + { + reqd=["name-label"]; + optn=["name-description"; "MTU"]; + help="Create a new network."; + 
implementation=No_fd Cli_operations.net_create; flags=[]; - }; - "network-destroy", - { - reqd=["uuid"]; - optn=[]; - help="Delete an existing network."; - implementation=No_fd Cli_operations.net_destroy; + }; + "network-destroy", + { + reqd=["uuid"]; + optn=[]; + help="Delete an existing network."; + implementation=No_fd Cli_operations.net_destroy; flags=[]; - }; - "network-attach", + }; + "network-attach", { reqd=["uuid"; "host-uuid"]; optn=[]; @@ -1960,7 +1960,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.net_attach; flags=[Hidden]; }; - "vif-create", + "vif-create", { reqd=["device";"network-uuid";"vm-uuid"]; optn=["mac"]; @@ -1968,7 +1968,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_create; flags=[]; }; - "vif-destroy", + "vif-destroy", { reqd=["uuid"]; optn=[]; @@ -1976,7 +1976,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_destroy; flags=[]; }; - "vif-plug", + "vif-plug", { reqd=["uuid"]; optn=[]; @@ -1984,7 +1984,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_plug; flags=[]; }; - "vif-unplug", + "vif-unplug", { reqd=["uuid"]; optn=["force"]; @@ -1992,7 +1992,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_unplug; flags=[]; }; - "vif-configure-ipv4", + "vif-configure-ipv4", { reqd=["uuid"; "mode"]; optn=["address"; "gateway"]; @@ -2000,7 +2000,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_configure_ipv4; flags=[]; }; - "vif-configure-ipv6", + "vif-configure-ipv6", { reqd=["uuid"; "mode"]; optn=["address"; "gateway"]; @@ -2008,7 +2008,7 @@ add a mapping of 'path' -> '/tmp', the command line should 
contain the argument implementation=No_fd Cli_operations.vif_configure_ipv6; flags=[]; }; - "vif-move", + "vif-move", { reqd=["uuid";"network-uuid"]; optn=[]; @@ -2016,7 +2016,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vif_move; flags=[]; }; - "vm-create", + "vm-create", { reqd=["name-label"]; optn=["name-description"]; @@ -2024,7 +2024,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vm_create; flags=[Hidden]; }; - "vm-destroy", + "vm-destroy", { reqd=["uuid"]; optn=[]; @@ -2032,23 +2032,23 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.vm_destroy; flags=[]; }; - "vm-recover", - { - reqd=["uuid"]; - optn=["database:";"force"]; - help="Recover a VM from the database contained in the supplied VDI."; - implementation=No_fd Cli_operations.vm_recover; - flags=[]; - }; - "vm-assert-can-be-recovered", - { - reqd=["uuid"]; - optn=["database:"]; - help="Test whether storage is available to recover this VM."; - implementation=No_fd Cli_operations.vm_assert_can_be_recovered; - flags=[]; - }; - "diagnostic-vm-status", + "vm-recover", + { + reqd=["uuid"]; + optn=["database:";"force"]; + help="Recover a VM from the database contained in the supplied VDI."; + implementation=No_fd Cli_operations.vm_recover; + flags=[]; + }; + "vm-assert-can-be-recovered", + { + reqd=["uuid"]; + optn=["database:"]; + help="Test whether storage is available to recover this VM."; + implementation=No_fd Cli_operations.vm_assert_can_be_recovered; + flags=[]; + }; + "diagnostic-vm-status", { reqd=["uuid"]; optn=[]; @@ -2058,7 +2058,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument }; (* "diagnostic-event-deltas", - { + { reqd=["class"]; optn=[]; help="Print the changes that are happening to all objects of class specified."; @@ -2066,7 +2066,7 @@ add 
a mapping of 'path' -> '/tmp', the command line should contain the argument flags=[]; }; *) - "diagnostic-license-status", + "diagnostic-license-status", { reqd=[]; optn=[]; @@ -2075,7 +2075,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument flags=[]; }; - "event-wait", + "event-wait", { reqd=["class"]; optn=[]; @@ -2083,43 +2083,43 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.event_wait; flags=[]; }; - "host-data-source-list", + "host-data-source-list", { - reqd=[]; + reqd=[]; optn=[]; help="List the data sources that can be recorded for a Host."; implementation=No_fd Cli_operations.host_data_source_list; flags=[Host_selectors]; }; - "host-data-source-record", - { - reqd=["data-source"]; - optn=[]; - help="Record the specified data source for a Host."; - implementation=No_fd Cli_operations.host_data_source_record; - flags=[Host_selectors]; - }; + "host-data-source-record", + { + reqd=["data-source"]; + optn=[]; + help="Record the specified data source for a Host."; + implementation=No_fd Cli_operations.host_data_source_record; + flags=[Host_selectors]; + }; - "host-data-source-query", + "host-data-source-query", { - reqd=["data-source"]; - optn=[]; - help="Query the last value read from a Host data source."; - implementation=No_fd Cli_operations.host_data_source_query; - flags=[Host_selectors]; - }; + reqd=["data-source"]; + optn=[]; + help="Query the last value read from a Host data source."; + implementation=No_fd Cli_operations.host_data_source_query; + flags=[Host_selectors]; + }; - "host-data-source-forget", + "host-data-source-forget", { - reqd=["data-source"]; - optn=[]; - help="Stop recording the specified data source for a Host, and forget all of the recorded data."; - implementation=No_fd Cli_operations.host_data_source_forget; - flags=[Host_selectors]; - }; + reqd=["data-source"]; + optn=[]; + help="Stop recording the specified data source for a 
Host, and forget all of the recorded data."; + implementation=No_fd Cli_operations.host_data_source_forget; + flags=[Host_selectors]; + }; - "host-license-add", + "host-license-add", { reqd=["license-file"]; optn=["host-uuid"]; @@ -2127,7 +2127,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=With_fd Cli_operations.host_license_add; flags=[]; }; - "host-license-remove", + "host-license-remove", { reqd=[]; optn=["host-uuid"]; @@ -2135,7 +2135,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_license_remove; flags=[]; }; - "host-license-view", + "host-license-view", { reqd=[]; optn=["host-uuid"]; @@ -2159,7 +2159,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_all_editions; flags=[]; }; - "host-evacuate", + "host-evacuate", { reqd=[]; optn=[]; @@ -2167,7 +2167,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_evacuate; flags=[Host_selectors]; }; - "host-get-vms-which-prevent-evacuation", + "host-get-vms-which-prevent-evacuation", { reqd=["uuid"]; optn=[]; @@ -2175,7 +2175,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_get_vms_which_prevent_evacuation; flags=[]; }; - "host-shutdown-agent", + "host-shutdown-agent", { reqd=[]; optn=[]; @@ -2183,7 +2183,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd_local_session Cli_operations.host_shutdown_agent; flags=[Neverforward]; }; - "diagnostic-compact", + "diagnostic-compact", { reqd=[]; optn=[]; @@ -2191,7 +2191,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.diagnostic_compact; flags=[Neverforward]; }; - "diagnostic-gc-stats", + 
"diagnostic-gc-stats", { reqd=[]; optn=[]; @@ -2199,7 +2199,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.diagnostic_gc_stats; flags=[Neverforward]; }; - "diagnostic-timing-stats", + "diagnostic-timing-stats", { reqd=[]; optn=[]; @@ -2207,7 +2207,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.diagnostic_timing_stats; flags=[]; }; - "diagnostic-db-stats", + "diagnostic-db-stats", { reqd=[]; optn=[]; @@ -2215,15 +2215,15 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.diagnostic_db_stats; flags=[Neverforward]; }; - "diagnostic-net-stats", - { - reqd=[]; - optn=["uri"; "method"; "params"]; - help="Print network stats."; - implementation=No_fd Cli_operations.diagnostic_net_stats; - flags=[Neverforward]; - }; - "diagnostic-db-log", + "diagnostic-net-stats", + { + reqd=[]; + optn=["uri"; "method"; "params"]; + help="Print network stats."; + implementation=No_fd Cli_operations.diagnostic_net_stats; + flags=[Neverforward]; + }; + "diagnostic-db-log", { reqd=[]; optn=[]; @@ -2231,7 +2231,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.diagnostic_db_log; flags=[Neverforward]; }; - "host-get-sm-diagnostics", + "host-get-sm-diagnostics", { reqd=["uuid"]; optn=[]; @@ -2239,15 +2239,15 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_get_sm_diagnostics; flags=[]; }; - "host-get-thread-diagnostics", + "host-get-thread-diagnostics", { reqd=["uuid"]; optn=[]; help="Display per-host thread diagnostic information."; implementation=No_fd Cli_operations.host_get_thread_diagnostics; flags=[]; - }; - "host-sm-dp-destroy", + }; + "host-sm-dp-destroy", { reqd=["uuid"; "dp"]; optn=["allow-leak"]; @@ -2255,7 +2255,7 @@ add a 
mapping of 'path' -> '/tmp', the command line should contain the argument implementation=No_fd Cli_operations.host_sm_dp_destroy; flags=[]; }; - "task-cancel", + "task-cancel", { reqd=["uuid"]; optn=[]; @@ -2292,14 +2292,14 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument }; *) - "pool-vlan-create", - { - reqd=["pif-uuid"; "vlan"; "network-uuid"]; - optn=[]; - help="Create a new VLAN on each host in a pool."; - implementation=No_fd Cli_operations.pool_vlan_create; - flags=[]; - }; + "pool-vlan-create", + { + reqd=["pif-uuid"; "vlan"; "network-uuid"]; + optn=[]; + help="Create a new VLAN on each host in a pool."; + implementation=No_fd Cli_operations.pool_vlan_create; + flags=[]; + }; "pool-ha-enable", @@ -2375,7 +2375,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument implementation=With_fd_local_session Cli_operations.host_ha_xapi_healthcheck; flags=[Hidden;Neverforward] }; - + (* "host-ha-query", { @@ -2390,8 +2390,8 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument (* "subject-list", { - reqd=[]; - optn=[]; + reqd=[]; + optn=[]; help="Returns a list of subject names that can access the pool"; implementation=No_fd Cli_operations.subject_list; flags=[] @@ -2400,7 +2400,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument "subject-add", { reqd=["subject-name"]; - optn=[]; + optn=[]; help="Add a subject to the list of subjects that can access the pool"; implementation=No_fd Cli_operations.subject_add; flags=[] @@ -2409,7 +2409,7 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument "subject-remove", { reqd=["subject-uuid"]; - optn=[]; + optn=[]; help="Remove a subject from the list of subjects that can access the pool"; implementation=No_fd Cli_operations.subject_remove; flags=[] @@ -2442,16 +2442,16 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument flags=[] }; -(* RBAC 2.0 
only - "role-create", - { - reqd=["id";"name"]; - optn=[]; - help="Add a role to the pool"; - implementation=No_fd Cli_operations.role_create; - flags=[] - }; -*) + (* RBAC 2.0 only + "role-create", + { + reqd=["id";"name"]; + optn=[]; + help="Add a role to the pool"; + implementation=No_fd Cli_operations.role_create; + flags=[] + }; + *) "session-subject-identifier-list", { reqd=[]; @@ -2488,152 +2488,152 @@ add a mapping of 'path' -> '/tmp', the command line should contain the argument flags=[Host_selectors] }; - "secret-create", - { reqd = ["value"] - ; optn = [] - ; help = "Create a secret" - ; implementation = No_fd Cli_operations.secret_create - ; flags = [] - }; - - "secret-destroy", - { reqd = ["uuid"] - ; optn = [] - ; help = "Destroy a secret" - ; implementation = No_fd Cli_operations.secret_destroy - ; flags = [] - }; - "appliance-create", - { - reqd=["name-label"]; - optn=["name-description"]; - help="Create a VM appliance."; - implementation=No_fd Cli_operations.vm_appliance_create; - flags=[]; - }; - "appliance-destroy", - { - reqd=["uuid"]; - optn=[]; - help="Destroy a VM appliance."; - implementation=No_fd Cli_operations.vm_appliance_destroy; - flags=[]; - }; - "appliance-start", - { - reqd=["uuid"]; - optn=["paused"]; - help="Start a VM appliance."; - implementation=No_fd Cli_operations.vm_appliance_start; - flags=[]; - }; - "appliance-shutdown", - { - reqd=["uuid"]; - optn=["force"]; - help="Shut down all VMs in a VM appliance."; - implementation=No_fd Cli_operations.vm_appliance_shutdown; - flags=[]; - }; - "appliance-recover", - { - reqd=["uuid"]; - optn=["database:";"force"]; - help="Recover a VM appliance from the database contained in the supplied VDI."; - implementation=No_fd Cli_operations.vm_appliance_recover; - flags=[]; - }; - "appliance-assert-can-be-recovered", - { - reqd=["uuid"]; - optn=["database:"]; - help="Test whether storage is available to recover this VM appliance."; - implementation=No_fd 
Cli_operations.vm_appliance_assert_can_be_recovered; - flags=[]; - }; - "gpu-group-create", - { - reqd=["name-label"]; - optn=["name-description"]; - help="Create an empty GPU group"; - implementation=No_fd Cli_operations.gpu_group_create; - flags=[]; - }; - "gpu-group-destroy", - { - reqd=["uuid"]; - optn=[""]; - help="Destroy a GPU group"; - implementation=No_fd Cli_operations.gpu_group_destroy; - flags=[]; - }; - "gpu-group-get-remaining-capacity", - { - reqd=["uuid";"vgpu-type-uuid"]; - optn=[]; - help="Calculate the number of VGPUs of the specified type which still be started in the group"; - implementation = No_fd Cli_operations.gpu_group_get_remaining_capacity; - flags=[]; - }; - "vgpu-create", - { - reqd=["vm-uuid";"gpu-group-uuid"]; - optn=["vgpu-type-uuid"]; (* "device" should be added here once we allow >1 vGPU/VM *) - help="Create a vGPU."; - implementation=No_fd Cli_operations.vgpu_create; - flags=[]; - }; - "vgpu-destroy", - { - reqd=["uuid"]; - optn=[]; - help="Destroy a vGPU."; - implementation=No_fd Cli_operations.vgpu_destroy; - flags=[]; - }; - "drtask-create", - { - reqd=["type"]; - optn=["device-config:"; "sr-whitelist"]; - help="Create a disaster recovery task."; - implementation=No_fd Cli_operations.dr_task_create; - flags=[] - }; - "drtask-destroy", - { - reqd=["uuid"]; - optn=[]; - help="Destroy a disaster recovery task."; - implementation=No_fd Cli_operations.dr_task_destroy; - flags=[] - }; - - "pgpu-enable-dom0-access", - { - reqd=["uuid"]; - optn=[]; - help="Enable PGPU access to dom0."; - implementation=No_fd Cli_operations.pgpu_enable_dom0_access; - flags=[] - }; - - "pgpu-disable-dom0-access", - { - reqd=["uuid"]; - optn=[]; - help="Disable PGPU access to dom0."; - implementation=No_fd Cli_operations.pgpu_disable_dom0_access; - flags=[] - }; - - "lvhd-enable-thin-provisioning", - { - reqd=["sr-uuid"; "initial-allocation"; "allocation-quantum"]; - optn=[]; - help="Enable thin-provisioning on an LVHD SR."; - implementation=No_fd 
Cli_operations.lvhd_enable_thin_provisioning; - flags=[Host_selectors]; - } + "secret-create", + { reqd = ["value"] + ; optn = [] + ; help = "Create a secret" + ; implementation = No_fd Cli_operations.secret_create + ; flags = [] + }; + + "secret-destroy", + { reqd = ["uuid"] + ; optn = [] + ; help = "Destroy a secret" + ; implementation = No_fd Cli_operations.secret_destroy + ; flags = [] + }; + "appliance-create", + { + reqd=["name-label"]; + optn=["name-description"]; + help="Create a VM appliance."; + implementation=No_fd Cli_operations.vm_appliance_create; + flags=[]; + }; + "appliance-destroy", + { + reqd=["uuid"]; + optn=[]; + help="Destroy a VM appliance."; + implementation=No_fd Cli_operations.vm_appliance_destroy; + flags=[]; + }; + "appliance-start", + { + reqd=["uuid"]; + optn=["paused"]; + help="Start a VM appliance."; + implementation=No_fd Cli_operations.vm_appliance_start; + flags=[]; + }; + "appliance-shutdown", + { + reqd=["uuid"]; + optn=["force"]; + help="Shut down all VMs in a VM appliance."; + implementation=No_fd Cli_operations.vm_appliance_shutdown; + flags=[]; + }; + "appliance-recover", + { + reqd=["uuid"]; + optn=["database:";"force"]; + help="Recover a VM appliance from the database contained in the supplied VDI."; + implementation=No_fd Cli_operations.vm_appliance_recover; + flags=[]; + }; + "appliance-assert-can-be-recovered", + { + reqd=["uuid"]; + optn=["database:"]; + help="Test whether storage is available to recover this VM appliance."; + implementation=No_fd Cli_operations.vm_appliance_assert_can_be_recovered; + flags=[]; + }; + "gpu-group-create", + { + reqd=["name-label"]; + optn=["name-description"]; + help="Create an empty GPU group"; + implementation=No_fd Cli_operations.gpu_group_create; + flags=[]; + }; + "gpu-group-destroy", + { + reqd=["uuid"]; + optn=[""]; + help="Destroy a GPU group"; + implementation=No_fd Cli_operations.gpu_group_destroy; + flags=[]; + }; + "gpu-group-get-remaining-capacity", + { + 
reqd=["uuid";"vgpu-type-uuid"]; + optn=[]; + help="Calculate the number of VGPUs of the specified type which still be started in the group"; + implementation = No_fd Cli_operations.gpu_group_get_remaining_capacity; + flags=[]; + }; + "vgpu-create", + { + reqd=["vm-uuid";"gpu-group-uuid"]; + optn=["vgpu-type-uuid"]; (* "device" should be added here once we allow >1 vGPU/VM *) + help="Create a vGPU."; + implementation=No_fd Cli_operations.vgpu_create; + flags=[]; + }; + "vgpu-destroy", + { + reqd=["uuid"]; + optn=[]; + help="Destroy a vGPU."; + implementation=No_fd Cli_operations.vgpu_destroy; + flags=[]; + }; + "drtask-create", + { + reqd=["type"]; + optn=["device-config:"; "sr-whitelist"]; + help="Create a disaster recovery task."; + implementation=No_fd Cli_operations.dr_task_create; + flags=[] + }; + "drtask-destroy", + { + reqd=["uuid"]; + optn=[]; + help="Destroy a disaster recovery task."; + implementation=No_fd Cli_operations.dr_task_destroy; + flags=[] + }; + + "pgpu-enable-dom0-access", + { + reqd=["uuid"]; + optn=[]; + help="Enable PGPU access to dom0."; + implementation=No_fd Cli_operations.pgpu_enable_dom0_access; + flags=[] + }; + + "pgpu-disable-dom0-access", + { + reqd=["uuid"]; + optn=[]; + help="Disable PGPU access to dom0."; + implementation=No_fd Cli_operations.pgpu_disable_dom0_access; + flags=[] + }; + + "lvhd-enable-thin-provisioning", + { + reqd=["sr-uuid"; "initial-allocation"; "allocation-quantum"]; + optn=[]; + help="Enable thin-provisioning on an LVHD SR."; + implementation=No_fd Cli_operations.lvhd_enable_thin_provisioning; + flags=[Host_selectors]; + } ] let cmdtable : (string, cmd_spec) Hashtbl.t = @@ -2645,16 +2645,16 @@ let cmdtable_geneva : (string, cmd_spec) Hashtbl.t = let populated = ref false let populate_cmdtable rpc session_id = - if !populated then () + if !populated then () else begin populated := true; List.iter - (fun (n,c)->Hashtbl.add cmdtable n c) - (cmdtable_data @ (Cli_operations.gen_cmds rpc session_id)); (* 
Autogenerated commands too *) + (fun (n,c)->Hashtbl.add cmdtable n c) + (cmdtable_data @ (Cli_operations.gen_cmds rpc session_id)); (* Autogenerated commands too *) List.iter - (fun (n,c)->Hashtbl.add cmdtable_geneva n c) - (Cli_operations_geneva.cmdtable_data) + (fun (n,c)->Hashtbl.add cmdtable_geneva n c) + (Cli_operations_geneva.cmdtable_data) end (* ---------------------------------------------------------------------- @@ -2688,13 +2688,13 @@ let convert_switch switch = Backtrace.reraise e (ParseError ("Unknown switch: "^switch)) type token = - | Id of string - | Eq - + | Id of string + | Eq + type commandline = - { cmdname : string; - argv0 : string; - params : (string*string) list } + { cmdname : string; + argv0 : string; + params : (string*string) list } let get_params cmd = cmd.params let get_cmdname cmd = cmd.cmdname @@ -2707,14 +2707,14 @@ let get_reqd_param cmd p = let string_of_token t = match t with - | Id s -> "Id("^s^")" - | Eq -> "Eq" + | Id s -> "Id("^s^")" + | Eq -> "Eq" let starts_with s prefix = let s_len = String.length s in let p_len = String.length prefix in (p_len<=s_len) && - (String.sub s 0 p_len)=prefix + (String.sub s 0 p_len)=prefix let tokens_of_argv argv_list = @@ -2724,101 +2724,101 @@ let tokens_of_argv argv_list = let split_on_eq s = let rec f cl sofar = let flush_to_id() = - if sofar<>[] then - add_param (Id (String.implode (List.rev sofar))) in - - match cl with - [] -> flush_to_id() - | ('='::cs) -> - (flush_to_id(); - add_param Eq; - f cs []) - | (c::cs) -> - f cs (c::sofar) + if sofar<>[] then + add_param (Id (String.implode (List.rev sofar))) in + + match cl with + [] -> flush_to_id() + | ('='::cs) -> + (flush_to_id(); + add_param Eq; + f cs []) + | (c::cs) -> + f cs (c::sofar) in f (String.explode s) [] in let rec f argv_list = match argv_list with - [] -> () - | (x::xs) -> - (* x may be a unary switch: *) - if (starts_with x "--") then - begin - add_param (Id (convert_switch x)); - add_param Eq; - add_param (Id "true"); - f 
xs - end - (* x may be a diadic switch *) - else if (starts_with x "-") then - begin - match xs with - [] -> - raise (ParseError ("Switch "^x^" requires parameter")) - | (z::zs) -> - begin - add_param (Id (convert_switch x)); - add_param Eq; - add_param (Id z); - f zs - end - end - else - (* otherwise tokenize, splitting on equals: *) - begin - split_on_eq x; - f xs - end in - begin - f argv_list; - List.rev (!tokens) - end + [] -> () + | (x::xs) -> + (* x may be a unary switch: *) + if (starts_with x "--") then + begin + add_param (Id (convert_switch x)); + add_param Eq; + add_param (Id "true"); + f xs + end + (* x may be a diadic switch *) + else if (starts_with x "-") then + begin + match xs with + [] -> + raise (ParseError ("Switch "^x^" requires parameter")) + | (z::zs) -> + begin + add_param (Id (convert_switch x)); + add_param Eq; + add_param (Id z); + f zs + end + end + else + (* otherwise tokenize, splitting on equals: *) + begin + split_on_eq x; + f xs + end in + begin + f argv_list; + List.rev (!tokens) + end let parse tokens = - let rec read_rval ts sofar = + let rec read_rval ts sofar = match ts with - [] -> ([],sofar) - | (Id s1)::(Id s2)::ts -> ((Id s2)::ts, sofar^s1) - | (Eq::ts) -> read_rval ts (sofar^"=") - | ((Id s1)::ts) -> read_rval ts (sofar^s1) in + [] -> ([],sofar) + | (Id s1)::(Id s2)::ts -> ((Id s2)::ts, sofar^s1) + | (Eq::ts) -> read_rval ts (sofar^"=") + | ((Id s1)::ts) -> read_rval ts (sofar^s1) in let rec parse_params ts = match ts with - [] -> [] - | ((Id s)::Eq::ts) -> - let (rest_of_tokens,rval) = read_rval ts "" in - (s,rval)::(parse_params rest_of_tokens) - | x::_ -> raise (ParseError (string_of_token x)) (* !!! Do some diagnostic here *) + [] -> [] + | ((Id s)::Eq::ts) -> + let (rest_of_tokens,rval) = read_rval ts "" in + (s,rval)::(parse_params rest_of_tokens) + | x::_ -> raise (ParseError (string_of_token x)) (* !!! 
Do some diagnostic here *) in match tokens with - | ((Id cli_name)::(Id cname)::ts) -> - {cmdname = cname; - argv0 = cli_name; - params = (parse_params ts) } - | _ -> raise (ParseError ("No arguments given")) + | ((Id cli_name)::(Id cname)::ts) -> + {cmdname = cname; + argv0 = cli_name; + params = (parse_params ts) } + | _ -> raise (ParseError ("No arguments given")) let rec parse_params_2 xs = match xs with - p::ps -> - (* unary *) - if (starts_with p "--") - then (String.sub p 2 (String.length p - 2),"true")::parse_params_2 ps - else - begin - (* x may be a diadic switch *) - if (starts_with p "-") - then - match ps with - q::qs -> (convert_switch p,q)::parse_params_2 qs - | _ -> failwith (Printf.sprintf "Switch %s requires a parameter\n" p) - else - let list = String.split '=' p in - let param_name=List.hd list in - let rest = String.concat "=" (List.tl list) in - (param_name,rest)::parse_params_2 ps - end - | [] -> [] + p::ps -> + (* unary *) + if (starts_with p "--") + then (String.sub p 2 (String.length p - 2),"true")::parse_params_2 ps + else + begin + (* x may be a diadic switch *) + if (starts_with p "-") + then + match ps with + q::qs -> (convert_switch p,q)::parse_params_2 qs + | _ -> failwith (Printf.sprintf "Switch %s requires a parameter\n" p) + else + let list = String.split '=' p in + let param_name=List.hd list in + let rest = String.concat "=" (List.tl list) in + (param_name,rest)::parse_params_2 ps + end + | [] -> [] let parse_commandline arg_list = try @@ -2830,7 +2830,7 @@ let parse_commandline arg_list = {cmdname = cmdname; argv0 = argv0; params = params} - with + with | e -> error "Rethrowing %s as ParseError \"\"" (Printexc.to_string e); Backtrace.reraise e (ParseError "") @@ -2842,7 +2842,7 @@ let parse_commandline arg_list = let make_list l = let indent = " " in let rec doline cur lines cmds = - match cmds with [] -> List.rev (cur::lines) | cmd::cmds -> + match cmds with [] -> List.rev (cur::lines) | cmd::cmds -> if String.length cur + 
String.length cmd > 74 then doline (indent^cmd) (cur::lines) cmds else doline (cur^", "^cmd) lines cmds @@ -2858,29 +2858,29 @@ let rio_help printer minimal cmd = let host_selectors = List.mem Host_selectors cmd_spec.flags in let sr_selectors = List.mem Sr_selectors cmd_spec.flags in let optional = - cmd_spec.optn @ - (if vm_selectors then vmselectors else []) @ - (if sr_selectors then srselectors else []) @ - (if host_selectors then hostselectors else []) + cmd_spec.optn @ + (if vm_selectors then vmselectors else []) @ + (if sr_selectors then srselectors else []) @ + (if host_selectors then hostselectors else []) in let desc = match (vm_selectors,host_selectors,sr_selectors) with - | (false,false,false) -> cmd_spec.help - | (true,false,false) -> cmd_spec.help ^ vmselectorsinfo - | (false,true,false) -> cmd_spec.help ^ hostselectorsinfo - | (false,false,true) -> cmd_spec.help ^ srselectorsinfo - | _ -> cmd_spec.help (* never happens currently *) + | (false,false,false) -> cmd_spec.help + | (true,false,false) -> cmd_spec.help ^ vmselectorsinfo + | (false,true,false) -> cmd_spec.help ^ hostselectorsinfo + | (false,false,true) -> cmd_spec.help ^ srselectorsinfo + | _ -> cmd_spec.help (* never happens currently *) in - let recs = - [("command name ",cmd); - ("reqd params ",String.concat ", " cmd_spec.reqd); - ("optional params ",String.concat ", " optional); - ("description ",desc)] in + let recs = + [("command name ",cmd); + ("reqd params ",String.concat ", " cmd_spec.reqd); + ("optional params ",String.concat ", " optional); + ("description ",desc)] in printer (Cli_printer.PTable [recs]) with - Not_found as e -> - Debug.log_backtrace e (Backtrace.get e); - error "Responding with Unknown command %s" cmd; - printer (Cli_printer.PList ["Unknown command '"^cmd^"'"]) + Not_found as e -> + Debug.log_backtrace e (Backtrace.get e); + error "Responding with Unknown command %s" cmd; + printer (Cli_printer.PList ["Unknown command '"^cmd^"'"]) in let cmds = List.filter (fun 
(x,_) -> not (List.mem x ["server";"username";"password";"port";"minimal";"all"])) cmd.params in if List.length cmds > 0 then @@ -2889,67 +2889,67 @@ let rio_help printer minimal cmd = let cmds = Hashtbl.fold (fun name cmd list -> ((name,cmd)::list)) cmdtable [] in let cmds = List.filter (fun (name,cmd) -> not (List.mem Hidden cmd.flags)) cmds in (* Filter hidden commands from help *) let cmds = List.sort (fun (name1,cmd1) (name2,cmd2) -> compare name1 name2) cmds in - + begin - if(List.mem_assoc "all" cmd.params && List.assoc "all" cmd.params = "true") then - let cmds = List.map fst cmds in - let (host_cmds,other) = List.partition (fun n -> String.startswith "host-" n) cmds in - let (vm_cmds,other) = List.partition (fun n -> String.startswith "vm-" n) other in - - let h = "Usage: "^cmd.argv0^" [-s server] [-pw passwd] [-p port] [-u user] [-pwf password-file]\n" in - let h = h ^ " [command specific arguments]\n\n" in - let h = h ^ "To get help on a specific command: "^cmd.argv0^" help \n\n" in - let h = h ^ "Full command list\n-----------------" in - begin - if (minimal) - then - printer (Cli_printer.PList cmds) - else - begin - printer (Cli_printer.PList [h]); - printer (Cli_printer.PList (make_list (host_cmds))); - printer (Cli_printer.PList (make_list [""])); - printer (Cli_printer.PList (make_list (vm_cmds))); - printer (Cli_printer.PList (make_list [""])); - printer (Cli_printer.PList (make_list (other))); - end - end + if(List.mem_assoc "all" cmd.params && List.assoc "all" cmd.params = "true") then + let cmds = List.map fst cmds in + let (host_cmds,other) = List.partition (fun n -> String.startswith "host-" n) cmds in + let (vm_cmds,other) = List.partition (fun n -> String.startswith "vm-" n) other in + + let h = "Usage: "^cmd.argv0^" [-s server] [-pw passwd] [-p port] [-u user] [-pwf password-file]\n" in + let h = h ^ " [command specific arguments]\n\n" in + let h = h ^ "To get help on a specific command: "^cmd.argv0^" help \n\n" in + let h = h ^ "Full 
command list\n-----------------" in + begin + if (minimal) + then + printer (Cli_printer.PList cmds) + else + begin + printer (Cli_printer.PList [h]); + printer (Cli_printer.PList (make_list (host_cmds))); + printer (Cli_printer.PList (make_list [""])); + printer (Cli_printer.PList (make_list (vm_cmds))); + printer (Cli_printer.PList (make_list [""])); + printer (Cli_printer.PList (make_list (other))); + end + end else - let cmds = List.filter (fun (name,cmd) -> List.mem Standard cmd.flags) cmds in - let cmds = List.map fst cmds in - let h = "Usage: "^cmd.argv0^" [-s server] [-pw passwd] [-p port] [-u user] [-pwf password-file]\n" in - let h = h ^ " [command specific arguments]\n\n" in - let h = h ^ "To get help on a specific command: "^cmd.argv0^" help \n" in - let h = h ^ "To get a full listing of commands: "^cmd.argv0^" help --all\n\n" in - - let h = h ^ "Common command list\n-------------------" in - - if (minimal) - then - printer (Cli_printer.PList cmds) - else - begin - printer (Cli_printer.PList [h]); - printer (Cli_printer.PList (make_list (cmds))); - end + let cmds = List.filter (fun (name,cmd) -> List.mem Standard cmd.flags) cmds in + let cmds = List.map fst cmds in + let h = "Usage: "^cmd.argv0^" [-s server] [-pw passwd] [-p port] [-u user] [-pwf password-file]\n" in + let h = h ^ " [command specific arguments]\n\n" in + let h = h ^ "To get help on a specific command: "^cmd.argv0^" help \n" in + let h = h ^ "To get a full listing of commands: "^cmd.argv0^" help --all\n\n" in + + let h = h ^ "Common command list\n-------------------" in + + if (minimal) + then + printer (Cli_printer.PList cmds) + else + begin + printer (Cli_printer.PList [h]); + printer (Cli_printer.PList (make_list (cmds))); + end end - + let geneva_help printer minimal cmd = let docmd cmd = try let cmd_spec = Hashtbl.find cmdtable_geneva cmd in - let recs = - [("command name ",cmd); - ("description ",cmd_spec.help); - ("reqd params ",String.concat ", " cmd_spec.reqd); - ("optional 
params ",String.concat ", " cmd_spec.optn)] in + let recs = + [("command name ",cmd); + ("description ",cmd_spec.help); + ("reqd params ",String.concat ", " cmd_spec.reqd); + ("optional params ",String.concat ", " cmd_spec.optn)] in printer (Cli_printer.PTable [recs]) with - Not_found as e -> - Debug.log_backtrace e (Backtrace.get e); - error "Responding with Unknown command %s" cmd; - printer (Cli_printer.PList ["Unknown command '"^cmd^"'"]) + Not_found as e -> + Debug.log_backtrace e (Backtrace.get e); + error "Responding with Unknown command %s" cmd; + printer (Cli_printer.PList ["Unknown command '"^cmd^"'"]) in if List.mem_assoc "cmd" cmd.params then docmd (List.assoc "cmd" cmd.params) @@ -2971,9 +2971,9 @@ let geneva_help printer minimal cmd = printer (Cli_printer.PList ["VM Commands:"]); printer (Cli_printer.PList (make_list vm_cmds)) end - + let cmd_help printer minimal cmd = - rio_help printer minimal cmd + rio_help printer minimal cmd diff --git a/ocaml/xapi/cli_key.ml b/ocaml/xapi/cli_key.ml index 1fe43987982..b0571847b2d 100644 --- a/ocaml/xapi/cli_key.ml +++ b/ocaml/xapi/cli_key.ml @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** +(** * @group Command-Line Interface (CLI) - *) - +*) + let force="force" let live="live" let internal="internal" diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 667cf2f4857..43e583385e1 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -13,7 +13,7 @@ *) (** * @group Command-Line Interface (CLI) - *) +*) open Cli_protocol open Cli_util @@ -33,278 +33,278 @@ let failwith str = raise (Cli_util.Cli_failure str) exception ExitWithError of int let bool_of_string param string = - let s = String.lowercase string in - match s with - "true" -> true - | "t" -> true - | "1" -> true - | "false" -> false - | "f" -> false - | "0" -> false - | _ -> failwith ("Failed to parse parameter '"^param^"': expecting 'true' or 'false'") + let s = String.lowercase string in + match s with + "true" -> true + | "t" -> true + | "1" -> true + | "false" -> false + | "f" -> false + | "0" -> false + | _ -> failwith ("Failed to parse parameter '"^param^"': expecting 'true' or 'false'") let get_bool_param params ?(default = false) param = - if List.mem_assoc param params - then bool_of_string param (List.assoc param params) - else default + if List.mem_assoc param params + then bool_of_string param (List.assoc param params) + else default open Client let progress_bar printer task_record = - let progress = task_record.API.task_progress in - let hashes = String.make (int_of_float (progress *. 70.)) '#' in - let animation = "|/-\\" in - let char = animation.[int_of_float (progress *. 100.) mod (String.length animation)] in - let line = Printf.sprintf "\r %3d%% %c %s" (int_of_float (progress *. 100.)) char hashes in - Cli_printer.PStderr line |> printer + let progress = task_record.API.task_progress in + let hashes = String.make (int_of_float (progress *. 70.)) '#' in + let animation = "|/-\\" in + let char = animation.[int_of_float (progress *. 100.) 
mod (String.length animation)] in + let line = Printf.sprintf "\r %3d%% %c %s" (int_of_float (progress *. 100.)) char hashes in + Cli_printer.PStderr line |> printer let wait_with_progress_bar printer rpc session_id task = - Cli_util.track (progress_bar printer) rpc session_id task; - Cli_printer.PStderr "\n" |> printer; - Cli_util.result_from_task rpc session_id task + Cli_util.track (progress_bar printer) rpc session_id task; + Cli_printer.PStderr "\n" |> printer; + Cli_util.result_from_task rpc session_id task let wait printer rpc session_id task = - Cli_util.track (fun _ -> ()) rpc session_id task; - Cli_util.result_from_task rpc session_id task + Cli_util.track (fun _ -> ()) rpc session_id task; + Cli_util.result_from_task rpc session_id task let waiter printer rpc session_id params task = - finally - (fun () -> - (if List.mem_assoc "progress" params - then wait_with_progress_bar - else wait) printer rpc session_id task) - (fun () -> - Client.Task.destroy rpc session_id task - ) + finally + (fun () -> + (if List.mem_assoc "progress" params + then wait_with_progress_bar + else wait) printer rpc session_id task) + (fun () -> + Client.Task.destroy rpc session_id task + ) (* Return the list of k=v pairs for maps *) let read_map_params name params = - let len = String.length name + 1 in (* include ':' *) - let filter_params = List.filter (fun (p,_) -> (String.startswith name p) && (String.length p > len)) params in - List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params + let len = String.length name + 1 in (* include ':' *) + let filter_params = List.filter (fun (p,_) -> (String.startswith name p) && (String.length p > len)) params in + List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params let read_set_params name params = List.map fst (read_map_params name params) let get_chunks fd = - let buffer = Buffer.create 10240 in - let rec f () = - match unmarshal fd with - | Blob (Chunk len) -> - debug "Reading a 
chunk of %ld bytes" len; - let data = Unixext.really_read_string fd (Int32.to_int len) in - Buffer.add_string buffer data; - f() - | Blob End -> - Buffer.contents buffer - | _ -> - failwith "Thin CLI protocol error" - in - f() + let buffer = Buffer.create 10240 in + let rec f () = + match unmarshal fd with + | Blob (Chunk len) -> + debug "Reading a chunk of %ld bytes" len; + let data = Unixext.really_read_string fd (Int32.to_int len) in + Buffer.add_string buffer data; + f() + | Blob End -> + Buffer.contents buffer + | _ -> + failwith "Thin CLI protocol error" + in + f() let get_client_file fd filename = - marshal fd (Command (Load filename)); - match unmarshal fd with - | Response OK -> - Some (get_chunks fd) - | Response Failed -> - None - | _ -> - failwith "Thin CLI protocol error" + marshal fd (Command (Load filename)); + match unmarshal fd with + | Response OK -> + Some (get_chunks fd) + | Response Failed -> + None + | _ -> + failwith "Thin CLI protocol error" let diagnostic_compact printer rpc session_id params = - Gc.compact () + Gc.compact () let diagnostic_gc_stats printer rpc session_id params = - let stat = Gc.stat () in - let table = - ["minor_words",string_of_float stat.Gc.minor_words; - "promoted_words",string_of_float stat.Gc.promoted_words; - "major_words",string_of_float stat.Gc.major_words; - "minor_collections",string_of_int stat.Gc.minor_collections; - "major_collections",string_of_int stat.Gc.major_collections; - "heap_words",string_of_int stat.Gc.heap_words; - "heap_chunks",string_of_int stat.Gc.heap_chunks; - "live_words",string_of_int stat.Gc.live_words; - "live_blocks",string_of_int stat.Gc.live_blocks; - "free_words",string_of_int stat.Gc.free_words; - "free_blocks",string_of_int stat.Gc.free_blocks; - "largest_free",string_of_int stat.Gc.largest_free; - "fragments",string_of_int stat.Gc.fragments; - "compactions",string_of_int stat.Gc.compactions; - "top_heap_words",string_of_int stat.Gc.top_heap_words; - ] in - printer 
(Cli_printer.PTable [table]) + let stat = Gc.stat () in + let table = + ["minor_words",string_of_float stat.Gc.minor_words; + "promoted_words",string_of_float stat.Gc.promoted_words; + "major_words",string_of_float stat.Gc.major_words; + "minor_collections",string_of_int stat.Gc.minor_collections; + "major_collections",string_of_int stat.Gc.major_collections; + "heap_words",string_of_int stat.Gc.heap_words; + "heap_chunks",string_of_int stat.Gc.heap_chunks; + "live_words",string_of_int stat.Gc.live_words; + "live_blocks",string_of_int stat.Gc.live_blocks; + "free_words",string_of_int stat.Gc.free_words; + "free_blocks",string_of_int stat.Gc.free_blocks; + "largest_free",string_of_int stat.Gc.largest_free; + "fragments",string_of_int stat.Gc.fragments; + "compactions",string_of_int stat.Gc.compactions; + "top_heap_words",string_of_int stat.Gc.top_heap_words; + ] in + printer (Cli_printer.PTable [table]) let diagnostic_timing_stats printer rpc session_id params = - let table_of_host host = - [ "host-uuid", Client.Host.get_uuid rpc session_id host; - "host-name-label", Client.Host.get_name_label rpc session_id host ] @ - (try - Client.Host.get_diagnostic_timing_stats rpc session_id host - with e -> - [ "Error", Api_errors.to_string e ]) in - let all = List.map table_of_host (Client.Host.get_all rpc session_id) in + let table_of_host host = + [ "host-uuid", Client.Host.get_uuid rpc session_id host; + "host-name-label", Client.Host.get_name_label rpc session_id host ] @ + (try + Client.Host.get_diagnostic_timing_stats rpc session_id host + with e -> + [ "Error", Api_errors.to_string e ]) in + let all = List.map table_of_host (Client.Host.get_all rpc session_id) in - printer (Cli_printer.PTable all) + printer (Cli_printer.PTable all) let diagnostic_net_stats printer rpc session_id params = - let all = Http_svr.Server.all_stats Xapi_http.server in - let meth (m, _, _) = - not (List.mem_assoc "method" params) - || (String.lowercase(Http.string_of_method_t m) = 
String.lowercase (List.assoc "method" params)) in - let uri (_, u, _) = - not (List.mem_assoc "uri" params) - || (String.lowercase u = String.lowercase (List.assoc "uri" params)) in - let has_param x = not(List.mem_assoc "params" params) || (List.mem x (String.split ',' (List.assoc "params" params))) in - let all = List.filter meth (List.filter uri all) in - let rows = List.map - (fun (m, uri, stats) -> - let m' = if has_param "method" then [ Http.string_of_method_t m ] else [] in - let uri' = if has_param "uri" then [ uri ] else [] in - let requests' = if has_param "requests" then [ string_of_int stats.Http_svr.Stats.n_requests ] else [] in - let connections' = if has_param "connections" then [ string_of_int stats.Http_svr.Stats.n_connections ] else [] in - let framed' = if has_param "framed" then [ string_of_int stats.Http_svr.Stats.n_framed ] else [] in - m' @ uri' @ requests' @ connections' @ framed' - ) all in - let widths = Table.compute_col_widths rows in - let sll = List.map (List.map2 Table.right widths) rows in - List.iter (fun line -> printer (Cli_printer.PMsg (String.concat " | " line))) sll + let all = Http_svr.Server.all_stats Xapi_http.server in + let meth (m, _, _) = + not (List.mem_assoc "method" params) + || (String.lowercase(Http.string_of_method_t m) = String.lowercase (List.assoc "method" params)) in + let uri (_, u, _) = + not (List.mem_assoc "uri" params) + || (String.lowercase u = String.lowercase (List.assoc "uri" params)) in + let has_param x = not(List.mem_assoc "params" params) || (List.mem x (String.split ',' (List.assoc "params" params))) in + let all = List.filter meth (List.filter uri all) in + let rows = List.map + (fun (m, uri, stats) -> + let m' = if has_param "method" then [ Http.string_of_method_t m ] else [] in + let uri' = if has_param "uri" then [ uri ] else [] in + let requests' = if has_param "requests" then [ string_of_int stats.Http_svr.Stats.n_requests ] else [] in + let connections' = if has_param "connections" then [ 
string_of_int stats.Http_svr.Stats.n_connections ] else [] in + let framed' = if has_param "framed" then [ string_of_int stats.Http_svr.Stats.n_framed ] else [] in + m' @ uri' @ requests' @ connections' @ framed' + ) all in + let widths = Table.compute_col_widths rows in + let sll = List.map (List.map2 Table.right widths) rows in + List.iter (fun line -> printer (Cli_printer.PMsg (String.concat " | " line))) sll let diagnostic_db_stats printer rpc session_id params = - let (n,avgtime,min,max) = Db_lock.report () in - let (writes,reads,creates,drops,tasks,threads) = Stats.summarise_db_calls () in - printer (Cli_printer.PMsg (Printf.sprintf "DB lock stats: n=%d avgtime=%f min=%f max=%f" n avgtime min max)); - printer (Cli_printer.PMsg "Reads:"); - printer (Cli_printer.PList reads); - printer (Cli_printer.PMsg "Writes:"); - printer (Cli_printer.PList writes); - printer (Cli_printer.PMsg "Creates:"); - printer (Cli_printer.PList creates); - printer (Cli_printer.PMsg "Drops:"); - printer (Cli_printer.PList drops); - printer (Cli_printer.PMsg "Tasks:"); - printer (Cli_printer.PTable (List.map (fun (name,ops)-> ("task",name)::ops) (List.sort (fun (t1,ops1) (t2,ops2)-> compare (List.length ops2) (List.length ops1)) tasks))); - printer (Cli_printer.PMsg "Threads:"); - printer (Cli_printer.PTable (List.map (fun (id,ops)-> ("thread",string_of_int id)::ops) threads)) + let (n,avgtime,min,max) = Db_lock.report () in + let (writes,reads,creates,drops,tasks,threads) = Stats.summarise_db_calls () in + printer (Cli_printer.PMsg (Printf.sprintf "DB lock stats: n=%d avgtime=%f min=%f max=%f" n avgtime min max)); + printer (Cli_printer.PMsg "Reads:"); + printer (Cli_printer.PList reads); + printer (Cli_printer.PMsg "Writes:"); + printer (Cli_printer.PList writes); + printer (Cli_printer.PMsg "Creates:"); + printer (Cli_printer.PList creates); + printer (Cli_printer.PMsg "Drops:"); + printer (Cli_printer.PList drops); + printer (Cli_printer.PMsg "Tasks:"); + printer (Cli_printer.PTable 
(List.map (fun (name,ops)-> ("task",name)::ops) (List.sort (fun (t1,ops1) (t2,ops2)-> compare (List.length ops2) (List.length ops1)) tasks))); + printer (Cli_printer.PMsg "Threads:"); + printer (Cli_printer.PTable (List.map (fun (id,ops)-> ("thread",string_of_int id)::ops) threads)) let diagnostic_db_log printer rpc session_id params = - Stats.log_stats := true; - printer (Cli_printer.PMsg "Database/task statistics gathering enabled. Warning, this never releases memory! Restart xapi to reset.") + Stats.log_stats := true; + printer (Cli_printer.PMsg "Database/task statistics gathering enabled. Warning, this never releases memory! Restart xapi to reset.") type host_license = { - hostname: string; - uuid: string; - rstr: Features.feature list; - edition: string; - edition_short: string; - expiry: float; + hostname: string; + uuid: string; + rstr: Features.feature list; + edition: string; + edition_short: string; + expiry: float; } let host_license_of_r host_r editions = - let params = host_r.API.host_license_params in - let rstr = Features.of_assoc_list params in - let expiry = - if List.mem_assoc "expiry" params then - Date.to_float (Date.of_string (List.assoc "expiry" params)) - else - 0. - in - let edition = host_r.API.host_edition in - let edition_short = List.hd - (List.filter_map (fun (a, _, b, _) -> if a = edition then Some b else None) editions) in - { - hostname = host_r.API.host_hostname; - uuid = host_r.API.host_uuid; - rstr = rstr; - edition = edition; - edition_short = edition_short; - expiry = expiry; - } + let params = host_r.API.host_license_params in + let rstr = Features.of_assoc_list params in + let expiry = + if List.mem_assoc "expiry" params then + Date.to_float (Date.of_string (List.assoc "expiry" params)) + else + 0. 
+ in + let edition = host_r.API.host_edition in + let edition_short = List.hd + (List.filter_map (fun (a, _, b, _) -> if a = edition then Some b else None) editions) in + { + hostname = host_r.API.host_hostname; + uuid = host_r.API.host_uuid; + rstr = rstr; + edition = edition; + edition_short = edition_short; + expiry = expiry; + } let diagnostic_license_status printer rpc session_id params = - let hosts = Client.Host.get_all_records rpc session_id in - let heading = [ "Hostname"; "UUID"; "Features"; "Code"; "Free"; "Expiry"; "Days left" ] in - let editions = V6client.get_editions "diagnostic_license_status" in - - let valid, invalid = List.partition (fun (_, host_r) -> try ignore(host_license_of_r host_r editions); true with _ -> false) hosts in - let host_licenses = List.map (fun (_, host_r) -> host_license_of_r host_r editions) valid in - (* Sort licenses into nearest-expiry first then free *) - let host_licenses = List.sort (fun a b -> - let a_expiry = a.expiry and b_expiry = b.expiry in - let a_free = a.edition = "free" - and b_free = b.edition = "free" in - if a_expiry < b_expiry then -1 - else - if a_expiry > b_expiry then 1 - else - if a_free && not b_free then -1 - else - if not a_free && b_free then 1 - else 0) host_licenses in - let now = Unix.gettimeofday () in - let hosts = List.map (fun h -> [ h.hostname; - String.sub h.uuid 0 8; - Features.to_compact_string h.rstr; - h.edition_short; - string_of_bool (h.edition = "free"); - Date.to_string (Date.of_float h.expiry); - Printf.sprintf "%.1f" ((h.expiry -. now) /. (24. *. 60. *. 
60.)); - ]) host_licenses in - let invalid_hosts = List.map (fun (_, host_r) -> [ host_r.API.host_hostname; - String.sub host_r.API.host_uuid 0 8; - "-"; "-"; "-"; "-"; "-" ]) invalid in - let __context = Context.make "diagnostic_license_status" in - let pool = Helpers.get_pool ~__context in - let pool_features = Features.of_assoc_list (Db.Pool.get_restrictions ~__context ~self:pool) in - let pool_free = List.fold_left (||) false (List.map (fun h -> h.edition = "free") host_licenses) in - let divider = [ "-"; "-"; "-"; "-"; "-"; "-"; "-" ] in - let pool = [ "-"; "-"; Features.to_compact_string pool_features; "-"; string_of_bool pool_free; "-"; "-" ] in - let table = heading :: divider :: hosts @ invalid_hosts @ [ divider; pool ] in - - (* Compute the required column widths *) - let rec transpose x = - if List.filter (fun x -> x <> []) x = [] - then [] - else - let heads = List.map List.hd x in - let tails = List.map List.tl x in - heads :: (transpose tails) in - let map f x = List.map (List.map f) x in - let column_sizes = List.map (List.fold_left max 0) (transpose (map String.length table)) in - - List.iter - (fun row -> - printer (Cli_printer.PMsg (String.concat " " (List.map (fun (data, len) -> data ^ (String.make (len - String.length data) ' ')) (List.combine row column_sizes)))) - ) table + let hosts = Client.Host.get_all_records rpc session_id in + let heading = [ "Hostname"; "UUID"; "Features"; "Code"; "Free"; "Expiry"; "Days left" ] in + let editions = V6client.get_editions "diagnostic_license_status" in + + let valid, invalid = List.partition (fun (_, host_r) -> try ignore(host_license_of_r host_r editions); true with _ -> false) hosts in + let host_licenses = List.map (fun (_, host_r) -> host_license_of_r host_r editions) valid in + (* Sort licenses into nearest-expiry first then free *) + let host_licenses = List.sort (fun a b -> + let a_expiry = a.expiry and b_expiry = b.expiry in + let a_free = a.edition = "free" + and b_free = b.edition = "free" in + 
if a_expiry < b_expiry then -1 + else + if a_expiry > b_expiry then 1 + else + if a_free && not b_free then -1 + else + if not a_free && b_free then 1 + else 0) host_licenses in + let now = Unix.gettimeofday () in + let hosts = List.map (fun h -> [ h.hostname; + String.sub h.uuid 0 8; + Features.to_compact_string h.rstr; + h.edition_short; + string_of_bool (h.edition = "free"); + Date.to_string (Date.of_float h.expiry); + Printf.sprintf "%.1f" ((h.expiry -. now) /. (24. *. 60. *. 60.)); + ]) host_licenses in + let invalid_hosts = List.map (fun (_, host_r) -> [ host_r.API.host_hostname; + String.sub host_r.API.host_uuid 0 8; + "-"; "-"; "-"; "-"; "-" ]) invalid in + let __context = Context.make "diagnostic_license_status" in + let pool = Helpers.get_pool ~__context in + let pool_features = Features.of_assoc_list (Db.Pool.get_restrictions ~__context ~self:pool) in + let pool_free = List.fold_left (||) false (List.map (fun h -> h.edition = "free") host_licenses) in + let divider = [ "-"; "-"; "-"; "-"; "-"; "-"; "-" ] in + let pool = [ "-"; "-"; Features.to_compact_string pool_features; "-"; string_of_bool pool_free; "-"; "-" ] in + let table = heading :: divider :: hosts @ invalid_hosts @ [ divider; pool ] in + + (* Compute the required column widths *) + let rec transpose x = + if List.filter (fun x -> x <> []) x = [] + then [] + else + let heads = List.map List.hd x in + let tails = List.map List.tl x in + heads :: (transpose tails) in + let map f x = List.map (List.map f) x in + let column_sizes = List.map (List.fold_left max 0) (transpose (map String.length table)) in + + List.iter + (fun row -> + printer (Cli_printer.PMsg (String.concat " " (List.map (fun (data, len) -> data ^ (String.make (len - String.length data) ' ')) (List.combine row column_sizes)))) + ) table let get_hosts_by_name_or_id rpc session_id name = - let hosts = Client.Host.get_all_records_where rpc session_id "true" in - let allrecs = List.map (fun (host,host_r) -> let r = host_record rpc 
session_id host in r.setrefrec (host,host_r); r) hosts in - let hosts = List.filter - (fun x -> (safe_get_field (field_lookup x.fields "name-label") = name - || (safe_get_field (field_lookup x.fields "uuid") = name))) allrecs in - hosts + let hosts = Client.Host.get_all_records_where rpc session_id "true" in + let allrecs = List.map (fun (host,host_r) -> let r = host_record rpc session_id host in r.setrefrec (host,host_r); r) hosts in + let hosts = List.filter + (fun x -> (safe_get_field (field_lookup x.fields "name-label") = name + || (safe_get_field (field_lookup x.fields "uuid") = name))) allrecs in + hosts let get_host_by_name_or_id rpc session_id name = - let hosts = get_hosts_by_name_or_id rpc session_id name in - if List.length hosts = 0 then (failwith ("Host "^name^" not found")); - List.nth hosts 0 + let hosts = get_hosts_by_name_or_id rpc session_id name in + if List.length hosts = 0 then (failwith ("Host "^name^" not found")); + List.nth hosts 0 let get_host_from_session rpc session_id = - Client.Session.get_this_host rpc session_id session_id + Client.Session.get_this_host rpc session_id session_id (* Create a VBD record in database and attempt to hotplug it, ignoring hotplug errors *) let create_vbd_and_plug_with_other_config rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams other_config = - let vbd = Client.VBD.create ~rpc ~session_id ~vM:vm ~vDI:vdi ~userdevice:device_name ~bootable ~mode:rw - ~_type:cd ~unpluggable ~empty:false ~qos_algorithm_type:qtype ~qos_algorithm_params:qparams ~other_config in - try Client.VBD.plug rpc session_id vbd - with Api_errors.Server_error(_, _) as e -> - debug "VBD created but not hotplugged: %s" (Api_errors.to_string e) + let vbd = Client.VBD.create ~rpc ~session_id ~vM:vm ~vDI:vdi ~userdevice:device_name ~bootable ~mode:rw + ~_type:cd ~unpluggable ~empty:false ~qos_algorithm_type:qtype ~qos_algorithm_params:qparams ~other_config in + try Client.VBD.plug rpc session_id vbd + with 
Api_errors.Server_error(_, _) as e -> + debug "VBD created but not hotplugged: %s" (Api_errors.to_string e) let create_vbd_and_plug rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams = - create_vbd_and_plug_with_other_config rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams [] + create_vbd_and_plug_with_other_config rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams [] let create_owner_vbd_and_plug rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams = - create_vbd_and_plug_with_other_config rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams [Xapi_globs.owner_key,""] + create_vbd_and_plug_with_other_config rpc session_id vm vdi device_name bootable rw cd unpluggable qtype qparams [Xapi_globs.owner_key,""] (* --------------------------------------------------------------------- @@ -313,29 +313,29 @@ let create_owner_vbd_and_plug rpc session_id vm vdi device_name bootable rw cd u (* NB: all logging is now via syslog. No manual log controls are here. *) let log_set_output printer _ session_id params = - () + () let log_get_keys printer _ session_id params = - () + () let log_get printer _ session_id params = - () + () let log_reopen printer _ session_id params = - () + () let string_of_task_status task = match task.API.task_status with - | `pending -> - Printf.sprintf "%d %% complete " - (int_of_float (task.API.task_progress *. 100.)) - | `success -> - "Completed" - | `failure -> - "Failed" - | `cancelling -> - "Cancelling" - | `cancelled -> - "Cancelled" + | `pending -> + Printf.sprintf "%d %% complete " + (int_of_float (task.API.task_progress *. 
100.)) + | `success -> + "Completed" + | `failure -> + "Failed" + | `cancelling -> + "Cancelling" + | `cancelled -> + "Cancelled" (*let task_list printer rpc session_id params = let internal = try (List.assoc "internal" params)="true" with _ -> false in @@ -349,7 +349,7 @@ let string_of_task_status task = match task.API.task_status with "descr",task.API.task_name_description; "status", (string_of_task_status task); ] in -(* If in show-internal mode, list the locks too *) + (* If in show-internal mode, list the locks too *) let locks = if internal then List.map (fun lock -> "lock", lock) (Client.Task.get_locks rpc session_id ref) @@ -358,13 +358,13 @@ let string_of_task_status task = match task.API.task_status with task_records in printer (Cli_printer.PTable recs) - *) +*) let user_password_change _ rpc session_id params = - let old_pwd = List.assoc_default "old" params "" - (* "new" must be in params here, since it is a required parameter. *) - and new_pwd = List.assoc "new" params in - Client.Session.change_password rpc session_id old_pwd new_pwd + let old_pwd = List.assoc_default "old" params "" + (* "new" must be in params here, since it is a required parameter. 
*) + and new_pwd = List.assoc "new" params in + Client.Session.change_password rpc session_id old_pwd new_pwd (** Low level CLI interface **) @@ -378,135 +378,135 @@ let user_password_change _ rpc session_id params = (* vm-param-list takes the uuid and lists either a default set of parameters, or those passed *) let alltrue l = - List.fold_left (&&) true l + List.fold_left (&&) true l let get_set_names rlist = - let sets = List.filter (fun r -> r.get_set <> None) rlist in - List.map (fun r -> r.name) sets + let sets = List.filter (fun r -> r.get_set <> None) rlist in + List.map (fun r -> r.name) sets let get_map_names rlist = - let maps = List.filter (fun r -> r.get_map <> None) rlist in - List.map (fun r -> r.name) maps + let maps = List.filter (fun r -> r.get_map <> None) rlist in + List.map (fun r -> r.name) maps let safe_get_field x = - try x.get () - with - | Api_errors.Server_error(s,_) as e-> if s=Api_errors.handle_invalid then "" else raise e - | e -> raise e + try x.get () + with + | Api_errors.Server_error(s,_) as e-> if s=Api_errors.handle_invalid then "" else raise e + | e -> raise e type fieldtype = Normal | Set of string | Map of string let get_field_type fieldname record = - if List.exists (fun field -> field.name=fieldname) record - then Normal - else - begin - (* New 'normal' behaviour is to split map name from key by the separator ':' *) - if String.contains fieldname ':' then - begin - let i = String.index fieldname ':' in - let real_fieldname = String.sub fieldname 0 i in - try - let field = List.find (fun field -> field.name=real_fieldname) record in - if field.get_set <> None - then Set field.name - else if field.get_map <> None - then Map field.name - else failwith ("Field '"^(field.name)^"' is not a set or map") - with - Not_found -> failwith ("Unknown field '"^fieldname^"'") - end - else - (* Old behaviour is to match like this: param-name-key=value *) - begin - (* Find all the maps, then sort in length order, longest first *) - let 
mapfields = List.filter (fun field -> field.get_map <> None) record in - let mapfields = List.sort (fun a b -> compare (String.length b.name) (String.length a.name)) mapfields in - try - (* Find the first (longest) matching field *) - let field = List.find (fun field -> String.startswith (field.name^"-") fieldname) mapfields in - Map field.name - with - Not_found -> - let setfields = List.filter (fun field -> field.get_set <> None) record in - let setfields = List.sort (fun a b -> compare (String.length b.name) (String.length a.name)) setfields in - try - let field = List.find (fun field -> String.startswith (field.name^"-") fieldname) setfields in - Set field.name - with - _ -> failwith ("Unknown field '"^fieldname^"'") - end - end + if List.exists (fun field -> field.name=fieldname) record + then Normal + else + begin + (* New 'normal' behaviour is to split map name from key by the separator ':' *) + if String.contains fieldname ':' then + begin + let i = String.index fieldname ':' in + let real_fieldname = String.sub fieldname 0 i in + try + let field = List.find (fun field -> field.name=real_fieldname) record in + if field.get_set <> None + then Set field.name + else if field.get_map <> None + then Map field.name + else failwith ("Field '"^(field.name)^"' is not a set or map") + with + Not_found -> failwith ("Unknown field '"^fieldname^"'") + end + else + (* Old behaviour is to match like this: param-name-key=value *) + begin + (* Find all the maps, then sort in length order, longest first *) + let mapfields = List.filter (fun field -> field.get_map <> None) record in + let mapfields = List.sort (fun a b -> compare (String.length b.name) (String.length a.name)) mapfields in + try + (* Find the first (longest) matching field *) + let field = List.find (fun field -> String.startswith (field.name^"-") fieldname) mapfields in + Map field.name + with + Not_found -> + let setfields = List.filter (fun field -> field.get_set <> None) record in + let setfields = 
List.sort (fun a b -> compare (String.length b.name) (String.length a.name)) setfields in + try + let field = List.find (fun field -> String.startswith (field.name^"-") fieldname) setfields in + Set field.name + with + _ -> failwith ("Unknown field '"^fieldname^"'") + end + end let filter_records_on_set_param records (k,v) s = - (* On entry here, s is the name of the parameter, and k will be of the form s[:-]contains *) - let n = String.length s in - let contains = String.sub k (n + 1) (String.length k - n - 1) in - if contains<>"contains" then failwith "Invalid syntax for set filtering (should be set-param:contains=key)"; - let filterfn record = - let field = field_lookup record.fields s in - let get_set = match field.get_set with - | Some x -> x - | None -> (failwith (Printf.sprintf "Client_records broken (field %s)" s)) - in - try - let set = get_set () in - let set, v = - if field.case_insensitive - then List.map String.lowercase set, String.lowercase v - else set, v in - List.exists (fun member -> v=member) set - with - _ -> false - in - List.filter filterfn records + (* On entry here, s is the name of the parameter, and k will be of the form s[:-]contains *) + let n = String.length s in + let contains = String.sub k (n + 1) (String.length k - n - 1) in + if contains<>"contains" then failwith "Invalid syntax for set filtering (should be set-param:contains=key)"; + let filterfn record = + let field = field_lookup record.fields s in + let get_set = match field.get_set with + | Some x -> x + | None -> (failwith (Printf.sprintf "Client_records broken (field %s)" s)) + in + try + let set = get_set () in + let set, v = + if field.case_insensitive + then List.map String.lowercase set, String.lowercase v + else set, v in + List.exists (fun member -> v=member) set + with + _ -> false + in + List.filter filterfn records let filter_records_on_map_param records (k,v) s = - (* On entry here, s is the name of the parameter, and k will be of the form s[:-]key *) - let n = 
String.length s in - let key = String.sub k (n + 1) (String.length k - n - 1) in - let filterfn record = - let field = field_lookup record.fields s in - let get_map = match field.get_map with - | Some x -> x - | None -> failwith (Printf.sprintf "Client_records broken (field %s)" s) - in - try - let map = get_map () in - let map, key, v = - if field.case_insensitive - then List.map (fun (k, v) -> String.lowercase k, v) map, String.lowercase key, String.lowercase v - else map, key, v in - List.mem_assoc key map && List.assoc key map = v - with - _ -> false - in - List.filter filterfn records + (* On entry here, s is the name of the parameter, and k will be of the form s[:-]key *) + let n = String.length s in + let key = String.sub k (n + 1) (String.length k - n - 1) in + let filterfn record = + let field = field_lookup record.fields s in + let get_map = match field.get_map with + | Some x -> x + | None -> failwith (Printf.sprintf "Client_records broken (field %s)" s) + in + try + let map = get_map () in + let map, key, v = + if field.case_insensitive + then List.map (fun (k, v) -> String.lowercase k, v) map, String.lowercase key, String.lowercase v + else map, key, v in + List.mem_assoc key map && List.assoc key map = v + with + _ -> false + in + List.filter filterfn records let filter_records_on_normal_param records (k,v) = - let filterfn record = - let field = field_lookup record.fields k in - let value = safe_get_field field in - if field.case_insensitive - then String.lowercase value = String.lowercase v - else value=v - in - List.filter filterfn records + let filterfn record = + let field = field_lookup record.fields k in + let value = safe_get_field field in + if field.case_insensitive + then String.lowercase value = String.lowercase v + else value=v + in + List.filter filterfn records let filter_records_on_fields records (k,v) = - (* Ignore empty lists *) - if records = [] then [] else begin + (* Ignore empty lists *) + if records = [] then [] else begin - (* 
We can only tell what types fields are by looking at a record itself. *) - (* We use the first one *) - let firstrec = List.hd records in + (* We can only tell what types fields are by looking at a record itself. *) + (* We use the first one *) + let firstrec = List.hd records in - (* Switch on the type of the field *) - match get_field_type k firstrec.fields with - Normal -> filter_records_on_normal_param records (k,v) - | Map s -> filter_records_on_map_param records (k,v) s - | Set s -> filter_records_on_set_param records (k,v) s - end + (* Switch on the type of the field *) + match get_field_type k firstrec.fields with + Normal -> filter_records_on_normal_param records (k,v) + | Map s -> filter_records_on_map_param records (k,v) s + | Set s -> filter_records_on_set_param records (k,v) s + end let stdparams = ["server";"password";"port";"username"; "minimal"; "force"; "multiple"; "all"; "message-priority"; "trace"] @@ -517,38 +517,38 @@ let stdparams = ["server";"password";"port";"username"; "minimal"; "force"; "mul let choose_params params defaults = - if List.mem_assoc "params" params - then - let ps = List.assoc "params" params in - (if ps="all" then [] else String.split_f (fun c -> c = ',') ps) - else defaults + if List.mem_assoc "params" params + then + let ps = List.assoc "params" params in + (if ps="all" then [] else String.split_f (fun c -> c = ',') ps) + else defaults let select_fields params records default_params = - let params = choose_params params default_params in - if params=[] then (List.map (fun record -> record.fields) records) else - (List.map (fun record -> List.filter (fun field -> List.mem field.name params) record.fields) records) + let params = choose_params params default_params in + if params=[] then (List.map (fun record -> record.fields) records) else + (List.map (fun record -> List.filter (fun field -> List.mem field.name params) record.fields) records) let print_field x = - let append = - if x.get_set <> None then - (* Set *) - if 
x.add_to_set = None then - " (SRO)" - else - " (SRW)" - else if x.get_map <> None then - (* map *) - if x.add_to_map = None then - " (MRO)" - else - " (MRW)" - else if x.set = None then - " ( RO)" - else - " ( RW)" - in - let result = safe_get_field x in - (x.name ^ append ^ (if x.deprecated then " [DEPRECATED]" else ""), result) + let append = + if x.get_set <> None then + (* Set *) + if x.add_to_set = None then + " (SRO)" + else + " (SRW)" + else if x.get_map <> None then + (* map *) + if x.add_to_map = None then + " (MRO)" + else + " (MRW)" + else if x.set = None then + " ( RO)" + else + " ( RW)" + in + let result = safe_get_field x in + (x.name ^ append ^ (if x.deprecated then " [DEPRECATED]" else ""), result) type printer = Cli_printer.print_fn type rpc = (Rpc.call -> Rpc.response) @@ -558,249 +558,249 @@ type params = (string * string) list (* open the database on the specified VDI and use the resulting session_id. *) (* If the parameter is not present, use the original session_id. 
*) let with_specified_database rpc session_id params f = - let database_params = read_map_params "database" params in - let use_db_vdi = List.mem_assoc "vdi-uuid" database_params in - let use_db_file = List.mem_assoc "filename" database_params in - if use_db_vdi && use_db_file then - failwith "xapi can query a DB vdi or a DB file, but not both."; - let session_id = - if use_db_vdi then begin - let database_vdi_uuid = List.assoc "vdi-uuid" database_params in - let database_vdi = Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:database_vdi_uuid in - Client.VDI.open_database ~rpc ~session_id ~self:database_vdi - end else if use_db_file then begin - let database_file = List.assoc "filename" database_params in - Client.Session.create_from_db_file ~rpc ~session_id ~filename:database_file - end else - session_id - in - finally - (fun () -> f session_id) - (fun () -> if use_db_vdi || use_db_file then Client.Session.logout ~rpc ~session_id) + let database_params = read_map_params "database" params in + let use_db_vdi = List.mem_assoc "vdi-uuid" database_params in + let use_db_file = List.mem_assoc "filename" database_params in + if use_db_vdi && use_db_file then + failwith "xapi can query a DB vdi or a DB file, but not both."; + let session_id = + if use_db_vdi then begin + let database_vdi_uuid = List.assoc "vdi-uuid" database_params in + let database_vdi = Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:database_vdi_uuid in + Client.VDI.open_database ~rpc ~session_id ~self:database_vdi + end else if use_db_file then begin + let database_file = List.assoc "filename" database_params in + Client.Session.create_from_db_file ~rpc ~session_id ~filename:database_file + end else + session_id + in + finally + (fun () -> f session_id) + (fun () -> if use_db_vdi || use_db_file then Client.Session.logout ~rpc ~session_id) let make_param_funs getall getallrecs getbyuuid record class_name def_filters def_list_params rpc session_id = - let get_record2 rpc session_id x = - let r = record rpc 
session_id x in - r.fields - in - - let get_record rpc session_id uuid = - get_record2 rpc session_id (getbyuuid ~rpc ~session_id ~uuid) - in - - let list printer rpc session_id params : unit = - with_specified_database rpc session_id params - (fun session_id -> - let all = getallrecs ~rpc ~session_id ~expr:"true" in - let all_recs = List.map (fun (r,x) -> let record = record rpc session_id r in record.setrefrec (r,x); record) all in - - (* Filter on everything on the cmd line except params=... *) - let filter_params = List.filter (fun (p,_) -> not (List.mem p ("params"::stdparams))) params in - (* Filter out all params beginning with "database:" *) - let filter_params = List.filter (fun (p,_) -> not (String.startswith "database:" p)) filter_params in - (* Add in the default filters *) - let filter_params = def_filters @ filter_params in - (* Filter all the records *) - let records = List.fold_left filter_records_on_fields all_recs filter_params in - - let print_all = get_bool_param params "all" in - - let print_params = select_fields params (if print_all then all_recs else records) def_list_params in - let print_params = List.map (fun fields -> List.filter (fun field -> not field.hidden) fields) print_params in - let print_params = List.map (fun fields -> List.map (fun field -> if field.expensive then makeexpensivefield field else field) fields) print_params in - - printer (Cli_printer.PTable (List.map (List.map print_field) print_params)) - ) - in - - let p_list printer rpc session_id params : unit = - with_specified_database rpc session_id params - (fun session_id -> - let record = get_record rpc session_id (List.assoc "uuid" params) in - let record = List.filter (fun field -> not field.hidden) record in - printer (Cli_printer.PTable [List.map print_field record]) - ) - in - - let p_get printer rpc session_id params : unit = - with_specified_database rpc session_id params - (fun session_id -> - let record = get_record rpc session_id (List.assoc "uuid" params) in 
- let param = List.assoc "param-name" params in - let x = field_lookup record param in - let std () = - printer (Cli_printer.PList [ safe_get_field x]) - in - if List.mem_assoc "param-key" params then - let key = List.assoc "param-key" params in - match x.get_map with - Some f -> - let result = - try List.assoc key (f ()) with _ -> failwith (Printf.sprintf "Key %s not found in map" key) in - printer (Cli_printer.PList [result]) - | None -> std () - else std () - ) - in - - let p_set (printer : printer) rpc session_id params = - let record = get_record rpc session_id (List.assoc "uuid" params) in - let set_params = List.filter (fun (p,_) -> not (List.mem p ("uuid"::stdparams))) params in - - let set_field (k,v) = - let field_type = get_field_type k record in - match field_type with - | Map s -> - let field=field_lookup record s in - let n = String.length s in - let key = String.sub k (n + 1) (String.length k - n - 1) in - let get_map = match field.get_map with - | Some x -> x - | None -> failwith (Printf.sprintf "Broken Client_records (field %s)" s) - in begin - (* If set_in_map is present, use it instead of using remove_from_map followed by add_to_map. *) - match field.set_in_map with - | Some set_in_map -> set_in_map key v - | None -> - let add_to_map = match field.add_to_map with Some f -> f | None -> failwith ("Map field '"^s^"' is read-only.") in - let remove_from_map = match field.remove_from_map with Some f -> f | None -> failwith (Printf.sprintf "Client_records broken (field %s)" s) in - let map = get_map () in - if List.mem_assoc key map then remove_from_map key; - add_to_map key v - end - | Set s -> failwith "Cannot param-set on set fields" - | Normal -> - let field=field_lookup record k in - let set = match field.set, field.add_to_map with - | Some f, _ -> f - | None, Some f -> failwith ("Field '"^k^"' is a map or set. 
use the 'name:key=value' syntax.") - | None, None -> failwith ("Field '"^k^"' is read-only.") in - try - set v - with - (Failure "int_of_string") -> failwith ("Parameter "^k^" must be an integer") - | (Failure "float_of_string") -> failwith ("Parameter "^k^" must be a floating-point number") - | (Invalid_argument "bool_of_string") -> failwith ("Parameter "^k^" must be a boolean (true or false)") - | e -> raise e - in - List.iter set_field set_params - in - - let p_add (printer : printer) rpc session_id params = - let record = get_record rpc session_id (List.assoc "uuid" params) in - let param_name = List.assoc "param-name" params in - let filter_params = List.filter (fun (p,_) -> not (List.mem p ("uuid"::"param-name"::"param-key"::stdparams))) params in - match field_lookup record param_name with - | { add_to_set = Some f } -> - if List.mem_assoc "param-key" params then - let key = List.assoc "param-key" params in - f key - else - failwith "When adding a key to a set, use the syntax: *-param-add param-name= param-key=" - | { add_to_map = Some f } -> List.iter (fun (k,x) -> f k x) filter_params - | { get_set = Some _; add_to_set=None } - | { get_map = Some _; add_to_map=None } -> - failwith "Parameter is read-only" - | _ -> failwith "Can only add to parameters of type Set or Map" - in - - let p_remove (printer : printer) rpc session_id params = - let record = get_record rpc session_id (List.assoc "uuid" params) in - let param_name = List.assoc "param-name" params in - let param_key = List.assoc "param-key" params in - match field_lookup record param_name with - | { get_set = Some g; remove_from_set = Some f } -> if List.mem param_key (g ()) then f param_key else failwith (Printf.sprintf "Key %s is not in the set" param_key) - | { get_map = Some g; remove_from_map = Some f } -> if List.mem_assoc param_key (g ()) then f param_key else failwith (Printf.sprintf "Key %s is not in map" param_key) - | { get_set = Some _; remove_from_set = None } - | { get_map = Some _; 
remove_from_map = None } -> failwith "Cannot remove parameters from read-only map" - | _ -> failwith "Can only remove from parameters of type Set or Map" - in - - let p_clear (printer : printer) rpc session_id params = - let record = get_record rpc session_id (List.assoc "uuid" params) in - let param_name = List.assoc "param-name" params in - match field_lookup record param_name with - | { get_set = Some f; remove_from_set = Some g } -> List.iter g (f ()) - | { get_map = Some f; remove_from_map = Some g } -> List.iter g (List.map fst (f ())) - | { set = Some f } -> (try f "" with _ -> failwith "Cannot clear this parameter") - | _ -> failwith "Can only clear RW parameters" - in - - let gen_frontend (rpc:rpc) (session_id:API.ref_session) = - let make_cmdtable_data (opname, reqd, optn, help, impl, std) = - (opname,{reqd=reqd; optn=optn; help=help; implementation=No_fd impl; flags=if std then [Standard] else []}) - in - try - let all = List.filter (fun x -> not x.hidden) (record rpc session_id (Ref.null)).fields in - let all_optn = List.map (fun r -> r.name) all in - let settable = List.map (fun r -> r.name) (List.filter (fun r -> r.set <> None) all) in - let settable = settable @ (List.map (fun r -> r.name ^ ":") (List.filter (fun r -> r.add_to_map <> None) all)) in - let settable = settable @ (List.map (fun r -> r.name ^ ":") (List.filter (fun r -> r.set_in_map <> None) all)) in - let addable = List.map (fun r -> r.name) (List.filter (fun r -> r.add_to_set <> None || r.add_to_map <> None) all) in - let clearable = List.map (fun r -> r.name) (List.filter (fun r -> r.set <> None || r.get_set <> None || r.get_map <> None) all) in - (* We need the names of the set and map filters *) - let sm_param_names = - let sets = List.filter (fun field -> field.get_set <> None) all in - List.map (fun field -> field.name^":contains") sets - in - let cli_name n = class_name^"-"^n in - let plural = if class_name="patch" then "patches" else class_name^"s" in - let ops = [(cli_name 
"list",[],"params"::"database:"::all_optn@sm_param_names, "Lists all the "^plural^", filtering on the optional arguments. To filter on map parameters, use the syntax 'map-param:key=value'",list,(class_name="vm" || class_name="network" || class_name="sr")); - (cli_name "param-list",["uuid"],["database:"],"Lists all the parameters of the object specified by the uuid.",p_list,false); - (cli_name "param-get",["uuid";"param-name"],["param-key";"database:"],"Gets the parameter specified of the object. If the parameter is a map of key=value pairs, use 'param-key=' to get the value associated with a particular key.",p_get,false)] in - let ops = if List.length settable > 0 then - (cli_name "param-set",["uuid";],settable,"Sets the parameter specified. If param-value is not given, the parameter is set to a null value. To set a (key,value) pair in a map parameter, use the syntax 'map-param:key=value'.",p_set,false)::ops - else ops in - let ops = if List.length addable > 0 then - ops @ [(cli_name "param-add",["uuid";"param-name"],["param-key"],"Adds to a set or map parameter. If the parameter is a set, use param-key=. 
If the parameter is a map, pass the values to add as 'key=value' pairs.",p_add,false); - (cli_name "param-remove",["uuid";"param-name";"param-key"],[],"Removes a member or a key,value pair from a set/map respectively.",p_remove,false)] - else ops in - let ops = if List.length clearable > 0 then - ops @ [(cli_name "param-clear",["uuid";"param-name"],[],"Clears the specified parameter (param-name can be "^(String.concat "," clearable)^").",p_clear,false)] - else ops in - List.map make_cmdtable_data ops - with _ -> [] - in - gen_frontend rpc session_id + let get_record2 rpc session_id x = + let r = record rpc session_id x in + r.fields + in + + let get_record rpc session_id uuid = + get_record2 rpc session_id (getbyuuid ~rpc ~session_id ~uuid) + in + + let list printer rpc session_id params : unit = + with_specified_database rpc session_id params + (fun session_id -> + let all = getallrecs ~rpc ~session_id ~expr:"true" in + let all_recs = List.map (fun (r,x) -> let record = record rpc session_id r in record.setrefrec (r,x); record) all in + + (* Filter on everything on the cmd line except params=... 
*) + let filter_params = List.filter (fun (p,_) -> not (List.mem p ("params"::stdparams))) params in + (* Filter out all params beginning with "database:" *) + let filter_params = List.filter (fun (p,_) -> not (String.startswith "database:" p)) filter_params in + (* Add in the default filters *) + let filter_params = def_filters @ filter_params in + (* Filter all the records *) + let records = List.fold_left filter_records_on_fields all_recs filter_params in + + let print_all = get_bool_param params "all" in + + let print_params = select_fields params (if print_all then all_recs else records) def_list_params in + let print_params = List.map (fun fields -> List.filter (fun field -> not field.hidden) fields) print_params in + let print_params = List.map (fun fields -> List.map (fun field -> if field.expensive then makeexpensivefield field else field) fields) print_params in + + printer (Cli_printer.PTable (List.map (List.map print_field) print_params)) + ) + in + + let p_list printer rpc session_id params : unit = + with_specified_database rpc session_id params + (fun session_id -> + let record = get_record rpc session_id (List.assoc "uuid" params) in + let record = List.filter (fun field -> not field.hidden) record in + printer (Cli_printer.PTable [List.map print_field record]) + ) + in + + let p_get printer rpc session_id params : unit = + with_specified_database rpc session_id params + (fun session_id -> + let record = get_record rpc session_id (List.assoc "uuid" params) in + let param = List.assoc "param-name" params in + let x = field_lookup record param in + let std () = + printer (Cli_printer.PList [ safe_get_field x]) + in + if List.mem_assoc "param-key" params then + let key = List.assoc "param-key" params in + match x.get_map with + Some f -> + let result = + try List.assoc key (f ()) with _ -> failwith (Printf.sprintf "Key %s not found in map" key) in + printer (Cli_printer.PList [result]) + | None -> std () + else std () + ) + in + + let p_set (printer : 
printer) rpc session_id params = + let record = get_record rpc session_id (List.assoc "uuid" params) in + let set_params = List.filter (fun (p,_) -> not (List.mem p ("uuid"::stdparams))) params in + + let set_field (k,v) = + let field_type = get_field_type k record in + match field_type with + | Map s -> + let field=field_lookup record s in + let n = String.length s in + let key = String.sub k (n + 1) (String.length k - n - 1) in + let get_map = match field.get_map with + | Some x -> x + | None -> failwith (Printf.sprintf "Broken Client_records (field %s)" s) + in begin + (* If set_in_map is present, use it instead of using remove_from_map followed by add_to_map. *) + match field.set_in_map with + | Some set_in_map -> set_in_map key v + | None -> + let add_to_map = match field.add_to_map with Some f -> f | None -> failwith ("Map field '"^s^"' is read-only.") in + let remove_from_map = match field.remove_from_map with Some f -> f | None -> failwith (Printf.sprintf "Client_records broken (field %s)" s) in + let map = get_map () in + if List.mem_assoc key map then remove_from_map key; + add_to_map key v + end + | Set s -> failwith "Cannot param-set on set fields" + | Normal -> + let field=field_lookup record k in + let set = match field.set, field.add_to_map with + | Some f, _ -> f + | None, Some f -> failwith ("Field '"^k^"' is a map or set. 
use the 'name:key=value' syntax.") + | None, None -> failwith ("Field '"^k^"' is read-only.") in + try + set v + with + (Failure "int_of_string") -> failwith ("Parameter "^k^" must be an integer") + | (Failure "float_of_string") -> failwith ("Parameter "^k^" must be a floating-point number") + | (Invalid_argument "bool_of_string") -> failwith ("Parameter "^k^" must be a boolean (true or false)") + | e -> raise e + in + List.iter set_field set_params + in + + let p_add (printer : printer) rpc session_id params = + let record = get_record rpc session_id (List.assoc "uuid" params) in + let param_name = List.assoc "param-name" params in + let filter_params = List.filter (fun (p,_) -> not (List.mem p ("uuid"::"param-name"::"param-key"::stdparams))) params in + match field_lookup record param_name with + | { add_to_set = Some f } -> + if List.mem_assoc "param-key" params then + let key = List.assoc "param-key" params in + f key + else + failwith "When adding a key to a set, use the syntax: *-param-add param-name= param-key=" + | { add_to_map = Some f } -> List.iter (fun (k,x) -> f k x) filter_params + | { get_set = Some _; add_to_set=None } + | { get_map = Some _; add_to_map=None } -> + failwith "Parameter is read-only" + | _ -> failwith "Can only add to parameters of type Set or Map" + in + + let p_remove (printer : printer) rpc session_id params = + let record = get_record rpc session_id (List.assoc "uuid" params) in + let param_name = List.assoc "param-name" params in + let param_key = List.assoc "param-key" params in + match field_lookup record param_name with + | { get_set = Some g; remove_from_set = Some f } -> if List.mem param_key (g ()) then f param_key else failwith (Printf.sprintf "Key %s is not in the set" param_key) + | { get_map = Some g; remove_from_map = Some f } -> if List.mem_assoc param_key (g ()) then f param_key else failwith (Printf.sprintf "Key %s is not in map" param_key) + | { get_set = Some _; remove_from_set = None } + | { get_map = Some _; 
remove_from_map = None } -> failwith "Cannot remove parameters from read-only map" + | _ -> failwith "Can only remove from parameters of type Set or Map" + in + + let p_clear (printer : printer) rpc session_id params = + let record = get_record rpc session_id (List.assoc "uuid" params) in + let param_name = List.assoc "param-name" params in + match field_lookup record param_name with + | { get_set = Some f; remove_from_set = Some g } -> List.iter g (f ()) + | { get_map = Some f; remove_from_map = Some g } -> List.iter g (List.map fst (f ())) + | { set = Some f } -> (try f "" with _ -> failwith "Cannot clear this parameter") + | _ -> failwith "Can only clear RW parameters" + in + + let gen_frontend (rpc:rpc) (session_id:API.ref_session) = + let make_cmdtable_data (opname, reqd, optn, help, impl, std) = + (opname,{reqd=reqd; optn=optn; help=help; implementation=No_fd impl; flags=if std then [Standard] else []}) + in + try + let all = List.filter (fun x -> not x.hidden) (record rpc session_id (Ref.null)).fields in + let all_optn = List.map (fun r -> r.name) all in + let settable = List.map (fun r -> r.name) (List.filter (fun r -> r.set <> None) all) in + let settable = settable @ (List.map (fun r -> r.name ^ ":") (List.filter (fun r -> r.add_to_map <> None) all)) in + let settable = settable @ (List.map (fun r -> r.name ^ ":") (List.filter (fun r -> r.set_in_map <> None) all)) in + let addable = List.map (fun r -> r.name) (List.filter (fun r -> r.add_to_set <> None || r.add_to_map <> None) all) in + let clearable = List.map (fun r -> r.name) (List.filter (fun r -> r.set <> None || r.get_set <> None || r.get_map <> None) all) in + (* We need the names of the set and map filters *) + let sm_param_names = + let sets = List.filter (fun field -> field.get_set <> None) all in + List.map (fun field -> field.name^":contains") sets + in + let cli_name n = class_name^"-"^n in + let plural = if class_name="patch" then "patches" else class_name^"s" in + let ops = [(cli_name 
"list",[],"params"::"database:"::all_optn@sm_param_names, "Lists all the "^plural^", filtering on the optional arguments. To filter on map parameters, use the syntax 'map-param:key=value'",list,(class_name="vm" || class_name="network" || class_name="sr")); + (cli_name "param-list",["uuid"],["database:"],"Lists all the parameters of the object specified by the uuid.",p_list,false); + (cli_name "param-get",["uuid";"param-name"],["param-key";"database:"],"Gets the parameter specified of the object. If the parameter is a map of key=value pairs, use 'param-key=' to get the value associated with a particular key.",p_get,false)] in + let ops = if List.length settable > 0 then + (cli_name "param-set",["uuid";],settable,"Sets the parameter specified. If param-value is not given, the parameter is set to a null value. To set a (key,value) pair in a map parameter, use the syntax 'map-param:key=value'.",p_set,false)::ops + else ops in + let ops = if List.length addable > 0 then + ops @ [(cli_name "param-add",["uuid";"param-name"],["param-key"],"Adds to a set or map parameter. If the parameter is a set, use param-key=. 
If the parameter is a map, pass the values to add as 'key=value' pairs.",p_add,false); + (cli_name "param-remove",["uuid";"param-name";"param-key"],[],"Removes a member or a key,value pair from a set/map respectively.",p_remove,false)] + else ops in + let ops = if List.length clearable > 0 then + ops @ [(cli_name "param-clear",["uuid";"param-name"],[],"Clears the specified parameter (param-name can be "^(String.concat "," clearable)^").",p_clear,false)] + else ops in + List.map make_cmdtable_data ops + with _ -> [] + in + gen_frontend rpc session_id let gen_cmds rpc session_id = - (make_param_funs (Client.Pool.get_all) (Client.Pool.get_all_records_where) (Client.Pool.get_by_uuid) (pool_record) "pool" [] ["uuid";"name-label";"name-description";"master";"default-SR"] rpc session_id) @ - (make_param_funs (Client.PIF.get_all) (Client.PIF.get_all_records_where) (Client.PIF.get_by_uuid) (pif_record) "pif" [] ["uuid";"device";"VLAN";"mac";"network-uuid"; "currently-attached"] rpc session_id) @ - (make_param_funs (Client.Bond.get_all) (Client.Bond.get_all_records_where) (Client.Bond.get_by_uuid) (bond_record) "bond" [] ["uuid";"master";"slaves"] rpc session_id) @ - (make_param_funs (Client.VLAN.get_all) (Client.VLAN.get_all_records_where) (Client.VLAN.get_by_uuid) (vlan_record) "vlan" [] ["uuid";"tagged-PIF";"untagged-PIF"; "tag"] rpc session_id) @ - (make_param_funs (Client.Tunnel.get_all) (Client.Tunnel.get_all_records_where) (Client.Tunnel.get_by_uuid) (tunnel_record) "tunnel" [] ["uuid";"transport-PIF";"access-PIF";"status"] rpc session_id) @ - (make_param_funs (Client.VIF.get_all) (Client.VIF.get_all_records_where) (Client.VIF.get_by_uuid) (vif_record) "vif" [] ["uuid";"device";"vm-uuid";"network-uuid"] rpc session_id) @ - (make_param_funs (Client.Network.get_all) (Client.Network.get_all_records_where) (Client.Network.get_by_uuid) (net_record) "network" [] ["uuid";"name-label";"name-description";"bridge"] rpc session_id) @ - (make_param_funs (Client.Console.get_all) 
(Client.Console.get_all_records_where) (Client.Console.get_by_uuid) (console_record) "console" [] ["uuid";"vm-uuid";"vm-name-label";"protocol";"location"] rpc session_id) @ - (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "vm" [("is-a-template","false")] ["name-label";"uuid";"power-state"] rpc session_id) @ - (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "template" [("is-a-template","true");("is-a-snapshot","false")] ["name-label";"name-description";"uuid"] rpc session_id) @ - (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "snapshot" [("is-a-snapshot","true")] ["name-label";"name-description";"uuid";"snapshot_of"; "snapshot_time"] rpc session_id) @ - (make_param_funs (Client.Host.get_all) (Client.Host.get_all_records_where) (Client.Host.get_by_uuid) (host_record) "host" [] ["uuid";"name-label";"name-description"] rpc session_id) @ - (make_param_funs (Client.Host_cpu.get_all) (Client.Host_cpu.get_all_records_where) (Client.Host_cpu.get_by_uuid) (host_cpu_record) "host-cpu" [] ["uuid";"number";"vendor";"speed";"utilisation"] rpc session_id) @ - (make_param_funs (Client.Host_crashdump.get_all) (Client.Host_crashdump.get_all_records_where) (Client.Host_crashdump.get_by_uuid) (host_crashdump_record) "host-crashdump" [] ["uuid";"host";"timestamp";"size"] rpc session_id) @ - (make_param_funs (Client.Pool_patch.get_all) (Client.Pool_patch.get_all_records_where) (Client.Pool_patch.get_by_uuid) (pool_patch_record) "patch" [] ["uuid"; "name-label"; "name-description"; "size"; "hosts"; "after-apply-guidance"] rpc session_id) @ - (make_param_funs (Client.VDI.get_all) (Client.VDI.get_all_records_where) (Client.VDI.get_by_uuid) (vdi_record) "vdi" [] ["uuid";"name-label";"name-description";"virtual-size";"read-only";"sharable";"sr-uuid"] rpc session_id) @ - (make_param_funs (Client.VBD.get_all) 
(Client.VBD.get_all_records_where) (Client.VBD.get_by_uuid) (vbd_record) "vbd" [] ["uuid";"vm-uuid";"vm-name-label";"vdi-uuid";"device"; "empty"] rpc session_id) @ - (make_param_funs (Client.SR.get_all) (Client.SR.get_all_records_where) (Client.SR.get_by_uuid) (sr_record) "sr" [] ["uuid";"name-label";"name-description";"host";"type";"content-type"] rpc session_id) @ - (make_param_funs (Client.SM.get_all) (Client.SM.get_all_records_where) (Client.SM.get_by_uuid) (sm_record) "sm" [] ["uuid";"type"; "name-label";"name-description";"vendor"; "copyright"; "configuration"] rpc session_id) @ - (make_param_funs (Client.PBD.get_all) (Client.PBD.get_all_records_where) (Client.PBD.get_by_uuid) (pbd_record) "pbd" [] ["uuid";"host-uuid";"sr-uuid";"device-config";"currently-attached"] rpc session_id) @ - (make_param_funs (Client.Task.get_all) (Client.Task.get_all_records_where) (Client.Task.get_by_uuid) (task_record) "task" [] ["uuid";"name-label";"name-description";"status";"progress"] rpc session_id) @ - (make_param_funs (Client.Subject.get_all) (Client.Subject.get_all_records_where) (Client.Subject.get_by_uuid) (subject_record) "subject" [] ["uuid";"subject-identifier";"other-config";"roles"] rpc session_id) @ - (make_param_funs (Client.Role.get_all) (fun ~rpc ~session_id ~expr -> Client.Role.get_all_records_where ~rpc ~session_id ~expr:Xapi_role.expr_no_permissions) - (Client.Role.get_by_uuid) (role_record) "role" [] ["uuid";"name";"description";"subroles"] rpc session_id) @ - (* + (make_param_funs (Client.Pool.get_all) (Client.Pool.get_all_records_where) (Client.Pool.get_by_uuid) (pool_record) "pool" [] ["uuid";"name-label";"name-description";"master";"default-SR"] rpc session_id) @ + (make_param_funs (Client.PIF.get_all) (Client.PIF.get_all_records_where) (Client.PIF.get_by_uuid) (pif_record) "pif" [] ["uuid";"device";"VLAN";"mac";"network-uuid"; "currently-attached"] rpc session_id) @ + (make_param_funs (Client.Bond.get_all) (Client.Bond.get_all_records_where) 
(Client.Bond.get_by_uuid) (bond_record) "bond" [] ["uuid";"master";"slaves"] rpc session_id) @ + (make_param_funs (Client.VLAN.get_all) (Client.VLAN.get_all_records_where) (Client.VLAN.get_by_uuid) (vlan_record) "vlan" [] ["uuid";"tagged-PIF";"untagged-PIF"; "tag"] rpc session_id) @ + (make_param_funs (Client.Tunnel.get_all) (Client.Tunnel.get_all_records_where) (Client.Tunnel.get_by_uuid) (tunnel_record) "tunnel" [] ["uuid";"transport-PIF";"access-PIF";"status"] rpc session_id) @ + (make_param_funs (Client.VIF.get_all) (Client.VIF.get_all_records_where) (Client.VIF.get_by_uuid) (vif_record) "vif" [] ["uuid";"device";"vm-uuid";"network-uuid"] rpc session_id) @ + (make_param_funs (Client.Network.get_all) (Client.Network.get_all_records_where) (Client.Network.get_by_uuid) (net_record) "network" [] ["uuid";"name-label";"name-description";"bridge"] rpc session_id) @ + (make_param_funs (Client.Console.get_all) (Client.Console.get_all_records_where) (Client.Console.get_by_uuid) (console_record) "console" [] ["uuid";"vm-uuid";"vm-name-label";"protocol";"location"] rpc session_id) @ + (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "vm" [("is-a-template","false")] ["name-label";"uuid";"power-state"] rpc session_id) @ + (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "template" [("is-a-template","true");("is-a-snapshot","false")] ["name-label";"name-description";"uuid"] rpc session_id) @ + (make_param_funs (Client.VM.get_all) (Client.VM.get_all_records_where) (Client.VM.get_by_uuid) (vm_record) "snapshot" [("is-a-snapshot","true")] ["name-label";"name-description";"uuid";"snapshot_of"; "snapshot_time"] rpc session_id) @ + (make_param_funs (Client.Host.get_all) (Client.Host.get_all_records_where) (Client.Host.get_by_uuid) (host_record) "host" [] ["uuid";"name-label";"name-description"] rpc session_id) @ + (make_param_funs (Client.Host_cpu.get_all) 
(Client.Host_cpu.get_all_records_where) (Client.Host_cpu.get_by_uuid) (host_cpu_record) "host-cpu" [] ["uuid";"number";"vendor";"speed";"utilisation"] rpc session_id) @ + (make_param_funs (Client.Host_crashdump.get_all) (Client.Host_crashdump.get_all_records_where) (Client.Host_crashdump.get_by_uuid) (host_crashdump_record) "host-crashdump" [] ["uuid";"host";"timestamp";"size"] rpc session_id) @ + (make_param_funs (Client.Pool_patch.get_all) (Client.Pool_patch.get_all_records_where) (Client.Pool_patch.get_by_uuid) (pool_patch_record) "patch" [] ["uuid"; "name-label"; "name-description"; "size"; "hosts"; "after-apply-guidance"] rpc session_id) @ + (make_param_funs (Client.VDI.get_all) (Client.VDI.get_all_records_where) (Client.VDI.get_by_uuid) (vdi_record) "vdi" [] ["uuid";"name-label";"name-description";"virtual-size";"read-only";"sharable";"sr-uuid"] rpc session_id) @ + (make_param_funs (Client.VBD.get_all) (Client.VBD.get_all_records_where) (Client.VBD.get_by_uuid) (vbd_record) "vbd" [] ["uuid";"vm-uuid";"vm-name-label";"vdi-uuid";"device"; "empty"] rpc session_id) @ + (make_param_funs (Client.SR.get_all) (Client.SR.get_all_records_where) (Client.SR.get_by_uuid) (sr_record) "sr" [] ["uuid";"name-label";"name-description";"host";"type";"content-type"] rpc session_id) @ + (make_param_funs (Client.SM.get_all) (Client.SM.get_all_records_where) (Client.SM.get_by_uuid) (sm_record) "sm" [] ["uuid";"type"; "name-label";"name-description";"vendor"; "copyright"; "configuration"] rpc session_id) @ + (make_param_funs (Client.PBD.get_all) (Client.PBD.get_all_records_where) (Client.PBD.get_by_uuid) (pbd_record) "pbd" [] ["uuid";"host-uuid";"sr-uuid";"device-config";"currently-attached"] rpc session_id) @ + (make_param_funs (Client.Task.get_all) (Client.Task.get_all_records_where) (Client.Task.get_by_uuid) (task_record) "task" [] ["uuid";"name-label";"name-description";"status";"progress"] rpc session_id) @ + (make_param_funs (Client.Subject.get_all) 
(Client.Subject.get_all_records_where) (Client.Subject.get_by_uuid) (subject_record) "subject" [] ["uuid";"subject-identifier";"other-config";"roles"] rpc session_id) @ + (make_param_funs (Client.Role.get_all) (fun ~rpc ~session_id ~expr -> Client.Role.get_all_records_where ~rpc ~session_id ~expr:Xapi_role.expr_no_permissions) + (Client.Role.get_by_uuid) (role_record) "role" [] ["uuid";"name";"description";"subroles"] rpc session_id) @ + (* (make_param_funs (Client.Blob.get_all) (Client.Blob.get_all_records_where) (Client.Blob.get_by_uuid) (blob_record) "blob" [] ["uuid";"mime-type"] rpc session_id) @ *) - (make_param_funs (Client.Message.get_all) (Client.Message.get_all_records_where) (Client.Message.get_by_uuid) (message_record) "message" [] [] rpc session_id) @ - (make_param_funs (Client.Secret.get_all) (Client.Secret.get_all_records_where) (Client.Secret.get_by_uuid) (secret_record) "secret" [] [] rpc session_id) @ - (make_param_funs (Client.VM_appliance.get_all) (Client.VM_appliance.get_all_records_where) (Client.VM_appliance.get_by_uuid) (vm_appliance_record) "appliance" [] [] rpc session_id) @ - (make_param_funs (Client.PGPU.get_all) (Client.PGPU.get_all_records_where) (Client.PGPU.get_by_uuid) (pgpu_record) "pgpu" [] ["uuid";"vendor-name";"device-name";"gpu-group-uuid"] rpc session_id) @ - (make_param_funs (Client.GPU_group.get_all) (Client.GPU_group.get_all_records_where) (Client.GPU_group.get_by_uuid) (gpu_group_record) "gpu-group" [] ["uuid";"name-label";"name-description"] rpc session_id) @ - (make_param_funs (Client.VGPU.get_all) (Client.VGPU.get_all_records_where) (Client.VGPU.get_by_uuid) (vgpu_record) "vgpu" [] ["uuid";"vm-uuid";"device";"gpu-group-uuid"] rpc session_id) @ - (make_param_funs (Client.VGPU_type.get_all) (Client.VGPU_type.get_all_records_where) (Client.VGPU_type.get_by_uuid) (vgpu_type_record) "vgpu-type" [] ["uuid";"vendor-name";"model-name";"max-resolution";"max-heads"] rpc session_id) @ - (make_param_funs (Client.DR_task.get_all) 
(Client.DR_task.get_all_records_where) (Client.DR_task.get_by_uuid) (dr_task_record) "drtask" [] [] rpc session_id) - (* + (make_param_funs (Client.Message.get_all) (Client.Message.get_all_records_where) (Client.Message.get_by_uuid) (message_record) "message" [] [] rpc session_id) @ + (make_param_funs (Client.Secret.get_all) (Client.Secret.get_all_records_where) (Client.Secret.get_by_uuid) (secret_record) "secret" [] [] rpc session_id) @ + (make_param_funs (Client.VM_appliance.get_all) (Client.VM_appliance.get_all_records_where) (Client.VM_appliance.get_by_uuid) (vm_appliance_record) "appliance" [] [] rpc session_id) @ + (make_param_funs (Client.PGPU.get_all) (Client.PGPU.get_all_records_where) (Client.PGPU.get_by_uuid) (pgpu_record) "pgpu" [] ["uuid";"vendor-name";"device-name";"gpu-group-uuid"] rpc session_id) @ + (make_param_funs (Client.GPU_group.get_all) (Client.GPU_group.get_all_records_where) (Client.GPU_group.get_by_uuid) (gpu_group_record) "gpu-group" [] ["uuid";"name-label";"name-description"] rpc session_id) @ + (make_param_funs (Client.VGPU.get_all) (Client.VGPU.get_all_records_where) (Client.VGPU.get_by_uuid) (vgpu_record) "vgpu" [] ["uuid";"vm-uuid";"device";"gpu-group-uuid"] rpc session_id) @ + (make_param_funs (Client.VGPU_type.get_all) (Client.VGPU_type.get_all_records_where) (Client.VGPU_type.get_by_uuid) (vgpu_type_record) "vgpu-type" [] ["uuid";"vendor-name";"model-name";"max-resolution";"max-heads"] rpc session_id) @ + (make_param_funs (Client.DR_task.get_all) (Client.DR_task.get_all_records_where) (Client.DR_task.get_by_uuid) (dr_task_record) "drtask" [] [] rpc session_id) + (* @ (make_param_funs (Client.Alert.get_all) (Client.Alert.get_all_records_where) (Client.Alert.get_by_uuid) (alert_record) "alert" [] ["uuid";"message";"level";"timestamp";"system";"task"] rpc session_id) *) @@ -813,849 +813,849 @@ let gen_cmds rpc session_id = * ("description","name-description"); * ("vcpus","vcpus-number"); * ("memory_set","memory-dynamic-max");] - *) 
+*) let message_create printer rpc session_id params = - let body = List.assoc "body" params in - let priority = try Int64.of_string (List.assoc "priority" params) with _ -> failwith "Priority field should be an integer" in - let name = List.assoc "name" params in - let uuid,cls = - if (List.mem_assoc "vm-uuid" params) then - List.assoc "vm-uuid" params, `VM - else if (List.mem_assoc "pool-uuid" params) then - List.assoc "pool-uuid" params, `Pool - else if (List.mem_assoc "sr-uuid" params) then - List.assoc "sr-uuid" params, `SR - else if (List.mem_assoc "host-uuid" params) then - List.assoc "host-uuid" params, `Host - else - raise (Cli_util.Cli_failure "Need one of: vm-uuid, host-uuid, sr-uuid or pool-uuid") - in - ignore(Client.Message.create rpc session_id name priority cls uuid body) + let body = List.assoc "body" params in + let priority = try Int64.of_string (List.assoc "priority" params) with _ -> failwith "Priority field should be an integer" in + let name = List.assoc "name" params in + let uuid,cls = + if (List.mem_assoc "vm-uuid" params) then + List.assoc "vm-uuid" params, `VM + else if (List.mem_assoc "pool-uuid" params) then + List.assoc "pool-uuid" params, `Pool + else if (List.mem_assoc "sr-uuid" params) then + List.assoc "sr-uuid" params, `SR + else if (List.mem_assoc "host-uuid" params) then + List.assoc "host-uuid" params, `Host + else + raise (Cli_util.Cli_failure "Need one of: vm-uuid, host-uuid, sr-uuid or pool-uuid") + in + ignore(Client.Message.create rpc session_id name priority cls uuid body) let message_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let message = Client.Message.get_by_uuid rpc session_id uuid in - Client.Message.destroy rpc session_id message + let uuid = List.assoc "uuid" params in + let message = Client.Message.get_by_uuid rpc session_id uuid in + Client.Message.destroy rpc session_id message (* Pool operations *) let get_pool_with_default rpc session_id params key = - if 
List.mem_assoc key params then - (* User provided a pool uuid. *) - let pool_uuid = List.assoc key params in - Client.Pool.get_by_uuid rpc session_id pool_uuid - else - (* User didn't provide a pool uuid: let's fetch the default pool. *) - List.hd (Client.Pool.get_all rpc session_id) + if List.mem_assoc key params then + (* User provided a pool uuid. *) + let pool_uuid = List.assoc key params in + Client.Pool.get_by_uuid rpc session_id pool_uuid + else + (* User didn't provide a pool uuid: let's fetch the default pool. *) + List.hd (Client.Pool.get_all rpc session_id) let pool_enable_binary_storage printer rpc session_id params = - Client.Pool.enable_binary_storage rpc session_id + Client.Pool.enable_binary_storage rpc session_id let pool_disable_binary_storage printer rpc session_id params = - Client.Pool.disable_binary_storage rpc session_id + Client.Pool.disable_binary_storage rpc session_id let pool_ha_enable printer rpc session_id params = - let config = read_map_params "ha-config" params in - let uuids = if List.mem_assoc "heartbeat-sr-uuids" params then String.split ',' (List.assoc "heartbeat-sr-uuids" params) else [] in - let srs = List.map (fun uuid -> Client.SR.get_by_uuid rpc session_id uuid) uuids in - Client.Pool.enable_ha rpc session_id srs config + let config = read_map_params "ha-config" params in + let uuids = if List.mem_assoc "heartbeat-sr-uuids" params then String.split ',' (List.assoc "heartbeat-sr-uuids" params) else [] in + let srs = List.map (fun uuid -> Client.SR.get_by_uuid rpc session_id uuid) uuids in + Client.Pool.enable_ha rpc session_id srs config let pool_ha_disable printer rpc session_id params = - Client.Pool.disable_ha rpc session_id + Client.Pool.disable_ha rpc session_id let pool_ha_prevent_restarts_for printer rpc session_id params = - let seconds = Int64.of_string (List.assoc "seconds" params) in - Client.Pool.ha_prevent_restarts_for rpc session_id seconds + let seconds = Int64.of_string (List.assoc "seconds" params) in + 
Client.Pool.ha_prevent_restarts_for rpc session_id seconds let pool_ha_compute_max_host_failures_to_tolerate printer rpc session_id params = - let n = Client.Pool.ha_compute_max_host_failures_to_tolerate rpc session_id in - printer (Cli_printer.PList [ Int64.to_string n ]) + let n = Client.Pool.ha_compute_max_host_failures_to_tolerate rpc session_id in + printer (Cli_printer.PList [ Int64.to_string n ]) let pool_ha_compute_hypothetical_max_host_failures_to_tolerate printer rpc session_id params = - (* Walk through the params in order constructing a VM -> restart_priority map *) - let vms = List.map snd (List.filter (fun (k, _) -> k = "vm-uuid") params) - and pri = List.map snd (List.filter (fun (k, _) -> k = "restart-priority") params) in - if List.length vms <> (List.length pri) then failwith "Call requires an equal number of vm-uuid and restart-priority arguments"; - let vms = List.map (fun uuid -> Client.VM.get_by_uuid rpc session_id uuid) vms in - let n = Client.Pool.ha_compute_hypothetical_max_host_failures_to_tolerate rpc session_id (List.combine vms pri) in - printer (Cli_printer.PList [ Int64.to_string n ]) + (* Walk through the params in order constructing a VM -> restart_priority map *) + let vms = List.map snd (List.filter (fun (k, _) -> k = "vm-uuid") params) + and pri = List.map snd (List.filter (fun (k, _) -> k = "restart-priority") params) in + if List.length vms <> (List.length pri) then failwith "Call requires an equal number of vm-uuid and restart-priority arguments"; + let vms = List.map (fun uuid -> Client.VM.get_by_uuid rpc session_id uuid) vms in + let n = Client.Pool.ha_compute_hypothetical_max_host_failures_to_tolerate rpc session_id (List.combine vms pri) in + printer (Cli_printer.PList [ Int64.to_string n ]) let pool_ha_compute_vm_failover_plan printer rpc session_id params = - let host_uuids = String.split ',' (List.assoc "host-uuids" params) in - let hosts = List.map (fun uuid -> Client.Host.get_by_uuid rpc session_id uuid) host_uuids in 
- (* For now select all VMs resident on the given hosts *) - let vms = List.concat (List.map (fun host -> Client.Host.get_resident_VMs rpc session_id host) hosts) in - let vms = List.filter (fun vm -> not(Client.VM.get_is_control_domain rpc session_id vm)) vms in - let plan = Client.Pool.ha_compute_vm_failover_plan rpc session_id hosts vms in - let table = List.map (fun (vm, result) -> - Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), - if List.mem_assoc "host" result then begin - let host = Ref.of_string (List.assoc "host" result) in - Printf.sprintf "%s (%s)" (Client.Host.get_uuid rpc session_id host) (Client.Host.get_name_label rpc session_id host) - end else if List.mem_assoc "error_code" result then begin - List.assoc "error_code" result - end else "UNKNOWN") plan in - printer (Cli_printer.PTable [ ("VM", "Destination Host or Error") :: table ]) + let host_uuids = String.split ',' (List.assoc "host-uuids" params) in + let hosts = List.map (fun uuid -> Client.Host.get_by_uuid rpc session_id uuid) host_uuids in + (* For now select all VMs resident on the given hosts *) + let vms = List.concat (List.map (fun host -> Client.Host.get_resident_VMs rpc session_id host) hosts) in + let vms = List.filter (fun vm -> not(Client.VM.get_is_control_domain rpc session_id vm)) vms in + let plan = Client.Pool.ha_compute_vm_failover_plan rpc session_id hosts vms in + let table = List.map (fun (vm, result) -> + Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), + if List.mem_assoc "host" result then begin + let host = Ref.of_string (List.assoc "host" result) in + Printf.sprintf "%s (%s)" (Client.Host.get_uuid rpc session_id host) (Client.Host.get_name_label rpc session_id host) + end else if List.mem_assoc "error_code" result then begin + List.assoc "error_code" result + end else "UNKNOWN") plan in + printer (Cli_printer.PTable [ ("VM", "Destination Host or 
Error") :: table ]) let host_ha_xapi_healthcheck fd printer rpc session_id params = - try - let result = Client.Host.ha_xapi_healthcheck rpc session_id in - if not(result) then begin - marshal fd (Command (PrintStderr "Host.ha_xapi_healthcheck reports false\n")); - raise (ExitWithError 2) (* comms failure exits with error 1 in the thin CLI itself *) - end; - marshal fd (Command (Print "xapi is healthy.")) - with e -> - marshal fd (Command (PrintStderr (Printf.sprintf "Host.ha_xapi_healthcheck threw exception: %s\n" (ExnHelper.string_of_exn e)))); - raise (ExitWithError 3) + try + let result = Client.Host.ha_xapi_healthcheck rpc session_id in + if not(result) then begin + marshal fd (Command (PrintStderr "Host.ha_xapi_healthcheck reports false\n")); + raise (ExitWithError 2) (* comms failure exits with error 1 in the thin CLI itself *) + end; + marshal fd (Command (Print "xapi is healthy.")) + with e -> + marshal fd (Command (PrintStderr (Printf.sprintf "Host.ha_xapi_healthcheck threw exception: %s\n" (ExnHelper.string_of_exn e)))); + raise (ExitWithError 3) let pool_sync_database printer rpc session_id params = - Client.Pool.sync_database rpc session_id + Client.Pool.sync_database rpc session_id let pool_designate_new_master printer rpc session_id params = - let host_uuid=List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - Client.Pool.designate_new_master rpc session_id host + let host_uuid=List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + Client.Pool.designate_new_master rpc session_id host let pool_join printer rpc session_id params = - try - let force = get_bool_param params "force" in - if force then - Client.Pool.join_force ~rpc ~session_id - ~master_address:(List.assoc "master-address" params) - ~master_username:(List.assoc "master-username" params) - ~master_password:(List.assoc "master-password" params) - else - Client.Pool.join ~rpc ~session_id - 
~master_address:(List.assoc "master-address" params) - ~master_username:(List.assoc "master-username" params) - ~master_password:(List.assoc "master-password" params); - printer (Cli_printer.PList ["Host agent will restart and attempt to join pool in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) - with - | Api_errors.Server_error(code, params) when code=Api_errors.pool_joining_host_connection_failed -> - printer (Cli_printer.PList ["Host cannot contact destination host: connection refused."; - "Check destination host has services running and accessible from this host."]) + try + let force = get_bool_param params "force" in + if force then + Client.Pool.join_force ~rpc ~session_id + ~master_address:(List.assoc "master-address" params) + ~master_username:(List.assoc "master-username" params) + ~master_password:(List.assoc "master-password" params) + else + Client.Pool.join ~rpc ~session_id + ~master_address:(List.assoc "master-address" params) + ~master_username:(List.assoc "master-username" params) + ~master_password:(List.assoc "master-password" params); + printer (Cli_printer.PList ["Host agent will restart and attempt to join pool in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) + with + | Api_errors.Server_error(code, params) when code=Api_errors.pool_joining_host_connection_failed -> + printer (Cli_printer.PList ["Host cannot contact destination host: connection refused."; + "Check destination host has services running and accessible from this host."]) let pool_eject fd printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let host=Client.Host.get_by_uuid rpc session_id host_uuid in - let force = get_bool_param params "force" in - - let go () = - Client.Pool.eject ~rpc ~session_id ~host; - printer (Cli_printer.PList ["Specified host will attempt to restart as a master of a new pool in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) in - - if force - then go () - else begin - - (* Best-effort 
attempt to warn the user that VDIs in local SRs are going to be lost. *) - let warnings = - try - (* Find local SRs *) - let pbds = Client.Host.get_PBDs rpc session_id host in - (* Find the subset of SRs which cannot be seen from other hosts *) - let srs = List.concat - (List.map - (fun pbd -> - try - let sr = Client.PBD.get_SR rpc session_id pbd in - let other_pbds = Client.SR.get_PBDs rpc session_id sr in - let other_hosts = List.map (fun pbd -> Client.PBD.get_host rpc session_id pbd) other_pbds in - let other_hosts_than_me = List.filter (fun other -> other <> host) other_hosts in - if other_hosts_than_me = [] - then [ sr ] else [] - with _ -> []) pbds) in - let warnings = ref [] in - List.iter - (fun sr -> - try - let vdis = Client.SR.get_VDIs rpc session_id sr in - List.iter - (fun vdi -> - try - let uuid = Client.VDI.get_uuid rpc session_id vdi - and name_label = Client.VDI.get_name_label rpc session_id vdi in - warnings := Printf.sprintf "VDI: %s (%s)" uuid name_label :: !warnings - with _ -> () - ) vdis - with _ -> () - ) srs; - !warnings - with _ -> [] - in - - marshal fd (Command (Print "WARNING: Ejecting a host from the pool will reinitialise that host's local SRs.")); - marshal fd (Command (Print "WARNING: Any data contained with the local SRs will be lost.")); - if warnings <> [] then begin - marshal fd (Command (Print "The following VDI objects will be destroyed:")); - List.iter (fun msg -> marshal fd (Command (Print msg))) warnings - end; - if user_says_yes fd - then go () - end + let host_uuid = List.assoc "host-uuid" params in + let host=Client.Host.get_by_uuid rpc session_id host_uuid in + let force = get_bool_param params "force" in + + let go () = + Client.Pool.eject ~rpc ~session_id ~host; + printer (Cli_printer.PList ["Specified host will attempt to restart as a master of a new pool in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) in + + if force + then go () + else begin + + (* Best-effort attempt to warn the user that VDIs in 
local SRs are going to be lost. *) + let warnings = + try + (* Find local SRs *) + let pbds = Client.Host.get_PBDs rpc session_id host in + (* Find the subset of SRs which cannot be seen from other hosts *) + let srs = List.concat + (List.map + (fun pbd -> + try + let sr = Client.PBD.get_SR rpc session_id pbd in + let other_pbds = Client.SR.get_PBDs rpc session_id sr in + let other_hosts = List.map (fun pbd -> Client.PBD.get_host rpc session_id pbd) other_pbds in + let other_hosts_than_me = List.filter (fun other -> other <> host) other_hosts in + if other_hosts_than_me = [] + then [ sr ] else [] + with _ -> []) pbds) in + let warnings = ref [] in + List.iter + (fun sr -> + try + let vdis = Client.SR.get_VDIs rpc session_id sr in + List.iter + (fun vdi -> + try + let uuid = Client.VDI.get_uuid rpc session_id vdi + and name_label = Client.VDI.get_name_label rpc session_id vdi in + warnings := Printf.sprintf "VDI: %s (%s)" uuid name_label :: !warnings + with _ -> () + ) vdis + with _ -> () + ) srs; + !warnings + with _ -> [] + in + + marshal fd (Command (Print "WARNING: Ejecting a host from the pool will reinitialise that host's local SRs.")); + marshal fd (Command (Print "WARNING: Any data contained with the local SRs will be lost.")); + if warnings <> [] then begin + marshal fd (Command (Print "The following VDI objects will be destroyed:")); + List.iter (fun msg -> marshal fd (Command (Print msg))) warnings + end; + if user_says_yes fd + then go () + end let pool_emergency_reset_master printer rpc session_id params = - let master_address = List.assoc "master-address" params in - Client.Pool.emergency_reset_master ~rpc ~session_id ~master_address; - printer (Cli_printer.PList ["Host agent will restart and become slave of "^master_address^" in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) + let master_address = List.assoc "master-address" params in + Client.Pool.emergency_reset_master ~rpc ~session_id ~master_address; + printer (Cli_printer.PList ["Host 
agent will restart and become slave of "^master_address^" in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) let pool_emergency_transition_to_master printer rpc session_id params = - Client.Pool.emergency_transition_to_master ~rpc ~session_id; - printer (Cli_printer.PList ["Host agent will restart and transition to master in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) + Client.Pool.emergency_transition_to_master ~rpc ~session_id; + printer (Cli_printer.PList ["Host agent will restart and transition to master in "^(string_of_float !Xapi_globs.fuse_time)^" seconds..."]) let pool_recover_slaves printer rpc session_id params = - let hosts = Client.Pool.recover_slaves ~rpc ~session_id in - let host_uuids = List.map (fun href -> Client.Host.get_uuid rpc session_id href) hosts in - printer (Cli_printer.PList host_uuids) + let hosts = Client.Pool.recover_slaves ~rpc ~session_id in + let host_uuids = List.map (fun href -> Client.Host.get_uuid rpc session_id href) hosts in + printer (Cli_printer.PList host_uuids) let pool_initialize_wlb printer rpc session_id params = - let wlb_url = List.assoc "wlb_url" params in - let wlb_username = List.assoc "wlb_username" params in - let wlb_password = List.assoc "wlb_password" params in - let xenserver_username = List.assoc "xenserver_username" params in - let xenserver_password = List.assoc "xenserver_password" params in - Client.Pool.initialize_wlb ~rpc ~session_id ~wlb_url ~wlb_username ~wlb_password ~xenserver_username ~xenserver_password + let wlb_url = List.assoc "wlb_url" params in + let wlb_username = List.assoc "wlb_username" params in + let wlb_password = List.assoc "wlb_password" params in + let xenserver_username = List.assoc "xenserver_username" params in + let xenserver_password = List.assoc "xenserver_password" params in + Client.Pool.initialize_wlb ~rpc ~session_id ~wlb_url ~wlb_username ~wlb_password ~xenserver_username ~xenserver_password let pool_deconfigure_wlb printer rpc session_id params = 
- Client.Pool.deconfigure_wlb ~rpc ~session_id + Client.Pool.deconfigure_wlb ~rpc ~session_id let pool_send_wlb_configuration printer rpc session_id params = - let len = String.length "config:" in - let filter_params = List.filter (fun (p,_) -> (String.startswith "config" p) && (String.length p > len)) params in - let config = List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params in - Client.Pool.send_wlb_configuration ~rpc ~session_id ~config + let len = String.length "config:" in + let filter_params = List.filter (fun (p,_) -> (String.startswith "config" p) && (String.length p > len)) params in + let config = List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params in + Client.Pool.send_wlb_configuration ~rpc ~session_id ~config let pool_retrieve_wlb_configuration printer rpc session_id params = - printer (Cli_printer.PTable [(Client.Pool.retrieve_wlb_configuration ~rpc ~session_id)]) + printer (Cli_printer.PTable [(Client.Pool.retrieve_wlb_configuration ~rpc ~session_id)]) let pool_retrieve_wlb_recommendations printer rpc session_id params = - let table t = - List.map (fun (vm, recom) -> (Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), String.concat " " recom)) t - in - printer (Cli_printer.PTable ([("VM", "Host, OptID, RecID, Reason") :: table (Client.Pool.retrieve_wlb_recommendations ~rpc ~session_id)])) + let table t = + List.map (fun (vm, recom) -> (Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), String.concat " " recom)) t + in + printer (Cli_printer.PTable ([("VM", "Host, OptID, RecID, Reason") :: table (Client.Pool.retrieve_wlb_recommendations ~rpc ~session_id)])) let pool_send_test_post printer rpc session_id params = - let host = List.assoc "dest-host" params in - let port = Int64.of_string (List.assoc "dest-port" params) in - let body = List.assoc "body" params in - printer 
(Cli_printer.PMsg - (Client.Pool.send_test_post ~rpc ~session_id ~host ~port ~body)) + let host = List.assoc "dest-host" params in + let port = Int64.of_string (List.assoc "dest-port" params) in + let body = List.assoc "body" params in + printer (Cli_printer.PMsg + (Client.Pool.send_test_post ~rpc ~session_id ~host ~port ~body)) let pool_certificate_install fd printer rpc session_id params = - let filename = List.assoc "filename" params in - match get_client_file fd filename with - | Some cert -> - Client.Pool.certificate_install ~rpc ~session_id - ~name:(Filename.basename filename) ~cert - | None -> - marshal fd (Command (PrintStderr "Failed to read certificate\n")); - raise (ExitWithError 1) + let filename = List.assoc "filename" params in + match get_client_file fd filename with + | Some cert -> + Client.Pool.certificate_install ~rpc ~session_id + ~name:(Filename.basename filename) ~cert + | None -> + marshal fd (Command (PrintStderr "Failed to read certificate\n")); + raise (ExitWithError 1) let pool_certificate_uninstall printer rpc session_id params = - let name = List.assoc "name" params in - Client.Pool.certificate_uninstall ~rpc ~session_id ~name + let name = List.assoc "name" params in + Client.Pool.certificate_uninstall ~rpc ~session_id ~name let pool_certificate_list printer rpc session_id params = - printer (Cli_printer.PList - (Client.Pool.certificate_list ~rpc ~session_id)) + printer (Cli_printer.PList + (Client.Pool.certificate_list ~rpc ~session_id)) let pool_crl_install fd printer rpc session_id params = - let filename = List.assoc "filename" params in - match get_client_file fd filename with - | Some cert -> - Client.Pool.crl_install ~rpc ~session_id - ~name:(Filename.basename filename) ~cert - | None -> - marshal fd (Command (PrintStderr "Failed to read CRL\n")); - raise (ExitWithError 1) + let filename = List.assoc "filename" params in + match get_client_file fd filename with + | Some cert -> + Client.Pool.crl_install ~rpc ~session_id + 
~name:(Filename.basename filename) ~cert + | None -> + marshal fd (Command (PrintStderr "Failed to read CRL\n")); + raise (ExitWithError 1) let pool_crl_uninstall printer rpc session_id params = - let name = List.assoc "name" params in - Client.Pool.crl_uninstall ~rpc ~session_id ~name + let name = List.assoc "name" params in + Client.Pool.crl_uninstall ~rpc ~session_id ~name let pool_crl_list printer rpc session_id params = - printer (Cli_printer.PList - (Client.Pool.crl_list ~rpc ~session_id)) + printer (Cli_printer.PList + (Client.Pool.crl_list ~rpc ~session_id)) let pool_certificate_sync printer rpc session_id params = - Client.Pool.certificate_sync ~rpc ~session_id + Client.Pool.certificate_sync ~rpc ~session_id let pool_enable_redo_log printer rpc session_id params = - let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in - Client.Pool.enable_redo_log ~rpc ~session_id ~sr + let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in + Client.Pool.enable_redo_log ~rpc ~session_id ~sr let pool_disable_redo_log printer rpc session_id params = - Client.Pool.disable_redo_log ~rpc ~session_id + Client.Pool.disable_redo_log ~rpc ~session_id let pool_set_vswitch_controller printer rpc session_id params = - let address = List.assoc "address" params in - Client.Pool.set_vswitch_controller ~rpc ~session_id ~address + let address = List.assoc "address" params in + Client.Pool.set_vswitch_controller ~rpc ~session_id ~address let pool_enable_ssl_legacy printer rpc session_id params = - let self = get_pool_with_default rpc session_id params "uuid" in - Client.Pool.enable_ssl_legacy ~rpc ~session_id ~self + let self = get_pool_with_default rpc session_id params "uuid" in + Client.Pool.enable_ssl_legacy ~rpc ~session_id ~self let pool_disable_ssl_legacy printer rpc session_id params = - let self = get_pool_with_default rpc session_id params "uuid" in - Client.Pool.disable_ssl_legacy ~rpc ~session_id ~self + let self = 
get_pool_with_default rpc session_id params "uuid" in + Client.Pool.disable_ssl_legacy ~rpc ~session_id ~self let vdi_type_of_string = function - | "system" -> `system - | "user" -> `user - | "suspend" -> `suspend - | "crashdump" -> `crashdump - | x -> failwith (Printf.sprintf "Unknown vdi type: %s" x) + | "system" -> `system + | "user" -> `user + | "suspend" -> `suspend + | "crashdump" -> `crashdump + | x -> failwith (Printf.sprintf "Unknown vdi type: %s" x) let vdi_create printer rpc session_id params = - let sR = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in - let name_label=List.assoc "name-label" params in - let virtual_size = Record_util.bytes_of_string "virtual-size" (List.assoc "virtual-size" params) in - let ty = - if List.mem_assoc "type" params - then vdi_type_of_string (List.assoc "type" params) - else `user in - let sharable = get_bool_param params "sharable" in - let sm_config=read_map_params "sm-config" params in - let tags=read_set_params "tags" params in - - let vdi = Client.VDI.create ~rpc ~session_id ~name_label ~name_description:"" ~sR ~virtual_size ~_type:ty - ~sharable ~read_only:false ~xenstore_data:[] ~other_config:[] ~sm_config ~tags in - let vdi_uuid = Client.VDI.get_uuid rpc session_id vdi in - printer (Cli_printer.PList [vdi_uuid]) + let sR = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in + let name_label=List.assoc "name-label" params in + let virtual_size = Record_util.bytes_of_string "virtual-size" (List.assoc "virtual-size" params) in + let ty = + if List.mem_assoc "type" params + then vdi_type_of_string (List.assoc "type" params) + else `user in + let sharable = get_bool_param params "sharable" in + let sm_config=read_map_params "sm-config" params in + let tags=read_set_params "tags" params in + + let vdi = Client.VDI.create ~rpc ~session_id ~name_label ~name_description:"" ~sR ~virtual_size ~_type:ty + ~sharable ~read_only:false ~xenstore_data:[] ~other_config:[] ~sm_config ~tags in + 
let vdi_uuid = Client.VDI.get_uuid rpc session_id vdi in + printer (Cli_printer.PList [vdi_uuid]) let vdi_introduce printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let sR = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in - (* CA-13140: Some of the backends set their own name-labels, and the VDI introduce will - not override them if we pass in the empty string. *) - let name_label = try List.assoc "name-label" params with _ -> "" in - let name_description = if List.mem_assoc "name-description" params then List.assoc "name-description" params else "" in - let _type = vdi_type_of_string (List.assoc "type" params) in - let sharable = get_bool_param params "sharable" in - let read_only = get_bool_param params "read-only" in - (* NB call is new so backwards compat other-config- not required *) - let other_config = read_map_params "other-config" params in - let xenstore_data = read_map_params "xenstore-data" params in - let sm_config = read_map_params "sm-config" params in - let location = List.assoc "location" params in - let managed = get_bool_param params "managed" in - let virtual_size = 0L and physical_utilisation = 0L in - let metadata_of_pool = Ref.null in - let is_a_snapshot = false in - let snapshot_time = Date.never in - let snapshot_of = Ref.null in - let vdi = Client.VDI.introduce ~rpc ~session_id ~uuid ~name_label ~name_description - ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config - ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot - ~snapshot_time ~snapshot_of in - (* round-trip catches partial application errors *) - let vdi_uuid = Client.VDI.get_uuid ~rpc ~session_id ~self:vdi in - printer (Cli_printer.PList [ vdi_uuid ]) + let uuid = List.assoc "uuid" params in + let sR = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) in + (* CA-13140: Some of the backends set their own name-labels, and the VDI introduce will + not override 
them if we pass in the empty string. *) + let name_label = try List.assoc "name-label" params with _ -> "" in + let name_description = if List.mem_assoc "name-description" params then List.assoc "name-description" params else "" in + let _type = vdi_type_of_string (List.assoc "type" params) in + let sharable = get_bool_param params "sharable" in + let read_only = get_bool_param params "read-only" in + (* NB call is new so backwards compat other-config- not required *) + let other_config = read_map_params "other-config" params in + let xenstore_data = read_map_params "xenstore-data" params in + let sm_config = read_map_params "sm-config" params in + let location = List.assoc "location" params in + let managed = get_bool_param params "managed" in + let virtual_size = 0L and physical_utilisation = 0L in + let metadata_of_pool = Ref.null in + let is_a_snapshot = false in + let snapshot_time = Date.never in + let snapshot_of = Ref.null in + let vdi = Client.VDI.introduce ~rpc ~session_id ~uuid ~name_label ~name_description + ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config + ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot + ~snapshot_time ~snapshot_of in + (* round-trip catches partial application errors *) + let vdi_uuid = Client.VDI.get_uuid ~rpc ~session_id ~self:vdi in + printer (Cli_printer.PList [ vdi_uuid ]) let vdi_resize printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let new_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) in - let online = List.mem_assoc "online" params && (List.assoc "online" params = "true") in - if online - then Client.VDI.resize_online rpc session_id vdi new_size - else Client.VDI.resize rpc session_id vdi new_size + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let new_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) 
in + let online = List.mem_assoc "online" params && (List.assoc "online" params = "true") in + if online + then Client.VDI.resize_online rpc session_id vdi new_size + else Client.VDI.resize rpc session_id vdi new_size let vdi_generate_config printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in - printer (Cli_printer.PList [ Client.VDI.generate_config rpc session_id host vdi ]) + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in + printer (Cli_printer.PList [ Client.VDI.generate_config rpc session_id host vdi ]) let vdi_copy printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let base_vdi = - if List.mem_assoc "base-vdi-uuid" params - then Client.VDI.get_by_uuid rpc session_id (List.assoc "base-vdi-uuid" params) - else Ref.null in - let sr, into = match List.mem_assoc "sr-uuid" params, List.mem_assoc "into-vdi-uuid" params with - | false, false - | true, true -> - failwith "Please specify one but not both of: a destination sr-uuid (I will create a fresh VDI); or a destination into-vdi-uuid (I will copy the blocks into this VDI)" - | true, false -> - Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params), Ref.null - | false, true -> - Ref.null, Client.VDI.get_by_uuid rpc session_id (List.assoc "into-vdi-uuid" params) in - let newvdi = Client.VDI.copy rpc session_id vdi sr base_vdi into in - let newuuid = Client.VDI.get_uuid rpc session_id newvdi in - printer (Cli_printer.PList [newuuid]) + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let base_vdi = + if List.mem_assoc "base-vdi-uuid" params + then Client.VDI.get_by_uuid rpc session_id (List.assoc "base-vdi-uuid" params) + else Ref.null in + let sr, into 
= match List.mem_assoc "sr-uuid" params, List.mem_assoc "into-vdi-uuid" params with + | false, false + | true, true -> + failwith "Please specify one but not both of: a destination sr-uuid (I will create a fresh VDI); or a destination into-vdi-uuid (I will copy the blocks into this VDI)" + | true, false -> + Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params), Ref.null + | false, true -> + Ref.null, Client.VDI.get_by_uuid rpc session_id (List.assoc "into-vdi-uuid" params) in + let newvdi = Client.VDI.copy rpc session_id vdi sr base_vdi into in + let newuuid = Client.VDI.get_uuid rpc session_id newvdi in + printer (Cli_printer.PList [newuuid]) let vdi_pool_migrate printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) - and sr = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) - and options = [] (* no options implemented yet *) - in - let newvdi = Client.VDI.pool_migrate rpc session_id vdi sr options in - let newuuid = Client.VDI.get_uuid rpc session_id newvdi in - printer (Cli_printer.PList [newuuid]) + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) + and sr = Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) + and options = [] (* no options implemented yet *) + in + let newvdi = Client.VDI.pool_migrate rpc session_id vdi sr options in + let newuuid = Client.VDI.get_uuid rpc session_id newvdi in + printer (Cli_printer.PList [newuuid]) let vdi_clone printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let driver_params = read_map_params "driver-params" params in - let name_label = try Some (List.assoc "new-name-label" params) with Not_found -> None in - let name_description = try Some (List.assoc "new-name-description" params) with Not_found -> None in - let newvdi = Client.VDI.clone rpc session_id vdi driver_params in - maybe (fun x -> Client.VDI.set_name_label rpc session_id 
newvdi x) name_label; - maybe (fun x -> Client.VDI.set_name_description rpc session_id newvdi x) name_description; - let newuuid = Client.VDI.get_uuid rpc session_id newvdi in - printer (Cli_printer.PList [newuuid]) + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let driver_params = read_map_params "driver-params" params in + let name_label = try Some (List.assoc "new-name-label" params) with Not_found -> None in + let name_description = try Some (List.assoc "new-name-description" params) with Not_found -> None in + let newvdi = Client.VDI.clone rpc session_id vdi driver_params in + maybe (fun x -> Client.VDI.set_name_label rpc session_id newvdi x) name_label; + maybe (fun x -> Client.VDI.set_name_description rpc session_id newvdi x) name_description; + let newuuid = Client.VDI.get_uuid rpc session_id newvdi in + printer (Cli_printer.PList [newuuid]) let vdi_snapshot printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let driver_params = read_map_params "driver-params" params in - let newvdi = Client.VDI.snapshot rpc session_id vdi driver_params in - let newuuid = Client.VDI.get_uuid rpc session_id newvdi in - printer (Cli_printer.PList [newuuid]) + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let driver_params = read_map_params "driver-params" params in + let newvdi = Client.VDI.snapshot rpc session_id vdi driver_params in + let newuuid = Client.VDI.get_uuid rpc session_id newvdi in + printer (Cli_printer.PList [newuuid]) let vdi_destroy printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.VDI.destroy rpc session_id vdi + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.VDI.destroy rpc session_id vdi let vdi_forget printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - 
Client.VDI.forget rpc session_id vdi + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.VDI.forget rpc session_id vdi let vdi_update printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.VDI.update rpc session_id vdi + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.VDI.update rpc session_id vdi let vdi_unlock printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - if not(List.mem_assoc "force" params) - then failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; - Client.VDI.force_unlock rpc session_id vdi + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + if not(List.mem_assoc "force" params) + then failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; + Client.VDI.force_unlock rpc session_id vdi let diagnostic_vdi_status printer rpc session_id params = - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let vdi_r = vdi_record rpc session_id vdi in - let vdi_fields = List.filter - (fun x -> List.mem x.name [ "uuid"; "name-label"; "sr-uuid"; "mode"; "read-only"; "sharable"; "storage-lock" ]) vdi_r.fields in - printer (Cli_printer.PTable [List.map print_field vdi_fields]); - let all_vbds = Client.VDI.get_VBDs rpc session_id vdi in - let all_vbd_records = List.map (vbd_record rpc session_id) all_vbds in - let active_records = List.filter (fun x -> (field_lookup (x.fields) "currently-attached").get() = "true") all_vbd_records in - let inactive_records = List.set_difference all_vbd_records active_records in - let show_vbds records = - List.iter (fun vbd_record -> - let fields = List.filter (fun x -> List.mem x.name [ "uuid"; "userdevice"; "device"; "empty"; "mode"; "type"; 
"storage-lock" ]) vbd_record.fields in - printer (Cli_printer.PTable [List.map print_field fields])) records in - if active_records = [] - then printer (Cli_printer.PList [ "no active VBDs." ]) - else begin - printer (Cli_printer.PList [ "active VBDs:" ]); - show_vbds active_records; - end; - if inactive_records <> [] then begin - printer (Cli_printer.PList [ "inactive VBDs:" ]); - show_vbds inactive_records - end + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let vdi_r = vdi_record rpc session_id vdi in + let vdi_fields = List.filter + (fun x -> List.mem x.name [ "uuid"; "name-label"; "sr-uuid"; "mode"; "read-only"; "sharable"; "storage-lock" ]) vdi_r.fields in + printer (Cli_printer.PTable [List.map print_field vdi_fields]); + let all_vbds = Client.VDI.get_VBDs rpc session_id vdi in + let all_vbd_records = List.map (vbd_record rpc session_id) all_vbds in + let active_records = List.filter (fun x -> (field_lookup (x.fields) "currently-attached").get() = "true") all_vbd_records in + let inactive_records = List.set_difference all_vbd_records active_records in + let show_vbds records = + List.iter (fun vbd_record -> + let fields = List.filter (fun x -> List.mem x.name [ "uuid"; "userdevice"; "device"; "empty"; "mode"; "type"; "storage-lock" ]) vbd_record.fields in + printer (Cli_printer.PTable [List.map print_field fields])) records in + if active_records = [] + then printer (Cli_printer.PList [ "no active VBDs." ]) + else begin + printer (Cli_printer.PList [ "active VBDs:" ]); + show_vbds active_records; + end; + if inactive_records <> [] then begin + printer (Cli_printer.PList [ "inactive VBDs:" ]); + show_vbds inactive_records + end (* Print a table of hosts, reporting whether a VM can start on each host and if not, why not! 
*) let print_assert_exception e = - let rec get_arg n xs = - match n,xs with - 1,x::_ -> x - | n,_::xs -> get_arg (n-1) xs - | _ -> "" in - match e with - Api_errors.Server_error(code, params) when code=Api_errors.vm_requires_sr -> - "VM requires access to SR: "^(Cli_util.ref_convert (get_arg 2 params)) - | Api_errors.Server_error(code, params) when code=Api_errors.host_disabled -> - "Host disabled (use 'xe host-enable' to re-enable)" - | Api_errors.Server_error(code, params) when code=Api_errors.host_not_live -> - "Host down" - | Api_errors.Server_error(code, params) when code=Api_errors.host_not_enough_free_memory -> - Printf.sprintf "Not enough free memory" - | Api_errors.Server_error(code, params) when code=Api_errors.vm_requires_net -> - "VM requires access to network: "^(Cli_util.ref_convert (get_arg 2 params)) - | Api_errors.Server_error(code, params) when code=Api_errors.host_cannot_attach_network -> - "Host cannot attach to network: "^(Cli_util.ref_convert (get_arg 2 params)) - | Api_errors.Server_error(code, params) when code=Api_errors.vm_hvm_required -> - "HVM not supported" - | Api_errors.Server_error(code, [key; v] ) when code=Api_errors.invalid_value -> - Printf.sprintf "Field has invalid value: %s = %s" key v - - (* Used by VM.assert_agile: *) - | Api_errors.Server_error(code, [ sr ]) when code=Api_errors.ha_constraint_violation_sr_not_shared -> - Printf.sprintf "VM requires access to non-shared SR: %s. SR must both be marked as shared and a properly configured PBD must be plugged-in on every host" (Cli_util.ref_convert sr) - | Api_errors.Server_error(code, [ net]) when code = Api_errors.ha_constraint_violation_network_not_shared -> - Printf.sprintf "VM requires access to non-shared Network: %s. Network must either be entirely virtual or there must be a PIF connecting to this Network on every host." 
(Cli_util.ref_convert net) - - | e -> Printexc.to_string e + let rec get_arg n xs = + match n,xs with + 1,x::_ -> x + | n,_::xs -> get_arg (n-1) xs + | _ -> "" in + match e with + Api_errors.Server_error(code, params) when code=Api_errors.vm_requires_sr -> + "VM requires access to SR: "^(Cli_util.ref_convert (get_arg 2 params)) + | Api_errors.Server_error(code, params) when code=Api_errors.host_disabled -> + "Host disabled (use 'xe host-enable' to re-enable)" + | Api_errors.Server_error(code, params) when code=Api_errors.host_not_live -> + "Host down" + | Api_errors.Server_error(code, params) when code=Api_errors.host_not_enough_free_memory -> + Printf.sprintf "Not enough free memory" + | Api_errors.Server_error(code, params) when code=Api_errors.vm_requires_net -> + "VM requires access to network: "^(Cli_util.ref_convert (get_arg 2 params)) + | Api_errors.Server_error(code, params) when code=Api_errors.host_cannot_attach_network -> + "Host cannot attach to network: "^(Cli_util.ref_convert (get_arg 2 params)) + | Api_errors.Server_error(code, params) when code=Api_errors.vm_hvm_required -> + "HVM not supported" + | Api_errors.Server_error(code, [key; v] ) when code=Api_errors.invalid_value -> + Printf.sprintf "Field has invalid value: %s = %s" key v + + (* Used by VM.assert_agile: *) + | Api_errors.Server_error(code, [ sr ]) when code=Api_errors.ha_constraint_violation_sr_not_shared -> + Printf.sprintf "VM requires access to non-shared SR: %s. SR must both be marked as shared and a properly configured PBD must be plugged-in on every host" (Cli_util.ref_convert sr) + | Api_errors.Server_error(code, [ net]) when code = Api_errors.ha_constraint_violation_network_not_shared -> + Printf.sprintf "VM requires access to non-shared Network: %s. Network must either be entirely virtual or there must be a PIF connecting to this Network on every host." 
(Cli_util.ref_convert net) + + | e -> Printexc.to_string e let print_vm_host_report printer rpc session_id vm_ref = - let hosts = Client.Host.get_all rpc session_id in - let table = List.map (fun host -> Client.Host.get_name_label rpc session_id host, - try Client.VM.assert_can_boot_here rpc session_id vm_ref host; "OK" - with e -> "Cannot start here ["^(print_assert_exception e)^"]") hosts in - printer (Cli_printer.PTable [table]) + let hosts = Client.Host.get_all rpc session_id in + let table = List.map (fun host -> Client.Host.get_name_label rpc session_id host, + try Client.VM.assert_can_boot_here rpc session_id vm_ref host; "OK" + with e -> "Cannot start here ["^(print_assert_exception e)^"]") hosts in + printer (Cli_printer.PTable [table]) let diagnostic_vm_status printer rpc session_id params = - let vm = Client.VM.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let vm_r = vm_record rpc session_id vm in - let vm_fields = List.filter - (fun x -> List.mem x.name [ "uuid"; "name-label"; "power-state"; "possible-hosts"]) vm_r.fields in - - printer (Cli_printer.PTable [List.map print_field vm_fields]); - printer (Cli_printer.PList [ "Checking to see whether disks are attachable" ]); - let show_vbds records = - List.iter (fun vbd_record -> - let fields = List.filter (fun x -> List.mem x.name [ "uuid"; "userdevice"; "device"; "vdi-uuid"; "empty"; "mode"; "type"; "storage-lock"; "attachable" ]) vbd_record.fields in - printer (Cli_printer.PTable [List.map print_field fields])) records in - let all_vbds = Client.VM.get_VBDs rpc session_id vm in - let all_vbd_records = List.map (vbd_record rpc session_id) all_vbds in - show_vbds all_vbd_records; - printer (Cli_printer.PList [ "Checking to see whether VM can boot on each host" ]); - print_vm_host_report printer rpc session_id vm; - printer (Cli_printer.PList [ - try Client.VM.assert_agile rpc session_id vm; "VM is agile." 
- with e -> "VM is not agile because: " ^ (print_assert_exception e) ]) + let vm = Client.VM.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let vm_r = vm_record rpc session_id vm in + let vm_fields = List.filter + (fun x -> List.mem x.name [ "uuid"; "name-label"; "power-state"; "possible-hosts"]) vm_r.fields in + + printer (Cli_printer.PTable [List.map print_field vm_fields]); + printer (Cli_printer.PList [ "Checking to see whether disks are attachable" ]); + let show_vbds records = + List.iter (fun vbd_record -> + let fields = List.filter (fun x -> List.mem x.name [ "uuid"; "userdevice"; "device"; "vdi-uuid"; "empty"; "mode"; "type"; "storage-lock"; "attachable" ]) vbd_record.fields in + printer (Cli_printer.PTable [List.map print_field fields])) records in + let all_vbds = Client.VM.get_VBDs rpc session_id vm in + let all_vbd_records = List.map (vbd_record rpc session_id) all_vbds in + show_vbds all_vbd_records; + printer (Cli_printer.PList [ "Checking to see whether VM can boot on each host" ]); + print_vm_host_report printer rpc session_id vm; + printer (Cli_printer.PList [ + try Client.VM.assert_agile rpc session_id vm; "VM is agile." 
+ with e -> "VM is not agile because: " ^ (print_assert_exception e) ]) (* VBD create destroy list param-list param-get param-set param-add param-remove *) let vbd_create printer rpc session_id params = - let vM=Client.VM.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "vm-uuid" params) in - let empty = not(List.mem_assoc "vdi-uuid" params) in - let vDI = - if empty - then Ref.null - else Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "vdi-uuid" params) in - let bootable = get_bool_param params "bootable" in - let mode = - if List.mem_assoc "mode" params - then match String.lowercase (List.assoc "mode" params) with - | "ro" -> `RO | "rw" -> `RW - | x -> failwith (Printf.sprintf "Unknown mode: %s (should be \"ro\" or \"rw\"" x) - else `RW in - let _type = - if List.mem_assoc "type" params - then match String.lowercase (List.assoc "type" params) with - | "cd" -> `CD | "disk" -> `Disk - | x -> failwith (Printf.sprintf "Unknown type: %s (should be \"cd\" or \"disk\"" x) - else `Disk in - let unpluggable = get_bool_param params ~default:true "unpluggable" in - if _type=`Disk && empty then failwith "Empty VBDs can only be made for type=CD"; - let vbd=Client.VBD.create ~rpc ~session_id ~vM ~vDI ~userdevice:(List.assoc "device" params) - ~bootable - ~mode - ~_type - ~unpluggable - ~empty - ~qos_algorithm_type:"" - ~qos_algorithm_params:[] ~other_config:[] in - let vbd_uuid=Client.VBD.get_uuid rpc session_id vbd in - printer (Cli_printer.PList [vbd_uuid]) + let vM=Client.VM.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "vm-uuid" params) in + let empty = not(List.mem_assoc "vdi-uuid" params) in + let vDI = + if empty + then Ref.null + else Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "vdi-uuid" params) in + let bootable = get_bool_param params "bootable" in + let mode = + if List.mem_assoc "mode" params + then match String.lowercase (List.assoc "mode" params) with + | "ro" -> `RO | "rw" -> `RW + | x -> failwith (Printf.sprintf "Unknown mode: %s 
(should be \"ro\" or \"rw\"" x) + else `RW in + let _type = + if List.mem_assoc "type" params + then match String.lowercase (List.assoc "type" params) with + | "cd" -> `CD | "disk" -> `Disk + | x -> failwith (Printf.sprintf "Unknown type: %s (should be \"cd\" or \"disk\"" x) + else `Disk in + let unpluggable = get_bool_param params ~default:true "unpluggable" in + if _type=`Disk && empty then failwith "Empty VBDs can only be made for type=CD"; + let vbd=Client.VBD.create ~rpc ~session_id ~vM ~vDI ~userdevice:(List.assoc "device" params) + ~bootable + ~mode + ~_type + ~unpluggable + ~empty + ~qos_algorithm_type:"" + ~qos_algorithm_params:[] ~other_config:[] in + let vbd_uuid=Client.VBD.get_uuid rpc session_id vbd in + printer (Cli_printer.PList [vbd_uuid]) let vbd_destroy printer rpc session_id params = - let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - Client.VBD.destroy ~rpc ~session_id ~self + let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + Client.VBD.destroy ~rpc ~session_id ~self let vbd_eject printer rpc session_id params = - let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - Client.VBD.eject rpc session_id self + let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + Client.VBD.eject rpc session_id self let vbd_insert printer rpc session_id params = - let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - let vdi_uuid = List.assoc "vdi-uuid" params in - let vdi = Client.VDI.get_by_uuid rpc session_id vdi_uuid in - Client.VBD.insert rpc session_id self vdi + let self = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + let vdi_uuid = List.assoc "vdi-uuid" params in + let vdi = Client.VDI.get_by_uuid rpc session_id vdi_uuid in + Client.VBD.insert rpc session_id self vdi let vbd_plug printer rpc session_id params = - let vbd = Client.VBD.get_by_uuid 
~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - Client.VBD.plug rpc session_id vbd + let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + Client.VBD.plug rpc session_id vbd let vbd_unplug printer rpc session_id params = - let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - let timeout = - if List.mem_assoc "timeout" params then - (try float_of_string (List.assoc "timeout" params) with _ -> failwith "Failed to parse parameter 'timeout': expecting a float") - else 0. in - let force = get_bool_param params "force" in - let start = Unix.gettimeofday () in - try - (if force then Client.VBD.unplug_force else Client.VBD.unplug) rpc session_id vbd - with Api_errors.Server_error(code, _) as e when code = Api_errors.device_detach_rejected -> - (* enter polling mode *) - let unplugged = ref false in - while not(!unplugged) && (Unix.gettimeofday () -. start < timeout) do - Thread.delay 5.; - unplugged := not(Client.VBD.get_currently_attached rpc session_id vbd) - done; - if not(!unplugged) then raise e + let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + let timeout = + if List.mem_assoc "timeout" params then + (try float_of_string (List.assoc "timeout" params) with _ -> failwith "Failed to parse parameter 'timeout': expecting a float") + else 0. in + let force = get_bool_param params "force" in + let start = Unix.gettimeofday () in + try + (if force then Client.VBD.unplug_force else Client.VBD.unplug) rpc session_id vbd + with Api_errors.Server_error(code, _) as e when code = Api_errors.device_detach_rejected -> + (* enter polling mode *) + let unplugged = ref false in + while not(!unplugged) && (Unix.gettimeofday () -. 
start < timeout) do + Thread.delay 5.; + unplugged := not(Client.VBD.get_currently_attached rpc session_id vbd) + done; + if not(!unplugged) then raise e let vbd_pause printer rpc session_id params = - let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - let token = Client.VBD.pause rpc session_id vbd in - printer (Cli_printer.PList [token]) + let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + let token = Client.VBD.pause rpc session_id vbd in + printer (Cli_printer.PList [token]) let vbd_unpause printer rpc session_id params = - let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - let token = List.assoc "token" params in - Client.VBD.unpause rpc session_id vbd token + let vbd = Client.VBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + let token = List.assoc "token" params in + Client.VBD.unpause rpc session_id vbd token (* SR scan *) let sr_scan printer rpc session_id params = - let sr_uuid = List.assoc "uuid" params in - let sr_ref = Client.SR.get_by_uuid rpc session_id sr_uuid in - Client.SR.scan rpc session_id sr_ref + let sr_uuid = List.assoc "uuid" params in + let sr_ref = Client.SR.get_by_uuid rpc session_id sr_uuid in + Client.SR.scan rpc session_id sr_ref let parse_host_uuid ?(default_master=true) rpc session_id params = - if List.mem_assoc "host-uuid" params then - let host_uuid=List.assoc "host-uuid" params in - Client.Host.get_by_uuid rpc session_id host_uuid - else begin - let hosts = Client.Host.get_all rpc session_id in - let standalone = List.length hosts = 1 in - if standalone || default_master - then - let pool = List.hd (Client.Pool.get_all rpc session_id) in - Client.Pool.get_master rpc session_id pool - else failwith "Required parameter not found: host-uuid" - end + if List.mem_assoc "host-uuid" params then + let host_uuid=List.assoc "host-uuid" params in + Client.Host.get_by_uuid rpc session_id host_uuid + else begin + 
let hosts = Client.Host.get_all rpc session_id in + let standalone = List.length hosts = 1 in + if standalone || default_master + then + let pool = List.hd (Client.Pool.get_all rpc session_id) in + Client.Pool.get_master rpc session_id pool + else failwith "Required parameter not found: host-uuid" + end let parse_device_config params = - (* Ack! We're supposed to use the format device-config:key=value but we need to match device-config-key=value for *) - (* backwards compatability *) - let len = String.length "device-config:" in - let filter_params = List.filter (fun (p,_) -> (String.startswith "device-config" p) && (String.length p > len)) params in - List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params + (* Ack! We're supposed to use the format device-config:key=value but we need to match device-config-key=value for *) + (* backwards compatability *) + let len = String.length "device-config:" in + let filter_params = List.filter (fun (p,_) -> (String.startswith "device-config" p) && (String.length p > len)) params in + List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params (* SR create destroy list param-list param-get param-set param-add param-remove *) let sr_create fd printer rpc session_id params = - let name_label=List.assoc "name-label" params in - let shared = get_bool_param params "shared" in - let host = parse_host_uuid ~default_master:shared rpc session_id params in - let physical_size= - try - Record_util.bytes_of_string "physical-size" (List.assoc "physical-size" params) - with _ -> 0L in - let _type=List.assoc "type" params in - let content_type = List.assoc_default "content-type" params "" in - let device_config = parse_device_config params in - (* If the device-config parameter is of the form k-filename=v, then we assume the - key is 'k' and the value is stored in a file named 'v' *) - let suffix = "-filename" in - let device_config = List.map (fun (k,v) -> - if String.endswith suffix k then 
begin - let k = String.sub k 0 (String.length k - (String.length suffix)) in - match get_client_file fd v with - | Some v -> k,v - | None -> - marshal fd (Command(PrintStderr (Printf.sprintf "File not found: %s" v))); - failwith "File not found" - end else (k, v) - ) device_config in - let sm_config = read_map_params "sm-config" params in - let sr=Client.SR.create ~rpc ~session_id ~host ~device_config ~name_label - ~name_description:"" - ~physical_size ~_type ~content_type ~shared:shared ~sm_config in - let sr_uuid=Client.SR.get_uuid ~rpc ~session_id ~self:sr in - marshal fd (Command (Print sr_uuid)) + let name_label=List.assoc "name-label" params in + let shared = get_bool_param params "shared" in + let host = parse_host_uuid ~default_master:shared rpc session_id params in + let physical_size= + try + Record_util.bytes_of_string "physical-size" (List.assoc "physical-size" params) + with _ -> 0L in + let _type=List.assoc "type" params in + let content_type = List.assoc_default "content-type" params "" in + let device_config = parse_device_config params in + (* If the device-config parameter is of the form k-filename=v, then we assume the + key is 'k' and the value is stored in a file named 'v' *) + let suffix = "-filename" in + let device_config = List.map (fun (k,v) -> + if String.endswith suffix k then begin + let k = String.sub k 0 (String.length k - (String.length suffix)) in + match get_client_file fd v with + | Some v -> k,v + | None -> + marshal fd (Command(PrintStderr (Printf.sprintf "File not found: %s" v))); + failwith "File not found" + end else (k, v) + ) device_config in + let sm_config = read_map_params "sm-config" params in + let sr=Client.SR.create ~rpc ~session_id ~host ~device_config ~name_label + ~name_description:"" + ~physical_size ~_type ~content_type ~shared:shared ~sm_config in + let sr_uuid=Client.SR.get_uuid ~rpc ~session_id ~self:sr in + marshal fd (Command (Print sr_uuid)) let sr_introduce printer rpc session_id params = - let 
name_label=List.assoc "name-label" params in - let _type=List.assoc "type" params in - let content_type = List.assoc_default "content-type" params "" in - let uuid = List.assoc "uuid" params in - let shared = get_bool_param params "shared" in - let sm_config = read_map_params "sm-config" params in - let _ = Client.SR.introduce ~rpc ~session_id ~uuid ~name_label ~name_description:"" ~_type ~content_type ~shared ~sm_config in - printer (Cli_printer.PList [uuid]) + let name_label=List.assoc "name-label" params in + let _type=List.assoc "type" params in + let content_type = List.assoc_default "content-type" params "" in + let uuid = List.assoc "uuid" params in + let shared = get_bool_param params "shared" in + let sm_config = read_map_params "sm-config" params in + let _ = Client.SR.introduce ~rpc ~session_id ~uuid ~name_label ~name_description:"" ~_type ~content_type ~shared ~sm_config in + printer (Cli_printer.PList [uuid]) let sr_probe printer rpc session_id params = - let host = parse_host_uuid rpc session_id params in - let _type = List.assoc "type" params in - let device_config = parse_device_config params in - let sm_config = read_map_params "sm-config" params in - let txt = Client.SR.probe ~rpc ~session_id ~host ~_type ~device_config ~sm_config in - try - (* If it's the new format, try to print it more nicely *) - let open Storage_interface in - match probe_result_of_rpc (Xmlrpc.of_string txt) with - | Raw x -> printer (Cli_printer.PList [ x ]) - | Probe x -> - let sr (uri, x) = [ - "uri", uri; - "name-label", x.name_label; - "name-description", x.name_description; - "total-space", Int64.to_string x.total_space; - "free-space", Int64.to_string x.free_space; - ] in - if x.srs <> [] - then printer (Cli_printer.PMsg "The following SRs were found:"); - printer (Cli_printer.PTable (List.map sr x.srs)); - if x.uris <> [] - then printer (Cli_printer.PMsg "The following URIs may contain SRs:"); - printer (Cli_printer.PList x.uris) - with _ -> - printer 
(Cli_printer.PList [txt]) + let host = parse_host_uuid rpc session_id params in + let _type = List.assoc "type" params in + let device_config = parse_device_config params in + let sm_config = read_map_params "sm-config" params in + let txt = Client.SR.probe ~rpc ~session_id ~host ~_type ~device_config ~sm_config in + try + (* If it's the new format, try to print it more nicely *) + let open Storage_interface in + match probe_result_of_rpc (Xmlrpc.of_string txt) with + | Raw x -> printer (Cli_printer.PList [ x ]) + | Probe x -> + let sr (uri, x) = [ + "uri", uri; + "name-label", x.name_label; + "name-description", x.name_description; + "total-space", Int64.to_string x.total_space; + "free-space", Int64.to_string x.free_space; + ] in + if x.srs <> [] + then printer (Cli_printer.PMsg "The following SRs were found:"); + printer (Cli_printer.PTable (List.map sr x.srs)); + if x.uris <> [] + then printer (Cli_printer.PMsg "The following URIs may contain SRs:"); + printer (Cli_printer.PList x.uris) + with _ -> + printer (Cli_printer.PList [txt]) let sr_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let sr = Client.SR.get_by_uuid rpc session_id uuid in - Client.SR.destroy rpc session_id sr + let uuid = List.assoc "uuid" params in + let sr = Client.SR.get_by_uuid rpc session_id uuid in + Client.SR.destroy rpc session_id sr let sr_forget printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let sr = Client.SR.get_by_uuid rpc session_id uuid in - Client.SR.forget rpc session_id sr + let uuid = List.assoc "uuid" params in + let sr = Client.SR.get_by_uuid rpc session_id uuid in + Client.SR.forget rpc session_id sr let sr_update printer rpc session_id params = - let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.SR.update rpc session_id sr + let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.SR.update rpc session_id sr let sr_enable_database_replication 
printer rpc session_id params = - let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.SR.enable_database_replication rpc session_id sr + let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.SR.enable_database_replication rpc session_id sr let sr_disable_database_replication printer rpc session_id params = - let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.SR.disable_database_replication rpc session_id sr + let sr = Client.SR.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.SR.disable_database_replication rpc session_id sr (* PIF destroy* list param-list param-get param-set param-add param-remove *) let pbd_create printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let sr_uuid = List.assoc "sr-uuid" params in - - (* Ack! We're supposed to use the format device-config:key=value but we need to match device-config-key=value for *) - (* backwards compatability *) - let len = String.length "device-config:" in - let filter_params = List.filter (fun (p,_) -> (String.startswith "device-config" p) && (String.length p > len)) params in - let device_config = List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params in - - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let sr = Client.SR.get_by_uuid rpc session_id sr_uuid in - let pbd = Client.PBD.create rpc session_id host sr device_config [] in - let uuid = Client.PBD.get_uuid rpc session_id pbd in - printer (Cli_printer.PList [uuid]) + let host_uuid = List.assoc "host-uuid" params in + let sr_uuid = List.assoc "sr-uuid" params in + + (* Ack! 
We're supposed to use the format device-config:key=value but we need to match device-config-key=value for *) + (* backwards compatability *) + let len = String.length "device-config:" in + let filter_params = List.filter (fun (p,_) -> (String.startswith "device-config" p) && (String.length p > len)) params in + let device_config = List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params in + + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let sr = Client.SR.get_by_uuid rpc session_id sr_uuid in + let pbd = Client.PBD.create rpc session_id host sr device_config [] in + let uuid = Client.PBD.get_uuid rpc session_id pbd in + printer (Cli_printer.PList [uuid]) let pbd_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let pbd = Client.PBD.get_by_uuid rpc session_id uuid in - Client.PBD.destroy rpc session_id pbd + let uuid = List.assoc "uuid" params in + let pbd = Client.PBD.get_by_uuid rpc session_id uuid in + Client.PBD.destroy rpc session_id pbd let pbd_plug printer rpc session_id params = - let pbd = Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - Client.PBD.plug rpc session_id pbd + let pbd = Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + Client.PBD.plug rpc session_id pbd let pbd_unplug printer rpc session_id params = - let pbd = Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in - Client.PBD.unplug rpc session_id pbd + let pbd = Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:(List.assoc "uuid" params) in + Client.PBD.unplug rpc session_id pbd let vif_create printer rpc session_id params = - let device = List.assoc "device" params in - let network_uuid = List.assoc "network-uuid" params in - let vm_uuid=List.assoc "vm-uuid" params in - let mac=List.assoc_default "mac" params "" in - let mac=if mac="random" then (Record_util.random_mac_local ()) else mac in - let vm=Client.VM.get_by_uuid rpc session_id 
vm_uuid in - let network=Client.Network.get_by_uuid rpc session_id network_uuid in - let mtu = Client.Network.get_MTU rpc session_id network in - let vif = Client.VIF.create rpc session_id device network vm mac mtu [] "" [] `network_default [] [] in - let uuid = Client.VIF.get_uuid rpc session_id vif in - printer (Cli_printer.PList [uuid]) + let device = List.assoc "device" params in + let network_uuid = List.assoc "network-uuid" params in + let vm_uuid=List.assoc "vm-uuid" params in + let mac=List.assoc_default "mac" params "" in + let mac=if mac="random" then (Record_util.random_mac_local ()) else mac in + let vm=Client.VM.get_by_uuid rpc session_id vm_uuid in + let network=Client.Network.get_by_uuid rpc session_id network_uuid in + let mtu = Client.Network.get_MTU rpc session_id network in + let vif = Client.VIF.create rpc session_id device network vm mac mtu [] "" [] `network_default [] [] in + let uuid = Client.VIF.get_uuid rpc session_id vif in + printer (Cli_printer.PList [uuid]) let vif_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vif = Client.VIF.get_by_uuid rpc session_id uuid in - Client.VIF.destroy rpc session_id vif + let uuid = List.assoc "uuid" params in + let vif = Client.VIF.get_by_uuid rpc session_id uuid in + Client.VIF.destroy rpc session_id vif let vif_plug printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vif = Client.VIF.get_by_uuid rpc session_id uuid in - Client.VIF.plug rpc session_id vif + let uuid = List.assoc "uuid" params in + let vif = Client.VIF.get_by_uuid rpc session_id uuid in + Client.VIF.plug rpc session_id vif let vif_unplug printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vif = Client.VIF.get_by_uuid rpc session_id uuid in - let force = get_bool_param params "force" in - (if force then Client.VIF.unplug_force else Client.VIF.unplug) rpc session_id vif + let uuid = List.assoc "uuid" params in + let vif = Client.VIF.get_by_uuid 
rpc session_id uuid in + let force = get_bool_param params "force" in + (if force then Client.VIF.unplug_force else Client.VIF.unplug) rpc session_id vif let vif_configure_ipv4 printer rpc session_id params = - let vif = Client.VIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let mode = Record_util.vif_ipv4_configuration_mode_of_string (List.assoc "mode" params) in - let address = List.assoc_default "address" params "" in - let gateway = List.assoc_default "gateway" params "" in - if mode = `Static && address = "" then failwith "Required parameter not found: address"; - Client.VIF.configure_ipv4 rpc session_id vif mode address gateway + let vif = Client.VIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let mode = Record_util.vif_ipv4_configuration_mode_of_string (List.assoc "mode" params) in + let address = List.assoc_default "address" params "" in + let gateway = List.assoc_default "gateway" params "" in + if mode = `Static && address = "" then failwith "Required parameter not found: address"; + Client.VIF.configure_ipv4 rpc session_id vif mode address gateway let vif_configure_ipv6 printer rpc session_id params = - let vif = Client.VIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let mode = Record_util.vif_ipv6_configuration_mode_of_string (List.assoc "mode" params) in - let address = List.assoc_default "address" params "" in - let gateway = List.assoc_default "gateway" params "" in - if mode = `Static && address = "" then failwith "Required parameter not found: address"; - Client.VIF.configure_ipv6 rpc session_id vif mode address gateway + let vif = Client.VIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let mode = Record_util.vif_ipv6_configuration_mode_of_string (List.assoc "mode" params) in + let address = List.assoc_default "address" params "" in + let gateway = List.assoc_default "gateway" params "" in + if mode = `Static && address = "" then failwith "Required parameter not found: address"; + 
Client.VIF.configure_ipv6 rpc session_id vif mode address gateway let vif_move printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let network_uuid = List.assoc "network-uuid" params in - let vif = Client.VIF.get_by_uuid rpc session_id uuid in - let network = Client.Network.get_by_uuid rpc session_id network_uuid in - Client.VIF.move rpc session_id vif network + let uuid = List.assoc "uuid" params in + let network_uuid = List.assoc "network-uuid" params in + let vif = Client.VIF.get_by_uuid rpc session_id uuid in + let network = Client.Network.get_by_uuid rpc session_id network_uuid in + Client.VIF.move rpc session_id vif network let net_create printer rpc session_id params = - let network = List.assoc "name-label" params in - let descr = List.assoc_default "name-description" params "" in - let mtu = if List.mem_assoc "MTU" params then Int64.of_string (List.assoc "MTU" params) else 1500L in - let net = Client.Network.create rpc session_id network descr mtu [] [] in - let uuid = Client.Network.get_uuid rpc session_id net in - printer (Cli_printer.PList [uuid]) + let network = List.assoc "name-label" params in + let descr = List.assoc_default "name-description" params "" in + let mtu = if List.mem_assoc "MTU" params then Int64.of_string (List.assoc "MTU" params) else 1500L in + let net = Client.Network.create rpc session_id network descr mtu [] [] in + let uuid = Client.Network.get_uuid rpc session_id net in + printer (Cli_printer.PList [uuid]) let net_destroy printer rpc session_id params = - let network = Client.Network.get_by_uuid rpc session_id (List.assoc "uuid" params) in - ignore(Client.Network.destroy rpc session_id network) + let network = Client.Network.get_by_uuid rpc session_id (List.assoc "uuid" params) in + ignore(Client.Network.destroy rpc session_id network) let net_attach printer rpc session_id params = - let network = Client.Network.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let host = Client.Host.get_by_uuid 
rpc session_id (List.assoc "host-uuid" params) in - let () = Client.Network.attach rpc session_id network host in () + let network = Client.Network.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in + let () = Client.Network.attach rpc session_id network host in () let vm_create printer rpc session_id params = - let name_label=List.assoc "name-label" params in - let name_description=List.assoc_default "name-description" params "" in - let ( ** ) = Int64.mul in - let mib = 1024L ** 1024L in - let memory_max = 256L ** mib in - let memory_min = 128L ** mib in - let vm = Client.VM.create ~rpc ~session_id ~name_label ~name_description ~user_version:0L ~is_a_template:false - ~blocked_operations:[] - ~affinity:Ref.null - ~memory_target:memory_max - ~memory_static_max:memory_max - ~memory_dynamic_max:memory_max - ~memory_dynamic_min:memory_min - ~memory_static_min:memory_min - ~vCPUs_params:[] ~vCPUs_max:1L ~vCPUs_at_startup:1L - ~actions_after_shutdown:`destroy ~actions_after_reboot:`restart ~actions_after_crash:`destroy ~pV_bootloader:"" - ~pV_kernel:"" ~pV_ramdisk:"" ~pV_args:"" ~pV_bootloader_args:"" ~pV_legacy_args:"" ~hVM_boot_policy:"" - ~hVM_boot_params:[] ~hVM_shadow_multiplier:1. 
~platform:[] ~pCI_bus:"" ~other_config:[] ~xenstore_data:[] ~recommendations:"" ~ha_always_run:false ~ha_restart_priority:"" - ~tags:[] ~protection_policy:Ref.null ~is_snapshot_from_vmpp:false - ~appliance:Ref.null - ~start_delay:0L - ~shutdown_delay:0L - ~order:0L - ~suspend_SR:Ref.null - ~version:0L - ~generation_id:"" - ~hardware_platform_version:0L - ~has_vendor_device:false - in - let uuid=Client.VM.get_uuid rpc session_id vm in - printer (Cli_printer.PList [uuid]) + let name_label=List.assoc "name-label" params in + let name_description=List.assoc_default "name-description" params "" in + let ( ** ) = Int64.mul in + let mib = 1024L ** 1024L in + let memory_max = 256L ** mib in + let memory_min = 128L ** mib in + let vm = Client.VM.create ~rpc ~session_id ~name_label ~name_description ~user_version:0L ~is_a_template:false + ~blocked_operations:[] + ~affinity:Ref.null + ~memory_target:memory_max + ~memory_static_max:memory_max + ~memory_dynamic_max:memory_max + ~memory_dynamic_min:memory_min + ~memory_static_min:memory_min + ~vCPUs_params:[] ~vCPUs_max:1L ~vCPUs_at_startup:1L + ~actions_after_shutdown:`destroy ~actions_after_reboot:`restart ~actions_after_crash:`destroy ~pV_bootloader:"" + ~pV_kernel:"" ~pV_ramdisk:"" ~pV_args:"" ~pV_bootloader_args:"" ~pV_legacy_args:"" ~hVM_boot_policy:"" + ~hVM_boot_params:[] ~hVM_shadow_multiplier:1. 
~platform:[] ~pCI_bus:"" ~other_config:[] ~xenstore_data:[] ~recommendations:"" ~ha_always_run:false ~ha_restart_priority:"" + ~tags:[] ~protection_policy:Ref.null ~is_snapshot_from_vmpp:false + ~appliance:Ref.null + ~start_delay:0L + ~shutdown_delay:0L + ~order:0L + ~suspend_SR:Ref.null + ~version:0L + ~generation_id:"" + ~hardware_platform_version:0L + ~has_vendor_device:false + in + let uuid=Client.VM.get_uuid rpc session_id vm in + printer (Cli_printer.PList [uuid]) let vm_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vm = Client.VM.get_by_uuid rpc session_id uuid in - if (Client.VM.get_is_control_domain rpc session_id vm) then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, - ["You cannot destroy a control domain via the CLI"])) - else - Client.VM.destroy rpc session_id vm + let uuid = List.assoc "uuid" params in + let vm = Client.VM.get_by_uuid rpc session_id uuid in + if (Client.VM.get_is_control_domain rpc session_id vm) then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, + ["You cannot destroy a control domain via the CLI"])) + else + Client.VM.destroy rpc session_id vm (* Event *) @@ -1696,115 +1696,115 @@ let vm_destroy printer rpc session_id params = exception Finished let event_wait_gen rpc session_id classname record_matches = - (* Immediately register *) - let classes = [classname] in - Client.Event.register ~rpc ~session_id ~classes; - - debug "Registered for events"; - - (* Check to see if the condition is already satisfied - get all objects of whatever class specified... 
*) - let poll () = - let current_tbls = - match classname with - | "vm" -> List.map (fun x -> (vm_record rpc session_id x).fields) (Client.VM.get_all rpc session_id) - | "vdi" -> List.map (fun x -> (vdi_record rpc session_id x).fields) (Client.VDI.get_all rpc session_id) - | "sr" -> List.map (fun x -> (sr_record rpc session_id x).fields) (Client.SR.get_all rpc session_id) - | "host" -> List.map (fun x -> (host_record rpc session_id x).fields) (Client.Host.get_all rpc session_id) - | "network" -> List.map (fun x -> (net_record rpc session_id x).fields) (Client.Network.get_all rpc session_id) - | "vif" -> List.map (fun x -> (vif_record rpc session_id x).fields) (Client.VIF.get_all rpc session_id) - | "pif" -> List.map (fun x -> (pif_record rpc session_id x).fields) (Client.PIF.get_all rpc session_id) - | "vbd" -> List.map (fun x -> (vbd_record rpc session_id x).fields) (Client.VBD.get_all rpc session_id) - | "pbd" -> List.map (fun x -> (pbd_record rpc session_id x).fields) (Client.PBD.get_all rpc session_id) - | "pool" -> List.map (fun x -> (pool_record rpc session_id x).fields) (Client.Pool.get_all rpc session_id) - | "task" -> List.map (fun x -> (task_record rpc session_id x).fields) (Client.Task.get_all rpc session_id) - | "subject" -> List.map (fun x -> (subject_record rpc session_id x).fields) (Client.Subject.get_all rpc session_id) - | "role" -> List.map (fun x -> (role_record rpc session_id x).fields) (Client.Role.get_all rpc session_id) - | "secret" -> List.map (fun x -> (secret_record rpc session_id x).fields) (Client.Secret.get_all rpc session_id) - (* | "alert" -> List.map (fun x -> (alert_record rpc session_id x).fields) (Client.Alert.get_all rpc session_id) *) - | _ -> failwith ("Cli listening for class '"^classname^"' not currently implemented") - in - - debug "Getting all records"; - (* Records of every object of the class specified *) - let all_recs = List.map (List.map (fun r -> (r.name,(fun () -> safe_get_field r)))) current_tbls in - - debug "Got 
%d records" (List.length all_recs); - - (* true if anything matches now *) - let find_any_match recs = - let ls = List.map record_matches recs in - (List.length (List.filter (fun x -> x) ls)) > 0 - in - find_any_match all_recs - in - - finally - (fun () -> - if not(poll ()) then - try - while true do - try - let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in - let doevent event = - let tbl = match Event_helper.record_of_event event with - | Event_helper.VM (r,Some x) -> let record = vm_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.VDI (r,Some x) -> let record = vdi_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.SR (r,Some x) -> let record = sr_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.Host (r,Some x) -> let record = host_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.Network (r,Some x) -> let record = net_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.VIF (r,Some x) -> let record = vif_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.PIF (r,Some x) -> let record = pif_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.VBD (r,Some x) -> let record = vbd_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.PBD (r,Some x) -> let record = pbd_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.Pool (r,Some x) -> let record = pool_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.Task (r,Some x) -> let record = task_record rpc session_id r in record.setrefrec (r,x); record.fields - | Event_helper.Secret (r,Some x) -> let record = secret_record rpc session_id r in record.setrefrec (r,x); record.fields - | _ -> failwith ("Cli listening for class '"^classname^"' not currently implemented") - in - let record = 
List.map (fun r -> (r.name,fun () -> safe_get_field r)) tbl in - if record_matches record then raise Finished - in - List.iter doevent (List.filter (fun e -> e.Event_types.snapshot <> None) events) - with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> - debug "Got EVENTS_LOST; reregistering"; - Client.Event.unregister ~rpc ~session_id ~classes; - Client.Event.register ~rpc ~session_id ~classes; - if poll() then raise Finished - done - with Finished -> () - ) (fun () -> Client.Event.unregister ~rpc ~session_id ~classes) (* We're done. Unregister and finish *) + (* Immediately register *) + let classes = [classname] in + Client.Event.register ~rpc ~session_id ~classes; + + debug "Registered for events"; + + (* Check to see if the condition is already satisfied - get all objects of whatever class specified... *) + let poll () = + let current_tbls = + match classname with + | "vm" -> List.map (fun x -> (vm_record rpc session_id x).fields) (Client.VM.get_all rpc session_id) + | "vdi" -> List.map (fun x -> (vdi_record rpc session_id x).fields) (Client.VDI.get_all rpc session_id) + | "sr" -> List.map (fun x -> (sr_record rpc session_id x).fields) (Client.SR.get_all rpc session_id) + | "host" -> List.map (fun x -> (host_record rpc session_id x).fields) (Client.Host.get_all rpc session_id) + | "network" -> List.map (fun x -> (net_record rpc session_id x).fields) (Client.Network.get_all rpc session_id) + | "vif" -> List.map (fun x -> (vif_record rpc session_id x).fields) (Client.VIF.get_all rpc session_id) + | "pif" -> List.map (fun x -> (pif_record rpc session_id x).fields) (Client.PIF.get_all rpc session_id) + | "vbd" -> List.map (fun x -> (vbd_record rpc session_id x).fields) (Client.VBD.get_all rpc session_id) + | "pbd" -> List.map (fun x -> (pbd_record rpc session_id x).fields) (Client.PBD.get_all rpc session_id) + | "pool" -> List.map (fun x -> (pool_record rpc session_id x).fields) (Client.Pool.get_all rpc session_id) + | "task" -> List.map 
(fun x -> (task_record rpc session_id x).fields) (Client.Task.get_all rpc session_id) + | "subject" -> List.map (fun x -> (subject_record rpc session_id x).fields) (Client.Subject.get_all rpc session_id) + | "role" -> List.map (fun x -> (role_record rpc session_id x).fields) (Client.Role.get_all rpc session_id) + | "secret" -> List.map (fun x -> (secret_record rpc session_id x).fields) (Client.Secret.get_all rpc session_id) + (* | "alert" -> List.map (fun x -> (alert_record rpc session_id x).fields) (Client.Alert.get_all rpc session_id) *) + | _ -> failwith ("Cli listening for class '"^classname^"' not currently implemented") + in + + debug "Getting all records"; + (* Records of every object of the class specified *) + let all_recs = List.map (List.map (fun r -> (r.name,(fun () -> safe_get_field r)))) current_tbls in + + debug "Got %d records" (List.length all_recs); + + (* true if anything matches now *) + let find_any_match recs = + let ls = List.map record_matches recs in + (List.length (List.filter (fun x -> x) ls)) > 0 + in + find_any_match all_recs + in + + finally + (fun () -> + if not(poll ()) then + try + while true do + try + let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in + let doevent event = + let tbl = match Event_helper.record_of_event event with + | Event_helper.VM (r,Some x) -> let record = vm_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.VDI (r,Some x) -> let record = vdi_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.SR (r,Some x) -> let record = sr_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.Host (r,Some x) -> let record = host_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.Network (r,Some x) -> let record = net_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.VIF (r,Some x) -> let record = vif_record rpc session_id r in record.setrefrec 
(r,x); record.fields + | Event_helper.PIF (r,Some x) -> let record = pif_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.VBD (r,Some x) -> let record = vbd_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.PBD (r,Some x) -> let record = pbd_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.Pool (r,Some x) -> let record = pool_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.Task (r,Some x) -> let record = task_record rpc session_id r in record.setrefrec (r,x); record.fields + | Event_helper.Secret (r,Some x) -> let record = secret_record rpc session_id r in record.setrefrec (r,x); record.fields + | _ -> failwith ("Cli listening for class '"^classname^"' not currently implemented") + in + let record = List.map (fun r -> (r.name,fun () -> safe_get_field r)) tbl in + if record_matches record then raise Finished + in + List.iter doevent (List.filter (fun e -> e.Event_types.snapshot <> None) events) + with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> + debug "Got EVENTS_LOST; reregistering"; + Client.Event.unregister ~rpc ~session_id ~classes; + Client.Event.register ~rpc ~session_id ~classes; + if poll() then raise Finished + done + with Finished -> () + ) (fun () -> Client.Event.unregister ~rpc ~session_id ~classes) (* We're done. 
Unregister and finish *) let event_wait printer rpc session_id params = - let classname=List.assoc "class" params in - let filter_params = List.filter (fun (p,_) -> not (List.mem p ("class"::stdparams))) params in - - (* Each filter_params is a key value pair: - (key, value) if the user entered "key=value" - (key, "/=" value) if the user entered "key=/=value" - We now parse these into a slightly nicer form *) - - let filter_params = List.map - (fun (key, value) -> - if String.startswith "/=" value then begin - let key' = key in - let value' = String.sub value 2 (String.length value - 2) in - `NotEquals, key', value' - end else begin - `Equals, key, value - end) filter_params in - - (* This returns true if the record matches the cmd line constraints *) - let record_matches record = - let matches = List.map (fun (operator, p,v) -> - if not(List.mem_assoc p record) - then failwith (Printf.sprintf "key missing: %s" p); - let v' = List.assoc p record () in match operator with - | `NotEquals -> v <> v' - | `Equals -> v = v') filter_params in - alltrue matches - in - event_wait_gen rpc session_id classname record_matches + let classname=List.assoc "class" params in + let filter_params = List.filter (fun (p,_) -> not (List.mem p ("class"::stdparams))) params in + + (* Each filter_params is a key value pair: + (key, value) if the user entered "key=value" + (key, "/=" value) if the user entered "key=/=value" + We now parse these into a slightly nicer form *) + + let filter_params = List.map + (fun (key, value) -> + if String.startswith "/=" value then begin + let key' = key in + let value' = String.sub value 2 (String.length value - 2) in + `NotEquals, key', value' + end else begin + `Equals, key, value + end) filter_params in + + (* This returns true if the record matches the cmd line constraints *) + let record_matches record = + let matches = List.map (fun (operator, p,v) -> + if not(List.mem_assoc p record) + then failwith (Printf.sprintf "key missing: %s" p); + let v' = 
List.assoc p record () in match operator with + | `NotEquals -> v <> v' + | `Equals -> v = v') filter_params in + alltrue matches + in + event_wait_gen rpc session_id classname record_matches @@ -1814,787 +1814,787 @@ let event_wait printer rpc session_id params = (* Convenience functions *) let select_vms ?(include_control_vms = false) ?(include_template_vms = false) rpc session_id params ignore_params = - (* Make sure we don't select a template or control domain by mistake *) - let params = if not include_control_vms then ("is-control-domain", "false") :: params else params in - let params = if not include_template_vms then ("is-a-template" , "false") :: params else params in - - let do_filter params = - let vms = Client.VM.get_all_records_where rpc session_id "true" in - let all_recs = List.map (fun (vm,vm_r) -> let r = vm_record rpc session_id vm in r.setrefrec (vm,vm_r); r) vms in - (* Filter on everything on the cmd line except params=... *) - let filter_params = List.filter (fun (p,_) -> - let p' = - try - let i = String.index p ':' in - String.sub p 0 i - with Not_found -> p - in - not (List.mem p' (stdparams @ ignore_params)) - ) params in - (* Filter all the records *) - List.fold_left filter_records_on_fields all_recs filter_params - in - - (* try matching vm= first *) - if List.mem_assoc "vm" params - then - try [vm_record rpc session_id (Client.VM.get_by_uuid rpc session_id (List.assoc "vm" params))] - with _ -> do_filter (List.map (fun (k,v) -> if k="vm" then ("name-label",v) else (k,v)) params) - else - do_filter params + (* Make sure we don't select a template or control domain by mistake *) + let params = if not include_control_vms then ("is-control-domain", "false") :: params else params in + let params = if not include_template_vms then ("is-a-template" , "false") :: params else params in + + let do_filter params = + let vms = Client.VM.get_all_records_where rpc session_id "true" in + let all_recs = List.map (fun (vm,vm_r) -> let r = vm_record 
rpc session_id vm in r.setrefrec (vm,vm_r); r) vms in + (* Filter on everything on the cmd line except params=... *) + let filter_params = List.filter (fun (p,_) -> + let p' = + try + let i = String.index p ':' in + String.sub p 0 i + with Not_found -> p + in + not (List.mem p' (stdparams @ ignore_params)) + ) params in + (* Filter all the records *) + List.fold_left filter_records_on_fields all_recs filter_params + in + + (* try matching vm= first *) + if List.mem_assoc "vm" params + then + try [vm_record rpc session_id (Client.VM.get_by_uuid rpc session_id (List.assoc "vm" params))] + with _ -> do_filter (List.map (fun (k,v) -> if k="vm" then ("name-label",v) else (k,v)) params) + else + do_filter params let select_hosts rpc session_id params ignore_params = - (* try matching host= first *) - let do_filter params = - let hosts = Client.Host.get_all_records_where rpc session_id "true" in - let all_recs = List.map (fun (host,host_r) -> let r = host_record rpc session_id host in r.setrefrec (host,host_r); r) hosts in - - let filter_params = List.filter (fun (p,_) -> - let stem=List.hd (String.split ':' p) in not (List.mem stem (stdparams @ ignore_params))) params in - (* Filter all the records *) - List.fold_left filter_records_on_fields all_recs filter_params - in - - if List.mem_assoc "host" params - then - try [host_record rpc session_id (Client.Host.get_by_uuid rpc session_id (List.assoc "host" params))] - with _ -> do_filter (List.map (fun (k,v) -> if k="host" then ("name-label",v) else (k,v)) params) - else - do_filter params + (* try matching host= first *) + let do_filter params = + let hosts = Client.Host.get_all_records_where rpc session_id "true" in + let all_recs = List.map (fun (host,host_r) -> let r = host_record rpc session_id host in r.setrefrec (host,host_r); r) hosts in + + let filter_params = List.filter (fun (p,_) -> + let stem=List.hd (String.split ':' p) in not (List.mem stem (stdparams @ ignore_params))) params in + (* Filter all the records 
*) + List.fold_left filter_records_on_fields all_recs filter_params + in + + if List.mem_assoc "host" params + then + try [host_record rpc session_id (Client.Host.get_by_uuid rpc session_id (List.assoc "host" params))] + with _ -> do_filter (List.map (fun (k,v) -> if k="host" then ("name-label",v) else (k,v)) params) + else + do_filter params let select_vm_geneva rpc session_id params = - if List.mem_assoc "vm-name" params then - begin - let vmname = List.assoc "vm-name" params in - let vms = Client.VM.get_all rpc session_id in - let vm = List.filter (fun vm -> Client.VM.get_name_label rpc session_id vm = vmname) vms in - if List.length vm = 0 then - failwith ("VM with name '"^vmname^"' not found") - else if List.length vm > 1 then - failwith ("Multiple VMs with name '"^vmname^"' found") - else - vm_record rpc session_id (List.hd vm) - end - else if List.mem_assoc "vm-id" params then - begin - let vmid = List.assoc "vm-id" params in - try - vm_record rpc session_id (Client.VM.get_by_uuid rpc session_id vmid) - with - e -> failwith ("Failed to find VM with id '"^vmid^"'") - end - else - (failwith ("Must select a VM using either vm-name or vm-id: params=" - ^(String.concat "," (List.map (fun (a,b) -> a^"="^b) params)))) + if List.mem_assoc "vm-name" params then + begin + let vmname = List.assoc "vm-name" params in + let vms = Client.VM.get_all rpc session_id in + let vm = List.filter (fun vm -> Client.VM.get_name_label rpc session_id vm = vmname) vms in + if List.length vm = 0 then + failwith ("VM with name '"^vmname^"' not found") + else if List.length vm > 1 then + failwith ("Multiple VMs with name '"^vmname^"' found") + else + vm_record rpc session_id (List.hd vm) + end + else if List.mem_assoc "vm-id" params then + begin + let vmid = List.assoc "vm-id" params in + try + vm_record rpc session_id (Client.VM.get_by_uuid rpc session_id vmid) + with + e -> failwith ("Failed to find VM with id '"^vmid^"'") + end + else + (failwith ("Must select a VM using either 
vm-name or vm-id: params=" + ^(String.concat "," (List.map (fun (a,b) -> a^"="^b) params)))) let select_srs rpc session_id params ignore_params = - let do_filter params = - let srs = Client.SR.get_all_records_where rpc session_id "true" in - let all_recs = List.map (fun (sr,sr_r) -> let r = sr_record rpc session_id sr in r.setrefrec (sr,sr_r); r) srs in - - let filter_params = List.filter (fun (p,_) -> - let stem=List.hd (String.split ':' p) in not (List.mem stem (stdparams @ ignore_params))) params in - (* Filter all the records *) - List.fold_left filter_records_on_fields all_recs filter_params - in - (* try matching sr= first *) - if List.mem_assoc "sr" params - then - try [sr_record rpc session_id (Client.SR.get_by_uuid rpc session_id (List.assoc "sr" params))] - with _ -> do_filter (List.map (fun (k,v) -> if k="sr" then ("name-label",v) else (k,v)) params) - else - do_filter params + let do_filter params = + let srs = Client.SR.get_all_records_where rpc session_id "true" in + let all_recs = List.map (fun (sr,sr_r) -> let r = sr_record rpc session_id sr in r.setrefrec (sr,sr_r); r) srs in + + let filter_params = List.filter (fun (p,_) -> + let stem=List.hd (String.split ':' p) in not (List.mem stem (stdparams @ ignore_params))) params in + (* Filter all the records *) + List.fold_left filter_records_on_fields all_recs filter_params + in + (* try matching sr= first *) + if List.mem_assoc "sr" params + then + try [sr_record rpc session_id (Client.SR.get_by_uuid rpc session_id (List.assoc "sr" params))] + with _ -> do_filter (List.map (fun (k,v) -> if k="sr" then ("name-label",v) else (k,v)) params) + else + do_filter params exception Multiple_failure of (string * string) list let format_message msg = - Printf.sprintf "Message: time=%s priority=%Ld name='%s'" (Date.to_string msg.API.message_timestamp) - (msg.API.message_priority) (msg.API.message_name) + Printf.sprintf "Message: time=%s priority=%Ld name='%s'" (Date.to_string msg.API.message_timestamp) + 
(msg.API.message_priority) (msg.API.message_name) let wrap_op printer pri rpc session_id op e = - let now = (Unix.gettimeofday ()) in - let result = op e in - let msgs = try Client.Message.get ~rpc ~session_id ~cls:`VM ~obj_uuid:(safe_get_field (field_lookup e.fields "uuid")) ~since:(Date.of_float now) with _ -> [] in - List.iter (fun (ref,msg) -> - if msg.API.message_priority < pri - then printer (Cli_printer.PStderr (format_message msg ^ "\n"))) msgs; - result + let now = (Unix.gettimeofday ()) in + let result = op e in + let msgs = try Client.Message.get ~rpc ~session_id ~cls:`VM ~obj_uuid:(safe_get_field (field_lookup e.fields "uuid")) ~since:(Date.of_float now) with _ -> [] in + List.iter (fun (ref,msg) -> + if msg.API.message_priority < pri + then printer (Cli_printer.PStderr (format_message msg ^ "\n"))) msgs; + result let do_multiple op set = - let fails = ref [] in - let append_fail e msg = - let uuid = safe_get_field (field_lookup e.fields "uuid") in - fails := (uuid, msg) :: !fails - in - (* do every operations and record every failure *) - let ret = List.map (fun e -> - try - Some (op e); - with - | Api_errors.Server_error(code, params) as exn -> ( - match Cli_util.get_server_error code params with - | None -> append_fail e (ExnHelper.string_of_exn exn) - | Some (msg, ps) -> append_fail e (msg ^ "\n" ^ (String.concat "\n" ps)) - ); None - | exn -> append_fail e (ExnHelper.string_of_exn exn); None - ) set in - - let success = List.fold_left (fun acc e -> match e with None -> acc | Some x -> x :: acc) [] ret in - if !fails <> [] then raise (Multiple_failure (!fails)); - success + let fails = ref [] in + let append_fail e msg = + let uuid = safe_get_field (field_lookup e.fields "uuid") in + fails := (uuid, msg) :: !fails + in + (* do every operations and record every failure *) + let ret = List.map (fun e -> + try + Some (op e); + with + | Api_errors.Server_error(code, params) as exn -> ( + match Cli_util.get_server_error code params with + | None -> 
append_fail e (ExnHelper.string_of_exn exn) + | Some (msg, ps) -> append_fail e (msg ^ "\n" ^ (String.concat "\n" ps)) + ); None + | exn -> append_fail e (ExnHelper.string_of_exn exn); None + ) set in + + let success = List.fold_left (fun acc e -> match e with None -> acc | Some x -> x :: acc) [] ret in + if !fails <> [] then raise (Multiple_failure (!fails)); + success let do_vm_op ?(include_control_vms = false) ?(include_template_vms = false) - printer rpc session_id op params ?(multiple=true) ignore_params = - let msg_prio = try Int64.of_string (List.assoc "message-priority" params) with _ -> 5L in - let op = wrap_op printer msg_prio rpc session_id op in - try - let vms = select_vms ~include_control_vms ~include_template_vms rpc session_id params ignore_params in - match List.length vms with - | 0 -> failwith "No matching VMs found" - | 1 -> [ op (List.hd vms) ] - | _ -> - if multiple && get_bool_param params "multiple" then - do_multiple op vms - else - failwith - (if not multiple - then "Multiple matches VMs found. Operation can only be performed on one VM at a time" - else "Multiple matches VMs found. --multiple required to complete the operation") - with - | Records.CLI_failed_to_find_param name -> - failwith ("Parameter '"^name^"' is not a field of the VM class. Failed to select VM for operation.") + printer rpc session_id op params ?(multiple=true) ignore_params = + let msg_prio = try Int64.of_string (List.assoc "message-priority" params) with _ -> 5L in + let op = wrap_op printer msg_prio rpc session_id op in + try + let vms = select_vms ~include_control_vms ~include_template_vms rpc session_id params ignore_params in + match List.length vms with + | 0 -> failwith "No matching VMs found" + | 1 -> [ op (List.hd vms) ] + | _ -> + if multiple && get_bool_param params "multiple" then + do_multiple op vms + else + failwith + (if not multiple + then "Multiple matches VMs found. 
Operation can only be performed on one VM at a time" + else "Multiple matches VMs found. --multiple required to complete the operation") + with + | Records.CLI_failed_to_find_param name -> + failwith ("Parameter '"^name^"' is not a field of the VM class. Failed to select VM for operation.") let do_host_op rpc session_id op params ?(multiple=true) ignore_params = - let hosts = select_hosts rpc session_id params ignore_params in - match List.length hosts with - | 0 -> failwith "No matching hosts found" - | 1 -> [ op 1 (List.hd hosts) ] - | _ -> - if multiple && get_bool_param params "multiple" then - do_multiple (op (List.length hosts)) hosts - else - failwith - (if not multiple - then "Multiple matching hosts found. Operation can only be performed on one host at a time" - else "Multiple matching hosts found. --multiple required to complete the operation") + let hosts = select_hosts rpc session_id params ignore_params in + match List.length hosts with + | 0 -> failwith "No matching hosts found" + | 1 -> [ op 1 (List.hd hosts) ] + | _ -> + if multiple && get_bool_param params "multiple" then + do_multiple (op (List.length hosts)) hosts + else + failwith + (if not multiple + then "Multiple matching hosts found. Operation can only be performed on one host at a time" + else "Multiple matching hosts found. --multiple required to complete the operation") let do_sr_op rpc session_id op params ?(multiple=true) ignore_params = - let srs = select_srs rpc session_id params ignore_params in - match List.length srs with - | 0 -> failwith "No matching hosts found" - | 1 -> [ op (List.hd srs) ] - | _ -> - if multiple && get_bool_param params "multiple" then - do_multiple op srs - else - failwith - (if not multiple - then "Multiple matching SRs found. Operation can only be performed on one SR at a time" - else "Multiple matching SRs found. 
--multiple required to complete the operation") + let srs = select_srs rpc session_id params ignore_params in + match List.length srs with + | 0 -> failwith "No matching hosts found" + | 1 -> [ op (List.hd srs) ] + | _ -> + if multiple && get_bool_param params "multiple" then + do_multiple op srs + else + failwith + (if not multiple + then "Multiple matching SRs found. Operation can only be performed on one SR at a time" + else "Multiple matching SRs found. --multiple required to complete the operation") (* Execute f; if we get a no_hosts_available error then print a vm diagnostic table and reraise exception *) let hook_no_hosts_available printer rpc session_id vm f = - try - f () - with - (Api_errors.Server_error(code,params) as e) -> - if code=Api_errors.no_hosts_available then - begin - printer (Cli_printer.PList ["There are no suitable hosts to start this VM on."; - "The following table provides per-host reasons for why the VM could not be started:";""]); - print_vm_host_report printer rpc session_id vm; - end; - raise e + try + f () + with + (Api_errors.Server_error(code,params) as e) -> + if code=Api_errors.no_hosts_available then + begin + printer (Cli_printer.PList ["There are no suitable hosts to start this VM on."; + "The following table provides per-host reasons for why the VM could not be started:";""]); + print_vm_host_report printer rpc session_id vm; + end; + raise e let vm_compute_memory_overhead printer rpc session_id params = - ignore - (do_vm_op ~include_control_vms:true printer rpc session_id - (fun vm -> - let memory_overhead = Client.VM.compute_memory_overhead - rpc session_id (vm.getref ()) in - printer (Cli_printer.PMsg (Int64.to_string memory_overhead)) - ) - params [] - ) + ignore + (do_vm_op ~include_control_vms:true printer rpc session_id + (fun vm -> + let memory_overhead = Client.VM.compute_memory_overhead + rpc session_id (vm.getref ()) in + printer (Cli_printer.PMsg (Int64.to_string memory_overhead)) + ) + params [] + ) let 
vm_memory_dynamic_range_set printer rpc session_id params = - let min = Record_util.bytes_of_string "min" (List.assoc "min" params) - and max = Record_util.bytes_of_string "max" (List.assoc "max" params) in - ignore - (do_vm_op ~include_control_vms:true ~include_template_vms:true - printer rpc session_id - (fun vm -> - Client.VM.set_memory_dynamic_range rpc session_id - (vm.getref ()) min max) - params ["min"; "max"]) + let min = Record_util.bytes_of_string "min" (List.assoc "min" params) + and max = Record_util.bytes_of_string "max" (List.assoc "max" params) in + ignore + (do_vm_op ~include_control_vms:true ~include_template_vms:true + printer rpc session_id + (fun vm -> + Client.VM.set_memory_dynamic_range rpc session_id + (vm.getref ()) min max) + params ["min"; "max"]) let vm_memory_static_range_set printer rpc session_id params = - let min = Record_util.bytes_of_string "min" (List.assoc "min" params) - and max = Record_util.bytes_of_string "max" (List.assoc "max" params) in - ignore - (do_vm_op ~include_control_vms:true ~include_template_vms:true - printer rpc session_id - (fun vm -> - Client.VM.set_memory_static_range rpc session_id - (vm.getref ()) min max) - params ["min"; "max"]) + let min = Record_util.bytes_of_string "min" (List.assoc "min" params) + and max = Record_util.bytes_of_string "max" (List.assoc "max" params) in + ignore + (do_vm_op ~include_control_vms:true ~include_template_vms:true + printer rpc session_id + (fun vm -> + Client.VM.set_memory_static_range rpc session_id + (vm.getref ()) min max) + params ["min"; "max"]) let vm_memory_limits_set printer rpc session_id params = - let extract key = - Record_util.bytes_of_string key (List.assoc key params) in - let static_min = extract "static-min" - and static_max = extract "static-max" - and dynamic_min = extract "dynamic-min" - and dynamic_max = extract "dynamic-max" in - ignore - (do_vm_op ~include_control_vms:true ~include_template_vms:true - printer rpc session_id - (fun vm -> - 
Client.VM.set_memory_limits rpc session_id (vm.getref ()) - static_min static_max dynamic_min dynamic_max) - params ["static-min"; "static-max"; "dynamic-min"; "dynamic-max"]) + let extract key = + Record_util.bytes_of_string key (List.assoc key params) in + let static_min = extract "static-min" + and static_max = extract "static-max" + and dynamic_min = extract "dynamic-min" + and dynamic_max = extract "dynamic-max" in + ignore + (do_vm_op ~include_control_vms:true ~include_template_vms:true + printer rpc session_id + (fun vm -> + Client.VM.set_memory_limits rpc session_id (vm.getref ()) + static_min static_max dynamic_min dynamic_max) + params ["static-min"; "static-max"; "dynamic-min"; "dynamic-max"]) let vm_memory_set printer rpc session_id params = - let value = Record_util.bytes_of_string "memory" (List.assoc "memory" params) in - ignore - (do_vm_op ~include_control_vms:true ~include_template_vms:true - printer rpc session_id - (fun vm -> - Client.VM.set_memory rpc session_id (vm.getref ()) value) - params ["memory"]) + let value = Record_util.bytes_of_string "memory" (List.assoc "memory" params) in + ignore + (do_vm_op ~include_control_vms:true ~include_template_vms:true + printer rpc session_id + (fun vm -> + Client.VM.set_memory rpc session_id (vm.getref ()) value) + params ["memory"]) let vm_memory_target_set printer rpc session_id params = - let target = Record_util.bytes_of_string "target" - (List.assoc "target" params) in - ignore (do_vm_op ~include_control_vms:true printer rpc session_id - (fun vm -> - Client.VM.set_memory_dynamic_range rpc session_id - (vm.getref ()) target target) params ["target"] - ) + let target = Record_util.bytes_of_string "target" + (List.assoc "target" params) in + ignore (do_vm_op ~include_control_vms:true printer rpc session_id + (fun vm -> + Client.VM.set_memory_dynamic_range rpc session_id + (vm.getref ()) target target) params ["target"] + ) let vm_memory_target_wait printer rpc session_id params = - ignore (do_vm_op 
~include_control_vms:true printer rpc session_id - (fun vm -> - let vm=vm.getref () in - Client.VM.wait_memory_target_live rpc session_id vm) params []) + ignore (do_vm_op ~include_control_vms:true printer rpc session_id + (fun vm -> + let vm=vm.getref () in + Client.VM.wait_memory_target_live rpc session_id vm) params []) let vm_call_plugin fd printer rpc session_id params = - let vm_uuid = List.assoc "vm-uuid" params in - let vm = Client.VM.get_by_uuid rpc session_id vm_uuid in - let plugin = List.assoc "plugin" params in - let fn = List.assoc "fn" params in - let args = read_map_params "args" params in - (* Syntax interpretation: args:key:file=filename equals args:key=filename_content *) - let convert ((k,v) as p) = - match String.split ~limit:2 ':' k with - | key :: "file" :: [] -> - begin - match get_client_file fd v with - | Some s -> (key, s) - | None -> - marshal fd (Command (PrintStderr (Printf.sprintf "Failed to read file %s\n" v))); - raise (ExitWithError 1) - end - | _ -> p in - let args = List.map convert args in - let result = Client.VM.call_plugin rpc session_id vm plugin fn args in - printer (Cli_printer.PList [ result ]) + let vm_uuid = List.assoc "vm-uuid" params in + let vm = Client.VM.get_by_uuid rpc session_id vm_uuid in + let plugin = List.assoc "plugin" params in + let fn = List.assoc "fn" params in + let args = read_map_params "args" params in + (* Syntax interpretation: args:key:file=filename equals args:key=filename_content *) + let convert ((k,v) as p) = + match String.split ~limit:2 ':' k with + | key :: "file" :: [] -> + begin + match get_client_file fd v with + | Some s -> (key, s) + | None -> + marshal fd (Command (PrintStderr (Printf.sprintf "Failed to read file %s\n" v))); + raise (ExitWithError 1) + end + | _ -> p in + let args = List.map convert args in + let result = Client.VM.call_plugin rpc session_id vm plugin fn args in + printer (Cli_printer.PList [ result ]) let data_source_to_kvs ds = - 
["name_label",ds.API.data_source_name_label; - "name_description",ds.API.data_source_name_description; - "enabled",string_of_bool ds.API.data_source_enabled; - "standard",string_of_bool ds.API.data_source_standard; - "min",string_of_float ds.API.data_source_min; - "max",string_of_float ds.API.data_source_max; - "units",ds.API.data_source_units; - ] + ["name_label",ds.API.data_source_name_label; + "name_description",ds.API.data_source_name_description; + "enabled",string_of_bool ds.API.data_source_enabled; + "standard",string_of_bool ds.API.data_source_standard; + "min",string_of_float ds.API.data_source_min; + "max",string_of_float ds.API.data_source_max; + "units",ds.API.data_source_units; + ] let vm_data_source_list printer rpc session_id params = - ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false - (fun vm -> - let vm=vm.getref () in - let dss =Client.VM.get_data_sources rpc session_id vm in - let output = List.map data_source_to_kvs dss in - printer (Cli_printer.PTable output)) params []) + ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false + (fun vm -> + let vm=vm.getref () in + let dss =Client.VM.get_data_sources rpc session_id vm in + let output = List.map data_source_to_kvs dss in + printer (Cli_printer.PTable output)) params []) let vm_data_source_record printer rpc session_id params = - ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false - (fun vm -> - let vm=vm.getref () in - let ds=List.assoc "data-source" params in - Client.VM.record_data_source rpc session_id vm ds) params ["data-source"]) + ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false + (fun vm -> + let vm=vm.getref () in + let ds=List.assoc "data-source" params in + Client.VM.record_data_source rpc session_id vm ds) params ["data-source"]) let vm_data_source_query printer rpc session_id params = - ignore(do_vm_op ~include_control_vms:true printer rpc session_id 
~multiple:false - (fun vm -> - let vm=vm.getref () in - let ds=List.assoc "data-source" params in - let value = Client.VM.query_data_source rpc session_id vm ds in - printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) + ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false + (fun vm -> + let vm=vm.getref () in + let ds=List.assoc "data-source" params in + let value = Client.VM.query_data_source rpc session_id vm ds in + printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) let vm_data_source_forget printer rpc session_id params = - ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false - (fun vm -> - let vm=vm.getref () in - let ds=List.assoc "data-source" params in - Client.VM.forget_data_source_archives rpc session_id vm ds) params ["data-source"]) + ignore(do_vm_op ~include_control_vms:true printer rpc session_id ~multiple:false + (fun vm -> + let vm=vm.getref () in + let ds=List.assoc "data-source" params in + Client.VM.forget_data_source_archives rpc session_id vm ds) params ["data-source"]) (* APIs to collect SR level RRDs *) let sr_data_source_list printer rpc session_id params = - ignore(do_sr_op rpc session_id ~multiple:false - (fun sr -> - let sr=sr.getref () in - let dss = Client.SR.get_data_sources rpc session_id sr in - let output = List.map data_source_to_kvs dss in - printer (Cli_printer.PTable output)) params []) + ignore(do_sr_op rpc session_id ~multiple:false + (fun sr -> + let sr=sr.getref () in + let dss = Client.SR.get_data_sources rpc session_id sr in + let output = List.map data_source_to_kvs dss in + printer (Cli_printer.PTable output)) params []) let sr_data_source_record printer rpc session_id params = - ignore(do_sr_op rpc session_id ~multiple:false - (fun sr -> - let sr=sr.getref () in - let ds=List.assoc "data-source" params in - Client.SR.record_data_source rpc session_id sr ds) params ["data-source"]) + ignore(do_sr_op rpc 
session_id ~multiple:false + (fun sr -> + let sr=sr.getref () in + let ds=List.assoc "data-source" params in + Client.SR.record_data_source rpc session_id sr ds) params ["data-source"]) let sr_data_source_query printer rpc session_id params = - ignore(do_sr_op rpc session_id ~multiple:false - (fun sr -> - let sr=sr.getref () in - let ds=List.assoc "data-source" params in - let value = Client.SR.query_data_source rpc session_id sr ds in - printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) + ignore(do_sr_op rpc session_id ~multiple:false + (fun sr -> + let sr=sr.getref () in + let ds=List.assoc "data-source" params in + let value = Client.SR.query_data_source rpc session_id sr ds in + printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) let sr_data_source_forget printer rpc session_id params = - ignore(do_sr_op rpc session_id ~multiple:false - (fun sr -> - let sr=sr.getref () in - let ds=List.assoc "data-source" params in - Client.SR.forget_data_source_archives rpc session_id sr ds) params ["data-source"]) + ignore(do_sr_op rpc session_id ~multiple:false + (fun sr -> + let sr=sr.getref () in + let ds=List.assoc "data-source" params in + Client.SR.forget_data_source_archives rpc session_id sr ds) params ["data-source"]) let host_data_source_list printer rpc session_id params = - ignore(do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host=host.getref () in - let dss =Client.Host.get_data_sources rpc session_id host in - let output = List.map data_source_to_kvs dss in - printer (Cli_printer.PTable output)) params []) + ignore(do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host=host.getref () in + let dss =Client.Host.get_data_sources rpc session_id host in + let output = List.map data_source_to_kvs dss in + printer (Cli_printer.PTable output)) params []) let host_data_source_record printer rpc session_id params = - ignore(do_host_op rpc session_id ~multiple:false - (fun _ host 
-> - let host=host.getref () in - let ds=List.assoc "data-source" params in - Client.Host.record_data_source rpc session_id host ds) params ["data-source"]) + ignore(do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host=host.getref () in + let ds=List.assoc "data-source" params in + Client.Host.record_data_source rpc session_id host ds) params ["data-source"]) let host_data_source_query printer rpc session_id params = - ignore(do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host=host.getref () in - let ds=List.assoc "data-source" params in - let value = Client.Host.query_data_source rpc session_id host ds in - printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) + ignore(do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host=host.getref () in + let ds=List.assoc "data-source" params in + let value = Client.Host.query_data_source rpc session_id host ds in + printer (Cli_printer.PList [Printf.sprintf "%f" value])) params ["data-source"]) let host_data_source_forget printer rpc session_id params = - ignore(do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host=host.getref () in - let ds=List.assoc "data-source" params in - Client.Host.forget_data_source_archives rpc session_id host ds) params ["data-source"]) + ignore(do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host=host.getref () in + let ds=List.assoc "data-source" params in + Client.Host.forget_data_source_archives rpc session_id host ds) params ["data-source"]) let host_compute_free_memory printer rpc session_id params = - ignore (do_host_op rpc session_id ~multiple:false ( - fun _ host -> - let host = host.getref () in - let free_memory = Client.Host.compute_free_memory rpc session_id host in - printer (Cli_printer.PMsg (Int64.to_string free_memory)) - ) params []) + ignore (do_host_op rpc session_id ~multiple:false ( + fun _ host -> + let host = host.getref () in + let free_memory = 
Client.Host.compute_free_memory rpc session_id host in + printer (Cli_printer.PMsg (Int64.to_string free_memory)) + ) params []) let host_compute_memory_overhead printer rpc session_id params = - ignore - (do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host = host.getref () in - let memory_overhead = Client.Host.compute_memory_overhead - rpc session_id host in - printer (Cli_printer.PMsg (Int64.to_string memory_overhead)) - ) - params [] - ) + ignore + (do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host = host.getref () in + let memory_overhead = Client.Host.compute_memory_overhead + rpc session_id host in + printer (Cli_printer.PMsg (Int64.to_string memory_overhead)) + ) + params [] + ) let host_get_server_certificate printer rpc session_id params = - ignore (do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host = host.getref () in - printer - (Cli_printer.PMsg - (Client.Host.get_server_certificate rpc session_id host))) - params []) + ignore (do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host = host.getref () in + printer + (Cli_printer.PMsg + (Client.Host.get_server_certificate rpc session_id host))) + params []) let host_get_sm_diagnostics printer rpc session_id params = - ignore (do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host = host.getref () in - printer - (Cli_printer.PMsg - (Client.Host.get_sm_diagnostics rpc session_id host))) - params []) + ignore (do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host = host.getref () in + printer + (Cli_printer.PMsg + (Client.Host.get_sm_diagnostics rpc session_id host))) + params []) let host_get_thread_diagnostics printer rpc session_id params = - ignore (do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host = host.getref () in - printer - (Cli_printer.PMsg - (Client.Host.get_thread_diagnostics rpc session_id host))) - params []) + ignore (do_host_op rpc session_id ~multiple:false + (fun _ host 
-> + let host = host.getref () in + printer + (Cli_printer.PMsg + (Client.Host.get_thread_diagnostics rpc session_id host))) + params []) let host_sm_dp_destroy printer rpc session_id params = - let dp = List.assoc "dp" params in - let allow_leak = get_bool_param params "allow-leak" in - ignore (do_host_op rpc session_id ~multiple:false - (fun _ host -> - let host = host.getref () in - Client.Host.sm_dp_destroy rpc session_id host dp allow_leak) - params ["dp"; "allow-leak"]) + let dp = List.assoc "dp" params in + let allow_leak = get_bool_param params "allow-leak" in + ignore (do_host_op rpc session_id ~multiple:false + (fun _ host -> + let host = host.getref () in + Client.Host.sm_dp_destroy rpc session_id host dp allow_leak) + params ["dp"; "allow-leak"]) let vm_memory_shadow_multiplier_set printer rpc session_id params = - let multiplier = (try float_of_string (List.assoc "multiplier" params) with _ -> failwith "Failed to parse parameter 'multiplier': expecting a float") in - let (_: unit list) = do_vm_op printer rpc session_id - (fun vm -> - let vm = vm.getref () in - Client.VM.set_shadow_multiplier_live rpc session_id vm multiplier) params ["multiplier"] in - () + let multiplier = (try float_of_string (List.assoc "multiplier" params) with _ -> failwith "Failed to parse parameter 'multiplier': expecting a float") in + let (_: unit list) = do_vm_op printer rpc session_id + (fun vm -> + let vm = vm.getref () in + Client.VM.set_shadow_multiplier_live rpc session_id vm multiplier) params ["multiplier"] in + () let vm_query_services printer rpc session_id params = - ignore(do_vm_op printer rpc session_id - (fun vm -> - let vm=vm.getref () in - let record = Client.VM.query_services rpc session_id vm in - printer (Cli_printer.PTable [ ("Type", "Name") :: record ]) - ) params []) + ignore(do_vm_op printer rpc session_id + (fun vm -> + let vm=vm.getref () in + let record = Client.VM.query_services rpc session_id vm in + printer (Cli_printer.PTable [ ("Type", "Name") :: 
record ]) + ) params []) let vm_start printer rpc session_id params = - let force = get_bool_param params "force" in - let paused = get_bool_param params "paused" in - ignore(do_vm_op ~include_control_vms:true printer rpc session_id - (fun vm -> - let vm=vm.getref () in - let task = - if List.mem_assoc "on" params - then - let host = get_host_by_name_or_id rpc session_id (List.assoc "on" params) in - Client.Async.VM.start_on rpc session_id vm (host.getref()) paused force - else - Client.Async.VM.start rpc session_id vm paused force in - hook_no_hosts_available printer rpc session_id vm - (fun () -> - waiter printer rpc session_id params task - ) - ) params ["on"; "paused"; "progress"]) + let force = get_bool_param params "force" in + let paused = get_bool_param params "paused" in + ignore(do_vm_op ~include_control_vms:true printer rpc session_id + (fun vm -> + let vm=vm.getref () in + let task = + if List.mem_assoc "on" params + then + let host = get_host_by_name_or_id rpc session_id (List.assoc "on" params) in + Client.Async.VM.start_on rpc session_id vm (host.getref()) paused force + else + Client.Async.VM.start rpc session_id vm paused force in + hook_no_hosts_available printer rpc session_id vm + (fun () -> + waiter printer rpc session_id params task + ) + ) params ["on"; "paused"; "progress"]) let vm_suspend printer rpc session_id params = - ignore(do_vm_op printer rpc session_id (fun vm -> - let task = Client.Async.VM.suspend rpc session_id (vm.getref ()) in - waiter printer rpc session_id params task - ) params ["progress"]) + ignore(do_vm_op printer rpc session_id (fun vm -> + let task = Client.Async.VM.suspend rpc session_id (vm.getref ()) in + waiter printer rpc session_id params task + ) params ["progress"]) let vm_resume printer rpc session_id params = - let force = get_bool_param params "force" in - ignore(do_vm_op printer rpc session_id - (fun vm -> - if List.mem_assoc "on" params then - let host = get_host_by_name_or_id rpc session_id (List.assoc 
"on" params) in - let task = Client.Async.VM.resume_on rpc session_id (vm.getref()) (host.getref()) false force in - waiter printer rpc session_id params task - else - let vm=vm.getref() in - hook_no_hosts_available printer rpc session_id vm - (fun ()-> - let task = Client.Async.VM.resume rpc session_id vm false force in - waiter printer rpc session_id params task - ) - ) params ["on"; "progress"]) + let force = get_bool_param params "force" in + ignore(do_vm_op printer rpc session_id + (fun vm -> + if List.mem_assoc "on" params then + let host = get_host_by_name_or_id rpc session_id (List.assoc "on" params) in + let task = Client.Async.VM.resume_on rpc session_id (vm.getref()) (host.getref()) false force in + waiter printer rpc session_id params task + else + let vm=vm.getref() in + hook_no_hosts_available printer rpc session_id vm + (fun ()-> + let task = Client.Async.VM.resume rpc session_id vm false force in + waiter printer rpc session_id params task + ) + ) params ["on"; "progress"]) let vm_pause printer rpc session_id params = - ignore(do_vm_op printer rpc session_id (fun vm -> Client.VM.pause rpc session_id (vm.getref ())) params []) + ignore(do_vm_op printer rpc session_id (fun vm -> Client.VM.pause rpc session_id (vm.getref ())) params []) let vm_unpause printer rpc session_id params = - ignore(do_vm_op printer rpc session_id (fun vm -> Client.VM.unpause rpc session_id (vm.getref ())) params []) + ignore(do_vm_op printer rpc session_id (fun vm -> Client.VM.unpause rpc session_id (vm.getref ())) params []) (* A helper function for VM install *) let is_recommended recommendations_xml fieldname = - let rec seek_recommendation i = - if Xmlm.eoi i then false - else match Xmlm.input i with - | `El_start ((ns, tag), attrs) - when tag = "restriction" && (List.mem ((ns, "field"), fieldname) attrs) - -> List.mem ((ns, "value"), "true") attrs - | _ -> seek_recommendation i - in - let i = Xmlm.make_input (`String (0, recommendations_xml)) in - try - 
seek_recommendation i - with Xmlm.Error ((line, col), err) -> - debug "Invalid VM.recommendations xml at line %d, column %d: %s" line col (Xmlm.error_message err); - false + let rec seek_recommendation i = + if Xmlm.eoi i then false + else match Xmlm.input i with + | `El_start ((ns, tag), attrs) + when tag = "restriction" && (List.mem ((ns, "field"), fieldname) attrs) + -> List.mem ((ns, "value"), "true") attrs + | _ -> seek_recommendation i + in + let i = Xmlm.make_input (`String (0, recommendations_xml)) in + try + seek_recommendation i + with Xmlm.Error ((line, col), err) -> + debug "Invalid VM.recommendations xml at line %d, column %d: %s" line col (Xmlm.error_message err); + false let vm_install_real printer rpc session_id template name description params = - let sr_ref = - if Client.VM.get_is_a_snapshot rpc session_id template then - if false - || (List.mem_assoc "sr-name-label" params - || List.mem_assoc "sr-uuid" params) then - failwith "Do not use the sr-name-label or sr-uuid argument when installing from a snapshot. By default, it will install each new disk on the same SR as the corresponding snapshot disks." 
- else Some Ref.null - else None in - - let sr_ref = match sr_ref with - | Some _ -> sr_ref - | None -> - if List.mem_assoc "sr-uuid" params then - let uuid = List.assoc "sr-uuid" params in - Some (Client.SR.get_by_uuid rpc session_id uuid) - else None in - - let sr_ref = - if List.mem_assoc "sr-name-label" params then - let name = List.assoc "sr-name-label" params in - match Client.SR.get_by_name_label rpc session_id name with - | [] -> failwith "No SR with that name-label found" - | sr_list -> match sr_ref with - | Some sr -> - if List.mem sr sr_list then sr_ref - else failwith "SR specified via sr-uuid doesn't have the name specified via sr-name-label" - | None -> - if List.length sr_list > 1 then - failwith "Multiple SRs with that name-label found" - else Some (List.hd sr_list) - else sr_ref in - - let suspend_sr_ref = match sr_ref with - | Some sr -> - let ref_is_valid = Server_helpers.exec_with_new_task - ~session_id "Checking suspend_SR validity" - (fun __context -> Db.is_valid_ref __context sr) - in - if ref_is_valid then - (* sr-uuid and/or sr-name-label was specified - use this as the suspend_SR *) - sr - else - (* Template is a snapshot - copy the suspend_SR from the template *) - Client.VM.get_suspend_SR rpc session_id template - | None -> - (* Not a snapshot and no sr-uuid or sr-name-label specified - copy the suspend_SR from the template *) - Client.VM.get_suspend_SR rpc session_id template in - - (* It's fine that we still don't have a SR information till this step, we'll do - a VM.clone instead of VM.copy. However we need to figure out sr_uuid for - provisioning disks if any. 
*) - let sr_uuid = match sr_ref with - | Some r when r <> Ref.null -> Client.SR.get_uuid rpc session_id r - | _ -> - match get_default_sr_uuid rpc session_id with - | Some uuid -> uuid - | None -> - match Xapi_templates.get_template_record rpc session_id template - with - | None | Some {Xapi_templates.disks = []} -> Ref.string_of Ref.null - | _ -> failwith "Failed to find a valid default SR for the \ -Pool. Please provide an sr-name-label or sr-uuid parameter." in - - let new_vm = - match sr_ref with - | Some r when r <> Ref.null -> - Client.VM.copy rpc session_id template name r - | _ -> - Client.VM.clone rpc session_id template name in - - try - Client.VM.set_name_description rpc session_id new_vm description; - Client.VM.set_suspend_SR rpc session_id new_vm suspend_sr_ref; - rewrite_provisioning_xml rpc session_id new_vm sr_uuid; - let recommendations = Client.VM.get_recommendations rpc session_id template in - let licerr = Api_errors.Server_error(Api_errors.license_restriction, [Features.name_of_feature Features.PCI_device_for_auto_update]) in - let pool = List.hd (Client.Pool.get_all rpc session_id) in - let policy_vendor_device_is_ok = not (Client.Pool.get_policy_no_vendor_device rpc session_id pool) in - let want_dev = (is_recommended recommendations "has-vendor-device") && policy_vendor_device_is_ok in - ( - try Client.VM.set_has_vendor_device rpc session_id new_vm want_dev - with e when e = licerr -> - let msg = Printf.sprintf "Note: the VM template recommends setting has-vendor-device=true (to provide the option of obtaining PV drivers through Windows Update), but a suitable licence has not been deployed for this host. Ignoring this recommendation and continuing with installation of VM %S..." 
- (Client.VM.get_name_label rpc session_id new_vm) in - warn "%s" msg; - Cli_printer.PStderr (msg^"\n") |> printer - ); - Client.VM.provision rpc session_id new_vm; - (* Client.VM.start rpc session_id new_vm false true; *) (* stop install starting VMs *) - - (* copy BIOS strings if needed *) - if List.mem_assoc "copy-bios-strings-from" params then begin - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "copy-bios-strings-from" params) in - Client.VM.copy_bios_strings rpc session_id new_vm host - end; - let vm_uuid = Client.VM.get_uuid rpc session_id new_vm in - printer (Cli_printer.PList [vm_uuid]) - with e -> - (try Client.VM.destroy rpc session_id new_vm with _ -> ()); - raise e + let sr_ref = + if Client.VM.get_is_a_snapshot rpc session_id template then + if false + || (List.mem_assoc "sr-name-label" params + || List.mem_assoc "sr-uuid" params) then + failwith "Do not use the sr-name-label or sr-uuid argument when installing from a snapshot. By default, it will install each new disk on the same SR as the corresponding snapshot disks." 
+ else Some Ref.null + else None in + + let sr_ref = match sr_ref with + | Some _ -> sr_ref + | None -> + if List.mem_assoc "sr-uuid" params then + let uuid = List.assoc "sr-uuid" params in + Some (Client.SR.get_by_uuid rpc session_id uuid) + else None in + + let sr_ref = + if List.mem_assoc "sr-name-label" params then + let name = List.assoc "sr-name-label" params in + match Client.SR.get_by_name_label rpc session_id name with + | [] -> failwith "No SR with that name-label found" + | sr_list -> match sr_ref with + | Some sr -> + if List.mem sr sr_list then sr_ref + else failwith "SR specified via sr-uuid doesn't have the name specified via sr-name-label" + | None -> + if List.length sr_list > 1 then + failwith "Multiple SRs with that name-label found" + else Some (List.hd sr_list) + else sr_ref in + + let suspend_sr_ref = match sr_ref with + | Some sr -> + let ref_is_valid = Server_helpers.exec_with_new_task + ~session_id "Checking suspend_SR validity" + (fun __context -> Db.is_valid_ref __context sr) + in + if ref_is_valid then + (* sr-uuid and/or sr-name-label was specified - use this as the suspend_SR *) + sr + else + (* Template is a snapshot - copy the suspend_SR from the template *) + Client.VM.get_suspend_SR rpc session_id template + | None -> + (* Not a snapshot and no sr-uuid or sr-name-label specified - copy the suspend_SR from the template *) + Client.VM.get_suspend_SR rpc session_id template in + + (* It's fine that we still don't have a SR information till this step, we'll do + a VM.clone instead of VM.copy. However we need to figure out sr_uuid for + provisioning disks if any. 
*) + let sr_uuid = match sr_ref with + | Some r when r <> Ref.null -> Client.SR.get_uuid rpc session_id r + | _ -> + match get_default_sr_uuid rpc session_id with + | Some uuid -> uuid + | None -> + match Xapi_templates.get_template_record rpc session_id template + with + | None | Some {Xapi_templates.disks = []} -> Ref.string_of Ref.null + | _ -> failwith "Failed to find a valid default SR for the \ + Pool. Please provide an sr-name-label or sr-uuid parameter." in + + let new_vm = + match sr_ref with + | Some r when r <> Ref.null -> + Client.VM.copy rpc session_id template name r + | _ -> + Client.VM.clone rpc session_id template name in + + try + Client.VM.set_name_description rpc session_id new_vm description; + Client.VM.set_suspend_SR rpc session_id new_vm suspend_sr_ref; + rewrite_provisioning_xml rpc session_id new_vm sr_uuid; + let recommendations = Client.VM.get_recommendations rpc session_id template in + let licerr = Api_errors.Server_error(Api_errors.license_restriction, [Features.name_of_feature Features.PCI_device_for_auto_update]) in + let pool = List.hd (Client.Pool.get_all rpc session_id) in + let policy_vendor_device_is_ok = not (Client.Pool.get_policy_no_vendor_device rpc session_id pool) in + let want_dev = (is_recommended recommendations "has-vendor-device") && policy_vendor_device_is_ok in + ( + try Client.VM.set_has_vendor_device rpc session_id new_vm want_dev + with e when e = licerr -> + let msg = Printf.sprintf "Note: the VM template recommends setting has-vendor-device=true (to provide the option of obtaining PV drivers through Windows Update), but a suitable licence has not been deployed for this host. Ignoring this recommendation and continuing with installation of VM %S..." 
+ (Client.VM.get_name_label rpc session_id new_vm) in + warn "%s" msg; + Cli_printer.PStderr (msg^"\n") |> printer + ); + Client.VM.provision rpc session_id new_vm; + (* Client.VM.start rpc session_id new_vm false true; *) (* stop install starting VMs *) + + (* copy BIOS strings if needed *) + if List.mem_assoc "copy-bios-strings-from" params then begin + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "copy-bios-strings-from" params) in + Client.VM.copy_bios_strings rpc session_id new_vm host + end; + let vm_uuid = Client.VM.get_uuid rpc session_id new_vm in + printer (Cli_printer.PList [vm_uuid]) + with e -> + (try Client.VM.destroy rpc session_id new_vm with _ -> ()); + raise e (* The process of finding the VM in this case is special-cased since we want to call the * params 'template-name', like a foreign key, sort of *) let vm_install printer rpc session_id params = - (* Filter on everything on the cmd line except params=... *) - let template = - if List.mem_assoc "template-uuid" params - then - try - Client.VM.get_by_uuid rpc session_id (List.assoc "template-uuid" params) - with _ -> failwith "Cannot find template" - else - begin - let filter_params = [("is-a-template", "true"); ("is-control-domain", "false")] in - let vms = Client.VM.get_all_records_where rpc session_id "true" in - let all_recs = List.map (fun (vm,vm_r) -> let r = vm_record rpc session_id vm in r.setrefrec (vm,vm_r); r) vms in - let find_by_name name = - let templates = List.fold_left filter_records_on_fields all_recs (("name-label",name)::filter_params) in - match List.length templates with - 0 -> failwith "No templates matched" - | 1 -> (List.hd templates).getref () - | _ -> failwith "More than one matching template found" - in - - if (List.mem_assoc "template-name-label" params) || (List.mem_assoc "template-name" params) - then - let template_name = - if List.mem_assoc "template-name-label" params - then (List.assoc "template-name-label" params) - else (List.assoc 
"template-name" params) in - find_by_name template_name - else if List.mem_assoc "template" params - then - try - Client.VM.get_by_uuid rpc session_id (List.assoc "template" params) - with _ -> - find_by_name (List.assoc "template" params) - else - failwith "Template must be specified by parameter 'template-uuid', 'template-name', 'template-name-label' or 'template'" - end - in - - if not (Client.VM.get_is_a_template rpc session_id template) then failwith "Can only install from templates"; - let new_name = List.assoc "new-name-label" params in - let new_description = "Installed via xe CLI" in (* Client.VM.get_name_description rpc session_id template in *) - vm_install_real printer rpc session_id template new_name new_description params + (* Filter on everything on the cmd line except params=... *) + let template = + if List.mem_assoc "template-uuid" params + then + try + Client.VM.get_by_uuid rpc session_id (List.assoc "template-uuid" params) + with _ -> failwith "Cannot find template" + else + begin + let filter_params = [("is-a-template", "true"); ("is-control-domain", "false")] in + let vms = Client.VM.get_all_records_where rpc session_id "true" in + let all_recs = List.map (fun (vm,vm_r) -> let r = vm_record rpc session_id vm in r.setrefrec (vm,vm_r); r) vms in + let find_by_name name = + let templates = List.fold_left filter_records_on_fields all_recs (("name-label",name)::filter_params) in + match List.length templates with + 0 -> failwith "No templates matched" + | 1 -> (List.hd templates).getref () + | _ -> failwith "More than one matching template found" + in + + if (List.mem_assoc "template-name-label" params) || (List.mem_assoc "template-name" params) + then + let template_name = + if List.mem_assoc "template-name-label" params + then (List.assoc "template-name-label" params) + else (List.assoc "template-name" params) in + find_by_name template_name + else if List.mem_assoc "template" params + then + try + Client.VM.get_by_uuid rpc session_id (List.assoc 
"template" params) + with _ -> + find_by_name (List.assoc "template" params) + else + failwith "Template must be specified by parameter 'template-uuid', 'template-name', 'template-name-label' or 'template'" + end + in + + if not (Client.VM.get_is_a_template rpc session_id template) then failwith "Can only install from templates"; + let new_name = List.assoc "new-name-label" params in + let new_description = "Installed via xe CLI" in (* Client.VM.get_name_description rpc session_id template in *) + vm_install_real printer rpc session_id template new_name new_description params let console fd printer rpc session_id params = - let c = match select_vms ~include_control_vms:true rpc session_id params [] with - | [ vm_r ] -> - let vm = vm_r.getref () in - let cs = Client.VM.get_consoles rpc session_id vm in - begin - try - List.find (fun c -> Client.Console.get_protocol rpc session_id c = `vt100) cs - with Not_found -> - marshal fd (Command (PrintStderr "No text console available\n")); - raise (ExitWithError 1) - end - | [] -> - marshal fd (Command (PrintStderr "No VM found\n")); - raise (ExitWithError 1) - | _ :: _ -> - marshal fd (Command (PrintStderr "Multiple VMs found: please narrow your request to one VM.\n")); - raise (ExitWithError 1) - in - let vm = Client.Console.get_VM rpc session_id c in - let vm_name_label = Client.VM.get_name_label rpc session_id vm in - marshal fd (Command (Print (Printf.sprintf "Connecting to console on VM %s. Press Ctrl + ']' to quit." 
vm_name_label))); - let l = Client.Console.get_location rpc session_id c in - let uri = Printf.sprintf "%s&session_id=%s" l (Ref.string_of session_id) in - marshal fd (Command (HttpConnect uri)); - let response = ref (Response Wait) in - while !response = Response Wait do response := unmarshal fd done; - match !response with - | Response OK -> () - | _ -> - failwith "Failure" + let c = match select_vms ~include_control_vms:true rpc session_id params [] with + | [ vm_r ] -> + let vm = vm_r.getref () in + let cs = Client.VM.get_consoles rpc session_id vm in + begin + try + List.find (fun c -> Client.Console.get_protocol rpc session_id c = `vt100) cs + with Not_found -> + marshal fd (Command (PrintStderr "No text console available\n")); + raise (ExitWithError 1) + end + | [] -> + marshal fd (Command (PrintStderr "No VM found\n")); + raise (ExitWithError 1) + | _ :: _ -> + marshal fd (Command (PrintStderr "Multiple VMs found: please narrow your request to one VM.\n")); + raise (ExitWithError 1) + in + let vm = Client.Console.get_VM rpc session_id c in + let vm_name_label = Client.VM.get_name_label rpc session_id vm in + marshal fd (Command (Print (Printf.sprintf "Connecting to console on VM %s. Press Ctrl + ']' to quit." 
vm_name_label))); + let l = Client.Console.get_location rpc session_id c in + let uri = Printf.sprintf "%s&session_id=%s" l (Ref.string_of session_id) in + marshal fd (Command (HttpConnect uri)); + let response = ref (Response Wait) in + while !response = Response Wait do response := unmarshal fd done; + match !response with + | Response OK -> () + | _ -> + failwith "Failure" let vm_uninstall_common fd printer rpc session_id params vms = - let toremove = ref [] in - let toprint = ref [] in - (* Destroy the disks too *) - let choose_objects_to_delete vm = - let vbds=Client.VM.get_VBDs rpc session_id vm in - - let string_of_vdi vdi = - (* add extra text if the VDI is being shared *) - let r = Client.VDI.get_record rpc session_id vdi in - Printf.sprintf "VDI: %s (%s) %s" r.API.vDI_uuid r.API.vDI_name_label - (if List.length r.API.vDI_VBDs <= 1 then "" else " ** WARNING: disk is shared by other VMs") in - let string_of_vm vm = - let r = Client.VM.get_record rpc session_id vm in - Printf.sprintf "VM : %s (%s)" r.API.vM_uuid r.API.vM_name_label in - - (* NB If a VDI is deleted then the VBD may be GCed at any time. 
*) - let vdis = List.concat (List.map - (fun vbd -> - try - (* We only destroy VDIs where VBD.other_config contains 'owner' *) - let other_config = Client.VBD.get_other_config rpc session_id vbd in - let vdi = Client.VBD.get_VDI rpc session_id vbd in - (* Double-check the VDI actually exists *) - ignore(Client.VDI.get_uuid rpc session_id vdi); - if List.mem_assoc Xapi_globs.owner_key other_config - then [ vdi ] else [ ] - with _ -> []) vbds) in - let suspend_VDI = - try - let vdi = Client.VM.get_suspend_VDI rpc session_id vm in - ignore (Client.VDI.get_uuid rpc session_id vdi); - vdi - with _ -> Ref.null in - let output = string_of_vm vm :: (List.map string_of_vdi vdis) @ (if suspend_VDI = Ref.null then [] else [string_of_vdi suspend_VDI]) in - toprint := !toprint @ output; - let destroy () = - if Client.VM.get_power_state rpc session_id vm <> `Halted then Client.VM.hard_shutdown rpc session_id vm; - Client.VM.destroy rpc session_id vm; - List.iter (fun vdi -> Client.VDI.destroy rpc session_id vdi) vdis; - if suspend_VDI <> Ref.null then try Client.VDI.destroy rpc session_id suspend_VDI with _ -> () - in - toremove := !toremove @ [destroy]; - in - List.iter choose_objects_to_delete vms; - marshal fd (Command (Print "The following items are about to be destroyed")); - List.iter (fun s -> marshal fd (Command (Print s))) !toprint; - if get_bool_param params "force" then - (List.iter (fun f -> f ()) !toremove; marshal fd (Command (Print "All objects destroyed"))) - else - begin - if user_says_yes fd - then (List.iter (fun f -> f ()) !toremove; marshal fd (Command (Print "All objects destroyed"))) - end + let toremove = ref [] in + let toprint = ref [] in + (* Destroy the disks too *) + let choose_objects_to_delete vm = + let vbds=Client.VM.get_VBDs rpc session_id vm in + + let string_of_vdi vdi = + (* add extra text if the VDI is being shared *) + let r = Client.VDI.get_record rpc session_id vdi in + Printf.sprintf "VDI: %s (%s) %s" r.API.vDI_uuid r.API.vDI_name_label 
+ (if List.length r.API.vDI_VBDs <= 1 then "" else " ** WARNING: disk is shared by other VMs") in + let string_of_vm vm = + let r = Client.VM.get_record rpc session_id vm in + Printf.sprintf "VM : %s (%s)" r.API.vM_uuid r.API.vM_name_label in + + (* NB If a VDI is deleted then the VBD may be GCed at any time. *) + let vdis = List.concat (List.map + (fun vbd -> + try + (* We only destroy VDIs where VBD.other_config contains 'owner' *) + let other_config = Client.VBD.get_other_config rpc session_id vbd in + let vdi = Client.VBD.get_VDI rpc session_id vbd in + (* Double-check the VDI actually exists *) + ignore(Client.VDI.get_uuid rpc session_id vdi); + if List.mem_assoc Xapi_globs.owner_key other_config + then [ vdi ] else [ ] + with _ -> []) vbds) in + let suspend_VDI = + try + let vdi = Client.VM.get_suspend_VDI rpc session_id vm in + ignore (Client.VDI.get_uuid rpc session_id vdi); + vdi + with _ -> Ref.null in + let output = string_of_vm vm :: (List.map string_of_vdi vdis) @ (if suspend_VDI = Ref.null then [] else [string_of_vdi suspend_VDI]) in + toprint := !toprint @ output; + let destroy () = + if Client.VM.get_power_state rpc session_id vm <> `Halted then Client.VM.hard_shutdown rpc session_id vm; + Client.VM.destroy rpc session_id vm; + List.iter (fun vdi -> Client.VDI.destroy rpc session_id vdi) vdis; + if suspend_VDI <> Ref.null then try Client.VDI.destroy rpc session_id suspend_VDI with _ -> () + in + toremove := !toremove @ [destroy]; + in + List.iter choose_objects_to_delete vms; + marshal fd (Command (Print "The following items are about to be destroyed")); + List.iter (fun s -> marshal fd (Command (Print s))) !toprint; + if get_bool_param params "force" then + (List.iter (fun f -> f ()) !toremove; marshal fd (Command (Print "All objects destroyed"))) + else + begin + if user_says_yes fd + then (List.iter (fun f -> f ()) !toremove; marshal fd (Command (Print "All objects destroyed"))) + end let vm_uninstall fd printer rpc session_id params = - let vms 
= do_vm_op printer rpc session_id (fun vm -> vm.getref()) params [] in - let snapshots = List.flatten (List.map (fun vm -> Client.VM.get_snapshots rpc session_id vm) vms) in - vm_uninstall_common fd printer rpc session_id params (vms @ snapshots) + let vms = do_vm_op printer rpc session_id (fun vm -> vm.getref()) params [] in + let snapshots = List.flatten (List.map (fun vm -> Client.VM.get_snapshots rpc session_id vm) vms) in + vm_uninstall_common fd printer rpc session_id params (vms @ snapshots) let template_uninstall fd printer rpc session_id params = - let uuid = List.assoc "template-uuid" params in - let vm = Client.VM.get_by_uuid rpc session_id uuid in - vm_uninstall_common fd printer rpc session_id params [ vm ] + let uuid = List.assoc "template-uuid" params in + let vm = Client.VM.get_by_uuid rpc session_id uuid in + vm_uninstall_common fd printer rpc session_id params [ vm ] let vm_clone_aux clone_op cloned_string printer include_template_vms rpc session_id params = - let new_name = List.assoc "new-name-label" params in - let desc = try Some (List.assoc "new-name-description" params) with _ -> None in - let new_vms = do_vm_op printer ~include_template_vms rpc session_id - (fun vm -> clone_op ~rpc ~session_id ~vm: (vm.getref()) ~new_name) params ["new-name-label"; "new-name-description"] in - ignore (may (fun desc -> Client.VM.set_name_description rpc session_id (List.hd new_vms) desc) desc); - printer (Cli_printer.PList (List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) new_vms)) + let new_name = List.assoc "new-name-label" params in + let desc = try Some (List.assoc "new-name-description" params) with _ -> None in + let new_vms = do_vm_op printer ~include_template_vms rpc session_id + (fun vm -> clone_op ~rpc ~session_id ~vm: (vm.getref()) ~new_name) params ["new-name-label"; "new-name-description"] in + ignore (may (fun desc -> Client.VM.set_name_description rpc session_id (List.hd new_vms) desc) desc); + printer (Cli_printer.PList (List.map 
(fun vm -> Client.VM.get_uuid rpc session_id vm) new_vms)) let vm_clone printer = vm_clone_aux Client.VM.clone "Cloned " printer true let vm_snapshot printer = vm_clone_aux Client.VM.snapshot "Snapshotted " printer false @@ -2602,1385 +2602,1385 @@ let vm_snapshot_with_quiesce printer = vm_clone_aux Client.VM.snapshot_with_quie let vm_checkpoint printer = vm_clone_aux Client.VM.checkpoint "Checkpointed " printer false let get_snapshot_uuid params = - if List.mem_assoc "snapshot-uuid" params - then List.assoc "snapshot-uuid" params - else if List.mem_assoc "uuid" params - then List.assoc "uuid" params - else raise (failwith "Required parameter not found: snapshot-uuid or uuid.") + if List.mem_assoc "snapshot-uuid" params + then List.assoc "snapshot-uuid" params + else if List.mem_assoc "uuid" params + then List.assoc "uuid" params + else raise (failwith "Required parameter not found: snapshot-uuid or uuid.") let snapshot_revert printer rpc session_id params = - let snap_uuid = get_snapshot_uuid params in - let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in - Client.VM.revert ~rpc ~session_id ~snapshot:snap_ref + let snap_uuid = get_snapshot_uuid params in + let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in + Client.VM.revert ~rpc ~session_id ~snapshot:snap_ref let snapshot_op op printer rpc session_id params = - let new_name = List.assoc "new-name-label" params in - let desc = if List.mem_assoc "new-name-description" params then Some (List.assoc "new-name-description" params) else None in - let uuid = get_snapshot_uuid params in - let ref = Client.VM.get_by_uuid ~rpc ~session_id ~uuid in - let new_ref = op ~rpc ~session_id ~vm:ref ~new_name in - ignore (may (fun desc -> Client.VM.set_name_description rpc session_id new_ref desc) desc); - let new_uuid = Client.VM.get_uuid ~rpc ~session_id ~self:new_ref in - printer (Cli_printer.PList [new_uuid]) + let new_name = List.assoc "new-name-label" params in + let desc = if List.mem_assoc 
"new-name-description" params then Some (List.assoc "new-name-description" params) else None in + let uuid = get_snapshot_uuid params in + let ref = Client.VM.get_by_uuid ~rpc ~session_id ~uuid in + let new_ref = op ~rpc ~session_id ~vm:ref ~new_name in + ignore (may (fun desc -> Client.VM.set_name_description rpc session_id new_ref desc) desc); + let new_uuid = Client.VM.get_uuid ~rpc ~session_id ~self:new_ref in + printer (Cli_printer.PList [new_uuid]) let snapshot_clone printer = snapshot_op Client.VM.clone printer let snapshot_copy printer rpc session_id params = - let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else Ref.null in - let op = Client.VM.copy ~sr:sr in - snapshot_op op printer rpc session_id params + let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else Ref.null in + let op = Client.VM.copy ~sr:sr in + snapshot_op op printer rpc session_id params let snapshot_destroy printer rpc session_id params = - let snap_uuid = get_snapshot_uuid params in - let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in - if Client.VM.get_power_state rpc session_id snap_ref <> `Halted then Client.VM.hard_shutdown ~rpc ~session_id ~vm:snap_ref; - Client.VM.destroy ~rpc ~session_id ~self:snap_ref + let snap_uuid = get_snapshot_uuid params in + let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in + if Client.VM.get_power_state rpc session_id snap_ref <> `Halted then Client.VM.hard_shutdown ~rpc ~session_id ~vm:snap_ref; + Client.VM.destroy ~rpc ~session_id ~self:snap_ref let snapshot_uninstall fd printer rpc session_id params = - let snap_uuid = get_snapshot_uuid params in - let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in - vm_uninstall_common fd printer rpc session_id params [snap_ref] + let snap_uuid = get_snapshot_uuid params in + let snap_ref = Client.VM.get_by_uuid rpc session_id snap_uuid in + 
vm_uninstall_common fd printer rpc session_id params [snap_ref] let vm_copy printer rpc session_id params = - let new_name = List.assoc "new-name-label" params in - let desc = try Some (List.assoc "new-name-description" params) with _ -> None in - let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else Ref.null in - let new_vms = do_vm_op printer ~multiple:false ~include_template_vms:true rpc session_id (fun vm -> Client.VM.copy rpc session_id (vm.getref()) new_name sr) params ["new-name-label"; "sr-uuid"; "new-name-description"] in - ignore (may (fun desc -> Client.VM.set_name_description rpc session_id (List.hd new_vms) desc) desc); - printer (Cli_printer.PList (List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) new_vms)) + let new_name = List.assoc "new-name-label" params in + let desc = try Some (List.assoc "new-name-description" params) with _ -> None in + let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else Ref.null in + let new_vms = do_vm_op printer ~multiple:false ~include_template_vms:true rpc session_id (fun vm -> Client.VM.copy rpc session_id (vm.getref()) new_name sr) params ["new-name-label"; "sr-uuid"; "new-name-description"] in + ignore (may (fun desc -> Client.VM.set_name_description rpc session_id (List.hd new_vms) desc) desc); + printer (Cli_printer.PList (List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) new_vms)) let vm_reset_powerstate printer rpc session_id params = - if not (List.mem_assoc "force" params) then - failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; - ignore (do_vm_op printer rpc session_id (fun vm -> Client.VM.power_state_reset rpc session_id (vm.getref())) params []) + if not (List.mem_assoc "force" params) then + failwith "This operation is extremely dangerous and may cause data loss. 
This operation must be forced (use --force)."; + ignore (do_vm_op printer rpc session_id (fun vm -> Client.VM.power_state_reset rpc session_id (vm.getref())) params []) let snapshot_reset_powerstate printer rpc session_id params = - if not (List.mem_assoc "force" params) then - failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; - let snapshot_uuid = get_snapshot_uuid params in - let snapshot = Client.VM.get_by_uuid rpc session_id snapshot_uuid in - Client.VM.power_state_reset rpc session_id snapshot + if not (List.mem_assoc "force" params) then + failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; + let snapshot_uuid = get_snapshot_uuid params in + let snapshot = Client.VM.get_by_uuid rpc session_id snapshot_uuid in + Client.VM.power_state_reset rpc session_id snapshot let vm_shutdown printer rpc session_id params = - let force = get_bool_param params "force" in - ignore(if force - then do_vm_op printer rpc session_id (fun vm -> Client.Async.VM.hard_shutdown rpc session_id (vm.getref()) |> waiter printer rpc session_id params) params ["progress"] - else do_vm_op printer rpc session_id (fun vm -> Client.Async.VM.clean_shutdown rpc session_id (vm.getref()) |> waiter printer rpc session_id params) params ["progress"]) + let force = get_bool_param params "force" in + ignore(if force + then do_vm_op printer rpc session_id (fun vm -> Client.Async.VM.hard_shutdown rpc session_id (vm.getref()) |> waiter printer rpc session_id params) params ["progress"] + else do_vm_op printer rpc session_id (fun vm -> Client.Async.VM.clean_shutdown rpc session_id (vm.getref()) |> waiter printer rpc session_id params) params ["progress"]) let vm_reboot printer rpc session_id params = - let force = get_bool_param params "force" in - ignore(if force - then do_vm_op printer rpc session_id (fun vm -> Client.VM.hard_reboot rpc session_id (vm.getref())) params 
[] - else do_vm_op printer rpc session_id (fun vm -> Client.VM.clean_reboot rpc session_id (vm.getref())) params []) + let force = get_bool_param params "force" in + ignore(if force + then do_vm_op printer rpc session_id (fun vm -> Client.VM.hard_reboot rpc session_id (vm.getref())) params [] + else do_vm_op printer rpc session_id (fun vm -> Client.VM.clean_reboot rpc session_id (vm.getref())) params []) let vm_compute_maximum_memory printer rpc session_id params = - let total = Record_util.bytes_of_string "total" (List.assoc "total" params) in - let approximate = get_bool_param params "approximate" in - ignore(do_vm_op printer rpc session_id - (fun vm -> - let max = Client.VM.maximise_memory rpc session_id (vm.getref()) total approximate in - printer (Cli_printer.PList [Printf.sprintf "%Ld" max])) - params [ "total"; "approximate" ]) + let total = Record_util.bytes_of_string "total" (List.assoc "total" params) in + let approximate = get_bool_param params "approximate" in + ignore(do_vm_op printer rpc session_id + (fun vm -> + let max = Client.VM.maximise_memory rpc session_id (vm.getref()) total approximate in + printer (Cli_printer.PList [Printf.sprintf "%Ld" max])) + params [ "total"; "approximate" ]) let vm_retrieve_wlb_recommendations printer rpc session_id params = - let table vm = - List.map (fun (host,recom) -> ((Client.Host.get_name_label rpc session_id host) ^ "(" ^ (Client.Host.get_uuid rpc session_id host) ^ ")", String.concat " " recom)) - (Client.VM.retrieve_wlb_recommendations rpc session_id (vm.getref())) - in - try - let vms = select_vms rpc session_id params [] in - match List.length vms with - | 0 -> failwith "No matching VMs found" - | 1 -> printer (Cli_printer.PTable [("Host(Uuid)", "Stars, RecID, ZeroScoreReason") :: table (List.hd vms)]) - | _ -> failwith "Multiple VMs found. 
Operation can only be performed on one VM at a time" - with - | Records.CLI_failed_to_find_param name -> - failwith ("Parameter '"^name^"' is not a field of the VM class. Failed to select VM for operation.") + let table vm = + List.map (fun (host,recom) -> ((Client.Host.get_name_label rpc session_id host) ^ "(" ^ (Client.Host.get_uuid rpc session_id host) ^ ")", String.concat " " recom)) + (Client.VM.retrieve_wlb_recommendations rpc session_id (vm.getref())) + in + try + let vms = select_vms rpc session_id params [] in + match List.length vms with + | 0 -> failwith "No matching VMs found" + | 1 -> printer (Cli_printer.PTable [("Host(Uuid)", "Stars, RecID, ZeroScoreReason") :: table (List.hd vms)]) + | _ -> failwith "Multiple VMs found. Operation can only be performed on one VM at a time" + with + | Records.CLI_failed_to_find_param name -> + failwith ("Parameter '"^name^"' is not a field of the VM class. Failed to select VM for operation.") let vm_migrate_sxm_params = ["remote-master"; "remote-username"; "vif"; "remote-password"; - "remote-network"; "vdi"] + "remote-network"; "vdi"] let vm_migrate printer rpc session_id params = - (* Hack to match host-uuid and host-name for backwards compatibility *) - let params = List.map (fun (k, v) -> if (k = "host-uuid") || (k = "host-name") then ("host", v) else (k, v)) params in - let options = List.map_assoc_with_key (string_of_bool +++ bool_of_string) (List.restrict_with_default "false" ["force"; "live"; "copy"] params) in - (* If we specify all of: remote-master, remote-username, remote-password - then we're using the new codepath *) - if List.mem_assoc "remote-master" params && (List.mem_assoc "remote-username" params) - && (List.mem_assoc "remote-password" params) then begin - printer (Cli_printer.PMsg "Performing a Storage XenMotion migration. 
Your VM's VDIs will be migrated with the VM."); - let ip = List.assoc "remote-master" params in - let remote_rpc xml = - let open Xmlrpc_client in - let http = xmlrpc ~version:"1.0" "/" in - XMLRPC_protocol.rpc ~srcstr:"cli" ~dststr:"dst_xapi" ~transport:(SSL(SSL.make ~use_fork_exec_helper:false (), ip, 443)) ~http xml in - let username = List.assoc "remote-username" params in - let password = List.assoc "remote-password" params in - let remote_session = Client.Session.login_with_password remote_rpc username password "1.3" Xapi_globs.xapi_user_agent in - finally - (fun () -> - let host, host_record = - let all = Client.Host.get_all_records remote_rpc remote_session in - if List.mem_assoc "host" params then begin - let x = List.assoc "host" params in - try - List.find (fun (_, h) -> h.API.host_hostname = x || h.API.host_name_label = x || h.API.host_uuid = x) all - with Not_found -> - failwith (Printf.sprintf "Failed to find host: %s" x) - end else begin - List.hd all - end in - let network, network_record = - let all = Client.Network.get_all_records remote_rpc remote_session in - if List.mem_assoc "remote-network" params then begin - let x = List.assoc "remote-network" params in - try - List.find (fun (_, net) -> net.API.network_bridge = x || net.API.network_name_label = x || net.API.network_uuid = x) all - with Not_found -> - failwith (Printf.sprintf "Failed to find network: %s" x) - end else begin - let pifs = host_record.API.host_PIFs in - let management_pifs = List.filter (fun pif -> - Client.PIF.get_management remote_rpc remote_session pif) pifs in - if List.length management_pifs = 0 then - failwith (Printf.sprintf "Could not find management PIF on host %s" (host_record.API.host_uuid)); - let pif = List.hd management_pifs in - let net = Client.PIF.get_network remote_rpc remote_session pif in - (net, Client.Network.get_record remote_rpc remote_session net) - end in - let vif_map = List.map (fun (vif_uuid,net_uuid) -> - let vif = Client.VIF.get_by_uuid rpc 
session_id vif_uuid in - let net = Client.Network.get_by_uuid remote_rpc remote_session net_uuid in - vif,net) (read_map_params "vif" params) in - - let vdi_map = List.map (fun (vdi_uuid,sr_uuid) -> - let vdi = Client.VDI.get_by_uuid rpc session_id vdi_uuid in - let sr = Client.SR.get_by_uuid remote_rpc remote_session sr_uuid in - vdi,sr) (read_map_params "vdi" params) in - - let default_sr = - try let pools = Client.Pool.get_all remote_rpc remote_session in - printer (Cli_printer.PMsg "Selecting remote pool's default SR for migrating VDIs") ; - Some (Client.Pool.get_default_SR remote_rpc remote_session (List.hd pools)) - with _ -> None in - - let vdi_map = match default_sr with - | None -> vdi_map - | Some default_sr -> - let vms = select_vms ~include_template_vms:true rpc session_id params - ( "host" :: "host-uuid" :: "host-name" :: "live" :: "force" :: "copy" - :: vm_migrate_sxm_params ) in - if vms = [] then failwith "No matching VMs found" ; - let vbds = Client.VM.get_VBDs rpc session_id ((List.hd vms).getref ()) in - let vbds = List.filter (fun vbd -> - not (Client.VBD.get_empty rpc session_id vbd)) vbds in - let vdis = List.map - (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in - let overrides = List.map (fun vdi -> - if List.mem_assoc vdi vdi_map - then (vdi, List.assoc vdi vdi_map) - else (vdi, default_sr) - ) vdis in - let filtered_orig_list = List.filter (fun (vdi,_) -> - not (List.mem_assoc vdi overrides)) vdi_map in - overrides @ filtered_orig_list - in - - let params = List.filter (fun (s,_) -> if String.length s < 5 then true - else let start = String.sub s 0 4 in start <> "vif:" && start <> "vdi:") params in - printer (Cli_printer.PMsg (Printf.sprintf "Will migrate to remote host: %s, using remote network: %s. 
Here is the VDI mapping:" host_record.API.host_name_label network_record.API.network_name_label)); - List.iter (fun (vdi,sr) -> - printer (Cli_printer.PMsg (Printf.sprintf "VDI %s -> SR %s" - (Client.VDI.get_uuid rpc session_id vdi) - (Client.SR.get_uuid remote_rpc remote_session sr)))) - vdi_map ; - let token = Client.Host.migrate_receive remote_rpc remote_session host network options in - let new_vm = - do_vm_op ~include_control_vms:false ~include_template_vms:true printer rpc session_id (fun vm -> Client.VM.migrate_send rpc session_id (vm.getref ()) token true vdi_map vif_map options) - params (["host"; "host-uuid"; "host-name"; "live"; "force"; "copy"] @ vm_migrate_sxm_params) |> List.hd in - if get_bool_param params "copy" then - printer (Cli_printer.PList [Client.VM.get_uuid remote_rpc remote_session new_vm]) - ) - (fun () -> Client.Session.logout remote_rpc remote_session) - end else begin - if not (List.mem_assoc "host" params) then failwith "No destination host specified"; - let host = (get_host_by_name_or_id rpc session_id (List.assoc "host" params)).getref () in - - ignore(do_vm_op ~include_control_vms:true printer rpc session_id (fun vm -> Client.VM.pool_migrate rpc session_id (vm.getref ()) host options) - params ["host"; "host-uuid"; "host-name"; "live"]) - end + (* Hack to match host-uuid and host-name for backwards compatibility *) + let params = List.map (fun (k, v) -> if (k = "host-uuid") || (k = "host-name") then ("host", v) else (k, v)) params in + let options = List.map_assoc_with_key (string_of_bool +++ bool_of_string) (List.restrict_with_default "false" ["force"; "live"; "copy"] params) in + (* If we specify all of: remote-master, remote-username, remote-password + then we're using the new codepath *) + if List.mem_assoc "remote-master" params && (List.mem_assoc "remote-username" params) + && (List.mem_assoc "remote-password" params) then begin + printer (Cli_printer.PMsg "Performing a Storage XenMotion migration. 
Your VM's VDIs will be migrated with the VM."); + let ip = List.assoc "remote-master" params in + let remote_rpc xml = + let open Xmlrpc_client in + let http = xmlrpc ~version:"1.0" "/" in + XMLRPC_protocol.rpc ~srcstr:"cli" ~dststr:"dst_xapi" ~transport:(SSL(SSL.make ~use_fork_exec_helper:false (), ip, 443)) ~http xml in + let username = List.assoc "remote-username" params in + let password = List.assoc "remote-password" params in + let remote_session = Client.Session.login_with_password remote_rpc username password "1.3" Xapi_globs.xapi_user_agent in + finally + (fun () -> + let host, host_record = + let all = Client.Host.get_all_records remote_rpc remote_session in + if List.mem_assoc "host" params then begin + let x = List.assoc "host" params in + try + List.find (fun (_, h) -> h.API.host_hostname = x || h.API.host_name_label = x || h.API.host_uuid = x) all + with Not_found -> + failwith (Printf.sprintf "Failed to find host: %s" x) + end else begin + List.hd all + end in + let network, network_record = + let all = Client.Network.get_all_records remote_rpc remote_session in + if List.mem_assoc "remote-network" params then begin + let x = List.assoc "remote-network" params in + try + List.find (fun (_, net) -> net.API.network_bridge = x || net.API.network_name_label = x || net.API.network_uuid = x) all + with Not_found -> + failwith (Printf.sprintf "Failed to find network: %s" x) + end else begin + let pifs = host_record.API.host_PIFs in + let management_pifs = List.filter (fun pif -> + Client.PIF.get_management remote_rpc remote_session pif) pifs in + if List.length management_pifs = 0 then + failwith (Printf.sprintf "Could not find management PIF on host %s" (host_record.API.host_uuid)); + let pif = List.hd management_pifs in + let net = Client.PIF.get_network remote_rpc remote_session pif in + (net, Client.Network.get_record remote_rpc remote_session net) + end in + let vif_map = List.map (fun (vif_uuid,net_uuid) -> + let vif = Client.VIF.get_by_uuid rpc 
session_id vif_uuid in + let net = Client.Network.get_by_uuid remote_rpc remote_session net_uuid in + vif,net) (read_map_params "vif" params) in + + let vdi_map = List.map (fun (vdi_uuid,sr_uuid) -> + let vdi = Client.VDI.get_by_uuid rpc session_id vdi_uuid in + let sr = Client.SR.get_by_uuid remote_rpc remote_session sr_uuid in + vdi,sr) (read_map_params "vdi" params) in + + let default_sr = + try let pools = Client.Pool.get_all remote_rpc remote_session in + printer (Cli_printer.PMsg "Selecting remote pool's default SR for migrating VDIs") ; + Some (Client.Pool.get_default_SR remote_rpc remote_session (List.hd pools)) + with _ -> None in + + let vdi_map = match default_sr with + | None -> vdi_map + | Some default_sr -> + let vms = select_vms ~include_template_vms:true rpc session_id params + ( "host" :: "host-uuid" :: "host-name" :: "live" :: "force" :: "copy" + :: vm_migrate_sxm_params ) in + if vms = [] then failwith "No matching VMs found" ; + let vbds = Client.VM.get_VBDs rpc session_id ((List.hd vms).getref ()) in + let vbds = List.filter (fun vbd -> + not (Client.VBD.get_empty rpc session_id vbd)) vbds in + let vdis = List.map + (fun vbd -> Client.VBD.get_VDI rpc session_id vbd) vbds in + let overrides = List.map (fun vdi -> + if List.mem_assoc vdi vdi_map + then (vdi, List.assoc vdi vdi_map) + else (vdi, default_sr) + ) vdis in + let filtered_orig_list = List.filter (fun (vdi,_) -> + not (List.mem_assoc vdi overrides)) vdi_map in + overrides @ filtered_orig_list + in + + let params = List.filter (fun (s,_) -> if String.length s < 5 then true + else let start = String.sub s 0 4 in start <> "vif:" && start <> "vdi:") params in + printer (Cli_printer.PMsg (Printf.sprintf "Will migrate to remote host: %s, using remote network: %s. 
Here is the VDI mapping:" host_record.API.host_name_label network_record.API.network_name_label)); + List.iter (fun (vdi,sr) -> + printer (Cli_printer.PMsg (Printf.sprintf "VDI %s -> SR %s" + (Client.VDI.get_uuid rpc session_id vdi) + (Client.SR.get_uuid remote_rpc remote_session sr)))) + vdi_map ; + let token = Client.Host.migrate_receive remote_rpc remote_session host network options in + let new_vm = + do_vm_op ~include_control_vms:false ~include_template_vms:true printer rpc session_id (fun vm -> Client.VM.migrate_send rpc session_id (vm.getref ()) token true vdi_map vif_map options) + params (["host"; "host-uuid"; "host-name"; "live"; "force"; "copy"] @ vm_migrate_sxm_params) |> List.hd in + if get_bool_param params "copy" then + printer (Cli_printer.PList [Client.VM.get_uuid remote_rpc remote_session new_vm]) + ) + (fun () -> Client.Session.logout remote_rpc remote_session) + end else begin + if not (List.mem_assoc "host" params) then failwith "No destination host specified"; + let host = (get_host_by_name_or_id rpc session_id (List.assoc "host" params)).getref () in + + ignore(do_vm_op ~include_control_vms:true printer rpc session_id (fun vm -> Client.VM.pool_migrate rpc session_id (vm.getref ()) host options) + params ["host"; "host-uuid"; "host-name"; "live"]) + end let vm_disk_list_aux vm is_cd_list printer rpc session_id params = - let vbds = List.filter (fun vbd -> Client.VBD.get_type rpc session_id vbd = (if is_cd_list then `CD else `Disk)) (vm.record()).API.vM_VBDs in - let vbdrecords = List.map (fun vbd-> (vbd_record rpc session_id vbd)) vbds in - let vdirecords = List.map (fun vbd -> - if not (Client.VBD.get_empty rpc session_id vbd) then Some (vdi_record rpc session_id (Client.VBD.get_VDI rpc session_id vbd)) else None) vbds in - (* Hack - convert 'vbd-params' to 'params' *) - let params' = List.map (fun (a,b) -> if a="vbd-params" then ("params",b) else (a,b)) params in - let selectedvbd = select_fields params' vbdrecords - (if is_cd_list - then 
["uuid"; "vm-name-label"; "userdevice"; "empty"] - else ["uuid"; "vm-name-label"; "userdevice"]) - in - let params' = List.map (fun (a,b) -> if a="vdi-params" then ("params",b) else (a,b)) params in - let rec doit vbds vdis n = - match (vbds,vdis) with - | ([],[]) -> () - | (vbd::vbds,vdi::vdis) -> - let disk = (if is_cd_list then "CD " else "Disk ")^string_of_int n in - printer (Cli_printer.PMsg (disk ^ " VBD:")); - printer (Cli_printer.PTable [(List.map print_field vbd)]); - (* Only print out the VDI if there is one - empty cds don't have one *) - begin - match vdi with - Some vdi -> - let selectedvdi = List.hd (select_fields params' [vdi] ["uuid"; "name-label"; "virtual-size"; "sr-name-label"]) in - printer (Cli_printer.PMsg (disk ^ " VDI:")); - printer (Cli_printer.PTable [(List.map print_field selectedvdi)]); - | None -> () - end; - doit vbds vdis (n+1) - | _ -> (failwith "Unexpected mismatch in list length in vm_disk_list") - in doit selectedvbd vdirecords 0 + let vbds = List.filter (fun vbd -> Client.VBD.get_type rpc session_id vbd = (if is_cd_list then `CD else `Disk)) (vm.record()).API.vM_VBDs in + let vbdrecords = List.map (fun vbd-> (vbd_record rpc session_id vbd)) vbds in + let vdirecords = List.map (fun vbd -> + if not (Client.VBD.get_empty rpc session_id vbd) then Some (vdi_record rpc session_id (Client.VBD.get_VDI rpc session_id vbd)) else None) vbds in + (* Hack - convert 'vbd-params' to 'params' *) + let params' = List.map (fun (a,b) -> if a="vbd-params" then ("params",b) else (a,b)) params in + let selectedvbd = select_fields params' vbdrecords + (if is_cd_list + then ["uuid"; "vm-name-label"; "userdevice"; "empty"] + else ["uuid"; "vm-name-label"; "userdevice"]) + in + let params' = List.map (fun (a,b) -> if a="vdi-params" then ("params",b) else (a,b)) params in + let rec doit vbds vdis n = + match (vbds,vdis) with + | ([],[]) -> () + | (vbd::vbds,vdi::vdis) -> + let disk = (if is_cd_list then "CD " else "Disk ")^string_of_int n in + printer 
(Cli_printer.PMsg (disk ^ " VBD:")); + printer (Cli_printer.PTable [(List.map print_field vbd)]); + (* Only print out the VDI if there is one - empty cds don't have one *) + begin + match vdi with + Some vdi -> + let selectedvdi = List.hd (select_fields params' [vdi] ["uuid"; "name-label"; "virtual-size"; "sr-name-label"]) in + printer (Cli_printer.PMsg (disk ^ " VDI:")); + printer (Cli_printer.PTable [(List.map print_field selectedvdi)]); + | None -> () + end; + doit vbds vdis (n+1) + | _ -> (failwith "Unexpected mismatch in list length in vm_disk_list") + in doit selectedvbd vdirecords 0 let vm_disk_list is_cd_list printer rpc session_id params = - let op vm = vm_disk_list_aux vm is_cd_list printer rpc session_id params in - let ( _ : unit list) = do_vm_op printer rpc session_id op params ["vbd-params";"vdi-params"] in () + let op vm = vm_disk_list_aux vm is_cd_list printer rpc session_id params in + let ( _ : unit list) = do_vm_op printer rpc session_id op params ["vbd-params";"vdi-params"] in () let snapshot_disk_list is_cd_list printer rpc session_id params = - let snapshot_uuid = get_snapshot_uuid params in - let snapshot_ref = Client.VM.get_by_uuid rpc session_id snapshot_uuid in - let snapshot = vm_record rpc session_id snapshot_ref in - vm_disk_list_aux snapshot is_cd_list printer rpc session_id params + let snapshot_uuid = get_snapshot_uuid params in + let snapshot_ref = Client.VM.get_by_uuid rpc session_id snapshot_uuid in + let snapshot = vm_record rpc session_id snapshot_ref in + vm_disk_list_aux snapshot is_cd_list printer rpc session_id params let vm_crashdump_list printer rpc session_id params = - let op vm = - let records = List.map (fun crashdump -> (crashdump_record rpc session_id crashdump).fields) (vm.record()).API.vM_crash_dumps in - printer (Cli_printer.PTable (List.map (List.map print_field) records)) - in - ignore(do_vm_op printer rpc session_id op params []) + let op vm = + let records = List.map (fun crashdump -> (crashdump_record rpc 
session_id crashdump).fields) (vm.record()).API.vM_crash_dumps in + printer (Cli_printer.PTable (List.map (List.map print_field) records)) + in + ignore(do_vm_op printer rpc session_id op params []) (* Disk add creates a VDI with the size, sr specified. The name and sector size * can be optionally specified. A VBD is then creased with the device name as specified *) let vm_disk_add printer rpc session_id params = - (* Required params *) - let vdi_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) in - let vbd_device = List.assoc "device" params in - let sr = - if List.mem_assoc "sr-uuid" params then - let sr_uuid = List.assoc "sr-uuid" params in - Client.SR.get_by_uuid rpc session_id sr_uuid - else - match get_default_sr_uuid rpc session_id with - | Some x -> Client.SR.get_by_uuid rpc session_id x - | None -> failwith "No default Pool SR set; you must specify an SR on the commandline" - in - (* Optional params *) - let vdi_name = "Created by xe" in - let op vm = - let vm=vm.getref() in - let vmuuid = Client.VM.get_uuid ~rpc ~session_id ~self:vm in - let sm_config = [ Xapi_globs._sm_vm_hint, vmuuid ] in - let vdi = Client.VDI.create ~rpc ~session_id ~name_label:vdi_name ~name_description:vdi_name ~sR:sr ~virtual_size:vdi_size ~_type:`user ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in - try - let _ = - create_owner_vbd_and_plug rpc session_id vm vdi - vbd_device false `RW `Disk true "" [] in - () - with - e -> - Client.VDI.destroy rpc session_id vdi; - raise e - in - ignore(do_vm_op printer rpc session_id op params ["sr-uuid";"device";"disk-size"]) + (* Required params *) + let vdi_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) in + let vbd_device = List.assoc "device" params in + let sr = + if List.mem_assoc "sr-uuid" params then + let sr_uuid = List.assoc "sr-uuid" params in + Client.SR.get_by_uuid rpc session_id sr_uuid + else + match get_default_sr_uuid rpc 
session_id with + | Some x -> Client.SR.get_by_uuid rpc session_id x + | None -> failwith "No default Pool SR set; you must specify an SR on the commandline" + in + (* Optional params *) + let vdi_name = "Created by xe" in + let op vm = + let vm=vm.getref() in + let vmuuid = Client.VM.get_uuid ~rpc ~session_id ~self:vm in + let sm_config = [ Xapi_globs._sm_vm_hint, vmuuid ] in + let vdi = Client.VDI.create ~rpc ~session_id ~name_label:vdi_name ~name_description:vdi_name ~sR:sr ~virtual_size:vdi_size ~_type:`user ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in + try + let _ = + create_owner_vbd_and_plug rpc session_id vm vdi + vbd_device false `RW `Disk true "" [] in + () + with + e -> + Client.VDI.destroy rpc session_id vdi; + raise e + in + ignore(do_vm_op printer rpc session_id op params ["sr-uuid";"device";"disk-size"]) let vm_disk_remove printer rpc session_id params = - let device = List.assoc "device" params in - let op vm = - let vm=vm.getref() in - let vm_record = Client.VM.get_record rpc session_id vm in - let vbd_to_remove = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in - if List.length vbd_to_remove < 1 then (failwith "Disk not found") else - let vbd = List.nth vbd_to_remove 0 in - let vdi = Client.VBD.get_VDI rpc session_id vbd in - Client.VBD.destroy rpc session_id vbd; - Client.VDI.destroy rpc session_id vdi - in - ignore(do_vm_op printer rpc session_id op params ["device"]) + let device = List.assoc "device" params in + let op vm = + let vm=vm.getref() in + let vm_record = Client.VM.get_record rpc session_id vm in + let vbd_to_remove = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in + if List.length vbd_to_remove < 1 then (failwith "Disk not found") else + let vbd = List.nth vbd_to_remove 0 in + let vdi = Client.VBD.get_VDI rpc session_id vbd in + Client.VBD.destroy rpc session_id vbd; + 
Client.VDI.destroy rpc session_id vdi + in + ignore(do_vm_op printer rpc session_id op params ["device"]) let vm_disk_detach printer rpc session_id params = - let device = List.assoc "device" params in - let op vm = - let vm_record = vm.record () in - let vbd_to_remove = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in - if List.length vbd_to_remove < 1 then (failwith "Disk not found") else - let vbd = List.nth vbd_to_remove 0 in - Client.VBD.destroy rpc session_id vbd - in - ignore(do_vm_op printer rpc session_id op params ["device"]) + let device = List.assoc "device" params in + let op vm = + let vm_record = vm.record () in + let vbd_to_remove = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in + if List.length vbd_to_remove < 1 then (failwith "Disk not found") else + let vbd = List.nth vbd_to_remove 0 in + Client.VBD.destroy rpc session_id vbd + in + ignore(do_vm_op printer rpc session_id op params ["device"]) let vm_disk_resize printer rpc session_id params = - let device = List.assoc "device" params in - let new_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) in - let op vm = - let vm_record = vm.record () in - let vbd_to_resize = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in - if List.length vbd_to_resize < 1 then (failwith "Disk not found") else - let vbd = List.nth vbd_to_resize 0 in - let vdi = Client.VBD.get_VDI rpc session_id vbd in - Client.VDI.resize rpc session_id vdi new_size - in - ignore(do_vm_op printer rpc session_id op params ["device";"disk-size"]) + let device = List.assoc "device" params in + let new_size = Record_util.bytes_of_string "disk-size" (List.assoc "disk-size" params) in + let op vm = + let vm_record = vm.record () in + let vbd_to_resize = List.filter (fun x -> device = Client.VBD.get_userdevice rpc session_id x) vm_record.API.vM_VBDs in + if 
List.length vbd_to_resize < 1 then (failwith "Disk not found") else + let vbd = List.nth vbd_to_resize 0 in + let vdi = Client.VBD.get_VDI rpc session_id vbd in + Client.VDI.resize rpc session_id vdi new_size + in + ignore(do_vm_op printer rpc session_id op params ["device";"disk-size"]) let vm_cd_remove printer rpc session_id params = - let disk_name = List.assoc "cd-name" params in - let op vm = - let vm_record = vm.record () in - let vbd_to_remove = List.filter - (fun x -> - try - let vdi = (Client.VBD.get_VDI rpc session_id x) in - let sr = (Client.VDI.get_SR rpc session_id vdi) in - ("iso"=Client.SR.get_content_type rpc session_id sr) && - (disk_name = Client.VDI.get_name_label rpc session_id vdi) - with _ (* VDI handle invalid *) -> disk_name="") vm_record.API.vM_VBDs in - if List.length vbd_to_remove < 1 then raise (failwith "Disk not found") else - let vbd = List.nth vbd_to_remove 0 in - Client.VBD.destroy rpc session_id vbd - in - ignore(do_vm_op printer rpc session_id op params ["cd-name"]) + let disk_name = List.assoc "cd-name" params in + let op vm = + let vm_record = vm.record () in + let vbd_to_remove = List.filter + (fun x -> + try + let vdi = (Client.VBD.get_VDI rpc session_id x) in + let sr = (Client.VDI.get_SR rpc session_id vdi) in + ("iso"=Client.SR.get_content_type rpc session_id sr) && + (disk_name = Client.VDI.get_name_label rpc session_id vdi) + with _ (* VDI handle invalid *) -> disk_name="") vm_record.API.vM_VBDs in + if List.length vbd_to_remove < 1 then raise (failwith "Disk not found") else + let vbd = List.nth vbd_to_remove 0 in + Client.VBD.destroy rpc session_id vbd + in + ignore(do_vm_op printer rpc session_id op params ["cd-name"]) let vm_cd_add printer rpc session_id params = - let cd_name = List.assoc "cd-name" params in - let vdis = Client.VDI.get_by_name_label rpc session_id cd_name in - let vdis = List.filter (fun vdi -> let sr = Client.VDI.get_SR rpc session_id vdi in "iso"=Client.SR.get_content_type rpc session_id sr) vdis 
in - (if List.length vdis = 0 then (failwith ("CD "^cd_name^" not found!"))); - let vdi = List.nth vdis 0 in - let op vm = create_vbd_and_plug - rpc session_id (vm.getref()) vdi (List.assoc "device" params) false `RO `CD true "" [] - in - ignore(do_vm_op printer rpc session_id op params ["cd-name";"device";"cd-location"]) - (* cd-location was a geneva-style param *) + let cd_name = List.assoc "cd-name" params in + let vdis = Client.VDI.get_by_name_label rpc session_id cd_name in + let vdis = List.filter (fun vdi -> let sr = Client.VDI.get_SR rpc session_id vdi in "iso"=Client.SR.get_content_type rpc session_id sr) vdis in + (if List.length vdis = 0 then (failwith ("CD "^cd_name^" not found!"))); + let vdi = List.nth vdis 0 in + let op vm = create_vbd_and_plug + rpc session_id (vm.getref()) vdi (List.assoc "device" params) false `RO `CD true "" [] + in + ignore(do_vm_op printer rpc session_id op params ["cd-name";"device";"cd-location"]) +(* cd-location was a geneva-style param *) let vm_cd_eject printer rpc session_id params = - let op vm = - let vm_record = vm.record () in - let vbds = vm_record.API.vM_VBDs in - let cdvbds = List.filter (fun vbd -> Client.VBD.get_type rpc session_id vbd = `CD) vbds in - if List.length cdvbds = 0 then (failwith "No CDs found"); - if List.length cdvbds > 1 then (failwith "Two or more CDs found. Please use vbd-eject"); - let cd = List.hd cdvbds in - Client.VBD.eject rpc session_id cd - in - ignore(do_vm_op printer rpc session_id op params []) + let op vm = + let vm_record = vm.record () in + let vbds = vm_record.API.vM_VBDs in + let cdvbds = List.filter (fun vbd -> Client.VBD.get_type rpc session_id vbd = `CD) vbds in + if List.length cdvbds = 0 then (failwith "No CDs found"); + if List.length cdvbds > 1 then (failwith "Two or more CDs found. 
Please use vbd-eject"); + let cd = List.hd cdvbds in + Client.VBD.eject rpc session_id cd + in + ignore(do_vm_op printer rpc session_id op params []) let vm_cd_insert printer rpc session_id params = - let cd_name = List.assoc "cd-name" params in - let vdis = - Client.VDI.get_by_name_label rpc session_id cd_name in - let vdis = List.filter (fun vdi -> let sr = Client.VDI.get_SR rpc session_id vdi in "iso"=Client.SR.get_content_type rpc session_id sr) vdis in - (if List.length vdis = 0 then (failwith ("CD "^cd_name^" not found"))); - (if List.length vdis > 1 then (failwith ("Multiple CDs named "^cd_name^" found. Please use vbd-insert and specify uuids"))); - let op vm = - let vm_record = vm.record () in - let vbds = vm_record.API.vM_VBDs in - let cdvbds = List.filter (fun vbd -> (Client.VBD.get_type rpc session_id vbd = `CD) && (Client.VBD.get_empty rpc session_id vbd)) vbds in - if List.length cdvbds = 0 then raise (Api_errors.Server_error(Api_errors.vm_no_empty_cd_vbd, [ Ref.string_of (vm.getref ()) ])); - if List.length cdvbds > 1 then (failwith "Two or more empty CD devices found. Please use vbd-insert"); - let cd = List.hd cdvbds in - Client.VBD.insert rpc session_id cd (List.hd vdis) - in - ignore(do_vm_op printer rpc session_id op params ["cd-name"]) + let cd_name = List.assoc "cd-name" params in + let vdis = + Client.VDI.get_by_name_label rpc session_id cd_name in + let vdis = List.filter (fun vdi -> let sr = Client.VDI.get_SR rpc session_id vdi in "iso"=Client.SR.get_content_type rpc session_id sr) vdis in + (if List.length vdis = 0 then (failwith ("CD "^cd_name^" not found"))); + (if List.length vdis > 1 then (failwith ("Multiple CDs named "^cd_name^" found. 
Please use vbd-insert and specify uuids"))); + let op vm = + let vm_record = vm.record () in + let vbds = vm_record.API.vM_VBDs in + let cdvbds = List.filter (fun vbd -> (Client.VBD.get_type rpc session_id vbd = `CD) && (Client.VBD.get_empty rpc session_id vbd)) vbds in + if List.length cdvbds = 0 then raise (Api_errors.Server_error(Api_errors.vm_no_empty_cd_vbd, [ Ref.string_of (vm.getref ()) ])); + if List.length cdvbds > 1 then (failwith "Two or more empty CD devices found. Please use vbd-insert"); + let cd = List.hd cdvbds in + Client.VBD.insert rpc session_id cd (List.hd vdis) + in + ignore(do_vm_op printer rpc session_id op params ["cd-name"]) let host_careful_op op warnings fd printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let host = Client.Host.get_by_uuid rpc session_id uuid in - let pool = List.hd (Client.Pool.get_all rpc session_id) in - let _ (* unused variable 'pool_master' *) = Client.Pool.get_master rpc session_id pool in - (* if pool_master = host then failwith "Cannot forget pool master"; *) + let uuid = List.assoc "uuid" params in + let host = Client.Host.get_by_uuid rpc session_id uuid in + let pool = List.hd (Client.Pool.get_all rpc session_id) in + let _ (* unused variable 'pool_master' *) = Client.Pool.get_master rpc session_id pool in + (* if pool_master = host then failwith "Cannot forget pool master"; *) - let force = get_bool_param params "force" in + let force = get_bool_param params "force" in - let go () = ignore (op ~rpc ~session_id ~self:host) in + let go () = ignore (op ~rpc ~session_id ~self:host) in - if force - then go () - else begin - (* Best-effort attempt to warn the user *) - List.iter (fun x-> marshal fd (Command (Print x))) warnings; - if user_says_yes fd - then go () - end + if force + then go () + else begin + (* Best-effort attempt to warn the user *) + List.iter (fun x-> marshal fd (Command (Print x))) warnings; + if user_says_yes fd + then go () + end let host_forget x = - let warnings = [ - 
"WARNING: A host should only be forgotten if it is physically unrecoverable;"; - "WARNING: if possible, Hosts should be 'ejected' from the Pool instead."; - "WARNING: Once a host has been forgotten it will have to be re-installed."; - "WARNING: This operation is irreversible."] in - host_careful_op Client.Host.destroy warnings x + let warnings = [ + "WARNING: A host should only be forgotten if it is physically unrecoverable;"; + "WARNING: if possible, Hosts should be 'ejected' from the Pool instead."; + "WARNING: Once a host has been forgotten it will have to be re-installed."; + "WARNING: This operation is irreversible."] in + host_careful_op Client.Host.destroy warnings x let host_declare_dead x = - let warnings = [ - "WARNING: A host should only be declared dead if it is verified offline."; - "WARNING: Performing this operation if the host is still online and has any"; - "WARNING: running VMs may lead to possible data loss and/or corruption." - ] in - host_careful_op (fun ~rpc ~session_id ~self -> Client.Host.declare_dead ~rpc ~session_id ~host:self) warnings x + let warnings = [ + "WARNING: A host should only be declared dead if it is verified offline."; + "WARNING: Performing this operation if the host is still online and has any"; + "WARNING: running VMs may lead to possible data loss and/or corruption." 
+ ] in + host_careful_op (fun ~rpc ~session_id ~self -> Client.Host.declare_dead ~rpc ~session_id ~host:self) warnings x let host_license_add fd printer rpc session_id params = - let host = - if List.mem_assoc "host-uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) - else - get_host_from_session rpc session_id in - let license_file = List.assoc "license-file" params in - match get_client_file fd license_file with - | Some license -> - debug "Checking license [%s]" license; - (try - Client.Host.license_add rpc session_id host (Base64.encode license); - marshal fd (Command (Print "License applied.")) - with _ -> - marshal fd (Command (PrintStderr "Failed to apply license file.\n")); - raise (ExitWithError 1) - ) - | None -> - marshal fd (Command (PrintStderr "Failed to read license file.\n")); - raise (ExitWithError 1) + let host = + if List.mem_assoc "host-uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) + else + get_host_from_session rpc session_id in + let license_file = List.assoc "license-file" params in + match get_client_file fd license_file with + | Some license -> + debug "Checking license [%s]" license; + (try + Client.Host.license_add rpc session_id host (Base64.encode license); + marshal fd (Command (Print "License applied.")) + with _ -> + marshal fd (Command (PrintStderr "Failed to apply license file.\n")); + raise (ExitWithError 1) + ) + | None -> + marshal fd (Command (PrintStderr "Failed to read license file.\n")); + raise (ExitWithError 1) let host_license_remove printer rpc session_id params = - let host = - if List.mem_assoc "host-uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) - else - get_host_from_session rpc session_id in - Client.Host.license_remove rpc session_id host + let host = + if List.mem_assoc "host-uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) + else + 
get_host_from_session rpc session_id in + Client.Host.license_remove rpc session_id host let host_license_view printer rpc session_id params = - let host = - if List.mem_assoc "host-uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) - else - get_host_from_session rpc session_id in - let params = Client.Host.get_license_params rpc session_id host in - let tohide = [ "sku_type" ] in - let params = List.filter (fun (x, _) -> not (List.mem x tohide)) params in - printer (Cli_printer.PTable [params]) + let host = + if List.mem_assoc "host-uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) + else + get_host_from_session rpc session_id in + let params = Client.Host.get_license_params rpc session_id host in + let tohide = [ "sku_type" ] in + let params = List.filter (fun (x, _) -> not (List.mem x tohide)) params in + printer (Cli_printer.PTable [params]) let with_license_server_changes printer rpc session_id params hosts f = - (* Save the original license server details for each host; - * in case of failure we will need to roll back. *) - let current_license_servers = - List.map - (fun host -> (host, Client.Host.get_license_server rpc session_id host)) - hosts - in - (* Set any new license server address across the pool. *) - if List.mem_assoc "license-server-address" params then begin - let address = List.assoc "license-server-address" params in - List.iter - (fun host -> - Client.Host.remove_from_license_server rpc session_id host "address"; - Client.Host.add_to_license_server rpc session_id host "address" address) - hosts - end; - (* Set any new license server port across the pool. 
*) - if List.mem_assoc "license-server-port" params then begin - let port = List.assoc "license-server-port" params in - let port_int = try int_of_string port with _ -> -1 in - if port_int < 0 || port_int > 65535 then - printer (Cli_printer.PStderr "NOTE: The given port number is invalid; reverting to the current value.\n") - else begin - List.iter - (fun host -> - Client.Host.remove_from_license_server rpc session_id host "port"; - Client.Host.add_to_license_server rpc session_id host "port" port) - hosts - end - end; - let now = (Unix.gettimeofday ()) in - try - f rpc session_id - with - | Api_errors.Server_error (name, args) as e - when name = Api_errors.license_checkout_error -> - (* Put back original license_server_details *) - List.iter - (fun (host, license_server) -> - Client.Host.set_license_server rpc session_id host license_server) - current_license_servers; - let alerts = Client.Message.get_since rpc session_id (Date.of_float (now -. 1.)) in - let print_if_checkout_error (ref, msg) = - if false - || msg.API.message_name = (fst Api_messages.v6_rejected) - || msg.API.message_name = (fst Api_messages.v6_comm_error) - || msg.API.message_name = (fst Api_messages.v6_license_server_version_obsolete) - then begin - Client.Message.destroy rpc session_id ref; - printer (Cli_printer.PStderr (msg.API.message_body ^ "\n")) - end - in - if alerts = [] - then raise e - else begin - List.iter print_if_checkout_error alerts; - raise (ExitWithError 1) - end - | Api_errors.Server_error (name, args) as e - when name = Api_errors.invalid_edition -> - let editions = (V6client.get_editions "host_apply_edition") - |> List.map (fun (x, _, _, _) -> x) - |> String.concat ", " - in - printer (Cli_printer.PStderr ("Valid editions are: " ^ editions ^ "\n")); - raise e - | e -> raise e + (* Save the original license server details for each host; + * in case of failure we will need to roll back. 
*) + let current_license_servers = + List.map + (fun host -> (host, Client.Host.get_license_server rpc session_id host)) + hosts + in + (* Set any new license server address across the pool. *) + if List.mem_assoc "license-server-address" params then begin + let address = List.assoc "license-server-address" params in + List.iter + (fun host -> + Client.Host.remove_from_license_server rpc session_id host "address"; + Client.Host.add_to_license_server rpc session_id host "address" address) + hosts + end; + (* Set any new license server port across the pool. *) + if List.mem_assoc "license-server-port" params then begin + let port = List.assoc "license-server-port" params in + let port_int = try int_of_string port with _ -> -1 in + if port_int < 0 || port_int > 65535 then + printer (Cli_printer.PStderr "NOTE: The given port number is invalid; reverting to the current value.\n") + else begin + List.iter + (fun host -> + Client.Host.remove_from_license_server rpc session_id host "port"; + Client.Host.add_to_license_server rpc session_id host "port" port) + hosts + end + end; + let now = (Unix.gettimeofday ()) in + try + f rpc session_id + with + | Api_errors.Server_error (name, args) as e + when name = Api_errors.license_checkout_error -> + (* Put back original license_server_details *) + List.iter + (fun (host, license_server) -> + Client.Host.set_license_server rpc session_id host license_server) + current_license_servers; + let alerts = Client.Message.get_since rpc session_id (Date.of_float (now -. 
1.)) in + let print_if_checkout_error (ref, msg) = + if false + || msg.API.message_name = (fst Api_messages.v6_rejected) + || msg.API.message_name = (fst Api_messages.v6_comm_error) + || msg.API.message_name = (fst Api_messages.v6_license_server_version_obsolete) + then begin + Client.Message.destroy rpc session_id ref; + printer (Cli_printer.PStderr (msg.API.message_body ^ "\n")) + end + in + if alerts = [] + then raise e + else begin + List.iter print_if_checkout_error alerts; + raise (ExitWithError 1) + end + | Api_errors.Server_error (name, args) as e + when name = Api_errors.invalid_edition -> + let editions = (V6client.get_editions "host_apply_edition") + |> List.map (fun (x, _, _, _) -> x) + |> String.concat ", " + in + printer (Cli_printer.PStderr ("Valid editions are: " ^ editions ^ "\n")); + raise e + | e -> raise e let host_apply_edition printer rpc session_id params = - let host = - if List.mem_assoc "host-uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) - else - get_host_from_session rpc session_id in - let edition = List.assoc "edition" params in - with_license_server_changes printer rpc session_id params [host] - (fun rpc session_id -> Client.Host.apply_edition rpc session_id host edition false) + let host = + if List.mem_assoc "host-uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) + else + get_host_from_session rpc session_id in + let edition = List.assoc "edition" params in + with_license_server_changes printer rpc session_id params [host] + (fun rpc session_id -> Client.Host.apply_edition rpc session_id host edition false) let host_all_editions printer rpc session_id params = - let editions = List.map (fun (e, _, _, _) -> e) (V6client.get_editions "host_all_editions") in - printer (Cli_printer.PList editions) + let editions = List.map (fun (e, _, _, _) -> e) (V6client.get_editions "host_all_editions") in + printer (Cli_printer.PList editions) let host_evacuate 
printer rpc session_id params = - ignore (do_host_op rpc session_id ~multiple:false - (fun _ host -> - Client.Host.evacuate rpc session_id (host.getref ())) - params []) + ignore (do_host_op rpc session_id ~multiple:false + (fun _ host -> + Client.Host.evacuate rpc session_id (host.getref ())) + params []) let host_get_vms_which_prevent_evacuation printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let host = Client.Host.get_by_uuid rpc session_id uuid in - let vms = Client.Host.get_vms_which_prevent_evacuation rpc session_id host in - - let op (vm, error) = - let error = String.concat "," error in - let record = vm_record rpc session_id vm in - let extra_field = make_field ~name:"reason" ~get:(fun () -> error) () in - let record = { record with fields = record.fields @ [ extra_field ] } in - let selected = List.hd (select_fields params [record] [ "uuid"; "name-label"; "reason"]) in - let table = List.map print_field selected in - printer (Cli_printer.PTable [table]) - in - ignore(List.iter op vms) + let uuid = List.assoc "uuid" params in + let host = Client.Host.get_by_uuid rpc session_id uuid in + let vms = Client.Host.get_vms_which_prevent_evacuation rpc session_id host in + + let op (vm, error) = + let error = String.concat "," error in + let record = vm_record rpc session_id vm in + let extra_field = make_field ~name:"reason" ~get:(fun () -> error) () in + let record = { record with fields = record.fields @ [ extra_field ] } in + let selected = List.hd (select_fields params [record] [ "uuid"; "name-label"; "reason"]) in + let table = List.map print_field selected in + printer (Cli_printer.PTable [table]) + in + ignore(List.iter op vms) let host_retrieve_wlb_evacuate_recommendations printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let host = Client.Host.get_by_uuid rpc session_id uuid in - let vms = Client.Host.retrieve_wlb_evacuate_recommendations rpc session_id host in - let table = List.map (fun (vm, result) 
-> - Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), - String.concat " " result) vms in - printer (Cli_printer.PTable [ ("VM", "[Host, RecID] / Error") :: table ]) + let uuid = List.assoc "uuid" params in + let host = Client.Host.get_by_uuid rpc session_id uuid in + let vms = Client.Host.retrieve_wlb_evacuate_recommendations rpc session_id host in + let table = List.map (fun (vm, result) -> + Printf.sprintf "%s (%s)" (Client.VM.get_uuid rpc session_id vm) (Client.VM.get_name_label rpc session_id vm), + String.concat " " result) vms in + printer (Cli_printer.PTable [ ("VM", "[Host, RecID] / Error") :: table ]) let host_shutdown_agent printer rpc session_id params = - ignore(Client.Host.shutdown_agent rpc session_id) + ignore(Client.Host.shutdown_agent rpc session_id) let vdi_import fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let format = - if List.mem_assoc "format" params - then "&format=" ^ (List.assoc "format" params) - else "" in - let progress_bar = get_bool_param params "progress" in - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&vdi=%s%s" - prefix Constants.import_raw_vdi_uri (Ref.string_of session_id) - (Ref.string_of task_id) (Ref.string_of vdi) format in - debug "requesting HttpPut('%s','%s')" filename uri; - HttpPut (filename, uri) in - ignore(track_http_operation ~progress_bar fd rpc session_id make_command "VDI import") + let filename = List.assoc "filename" params in + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let format = + if List.mem_assoc "format" params + then "&format=" ^ (List.assoc "format" params) + else "" in + let progress_bar = get_bool_param params "progress" in + let make_command task_id = + let prefix = uri_of_someone rpc 
session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&vdi=%s%s" + prefix Constants.import_raw_vdi_uri (Ref.string_of session_id) + (Ref.string_of task_id) (Ref.string_of vdi) format in + debug "requesting HttpPut('%s','%s')" filename uri; + HttpPut (filename, uri) in + ignore(track_http_operation ~progress_bar fd rpc session_id make_command "VDI import") let vdi_export fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let format = - if List.mem_assoc "format" params - then "&format=" ^ (List.assoc "format" params) - else "" in - let base = - if List.mem_assoc "base" params - then "&base=" ^ (List.assoc "base" params) - else "" in - let progress_bar = get_bool_param params "progress" in - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&vdi=%s%s%s" - prefix Constants.export_raw_vdi_uri (Ref.string_of session_id) - (Ref.string_of task_id) (Ref.string_of vdi) format base in - debug "requesting HttpGet('%s','%s')" filename uri; - HttpGet (filename, uri) in - ignore(track_http_operation ~progress_bar fd rpc session_id make_command "VDI export") + let filename = List.assoc "filename" params in + let vdi = Client.VDI.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let format = + if List.mem_assoc "format" params + then "&format=" ^ (List.assoc "format" params) + else "" in + let base = + if List.mem_assoc "base" params + then "&base=" ^ (List.assoc "base" params) + else "" in + let progress_bar = get_bool_param params "progress" in + let make_command task_id = + let prefix = uri_of_someone rpc session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&vdi=%s%s%s" + prefix Constants.export_raw_vdi_uri (Ref.string_of session_id) + (Ref.string_of task_id) (Ref.string_of vdi) format base in + debug "requesting 
HttpGet('%s','%s')" filename uri; + HttpGet (filename, uri) in + ignore(track_http_operation ~progress_bar fd rpc session_id make_command "VDI export") let wait_for_task_complete rpc session_id task_id = - let finished () = - match (Client.Task.get_status rpc session_id task_id) with - `success | `failure | `cancelled -> true - | _ -> false in - (* All successes and failures are communicated via the task object *) - while not (finished ()) do - Thread.delay 1.0 - done + let finished () = + match (Client.Task.get_status rpc session_id task_id) with + `success | `failure | `cancelled -> true + | _ -> false in + (* All successes and failures are communicated via the task object *) + while not (finished ()) do + Thread.delay 1.0 + done let download_file ~__context rpc session_id task fd filename uri label = - marshal fd (Command (HttpGet (filename, uri))); - let response = ref (Response Wait) in - while !response = Response Wait do response := unmarshal fd done; - let ok = - match !response with - | Response OK -> true - | Response Failed -> - (* Need to check whether the thin cli managed to contact the server - or not. If not, we need to mark the task as failed *) - if Client.Task.get_progress rpc session_id task < 0.0 - then Db_actions.DB_Action.Task.set_status ~__context ~self:task ~value:`failure; - false - | _ -> false - in - wait_for_task_complete rpc session_id task; - - (* Check the server status -- even if the client thinks it's ok, we need - to check that the server does too. 
*) - match Client.Task.get_status rpc session_id task with - | `success -> - if ok - then - (if filename <> "" then - marshal fd (Command (Print (Printf.sprintf "%s succeeded" label)))) - else - (marshal fd (Command (PrintStderr (Printf.sprintf "%s failed, unknown error.\n" label))); - raise (ExitWithError 1)) - | `failure -> - let result = Client.Task.get_error_info rpc session_id task in - if result = [] - then - marshal fd (Command (PrintStderr (Printf.sprintf "%s failed, unknown error\n" label))) - else - raise (Api_errors.Server_error ((List.hd result),(List.tl result))) - | `cancelled -> - marshal fd (Command (PrintStderr (Printf.sprintf "%s cancelled\n" label))); - raise (ExitWithError 1) - | _ -> - marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) - raise (ExitWithError 1) + marshal fd (Command (HttpGet (filename, uri))); + let response = ref (Response Wait) in + while !response = Response Wait do response := unmarshal fd done; + let ok = + match !response with + | Response OK -> true + | Response Failed -> + (* Need to check whether the thin cli managed to contact the server + or not. If not, we need to mark the task as failed *) + if Client.Task.get_progress rpc session_id task < 0.0 + then Db_actions.DB_Action.Task.set_status ~__context ~self:task ~value:`failure; + false + | _ -> false + in + wait_for_task_complete rpc session_id task; + + (* Check the server status -- even if the client thinks it's ok, we need + to check that the server does too. 
*) + match Client.Task.get_status rpc session_id task with + | `success -> + if ok + then + (if filename <> "" then + marshal fd (Command (Print (Printf.sprintf "%s succeeded" label)))) + else + (marshal fd (Command (PrintStderr (Printf.sprintf "%s failed, unknown error.\n" label))); + raise (ExitWithError 1)) + | `failure -> + let result = Client.Task.get_error_info rpc session_id task in + if result = [] + then + marshal fd (Command (PrintStderr (Printf.sprintf "%s failed, unknown error\n" label))) + else + raise (Api_errors.Server_error ((List.hd result),(List.tl result))) + | `cancelled -> + marshal fd (Command (PrintStderr (Printf.sprintf "%s cancelled\n" label))); + raise (ExitWithError 1) + | _ -> + marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) + raise (ExitWithError 1) let download_file_with_task fd rpc session_id filename uri query label - task_name = - let task = Client.Task.create rpc session_id task_name "" in - - (* Initially mark the task progress as -1.0. The first thing the HTTP handler does it to mark it as zero *) - (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) - (* not our responsibility any more to mark the task as completed/failed/etc. *) - let __context = Context.make task_name in - Db_actions.DB_Action.Task.set_progress ~__context ~self:task ~value:(-1.0); - finally - (fun () -> - download_file ~__context rpc session_id task fd filename - (Printf.sprintf "%s?session_id=%s&task_id=%s%s%s" uri - (Ref.string_of session_id) - (Ref.string_of task) - (if query = "" then "" else "&") - query) - label) - (fun () -> Client.Task.destroy rpc session_id task) + task_name = + let task = Client.Task.create rpc session_id task_name "" in + + (* Initially mark the task progress as -1.0. 
The first thing the HTTP handler does it to mark it as zero *) + (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) + (* not our responsibility any more to mark the task as completed/failed/etc. *) + let __context = Context.make task_name in + Db_actions.DB_Action.Task.set_progress ~__context ~self:task ~value:(-1.0); + finally + (fun () -> + download_file ~__context rpc session_id task fd filename + (Printf.sprintf "%s?session_id=%s&task_id=%s%s%s" uri + (Ref.string_of session_id) + (Ref.string_of task) + (if query = "" then "" else "&") + query) + label) + (fun () -> Client.Task.destroy rpc session_id task) let pool_retrieve_wlb_report fd printer rpc session_id params = - let report = List.assoc "report" params in - let filename = List.assoc_default "filename" params "" in - let other_params = - List.filter - (fun (k, _) -> not (List.mem k (["report"; "filename"] @ stdparams))) - params - in - download_file_with_task fd rpc session_id filename - Constants.wlb_report_uri - (Printf.sprintf - "report=%s%s%s" - (Http.urlencode report) - (if List.length other_params = 0 then "" else "&") - (String.concat "&" - (List.map (fun (k, v) -> - (Printf.sprintf "%s=%s" - (Http.urlencode k) - (Http.urlencode v))) other_params))) - "Report generation" - (Printf.sprintf "WLB report: %s" report) + let report = List.assoc "report" params in + let filename = List.assoc_default "filename" params "" in + let other_params = + List.filter + (fun (k, _) -> not (List.mem k (["report"; "filename"] @ stdparams))) + params + in + download_file_with_task fd rpc session_id filename + Constants.wlb_report_uri + (Printf.sprintf + "report=%s%s%s" + (Http.urlencode report) + (if List.length other_params = 0 then "" else "&") + (String.concat "&" + (List.map (fun (k, v) -> + (Printf.sprintf "%s=%s" + (Http.urlencode k) + (Http.urlencode v))) other_params))) + "Report generation" + (Printf.sprintf "WLB report: %s" report) let 
pool_retrieve_wlb_diagnostics fd printer rpc session_id params = - let filename = List.assoc_default "filename" params "" in - download_file_with_task fd rpc session_id filename - Constants.wlb_diagnostics_uri "" - "WLB diagnostics download" - "WLB diagnostics download" + let filename = List.assoc_default "filename" params "" in + download_file_with_task fd rpc session_id filename + Constants.wlb_diagnostics_uri "" + "WLB diagnostics download" + "WLB diagnostics download" let vm_import fd printer rpc session_id params = - let sr = - if List.mem_assoc "sr-uuid" params - then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) - else - match Cli_util.get_default_sr_uuid rpc session_id with - | Some uuid -> Client.SR.get_by_uuid rpc session_id uuid - | None -> raise (Cli_util.Cli_failure "No SR specified and Pool default SR is null") - in - let _type = if List.mem_assoc "type" params - then List.assoc "type" params - else "default" in - let full_restore = get_bool_param params "preserve" in - let vm_metadata_only = get_bool_param params "metadata" in - let force = get_bool_param params "force" in - let dry_run = get_bool_param params "dry-run" in - let vdi_map = read_map_params "vdi" params in - if List.mem_assoc "url" params && List.mem_assoc "filename" params then begin - marshal fd (Command (PrintStderr "Invalid arguments. 
The 'url' and 'filename' parameters should not both be specified.\n")); - raise (ExitWithError 1) - end; - if (Vpx.serverType_of_string _type) <> Vpx.XenServer then begin - let username = List.assoc "host-username" params in - let password = List.assoc "host-password" params in - let remote_config = read_map_params "remote-config" params in - Client.VM.import_convert rpc session_id _type username password sr remote_config - end - else if List.mem_assoc "url" params then begin - let url = List.assoc "url" params in - let vm_refs = Client.VM.import ~rpc ~session_id ~url ~sr ~full_restore ~force in - let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vm_refs in - marshal fd (Command (Print (String.concat "," uuids))) - end else begin - let filename = List.assoc "filename" params in - if not vm_metadata_only && dry_run then begin - marshal fd (Command (PrintStderr "Only metadata import function support dry-run\n")); - raise (ExitWithError 1) - end; - - (* Special-case where the user accidentally sets filename= *) - let filename = - if String.endswith "ova.xml" (String.lowercase filename) - then String.sub filename 0 (String.length filename - (String.length "ova.xml")) - else filename in - - marshal fd (Command (Load (filename ^ "/ova.xml"))); - match unmarshal fd with - | Response OK -> - debug "Looking like a Zurich/Geneva XVA"; - (* Zurich/Geneva style XVA import *) - (* If a task was passed in, use that - else create a new one. 
UI uses "task_id" to pass reference [UI uses ThinCLI for Geneva import]; - xe now allows task-uuid on cmd-line *) - let using_existing_task = (List.mem_assoc "task_id" params) || (List.mem_assoc "task-uuid" params) in - let importtask = - if List.mem_assoc "task_id" params - then (Ref.of_string (List.assoc "task_id" params)) - else if List.mem_assoc "task-uuid" params then Client.Task.get_by_uuid rpc session_id (List.assoc "task-uuid" params) - else Client.Task.create rpc session_id "Import of Zurich/Geneva style XVA" "" - in - - (* Initially mark the task progress as -1.0. The first thing the import handler does it to mark it as zero *) - (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) - (* not our responsibility any more to mark the task as completed/failed/etc. *) - let __context = Context.make "import" in - Db_actions.DB_Action.Task.set_progress ~__context ~self:importtask ~value:(-1.0); - - Pervasiveext.finally (fun () -> - begin - let buffer = get_chunks fd in - begin - try - let vm, vdis = Xva.of_xml (Xml.parse_string buffer) in - (* Only import the first VM *) - let vm = List.hd vm in - let disks = List.sort compare (List.map (fun x -> x.Xva.device) vm.Xva.vbds) in - let host = - if sr<>Ref.null - then Importexport.find_host_for_sr ~__context sr - else Helpers.get_localhost __context - in - let address = Client.Host.get_address rpc session_id host in - (* Although it's inefficient use a loopback HTTP connection *) - debug "address is: %s" address; - let request = Xapi_http.http_request - ~cookie:(["session_id", Ref.string_of session_id; - "task_id", Ref.string_of importtask] @ - (if sr <> Ref.null then [ "sr_id", Ref.string_of sr ] else [])) - Http.Put Constants.import_uri in - (* Stream the disk data from the client *) - let writer (response, sock) = - try - (* First add the metadata file *) - let hdr = Tar_unix.Header.make Xva.xml_filename (Int64.of_int (String.length buffer)) in - 
Tar_unix.write_block hdr (fun ofd -> Unixext.really_write_string ofd buffer) sock; - List.iter - (fun vdi -> - let counter = ref 0 in - let finished = ref false in - while not(!finished) do - (* Nb. - * The check for task cancelling is done here in the cli server. This is due to the fact that we've got - * 3 parties talking to one another here: the thin cli, the cli server and the import handler. If the - * import handler was checking, it would close its socket on task cancelling. This only happens after - * each chunk is sent. Unfortunately the cli server wouldn't notice until it had already requested the - * data from the thin cli, and would have to wait for it to finish sending its chunk before it could - * alert it to the failure. *) - - (let l=Client.Task.get_current_operations rpc session_id importtask in - if List.exists (fun (_,x) -> x=`cancel) l - then raise Api_errors.(Server_error(task_cancelled,[Ref.string_of importtask]))); - - (* Cancelling will close the connection, which will be interpreted by the import handler as failure *) - - let chunk = Printf.sprintf "%s/chunk-%09d.gz" vdi !counter in - marshal fd (Command (Load (filename ^ "/" ^ chunk))); - match unmarshal fd with - | Response OK -> - (* A single chunk always follows the OK *) - let length = match unmarshal fd with - | Blob (Chunk x) -> x - | _ -> failwith "Thin CLI protocol error" - in - let hdr = Tar_unix.Header.make chunk (Int64.of_int32 length) in - Tar_unix.write_block hdr - (fun ofd -> - let limit = Int64.of_int32 length in - let total_bytes = Unixext.copy_file ~limit fd ofd in - debug "File %s has size %Ld; we received %Ld%s" chunk limit total_bytes - (if limit = total_bytes then "" else " ** truncated **") - ) - sock; - (match unmarshal fd with | Blob End -> () | _ -> (failwith "Thin CLI protocol error")); - incr counter - | Response Failed -> - finished := true - | m -> - debug "Protocol failure: unexpected: %s" (string_of_message m) - done) disks; - Tar_unix.write_end sock; - true - 
with e -> - debug "vm_import caught %s while writing data" (Printexc.to_string e); - false - in - - let open Xmlrpc_client in - let transport = SSL(SSL.make ~use_stunnel_cache:true ~task_id:(Ref.string_of (Context.get_task_id __context)) (), address, !Xapi_globs.https_port) in - let stream_ok = with_transport transport (with_http request writer) in - if not stream_ok then - begin - (* If the progress is negative, we never got to talk to the import handler, and must complete *) - (* the task ourselves *) - if Client.Task.get_progress rpc session_id importtask < 0.0 - then Db_actions.DB_Action.Task.set_status ~__context ~self:importtask ~value:`failure; - end; - - wait_for_task_complete rpc session_id importtask; - (match Client.Task.get_status rpc session_id importtask with - | `success -> - if stream_ok then - let result = Client.Task.get_result rpc session_id importtask in - let vmrefs = API.Legacy.From.ref_VM_set "" (Xml.parse_string result) in - let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vmrefs in - marshal fd (Command (Print (String.concat "," uuids))) - else - begin - marshal fd (Command (PrintStderr "Warning: Streaming failed, but task succeeded. 
Manual check required.\n")); - raise (ExitWithError 1) - end - | `failure -> - let result = Client.Task.get_error_info rpc session_id importtask in - if result = [] then - begin - marshal fd (Command (PrintStderr "Import failed, unknown error\n")); - raise (ExitWithError 1) - end - else Cli_util.server_error (List.hd result) (List.tl result) fd - | `cancelled -> - marshal fd (Command (PrintStderr "Import cancelled\n")); - raise (ExitWithError 1) - | _ -> - marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) - raise (ExitWithError 1)) - with e -> - marshal fd (Command (Debug ("Caught exception: " ^ (Printexc.to_string e)))); - marshal fd (Command (PrintStderr "Failed to import directory-format XVA\n")); - debug "Import failed with exception: %s" (Printexc.to_string e); - (if (Db_actions.DB_Action.Task.get_progress ~__context ~self:importtask = (-1.0)) - then TaskHelper.failed ~__context:(Context.from_forwarded_task importtask) (Api_errors.Server_error(Api_errors.import_error_generic,[(Printexc.to_string e)])) - ); - raise (ExitWithError 2) - end - end) - (fun () -> - if using_existing_task then () else Client.Task.destroy rpc session_id importtask) - | Response Failed -> - (* possibly a Rio import *) - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&restore=%b&force=%b&dry_run=%b%s%s" - prefix - (if vm_metadata_only then Constants.import_metadata_uri else Constants.import_uri) - (Ref.string_of session_id) (Ref.string_of task_id) full_restore force dry_run - (if sr <> Ref.null then "&sr_id=" ^ (Ref.string_of sr) else "") - (String.concat "" (List.map (fun (a, b) -> "&vdi:" ^ a ^ "=" ^ b) vdi_map)) - in - debug "requesting HttpPut('%s','%s')" filename uri; - HttpPut (filename, uri) in - let importtask = - if List.mem_assoc "task-uuid" params then - Some (Client.Task.get_by_uuid rpc session_id (List.assoc "task-uuid" params)) - else None (* 
track_http_operation will create one for us *) in - let result = track_http_operation ?use_existing_task:importtask fd rpc session_id make_command "VM import" in - let vmrefs = API.Legacy.From.ref_VM_set "" (Xml.parse_string result) in - let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vmrefs in - let uuids = if uuids = [] && dry_run then ["xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"] else uuids in - marshal fd (Command (Print (String.concat "," uuids))) - | _ -> failwith "Thin CLI protocol error" - end + let sr = + if List.mem_assoc "sr-uuid" params + then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) + else + match Cli_util.get_default_sr_uuid rpc session_id with + | Some uuid -> Client.SR.get_by_uuid rpc session_id uuid + | None -> raise (Cli_util.Cli_failure "No SR specified and Pool default SR is null") + in + let _type = if List.mem_assoc "type" params + then List.assoc "type" params + else "default" in + let full_restore = get_bool_param params "preserve" in + let vm_metadata_only = get_bool_param params "metadata" in + let force = get_bool_param params "force" in + let dry_run = get_bool_param params "dry-run" in + let vdi_map = read_map_params "vdi" params in + if List.mem_assoc "url" params && List.mem_assoc "filename" params then begin + marshal fd (Command (PrintStderr "Invalid arguments. 
The 'url' and 'filename' parameters should not both be specified.\n")); + raise (ExitWithError 1) + end; + if (Vpx.serverType_of_string _type) <> Vpx.XenServer then begin + let username = List.assoc "host-username" params in + let password = List.assoc "host-password" params in + let remote_config = read_map_params "remote-config" params in + Client.VM.import_convert rpc session_id _type username password sr remote_config + end + else if List.mem_assoc "url" params then begin + let url = List.assoc "url" params in + let vm_refs = Client.VM.import ~rpc ~session_id ~url ~sr ~full_restore ~force in + let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vm_refs in + marshal fd (Command (Print (String.concat "," uuids))) + end else begin + let filename = List.assoc "filename" params in + if not vm_metadata_only && dry_run then begin + marshal fd (Command (PrintStderr "Only metadata import function support dry-run\n")); + raise (ExitWithError 1) + end; + + (* Special-case where the user accidentally sets filename= *) + let filename = + if String.endswith "ova.xml" (String.lowercase filename) + then String.sub filename 0 (String.length filename - (String.length "ova.xml")) + else filename in + + marshal fd (Command (Load (filename ^ "/ova.xml"))); + match unmarshal fd with + | Response OK -> + debug "Looking like a Zurich/Geneva XVA"; + (* Zurich/Geneva style XVA import *) + (* If a task was passed in, use that - else create a new one. 
UI uses "task_id" to pass reference [UI uses ThinCLI for Geneva import]; + xe now allows task-uuid on cmd-line *) + let using_existing_task = (List.mem_assoc "task_id" params) || (List.mem_assoc "task-uuid" params) in + let importtask = + if List.mem_assoc "task_id" params + then (Ref.of_string (List.assoc "task_id" params)) + else if List.mem_assoc "task-uuid" params then Client.Task.get_by_uuid rpc session_id (List.assoc "task-uuid" params) + else Client.Task.create rpc session_id "Import of Zurich/Geneva style XVA" "" + in + + (* Initially mark the task progress as -1.0. The first thing the import handler does it to mark it as zero *) + (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) + (* not our responsibility any more to mark the task as completed/failed/etc. *) + let __context = Context.make "import" in + Db_actions.DB_Action.Task.set_progress ~__context ~self:importtask ~value:(-1.0); + + Pervasiveext.finally (fun () -> + begin + let buffer = get_chunks fd in + begin + try + let vm, vdis = Xva.of_xml (Xml.parse_string buffer) in + (* Only import the first VM *) + let vm = List.hd vm in + let disks = List.sort compare (List.map (fun x -> x.Xva.device) vm.Xva.vbds) in + let host = + if sr<>Ref.null + then Importexport.find_host_for_sr ~__context sr + else Helpers.get_localhost __context + in + let address = Client.Host.get_address rpc session_id host in + (* Although it's inefficient use a loopback HTTP connection *) + debug "address is: %s" address; + let request = Xapi_http.http_request + ~cookie:(["session_id", Ref.string_of session_id; + "task_id", Ref.string_of importtask] @ + (if sr <> Ref.null then [ "sr_id", Ref.string_of sr ] else [])) + Http.Put Constants.import_uri in + (* Stream the disk data from the client *) + let writer (response, sock) = + try + (* First add the metadata file *) + let hdr = Tar_unix.Header.make Xva.xml_filename (Int64.of_int (String.length buffer)) in + 
Tar_unix.write_block hdr (fun ofd -> Unixext.really_write_string ofd buffer) sock; + List.iter + (fun vdi -> + let counter = ref 0 in + let finished = ref false in + while not(!finished) do + (* Nb. + * The check for task cancelling is done here in the cli server. This is due to the fact that we've got + * 3 parties talking to one another here: the thin cli, the cli server and the import handler. If the + * import handler was checking, it would close its socket on task cancelling. This only happens after + * each chunk is sent. Unfortunately the cli server wouldn't notice until it had already requested the + * data from the thin cli, and would have to wait for it to finish sending its chunk before it could + * alert it to the failure. *) + + (let l=Client.Task.get_current_operations rpc session_id importtask in + if List.exists (fun (_,x) -> x=`cancel) l + then raise Api_errors.(Server_error(task_cancelled,[Ref.string_of importtask]))); + + (* Cancelling will close the connection, which will be interpreted by the import handler as failure *) + + let chunk = Printf.sprintf "%s/chunk-%09d.gz" vdi !counter in + marshal fd (Command (Load (filename ^ "/" ^ chunk))); + match unmarshal fd with + | Response OK -> + (* A single chunk always follows the OK *) + let length = match unmarshal fd with + | Blob (Chunk x) -> x + | _ -> failwith "Thin CLI protocol error" + in + let hdr = Tar_unix.Header.make chunk (Int64.of_int32 length) in + Tar_unix.write_block hdr + (fun ofd -> + let limit = Int64.of_int32 length in + let total_bytes = Unixext.copy_file ~limit fd ofd in + debug "File %s has size %Ld; we received %Ld%s" chunk limit total_bytes + (if limit = total_bytes then "" else " ** truncated **") + ) + sock; + (match unmarshal fd with | Blob End -> () | _ -> (failwith "Thin CLI protocol error")); + incr counter + | Response Failed -> + finished := true + | m -> + debug "Protocol failure: unexpected: %s" (string_of_message m) + done) disks; + Tar_unix.write_end sock; + true + 
with e -> + debug "vm_import caught %s while writing data" (Printexc.to_string e); + false + in + + let open Xmlrpc_client in + let transport = SSL(SSL.make ~use_stunnel_cache:true ~task_id:(Ref.string_of (Context.get_task_id __context)) (), address, !Xapi_globs.https_port) in + let stream_ok = with_transport transport (with_http request writer) in + if not stream_ok then + begin + (* If the progress is negative, we never got to talk to the import handler, and must complete *) + (* the task ourselves *) + if Client.Task.get_progress rpc session_id importtask < 0.0 + then Db_actions.DB_Action.Task.set_status ~__context ~self:importtask ~value:`failure; + end; + + wait_for_task_complete rpc session_id importtask; + (match Client.Task.get_status rpc session_id importtask with + | `success -> + if stream_ok then + let result = Client.Task.get_result rpc session_id importtask in + let vmrefs = API.Legacy.From.ref_VM_set "" (Xml.parse_string result) in + let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vmrefs in + marshal fd (Command (Print (String.concat "," uuids))) + else + begin + marshal fd (Command (PrintStderr "Warning: Streaming failed, but task succeeded. 
Manual check required.\n")); + raise (ExitWithError 1) + end + | `failure -> + let result = Client.Task.get_error_info rpc session_id importtask in + if result = [] then + begin + marshal fd (Command (PrintStderr "Import failed, unknown error\n")); + raise (ExitWithError 1) + end + else Cli_util.server_error (List.hd result) (List.tl result) fd + | `cancelled -> + marshal fd (Command (PrintStderr "Import cancelled\n")); + raise (ExitWithError 1) + | _ -> + marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) + raise (ExitWithError 1)) + with e -> + marshal fd (Command (Debug ("Caught exception: " ^ (Printexc.to_string e)))); + marshal fd (Command (PrintStderr "Failed to import directory-format XVA\n")); + debug "Import failed with exception: %s" (Printexc.to_string e); + (if (Db_actions.DB_Action.Task.get_progress ~__context ~self:importtask = (-1.0)) + then TaskHelper.failed ~__context:(Context.from_forwarded_task importtask) (Api_errors.Server_error(Api_errors.import_error_generic,[(Printexc.to_string e)])) + ); + raise (ExitWithError 2) + end + end) + (fun () -> + if using_existing_task then () else Client.Task.destroy rpc session_id importtask) + | Response Failed -> + (* possibly a Rio import *) + let make_command task_id = + let prefix = uri_of_someone rpc session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&restore=%b&force=%b&dry_run=%b%s%s" + prefix + (if vm_metadata_only then Constants.import_metadata_uri else Constants.import_uri) + (Ref.string_of session_id) (Ref.string_of task_id) full_restore force dry_run + (if sr <> Ref.null then "&sr_id=" ^ (Ref.string_of sr) else "") + (String.concat "" (List.map (fun (a, b) -> "&vdi:" ^ a ^ "=" ^ b) vdi_map)) + in + debug "requesting HttpPut('%s','%s')" filename uri; + HttpPut (filename, uri) in + let importtask = + if List.mem_assoc "task-uuid" params then + Some (Client.Task.get_by_uuid rpc session_id (List.assoc "task-uuid" params)) + else None (* 
track_http_operation will create one for us *) in + let result = track_http_operation ?use_existing_task:importtask fd rpc session_id make_command "VM import" in + let vmrefs = API.Legacy.From.ref_VM_set "" (Xml.parse_string result) in + let uuids = List.map (fun vm -> Client.VM.get_uuid rpc session_id vm) vmrefs in + let uuids = if uuids = [] && dry_run then ["xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"] else uuids in + marshal fd (Command (Print (String.concat "," uuids))) + | _ -> failwith "Thin CLI protocol error" + end let blob_get fd printer rpc session_id params = - let blob_uuid = List.assoc "uuid" params in - let blob_ref = Client.Blob.get_by_uuid rpc session_id blob_uuid in - let filename = List.assoc "filename" params in - let __context = Context.make "import" in - let blobtask = Client.Task.create rpc session_id (Printf.sprintf "Obtaining blob, ref=%s" (Ref.string_of blob_ref)) "" in - Db_actions.DB_Action.Task.set_progress ~__context ~self:blobtask ~value:(-1.0); - - let bloburi = Printf.sprintf "%s?session_id=%s&task_id=%s&ref=%s" - (Constants.blob_uri) (Ref.string_of session_id) (Ref.string_of blobtask) (Ref.string_of blob_ref) - in - finally - (fun () -> - marshal fd (Command (HttpGet (filename, bloburi))); - let response = ref (Response Wait) in - while !response = Response Wait do response := unmarshal fd done; - let ok = match !response with - | Response OK -> true - | Response Failed -> - if Client.Task.get_progress rpc session_id blobtask < 0.0 - then Db_actions.DB_Action.Task.set_status ~__context ~self:blobtask ~value:`failure; - false - | _ -> false - in - - wait_for_task_complete rpc session_id blobtask; - - (* if the client thinks it's ok, check that the server does too *) - (match Client.Task.get_status rpc session_id blobtask with - | `success -> - if ok - then (marshal fd (Command (Print "Blob get succeeded"))) - else (marshal fd (Command (PrintStderr "Blob get failed, unknown error.\n")); - raise (ExitWithError 1)) - | `failure -> - let 
result = Client.Task.get_error_info rpc session_id blobtask in - if result = [] - then marshal fd (Command (PrintStderr "Blob get failed, unknown error\n")) - else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) - | `cancelled -> - marshal fd (Command (PrintStderr "Blob get cancelled\n")); - raise (ExitWithError 1) - | _ -> - marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) - raise (ExitWithError 1) - )) - (fun () -> Client.Task.destroy rpc session_id blobtask) + let blob_uuid = List.assoc "uuid" params in + let blob_ref = Client.Blob.get_by_uuid rpc session_id blob_uuid in + let filename = List.assoc "filename" params in + let __context = Context.make "import" in + let blobtask = Client.Task.create rpc session_id (Printf.sprintf "Obtaining blob, ref=%s" (Ref.string_of blob_ref)) "" in + Db_actions.DB_Action.Task.set_progress ~__context ~self:blobtask ~value:(-1.0); + + let bloburi = Printf.sprintf "%s?session_id=%s&task_id=%s&ref=%s" + (Constants.blob_uri) (Ref.string_of session_id) (Ref.string_of blobtask) (Ref.string_of blob_ref) + in + finally + (fun () -> + marshal fd (Command (HttpGet (filename, bloburi))); + let response = ref (Response Wait) in + while !response = Response Wait do response := unmarshal fd done; + let ok = match !response with + | Response OK -> true + | Response Failed -> + if Client.Task.get_progress rpc session_id blobtask < 0.0 + then Db_actions.DB_Action.Task.set_status ~__context ~self:blobtask ~value:`failure; + false + | _ -> false + in + + wait_for_task_complete rpc session_id blobtask; + + (* if the client thinks it's ok, check that the server does too *) + (match Client.Task.get_status rpc session_id blobtask with + | `success -> + if ok + then (marshal fd (Command (Print "Blob get succeeded"))) + else (marshal fd (Command (PrintStderr "Blob get failed, unknown error.\n")); + raise (ExitWithError 1)) + | `failure -> + let result = Client.Task.get_error_info rpc session_id 
blobtask in + if result = [] + then marshal fd (Command (PrintStderr "Blob get failed, unknown error\n")) + else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) + | `cancelled -> + marshal fd (Command (PrintStderr "Blob get cancelled\n")); + raise (ExitWithError 1) + | _ -> + marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) + raise (ExitWithError 1) + )) + (fun () -> Client.Task.destroy rpc session_id blobtask) let blob_put fd printer rpc session_id params = - let blob_uuid = List.assoc "uuid" params in - let blob_ref = Client.Blob.get_by_uuid rpc session_id blob_uuid in - let filename = List.assoc "filename" params in - let __context = Context.make "import" in - let blobtask = Client.Task.create rpc session_id (Printf.sprintf "Blob PUT, ref=%s" (Ref.string_of blob_ref)) "" in - Db_actions.DB_Action.Task.set_progress ~__context ~self:blobtask ~value:(-1.0); - - let bloburi = Printf.sprintf "%s?session_id=%s&task_id=%s&ref=%s" - (Constants.blob_uri) (Ref.string_of session_id) (Ref.string_of blobtask) (Ref.string_of blob_ref) - in - finally - (fun () -> - marshal fd (Command (HttpPut (filename, bloburi))); - let response = ref (Response Wait) in - while !response = Response Wait do response := unmarshal fd done; - let ok = match !response with - | Response OK -> true - | Response Failed -> - if Client.Task.get_progress rpc session_id blobtask < 0.0 - then Db_actions.DB_Action.Task.set_status ~__context ~self:blobtask ~value:`failure; - false - | _ -> false - in - - wait_for_task_complete rpc session_id blobtask; - - (* if the client thinks it's ok, check that the server does too *) - (match Client.Task.get_status rpc session_id blobtask with - | `success -> - if ok - then (marshal fd (Command (Print "Blob put succeeded"))) - else (marshal fd (Command (PrintStderr "Blob put failed, unknown error.\n")); - raise (ExitWithError 1)) - | `failure -> - let result = Client.Task.get_error_info rpc session_id blobtask in - if 
result = [] - then marshal fd (Command (PrintStderr "Blob put failed, unknown error\n")) - else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) - | `cancelled -> - marshal fd (Command (PrintStderr "Blob put cancelled\n")); - raise (ExitWithError 1) - | _ -> - marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) - raise (ExitWithError 1) - )) - (fun () -> Client.Task.destroy rpc session_id blobtask) + let blob_uuid = List.assoc "uuid" params in + let blob_ref = Client.Blob.get_by_uuid rpc session_id blob_uuid in + let filename = List.assoc "filename" params in + let __context = Context.make "import" in + let blobtask = Client.Task.create rpc session_id (Printf.sprintf "Blob PUT, ref=%s" (Ref.string_of blob_ref)) "" in + Db_actions.DB_Action.Task.set_progress ~__context ~self:blobtask ~value:(-1.0); + + let bloburi = Printf.sprintf "%s?session_id=%s&task_id=%s&ref=%s" + (Constants.blob_uri) (Ref.string_of session_id) (Ref.string_of blobtask) (Ref.string_of blob_ref) + in + finally + (fun () -> + marshal fd (Command (HttpPut (filename, bloburi))); + let response = ref (Response Wait) in + while !response = Response Wait do response := unmarshal fd done; + let ok = match !response with + | Response OK -> true + | Response Failed -> + if Client.Task.get_progress rpc session_id blobtask < 0.0 + then Db_actions.DB_Action.Task.set_status ~__context ~self:blobtask ~value:`failure; + false + | _ -> false + in + + wait_for_task_complete rpc session_id blobtask; + + (* if the client thinks it's ok, check that the server does too *) + (match Client.Task.get_status rpc session_id blobtask with + | `success -> + if ok + then (marshal fd (Command (Print "Blob put succeeded"))) + else (marshal fd (Command (PrintStderr "Blob put failed, unknown error.\n")); + raise (ExitWithError 1)) + | `failure -> + let result = Client.Task.get_error_info rpc session_id blobtask in + if result = [] + then marshal fd (Command (PrintStderr "Blob put 
failed, unknown error\n")) + else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) + | `cancelled -> + marshal fd (Command (PrintStderr "Blob put cancelled\n")); + raise (ExitWithError 1) + | _ -> + marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) + raise (ExitWithError 1) + )) + (fun () -> Client.Task.destroy rpc session_id blobtask) let blob_create printer rpc session_id params = - let name = List.assoc "name" params in - let mime_type = List.assoc_default "mime-type" params "" in - let public = try bool_of_string "public" (List.assoc "public" params) with _ -> false in - if (List.mem_assoc "vm-uuid" params) then - begin - let uuid = List.assoc "vm-uuid" params in - let vm = Client.VM.get_by_uuid rpc session_id uuid in - let blob = Client.VM.create_new_blob rpc session_id vm name mime_type public in - let blob_uuid = Client.Blob.get_uuid rpc session_id blob in - printer (Cli_printer.PList [blob_uuid]) - end - else if (List.mem_assoc "pool-uuid" params) then - begin - let uuid = List.assoc "pool-uuid" params in - let pool = Client.Pool.get_by_uuid rpc session_id uuid in - let blob = Client.Pool.create_new_blob rpc session_id pool name mime_type public in - let blob_uuid = Client.Blob.get_uuid rpc session_id blob in - printer (Cli_printer.PList [blob_uuid]) - end - else if (List.mem_assoc "sr-uuid" params) then - begin - let uuid = List.assoc "sr-uuid" params in - let sr = Client.SR.get_by_uuid rpc session_id uuid in - let blob = Client.SR.create_new_blob rpc session_id sr name mime_type public in - let blob_uuid = Client.Blob.get_uuid rpc session_id blob in - printer (Cli_printer.PList [blob_uuid]) - end - else if (List.mem_assoc "host-uuid" params) then - begin - let uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id uuid in - let blob = Client.Host.create_new_blob rpc session_id host name mime_type public in - let blob_uuid = Client.Blob.get_uuid rpc session_id blob in - 
printer (Cli_printer.PList [blob_uuid]) - end - else if (List.mem_assoc "network-uuid" params) then - begin - let uuid = List.assoc "network-uuid" params in - let network = Client.Network.get_by_uuid rpc session_id uuid in - let blob = Client.Network.create_new_blob rpc session_id network name mime_type public in - let blob_uuid = Client.Blob.get_uuid rpc session_id blob in - printer (Cli_printer.PList [blob_uuid]) - end - else - raise (Cli_util.Cli_failure "Need one of: vm-uuid, host-uuid, network-uuid, sr-uuid or pool-uuid") + let name = List.assoc "name" params in + let mime_type = List.assoc_default "mime-type" params "" in + let public = try bool_of_string "public" (List.assoc "public" params) with _ -> false in + if (List.mem_assoc "vm-uuid" params) then + begin + let uuid = List.assoc "vm-uuid" params in + let vm = Client.VM.get_by_uuid rpc session_id uuid in + let blob = Client.VM.create_new_blob rpc session_id vm name mime_type public in + let blob_uuid = Client.Blob.get_uuid rpc session_id blob in + printer (Cli_printer.PList [blob_uuid]) + end + else if (List.mem_assoc "pool-uuid" params) then + begin + let uuid = List.assoc "pool-uuid" params in + let pool = Client.Pool.get_by_uuid rpc session_id uuid in + let blob = Client.Pool.create_new_blob rpc session_id pool name mime_type public in + let blob_uuid = Client.Blob.get_uuid rpc session_id blob in + printer (Cli_printer.PList [blob_uuid]) + end + else if (List.mem_assoc "sr-uuid" params) then + begin + let uuid = List.assoc "sr-uuid" params in + let sr = Client.SR.get_by_uuid rpc session_id uuid in + let blob = Client.SR.create_new_blob rpc session_id sr name mime_type public in + let blob_uuid = Client.Blob.get_uuid rpc session_id blob in + printer (Cli_printer.PList [blob_uuid]) + end + else if (List.mem_assoc "host-uuid" params) then + begin + let uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id uuid in + let blob = Client.Host.create_new_blob rpc 
session_id host name mime_type public in + let blob_uuid = Client.Blob.get_uuid rpc session_id blob in + printer (Cli_printer.PList [blob_uuid]) + end + else if (List.mem_assoc "network-uuid" params) then + begin + let uuid = List.assoc "network-uuid" params in + let network = Client.Network.get_by_uuid rpc session_id uuid in + let blob = Client.Network.create_new_blob rpc session_id network name mime_type public in + let blob_uuid = Client.Blob.get_uuid rpc session_id blob in + printer (Cli_printer.PList [blob_uuid]) + end + else + raise (Cli_util.Cli_failure "Need one of: vm-uuid, host-uuid, network-uuid, sr-uuid or pool-uuid") let export_common fd printer rpc session_id params filename num ?task_uuid use_compression preserve_power_state vm = - let vm_metadata_only : bool = get_bool_param params "metadata" in - let export_snapshots : bool = - if List.mem_assoc "include-snapshots" params - then bool_of_string "include-snapshots" (List.assoc "include-snapshots" params) - else vm_metadata_only in - let vm_metadata_only = get_bool_param params "metadata" in - let vm_record = vm.record () in - let exporttask, task_destroy_fn = - match task_uuid with - | None -> (* manage task internally *) - let exporttask = Client.Task.create rpc session_id (Printf.sprintf "Export of VM: %s" (vm_record.API.vM_uuid)) "" in - (exporttask,(fun ()->Client.Task.destroy rpc session_id exporttask)) - | Some task_uuid -> (* do not destroy the task that has been received *) - ((Client.Task.get_by_uuid rpc session_id task_uuid),(fun ()->())) - in - - (* Initially mark the task progress as -1.0. The first thing the export handler does it to mark it as zero *) - (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) - (* not our responsibility any more to mark the task as completed/failed/etc. 
*) - let __context = Context.make "export" in - Db_actions.DB_Action.Task.set_progress ~__context ~self:exporttask ~value:(-1.0); - - finally - (fun () -> - let f = if !num > 1 then filename ^ (string_of_int !num) else filename in - download_file ~__context rpc session_id exporttask fd f - (Printf.sprintf - "%s?session_id=%s&task_id=%s&ref=%s&%s=%s&preserve_power_state=%b&export_snapshots=%b" - (if vm_metadata_only then Constants.export_metadata_uri else Constants.export_uri) - (Ref.string_of session_id) - (Ref.string_of exporttask) - (Ref.string_of (vm.getref ())) - Constants.use_compression - (if use_compression then "true" else "false") - preserve_power_state - export_snapshots) - "Export"; - num := !num + 1) - (fun () -> task_destroy_fn ()) + let vm_metadata_only : bool = get_bool_param params "metadata" in + let export_snapshots : bool = + if List.mem_assoc "include-snapshots" params + then bool_of_string "include-snapshots" (List.assoc "include-snapshots" params) + else vm_metadata_only in + let vm_metadata_only = get_bool_param params "metadata" in + let vm_record = vm.record () in + let exporttask, task_destroy_fn = + match task_uuid with + | None -> (* manage task internally *) + let exporttask = Client.Task.create rpc session_id (Printf.sprintf "Export of VM: %s" (vm_record.API.vM_uuid)) "" in + (exporttask,(fun ()->Client.Task.destroy rpc session_id exporttask)) + | Some task_uuid -> (* do not destroy the task that has been received *) + ((Client.Task.get_by_uuid rpc session_id task_uuid),(fun ()->())) + in + + (* Initially mark the task progress as -1.0. The first thing the export handler does it to mark it as zero *) + (* This is used as a flag to show that the 'ownership' of the task has been passed to the handler, and it's *) + (* not our responsibility any more to mark the task as completed/failed/etc. 
*) + let __context = Context.make "export" in + Db_actions.DB_Action.Task.set_progress ~__context ~self:exporttask ~value:(-1.0); + + finally + (fun () -> + let f = if !num > 1 then filename ^ (string_of_int !num) else filename in + download_file ~__context rpc session_id exporttask fd f + (Printf.sprintf + "%s?session_id=%s&task_id=%s&ref=%s&%s=%s&preserve_power_state=%b&export_snapshots=%b" + (if vm_metadata_only then Constants.export_metadata_uri else Constants.export_uri) + (Ref.string_of session_id) + (Ref.string_of exporttask) + (Ref.string_of (vm.getref ())) + Constants.use_compression + (if use_compression then "true" else "false") + preserve_power_state + export_snapshots) + "Export"; + num := !num + 1) + (fun () -> task_destroy_fn ()) let vm_export fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let use_compression = get_bool_param params "compress" in - let preserve_power_state = get_bool_param params "preserve-power-state" in - let task_uuid = if (List.mem_assoc "task-uuid" params) then Some (List.assoc "task-uuid" params) else None in - let num = ref 1 in - let op vm = - export_common fd printer rpc session_id params filename num ?task_uuid use_compression preserve_power_state vm - in - ignore(do_vm_op printer rpc session_id op params ["filename"; "metadata"; "compress"; "preserve-power-state"; "include-snapshots"]) + let filename = List.assoc "filename" params in + let use_compression = get_bool_param params "compress" in + let preserve_power_state = get_bool_param params "preserve-power-state" in + let task_uuid = if (List.mem_assoc "task-uuid" params) then Some (List.assoc "task-uuid" params) else None in + let num = ref 1 in + let op vm = + export_common fd printer rpc session_id params filename num ?task_uuid use_compression preserve_power_state vm + in + ignore(do_vm_op printer rpc session_id op params ["filename"; "metadata"; "compress"; "preserve-power-state"; "include-snapshots"]) let vm_export_aux 
obj_type fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let use_compression = get_bool_param params "compress" in - let preserve_power_state = get_bool_param params "preserve-power-state" in - let num = ref 1 in - let uuid = List.assoc (obj_type ^ "-uuid") params in - let ref = Client.VM.get_by_uuid rpc session_id uuid in - export_common fd printer rpc session_id params filename num use_compression preserve_power_state (vm_record rpc session_id ref) + let filename = List.assoc "filename" params in + let use_compression = get_bool_param params "compress" in + let preserve_power_state = get_bool_param params "preserve-power-state" in + let num = ref 1 in + let uuid = List.assoc (obj_type ^ "-uuid") params in + let ref = Client.VM.get_by_uuid rpc session_id uuid in + export_common fd printer rpc session_id params filename num use_compression preserve_power_state (vm_record rpc session_id ref) let vm_copy_bios_strings printer rpc session_id params = - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in - let op vm = - Client.VM.copy_bios_strings rpc session_id (vm.getref ()) host in - ignore(do_vm_op printer rpc session_id op params ["host-uuid"]) + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in + let op vm = + Client.VM.copy_bios_strings rpc session_id (vm.getref ()) host in + ignore(do_vm_op printer rpc session_id op params ["host-uuid"]) let vm_is_bios_customized printer rpc session_id params = - let op vm = - let bios_strings = Client.VM.get_bios_strings rpc session_id (vm.getref ()) in - if List.length bios_strings = 0 then - printer (Cli_printer.PMsg "The BIOS strings of this VM have not yet been set.") - else if bios_strings = Xapi_globs.generic_bios_strings then - printer (Cli_printer.PMsg "This VM is BIOS-generic.") - else - printer (Cli_printer.PMsg "This VM is BIOS-customized.") - in - ignore(do_vm_op printer rpc session_id op params []) + let op vm 
= + let bios_strings = Client.VM.get_bios_strings rpc session_id (vm.getref ()) in + if List.length bios_strings = 0 then + printer (Cli_printer.PMsg "The BIOS strings of this VM have not yet been set.") + else if bios_strings = Xapi_globs.generic_bios_strings then + printer (Cli_printer.PMsg "This VM is BIOS-generic.") + else + printer (Cli_printer.PMsg "This VM is BIOS-customized.") + in + ignore(do_vm_op printer rpc session_id op params []) let template_export fd printer = vm_export_aux "template" fd printer let snapshot_export fd printer = vm_export_aux "snapshot" fd printer let vm_vcpu_hotplug printer rpc session_id params = - let vcpus=List.assoc "new-vcpus" params in - let nvcpu = - try - Int64.of_string vcpus - with - _ -> failwith "Failed to parse parameter 'new-vcpus': expecting an integer" - in - let op vm = - Client.VM.set_VCPUs_number_live ~rpc ~session_id ~self:(vm.getref ()) ~nvcpu - in - ignore(do_vm_op printer rpc session_id op params ["new-vcpus"]) + let vcpus=List.assoc "new-vcpus" params in + let nvcpu = + try + Int64.of_string vcpus + with + _ -> failwith "Failed to parse parameter 'new-vcpus': expecting an integer" + in + let op vm = + Client.VM.set_VCPUs_number_live ~rpc ~session_id ~self:(vm.getref ()) ~nvcpu + in + ignore(do_vm_op printer rpc session_id op params ["new-vcpus"]) let vm_vif_list printer rpc session_id params = - let op vm = - let vm_record = vm.record () in - let vifs = vm_record.API.vM_VIFs in - let table vif = - let record = vif_record rpc session_id vif in - let selected = List.hd (select_fields params [record] [ "uuid"; "device"; "MAC"; "network-uuid"; "network-name-label"; "vm-name-label"]) in - List.map print_field selected in - printer (Cli_printer.PTable (List.map table vifs)) - in - ignore(do_vm_op printer rpc session_id op (("multiple","true")::params) ["params"]) (* always list multiple vms *) + let op vm = + let vm_record = vm.record () in + let vifs = vm_record.API.vM_VIFs in + let table vif = + let record = 
vif_record rpc session_id vif in + let selected = List.hd (select_fields params [record] [ "uuid"; "device"; "MAC"; "network-uuid"; "network-name-label"; "vm-name-label"]) in + List.map print_field selected in + printer (Cli_printer.PTable (List.map table vifs)) + in + ignore(do_vm_op printer rpc session_id op (("multiple","true")::params) ["params"]) (* always list multiple vms *) let with_database_vdi rpc session_id params f = - let database_params = read_map_params "database" params in - let database_uuid = - if List.mem_assoc "vdi-uuid" database_params then - List.assoc "vdi-uuid" database_params - else - failwith "A parameter of the form 'database:vdi-uuid=' must be specified to run this command." - in - let database_vdi = Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:database_uuid in - let database_session = Client.VDI.open_database ~rpc ~session_id ~self:database_vdi in - finally - (fun () -> f database_session) - (fun () -> Client.Session.logout ~rpc ~session_id:database_session) + let database_params = read_map_params "database" params in + let database_uuid = + if List.mem_assoc "vdi-uuid" database_params then + List.assoc "vdi-uuid" database_params + else + failwith "A parameter of the form 'database:vdi-uuid=' must be specified to run this command." 
+ in + let database_vdi = Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:database_uuid in + let database_session = Client.VDI.open_database ~rpc ~session_id ~self:database_vdi in + finally + (fun () -> f database_session) + (fun () -> Client.Session.logout ~rpc ~session_id:database_session) let vm_recover printer rpc session_id params = - let force = get_bool_param params "force" in - let uuid = List.assoc "uuid" params in - with_database_vdi rpc session_id params - (fun database_session -> - let vm = Client.VM.get_by_uuid ~rpc ~session_id:database_session ~uuid in - Client.VM.recover ~rpc ~session_id:database_session ~self:vm ~session_to:session_id ~force) + let force = get_bool_param params "force" in + let uuid = List.assoc "uuid" params in + with_database_vdi rpc session_id params + (fun database_session -> + let vm = Client.VM.get_by_uuid ~rpc ~session_id:database_session ~uuid in + Client.VM.recover ~rpc ~session_id:database_session ~self:vm ~session_to:session_id ~force) let vm_assert_can_be_recovered printer rpc session_id params = - let uuid = List.assoc "uuid" params in - with_database_vdi rpc session_id params - (fun database_session -> - let vm = Client.VM.get_by_uuid ~rpc ~session_id:database_session ~uuid in - Client.VM.assert_can_be_recovered ~rpc ~session_id:database_session ~self:vm ~session_to:session_id) + let uuid = List.assoc "uuid" params in + with_database_vdi rpc session_id params + (fun database_session -> + let vm = Client.VM.get_by_uuid ~rpc ~session_id:database_session ~uuid in + Client.VM.assert_can_be_recovered ~rpc ~session_id:database_session ~self:vm ~session_to:session_id) let cd_list printer rpc session_id params = - let srs = Client.SR.get_all_records_where rpc session_id "true" in - let cd_srs = List.filter (fun (sr,sr_record) -> sr_record.API.sR_content_type = "iso") srs in - let cd_vdis = List.flatten (List.map (fun (sr,sr_record) -> Client.SR.get_VDIs rpc session_id sr) cd_srs) in - let table cd = - let record = vdi_record rpc 
session_id cd in - let selected = List.hd (select_fields params [record] ["name-label"; "uuid"]) in - List.map print_field selected in - printer (Cli_printer.PTable (List.map table cd_vdis)) + let srs = Client.SR.get_all_records_where rpc session_id "true" in + let cd_srs = List.filter (fun (sr,sr_record) -> sr_record.API.sR_content_type = "iso") srs in + let cd_vdis = List.flatten (List.map (fun (sr,sr_record) -> Client.SR.get_VDIs rpc session_id sr) cd_srs) in + let table cd = + let record = vdi_record rpc session_id cd in + let selected = List.hd (select_fields params [record] ["name-label"; "uuid"]) in + List.map print_field selected in + printer (Cli_printer.PTable (List.map table cd_vdis)) let validate_and_get_vlan params = - try Int64.of_string (List.assoc "vlan" params) - with _ -> failwith "Failed to parse parameter 'vlan': expecting an integer" + try Int64.of_string (List.assoc "vlan" params) + with _ -> failwith "Failed to parse parameter 'vlan': expecting an integer" let vlan_create printer rpc session_id params = - let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in - let vLAN = validate_and_get_vlan params in - let vlan = Client.VLAN.create rpc session_id pif vLAN network in - let pif' = Client.VLAN.get_untagged_PIF rpc session_id vlan in - let uuid = Client.PIF.get_uuid rpc session_id pif' in - (* XXX: technically Rio displayed the PIF UUID here *) - printer (Cli_printer.PList [uuid]) + let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in + let vLAN = validate_and_get_vlan params in + let vlan = Client.VLAN.create rpc session_id pif vLAN network in + let pif' = Client.VLAN.get_untagged_PIF rpc session_id vlan in + let uuid = Client.PIF.get_uuid rpc session_id pif' in + (* XXX: technically Rio displayed 
the PIF UUID here *) + printer (Cli_printer.PList [uuid]) let pool_vlan_create printer rpc session_id params = - let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in - let vLAN = validate_and_get_vlan params in - let vlan_pifs = Client.Pool.create_VLAN_from_PIF rpc session_id pif network vLAN in - let vlan_pif_uuids = List.map (fun pif -> Client.PIF.get_uuid rpc session_id pif) vlan_pifs in - (* XXX: technically Rio displayed the PIF UUID here *) - printer (Cli_printer.PList vlan_pif_uuids) + let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in + let vLAN = validate_and_get_vlan params in + let vlan_pifs = Client.Pool.create_VLAN_from_PIF rpc session_id pif network vLAN in + let vlan_pif_uuids = List.map (fun pif -> Client.PIF.get_uuid rpc session_id pif) vlan_pifs in + (* XXX: technically Rio displayed the PIF UUID here *) + printer (Cli_printer.PList vlan_pif_uuids) let vlan_destroy printer rpc session_id params = - (* Rio allowed a PIF UUID to be provided; support this mechanism *) - let uuid = List.assoc "uuid" params in - try - let vlan = Client.VLAN.get_by_uuid rpc session_id uuid in - Client.VLAN.destroy rpc session_id vlan - with - | Api_errors.Server_error(s,_) as e when s=Api_errors.handle_invalid || s=Api_errors.host_offline -> - raise e - | e -> - let pif = try Some (Client.PIF.get_by_uuid rpc session_id uuid) with _ -> None in - match pif with | Some pif -> Client.PIF.destroy rpc session_id pif | None -> raise e + (* Rio allowed a PIF UUID to be provided; support this mechanism *) + let uuid = List.assoc "uuid" params in + try + let vlan = Client.VLAN.get_by_uuid rpc session_id uuid in + Client.VLAN.destroy rpc session_id vlan + with + | Api_errors.Server_error(s,_) as e when s=Api_errors.handle_invalid || 
s=Api_errors.host_offline -> + raise e + | e -> + let pif = try Some (Client.PIF.get_by_uuid rpc session_id uuid) with _ -> None in + match pif with | Some pif -> Client.PIF.destroy rpc session_id pif | None -> raise e let tunnel_create printer rpc session_id params = - let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in - let tunnel = Client.Tunnel.create rpc session_id pif network in - let pif' = Client.Tunnel.get_access_PIF rpc session_id tunnel in - let uuid = Client.PIF.get_uuid rpc session_id pif' in - printer (Cli_printer.PList [uuid]) + let network = Client.Network.get_by_uuid rpc session_id (List.assoc "network-uuid" params) in + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in + let tunnel = Client.Tunnel.create rpc session_id pif network in + let pif' = Client.Tunnel.get_access_PIF rpc session_id tunnel in + let uuid = Client.PIF.get_uuid rpc session_id pif' in + printer (Cli_printer.PList [uuid]) let tunnel_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let tunnel = Client.Tunnel.get_by_uuid rpc session_id uuid in - Client.Tunnel.destroy rpc session_id tunnel + let uuid = List.assoc "uuid" params in + let tunnel = Client.Tunnel.get_by_uuid rpc session_id uuid in + Client.Tunnel.destroy rpc session_id tunnel let pif_reconfigure_ip printer rpc session_id params = - let read_optional_case_insensitive key = - let lower_case_params = List.map (fun (k,v)->(String.lowercase k,v)) params in - let lower_case_key = String.lowercase key in - List.assoc_default lower_case_key lower_case_params "" in - - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let mode = Record_util.ip_configuration_mode_of_string (List.assoc "mode" params) in - let ip = read_optional_case_insensitive "IP" in - let netmask = List.assoc_default "netmask" params "" in - 
let gateway = List.assoc_default "gateway" params "" in - let dns = read_optional_case_insensitive "DNS" in - let () = Client.PIF.reconfigure_ip rpc session_id pif mode ip netmask gateway dns in () + let read_optional_case_insensitive key = + let lower_case_params = List.map (fun (k,v)->(String.lowercase k,v)) params in + let lower_case_key = String.lowercase key in + List.assoc_default lower_case_key lower_case_params "" in + + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let mode = Record_util.ip_configuration_mode_of_string (List.assoc "mode" params) in + let ip = read_optional_case_insensitive "IP" in + let netmask = List.assoc_default "netmask" params "" in + let gateway = List.assoc_default "gateway" params "" in + let dns = read_optional_case_insensitive "DNS" in + let () = Client.PIF.reconfigure_ip rpc session_id pif mode ip netmask gateway dns in () let pif_reconfigure_ipv6 printer rpc session_id params = - let read_optional_case_insensitive key = - let lower_case_params = List.map (fun (k,v)->(String.lowercase k,v)) params in - let lower_case_key = String.lowercase key in - List.assoc_default lower_case_key lower_case_params "" in - - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let mode = Record_util.ipv6_configuration_mode_of_string (List.assoc "mode" params) in - let ipv6 = read_optional_case_insensitive "IPv6" in - let gateway = List.assoc_default "gateway" params "" in - let dns = read_optional_case_insensitive "DNS" in - let () = Client.PIF.reconfigure_ipv6 rpc session_id pif mode ipv6 gateway dns in () + let read_optional_case_insensitive key = + let lower_case_params = List.map (fun (k,v)->(String.lowercase k,v)) params in + let lower_case_key = String.lowercase key in + List.assoc_default lower_case_key lower_case_params "" in + + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let mode = Record_util.ipv6_configuration_mode_of_string (List.assoc 
"mode" params) in + let ipv6 = read_optional_case_insensitive "IPv6" in + let gateway = List.assoc_default "gateway" params "" in + let dns = read_optional_case_insensitive "DNS" in + let () = Client.PIF.reconfigure_ipv6 rpc session_id pif mode ipv6 gateway dns in () let pif_set_primary_address_type printer rpc session_id params = - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let address_type = Record_util.primary_address_type_of_string (List.assoc "primary_address_type" params) in - let () = Client.PIF.set_primary_address_type rpc session_id pif address_type in () + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let address_type = Record_util.primary_address_type_of_string (List.assoc "primary_address_type" params) in + let () = Client.PIF.set_primary_address_type rpc session_id pif address_type in () let pif_unplug printer rpc session_id params = - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let () = Client.PIF.unplug rpc session_id pif in () + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let () = Client.PIF.unplug rpc session_id pif in () let pif_plug printer rpc session_id params = - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let () = Client.PIF.plug rpc session_id pif in () + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let () = Client.PIF.plug rpc session_id pif in () let pif_scan printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let () = Client.PIF.scan rpc session_id host in - () + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let () = Client.PIF.scan rpc session_id host in + () let pif_introduce printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - 
let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let mac = List.assoc_default "mac" params "" in - let device = List.assoc "device" params in - let managed = get_bool_param params ~default:true "managed" in - let pif = Client.PIF.introduce rpc session_id host mac device managed in - let uuid = Client.PIF.get_uuid rpc session_id pif in - printer (Cli_printer.PList [uuid]) + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let mac = List.assoc_default "mac" params "" in + let device = List.assoc "device" params in + let managed = get_bool_param params ~default:true "managed" in + let pif = Client.PIF.introduce rpc session_id host mac device managed in + let uuid = Client.PIF.get_uuid rpc session_id pif in + printer (Cli_printer.PList [uuid]) let pif_forget printer rpc session_id params = - let pif_uuid = List.assoc "uuid" params in - let pif = Client.PIF.get_by_uuid rpc session_id pif_uuid in - let () = Client.PIF.forget rpc session_id pif in - () + let pif_uuid = List.assoc "uuid" params in + let pif = Client.PIF.get_by_uuid rpc session_id pif_uuid in + let () = Client.PIF.forget rpc session_id pif in + () let pif_db_forget printer rpc session_id params = - let pif_uuid = List.assoc "uuid" params in - let pif = Client.PIF.get_by_uuid rpc session_id pif_uuid in - let () = Client.PIF.db_forget rpc session_id pif in - () + let pif_uuid = List.assoc "uuid" params in + let pif = Client.PIF.get_by_uuid rpc session_id pif_uuid in + let () = Client.PIF.db_forget rpc session_id pif in + () let bond_create printer rpc session_id params = - let network = List.assoc "network-uuid" params in - let mac = List.assoc_default "mac" params "" in - let network = Client.Network.get_by_uuid rpc session_id network in - let pifs = List.assoc "pif-uuids" params in - let uuids = String.split ',' pifs in - let pifs = List.map (fun uuid -> Client.PIF.get_by_uuid rpc session_id uuid) uuids in - let mode = 
Record_util.bond_mode_of_string (List.assoc_default "mode" params "") in - let properties = read_map_params "properties" params in - let bond = Client.Bond.create rpc session_id network pifs mac mode properties in - let uuid = Client.Bond.get_uuid rpc session_id bond in - printer (Cli_printer.PList [ uuid]) + let network = List.assoc "network-uuid" params in + let mac = List.assoc_default "mac" params "" in + let network = Client.Network.get_by_uuid rpc session_id network in + let pifs = List.assoc "pif-uuids" params in + let uuids = String.split ',' pifs in + let pifs = List.map (fun uuid -> Client.PIF.get_by_uuid rpc session_id uuid) uuids in + let mode = Record_util.bond_mode_of_string (List.assoc_default "mode" params "") in + let properties = read_map_params "properties" params in + let bond = Client.Bond.create rpc session_id network pifs mac mode properties in + let uuid = Client.Bond.get_uuid rpc session_id bond in + printer (Cli_printer.PList [ uuid]) let bond_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let bond = Client.Bond.get_by_uuid rpc session_id uuid in - Client.Bond.destroy rpc session_id bond + let uuid = List.assoc "uuid" params in + let bond = Client.Bond.get_by_uuid rpc session_id uuid in + Client.Bond.destroy rpc session_id bond let bond_set_mode printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let bond = Client.Bond.get_by_uuid rpc session_id uuid in - let mode = Record_util.bond_mode_of_string (List.assoc_default "mode" params "") in - Client.Bond.set_mode rpc session_id bond mode + let uuid = List.assoc "uuid" params in + let bond = Client.Bond.get_by_uuid rpc session_id uuid in + let mode = Record_util.bond_mode_of_string (List.assoc_default "mode" params "") in + Client.Bond.set_mode rpc session_id bond mode let host_disable printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.disable rpc session_id (host.getref ())) params []) + 
ignore(do_host_op rpc session_id (fun _ host -> Client.Host.disable rpc session_id (host.getref ())) params []) let host_sync_data printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.sync_data rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> Client.Host.sync_data rpc session_id (host.getref ())) params []) (* BAD BAD MAN @@ -3993,405 +3993,405 @@ let host_sync_data printer rpc session_id params = This should be cleaned up at some point. *) let host_enable printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> - Client.Host.remove_from_other_config rpc session_id (host.getref ()) "MAINTENANCE_MODE"; - Client.Host.enable rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> + Client.Host.remove_from_other_config rpc session_id (host.getref ()) "MAINTENANCE_MODE"; + Client.Host.enable rpc session_id (host.getref ())) params []) let host_shutdown printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.shutdown rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> Client.Host.shutdown rpc session_id (host.getref ())) params []) let host_reboot printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.reboot rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> Client.Host.reboot rpc session_id (host.getref ())) params []) let host_power_on printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.power_on rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> Client.Host.power_on rpc session_id (host.getref ())) params []) let host_dmesg printer rpc session_id params = - let op _ host = - let dmesg = Client.Host.dmesg rpc session_id (host.getref ()) in - printer (Cli_printer.PList [ dmesg ]) - 
in - ignore(do_host_op rpc session_id op params []) + let op _ host = + let dmesg = Client.Host.dmesg rpc session_id (host.getref ()) in + printer (Cli_printer.PList [ dmesg ]) + in + ignore(do_host_op rpc session_id op params []) let host_enable_local_storage_caching printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> - let sr_uuid = List.assoc "sr-uuid" params in - let sr = Client.SR.get_by_uuid rpc session_id sr_uuid in - Client.Host.enable_local_storage_caching rpc session_id (host.getref ()) sr - ) params ["sr-uuid"]) + ignore(do_host_op rpc session_id (fun _ host -> + let sr_uuid = List.assoc "sr-uuid" params in + let sr = Client.SR.get_by_uuid rpc session_id sr_uuid in + Client.Host.enable_local_storage_caching rpc session_id (host.getref ()) sr + ) params ["sr-uuid"]) let host_disable_local_storage_caching printer rpc session_id params = - ignore(do_host_op rpc session_id (fun _ host -> Client.Host.disable_local_storage_caching rpc session_id (host.getref ())) params []) + ignore(do_host_op rpc session_id (fun _ host -> Client.Host.disable_local_storage_caching rpc session_id (host.getref ())) params []) let pool_enable_local_storage_caching printer rpc session_id params = - let pool = List.hd (Client.Pool.get_all rpc session_id) in - Client.Pool.enable_local_storage_caching rpc session_id pool + let pool = List.hd (Client.Pool.get_all rpc session_id) in + Client.Pool.enable_local_storage_caching rpc session_id pool let pool_disable_local_storage_caching printer rpc session_id params = - let pool = List.hd (Client.Pool.get_all rpc session_id) in - Client.Pool.disable_local_storage_caching rpc session_id pool + let pool = List.hd (Client.Pool.get_all rpc session_id) in + Client.Pool.disable_local_storage_caching rpc session_id pool let pool_apply_edition printer rpc session_id params = - let pool = get_pool_with_default rpc session_id params "uuid" in - let edition = List.assoc "edition" params in - let hosts = 
Client.Host.get_all rpc session_id in - with_license_server_changes printer rpc session_id params hosts - (fun rpc session_id -> Client.Pool.apply_edition rpc session_id pool edition) + let pool = get_pool_with_default rpc session_id params "uuid" in + let edition = List.assoc "edition" params in + let hosts = Client.Host.get_all rpc session_id in + with_license_server_changes printer rpc session_id params hosts + (fun rpc session_id -> Client.Pool.apply_edition rpc session_id pool edition) let host_set_power_on_mode printer rpc session_id params = - let power_on_mode = List.assoc "power-on-mode" params in - let power_on_config = read_map_params "power-on-config" params in - ignore( - do_host_op rpc session_id (fun _ host -> Client.Host.set_power_on_mode ~rpc ~session_id ~self:(host.getref ()) ~power_on_mode ~power_on_config ) - params ["power-on-mode";"power-on-config"] - ) + let power_on_mode = List.assoc "power-on-mode" params in + let power_on_config = read_map_params "power-on-config" params in + ignore( + do_host_op rpc session_id (fun _ host -> Client.Host.set_power_on_mode ~rpc ~session_id ~self:(host.getref ()) ~power_on_mode ~power_on_config ) + params ["power-on-mode";"power-on-config"] + ) let host_crash_upload printer rpc session_id params = - let crash = Client.Host_crashdump.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let url = List.assoc_default "url" params "" in - (* pass everything else in as an option *) - let options = List.filter (fun (k, _) -> k <> "uuid" && k <> "url") params in - Client.Host_crashdump.upload rpc session_id crash url options + let crash = Client.Host_crashdump.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let url = List.assoc_default "url" params "" in + (* pass everything else in as an option *) + let options = List.filter (fun (k, _) -> k <> "uuid" && k <> "url") params in + Client.Host_crashdump.upload rpc session_id crash url options let host_crash_destroy printer rpc session_id params = - let 
crash = Client.Host_crashdump.get_by_uuid rpc session_id (List.assoc "uuid" params) in - Client.Host_crashdump.destroy rpc session_id crash + let crash = Client.Host_crashdump.get_by_uuid rpc session_id (List.assoc "uuid" params) in + Client.Host_crashdump.destroy rpc session_id crash let host_bugreport_upload printer rpc session_id params = - let op _ host = - let url = List.assoc_default "url" params "" in - (* pass everything else in as an option *) - let options = List.filter (fun (k, _) -> k <> "host" && k <> "url") params in - Client.Host.bugreport_upload rpc session_id (host.getref ()) url options - in - ignore(do_host_op rpc session_id op params ["url"; "http_proxy"]) + let op _ host = + let url = List.assoc_default "url" params "" in + (* pass everything else in as an option *) + let options = List.filter (fun (k, _) -> k <> "host" && k <> "url") params in + Client.Host.bugreport_upload rpc session_id (host.getref ()) url options + in + ignore(do_host_op rpc session_id op params ["url"; "http_proxy"]) let host_backup fd printer rpc session_id params = - let op _ host = - let filename = List.assoc "file-name" params in - let prefix = - let uuid = safe_get_field (field_lookup host.fields "uuid") in - let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in - uri_of_someone rpc session_id someone in - let make_command task_id = - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix - Constants.host_backup_uri (Ref.string_of session_id) (Ref.string_of task_id) in - HttpGet (filename, uri) in - ignore(track_http_operation fd rpc session_id make_command "host backup download") - in - ignore(do_host_op rpc session_id op params ["file-name"] ~multiple:false) + let op _ host = + let filename = List.assoc "file-name" params in + let prefix = + let uuid = safe_get_field (field_lookup host.fields "uuid") in + let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in + 
uri_of_someone rpc session_id someone in + let make_command task_id = + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix + Constants.host_backup_uri (Ref.string_of session_id) (Ref.string_of task_id) in + HttpGet (filename, uri) in + ignore(track_http_operation fd rpc session_id make_command "host backup download") + in + ignore(do_host_op rpc session_id op params ["file-name"] ~multiple:false) let pool_dump_db fd printer rpc session_id params = - let filename = List.assoc "file-name" params in - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" - prefix - Constants.pool_xml_db_sync (Ref.string_of session_id) (Ref.string_of task_id) in - debug "%s" uri; - HttpGet (filename, uri) in - ignore(track_http_operation fd rpc session_id make_command "dump database") + let filename = List.assoc "file-name" params in + let make_command task_id = + let prefix = uri_of_someone rpc session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" + prefix + Constants.pool_xml_db_sync (Ref.string_of session_id) (Ref.string_of task_id) in + debug "%s" uri; + HttpGet (filename, uri) in + ignore(track_http_operation fd rpc session_id make_command "dump database") let pool_restore_db fd printer rpc session_id params = - let dry_run = List.mem_assoc "dry-run" params in - if not(List.mem_assoc "force" params) && not(dry_run) - then failwith "This operation will restore the database backup to this host, making it the master. All slave hosts are assumed dead and they will be forgotten. 
This operation must be forced (use --force)."; - let filename = List.assoc "file-name" params in - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&dry_run=%b" - prefix - Constants.pool_xml_db_sync (Ref.string_of session_id) (Ref.string_of task_id) - dry_run in - debug "%s" uri; - HttpPut (filename, uri) in - ignore(track_http_operation fd rpc session_id make_command "restore database"); - if dry_run - then printer (Cli_printer.PList [ "Dry-run backup restore successful" ]) - else printer (Cli_printer.PList ["Host will reboot with restored database in "^(string_of_float !Xapi_globs.db_restore_fuse_time)^" seconds..."]) + let dry_run = List.mem_assoc "dry-run" params in + if not(List.mem_assoc "force" params) && not(dry_run) + then failwith "This operation will restore the database backup to this host, making it the master. All slave hosts are assumed dead and they will be forgotten. This operation must be forced (use --force)."; + let filename = List.assoc "file-name" params in + let make_command task_id = + let prefix = uri_of_someone rpc session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s&dry_run=%b" + prefix + Constants.pool_xml_db_sync (Ref.string_of session_id) (Ref.string_of task_id) + dry_run in + debug "%s" uri; + HttpPut (filename, uri) in + ignore(track_http_operation fd rpc session_id make_command "restore database"); + if dry_run + then printer (Cli_printer.PList [ "Dry-run backup restore successful" ]) + else printer (Cli_printer.PList ["Host will reboot with restored database in "^(string_of_float !Xapi_globs.db_restore_fuse_time)^" seconds..."]) let pool_enable_external_auth printer rpc session_id params = - let pool = get_pool_with_default rpc session_id params "uuid" in - let auth_type = List.assoc "auth-type" params in - let service_name = List.assoc "service-name" params in - let config = read_map_params "config" params in - 
Client.Pool.enable_external_auth rpc session_id pool config service_name auth_type + let pool = get_pool_with_default rpc session_id params "uuid" in + let auth_type = List.assoc "auth-type" params in + let service_name = List.assoc "service-name" params in + let config = read_map_params "config" params in + Client.Pool.enable_external_auth rpc session_id pool config service_name auth_type let pool_disable_external_auth printer rpc session_id params = - let pool = get_pool_with_default rpc session_id params "uuid" in - let config = read_map_params "config" params in - Client.Pool.disable_external_auth rpc session_id pool config + let pool = get_pool_with_default rpc session_id params "uuid" in + let config = read_map_params "config" params in + Client.Pool.disable_external_auth rpc session_id pool config let host_restore fd printer rpc session_id params = - let filename = List.assoc "file-name" params in - let op _ host = - let prefix = - let uuid = safe_get_field (field_lookup host.fields "uuid") in - let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in - uri_of_someone rpc session_id someone in - let make_command task_id = - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix - Constants.host_restore_uri (Ref.string_of session_id) (Ref.string_of task_id) in - HttpPut (filename, uri) in - ignore(track_http_operation fd rpc session_id make_command "host backup upload") - in - ignore(do_host_op rpc session_id op params ["file-name"] ~multiple:false) + let filename = List.assoc "file-name" params in + let op _ host = + let prefix = + let uuid = safe_get_field (field_lookup host.fields "uuid") in + let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in + uri_of_someone rpc session_id someone in + let make_command task_id = + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix + Constants.host_restore_uri (Ref.string_of session_id) (Ref.string_of task_id) in + 
HttpPut (filename, uri) in + ignore(track_http_operation fd rpc session_id make_command "host backup upload") + in + ignore(do_host_op rpc session_id op params ["file-name"] ~multiple:false) let host_get_system_status_capabilities printer rpc session_id params = - printer (Cli_printer.PList - (do_host_op rpc session_id - (fun _ host -> - Client.Host.get_system_status_capabilities ~rpc ~session_id - ~host:(host.getref ())) params [])) + printer (Cli_printer.PList + (do_host_op rpc session_id + (fun _ host -> + Client.Host.get_system_status_capabilities ~rpc ~session_id + ~host:(host.getref ())) params [])) let wait_for_task rpc session_id task __context fd op_str = - let ok = match unmarshal fd with - | Response OK -> true - | Response Failed -> - (* Need to check whether the thin cli managed to contact the server or - not. If not, we need to mark the task as failed *) - if Client.Task.get_progress rpc session_id task < 0.0 - then Db_actions.DB_Action.Task.set_status ~__context - ~self:task ~value:`failure; - false - | _ -> false in - wait_for_task_complete rpc session_id task; - - (* if the client thinks it's ok, check that the server does too *) - (match Client.Task.get_status rpc session_id task with - | `success -> - if ok - then (marshal fd (Command (Print (op_str ^ " succeeded")))) - else (marshal fd (Command (PrintStderr (op_str ^ " failed, unknown error.\n"))); - raise (ExitWithError 1)) - | `failure -> - let result = Client.Task.get_error_info rpc session_id task in - if result = [] - then marshal fd (Command (PrintStderr (op_str ^ " failed, unknown error\n"))) - else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) - | `cancelled -> - marshal fd (Command (PrintStderr (op_str ^ " cancelled\n"))); - raise (ExitWithError 1) - | _ -> - marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) - raise (ExitWithError 1) - ) + let ok = match unmarshal fd with + | Response OK -> true + | Response Failed -> + (* Need to 
check whether the thin cli managed to contact the server or + not. If not, we need to mark the task as failed *) + if Client.Task.get_progress rpc session_id task < 0.0 + then Db_actions.DB_Action.Task.set_status ~__context + ~self:task ~value:`failure; + false + | _ -> false in + wait_for_task_complete rpc session_id task; + + (* if the client thinks it's ok, check that the server does too *) + (match Client.Task.get_status rpc session_id task with + | `success -> + if ok + then (marshal fd (Command (Print (op_str ^ " succeeded")))) + else (marshal fd (Command (PrintStderr (op_str ^ " failed, unknown error.\n"))); + raise (ExitWithError 1)) + | `failure -> + let result = Client.Task.get_error_info rpc session_id task in + if result = [] + then marshal fd (Command (PrintStderr (op_str ^ " failed, unknown error\n"))) + else raise (Api_errors.Server_error ((List.hd result),(List.tl result))) + | `cancelled -> + marshal fd (Command (PrintStderr (op_str ^ " cancelled\n"))); + raise (ExitWithError 1) + | _ -> + marshal fd (Command (PrintStderr "Internal error\n")); (* should never happen *) + raise (ExitWithError 1) + ) let host_get_system_status fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let entries = List.assoc_default "entries" params "" in - let output = try List.assoc "output" params with _ -> "tar.bz2" in - begin match output with "tar.bz2" | "tar" | "zip" -> () | _ -> - failwith "Invalid output format. 
Must be 'tar', 'zip' or 'tar.bz2'" end; - - let op n host = - let doit task_id = - let uuid = safe_get_field (field_lookup host.fields "uuid") in - let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in - let prefix = uri_of_someone rpc session_id someone in - - let url = - Printf.sprintf "%s%s?session_id=%s&entries=%s&output=%s&task_id=%s" - prefix Constants.system_status_uri - (Ref.string_of session_id) entries output - (Ref.string_of task_id) in - HttpGet (filename, url) in - track_http_operation fd rpc session_id doit "system-status download" - in - ignore (do_host_op rpc session_id op params ["filename"; "entries"; "output"]) + let filename = List.assoc "filename" params in + let entries = List.assoc_default "entries" params "" in + let output = try List.assoc "output" params with _ -> "tar.bz2" in + begin match output with "tar.bz2" | "tar" | "zip" -> () | _ -> + failwith "Invalid output format. Must be 'tar', 'zip' or 'tar.bz2'" end; + + let op n host = + let doit task_id = + let uuid = safe_get_field (field_lookup host.fields "uuid") in + let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in + let prefix = uri_of_someone rpc session_id someone in + + let url = + Printf.sprintf "%s%s?session_id=%s&entries=%s&output=%s&task_id=%s" + prefix Constants.system_status_uri + (Ref.string_of session_id) entries output + (Ref.string_of task_id) in + HttpGet (filename, url) in + track_http_operation fd rpc session_id doit "system-status download" + in + ignore (do_host_op rpc session_id op params ["filename"; "entries"; "output"]) let host_set_hostname_live printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let hostname = List.assoc "host-name" params in - Client.Host.set_hostname_live rpc session_id host hostname + let host_uuid = List.assoc "host-uuid" params in + let host = 
Client.Host.get_by_uuid rpc session_id host_uuid in + let hostname = List.assoc "host-name" params in + Client.Host.set_hostname_live rpc session_id host hostname let host_call_plugin printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let plugin = List.assoc "plugin" params in - let fn = List.assoc "fn" params in - let args = read_map_params "args" params in - let result = Client.Host.call_plugin rpc session_id host plugin fn args in - printer (Cli_printer.PList [ result ]) + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let plugin = List.assoc "plugin" params in + let fn = List.assoc "fn" params in + let args = read_map_params "args" params in + let result = Client.Host.call_plugin rpc session_id host plugin fn args in + printer (Cli_printer.PList [ result ]) let host_enable_external_auth printer rpc session_id params = - if not (List.mem_assoc "force" params) then - failwith "This operation is provided only to recover individual hosts that are unable to access the external authentication service. This operation must be forced (use --force)."; - let host_uuid = List.assoc "host-uuid" params in - let auth_type = List.assoc "auth-type" params in - let service_name = List.assoc "service-name" params in - let config = read_map_params "config" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - Client.Host.enable_external_auth rpc session_id host config service_name auth_type + if not (List.mem_assoc "force" params) then + failwith "This operation is provided only to recover individual hosts that are unable to access the external authentication service. 
This operation must be forced (use --force)."; + let host_uuid = List.assoc "host-uuid" params in + let auth_type = List.assoc "auth-type" params in + let service_name = List.assoc "service-name" params in + let config = read_map_params "config" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + Client.Host.enable_external_auth rpc session_id host config service_name auth_type let host_disable_external_auth printer rpc session_id params = - if not (List.mem_assoc "force" params) then - failwith "This operation is provided only to recover individual hosts that are unable to access the external authentication service. This operation must be forced (use --force)."; - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let config = read_map_params "config" params in - Client.Host.disable_external_auth rpc session_id host config + if not (List.mem_assoc "force" params) then + failwith "This operation is provided only to recover individual hosts that are unable to access the external authentication service. 
This operation must be forced (use --force)."; + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let config = read_map_params "config" params in + Client.Host.disable_external_auth rpc session_id host config let host_refresh_pack_info printer rpc session_id params = - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - Client.Host.refresh_pack_info rpc session_id host + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + Client.Host.refresh_pack_info rpc session_id host let host_cpu_info printer rpc session_id params = - let host = - if List.mem_assoc "uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) - else - get_host_from_session rpc session_id in - let cpu_info = Client.Host.get_cpu_info rpc session_id host in - printer (Cli_printer.PTable [cpu_info]) + let host = + if List.mem_assoc "uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) + else + get_host_from_session rpc session_id in + let cpu_info = Client.Host.get_cpu_info rpc session_id host in + printer (Cli_printer.PTable [cpu_info]) let host_get_cpu_features printer rpc session_id params = - let host = - if List.mem_assoc "uuid" params then - Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) - else - get_host_from_session rpc session_id in - let cpu_info = Client.Host.get_cpu_info rpc session_id host in - let features = List.assoc "features" cpu_info in - printer (Cli_printer.PMsg features) + let host = + if List.mem_assoc "uuid" params then + Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) + else + get_host_from_session rpc session_id in + let cpu_info = Client.Host.get_cpu_info rpc session_id host in + let features = List.assoc "features" cpu_info in + printer (Cli_printer.PMsg features) let 
host_enable_display printer rpc session_id params = - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let result = Client.Host.enable_display rpc session_id host in - printer (Cli_printer.PMsg (Record_util.host_display_to_string result)) + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let result = Client.Host.enable_display rpc session_id host in + printer (Cli_printer.PMsg (Record_util.host_display_to_string result)) let host_disable_display printer rpc session_id params = - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) in - let result = Client.Host.disable_display rpc session_id host in - printer (Cli_printer.PMsg (Record_util.host_display_to_string result)) + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "uuid" params) in + let result = Client.Host.disable_display rpc session_id host in + printer (Cli_printer.PMsg (Record_util.host_display_to_string result)) let patch_upload fd printer rpc session_id params = - let filename = List.assoc "file-name" params in - let make_command task_id = - let prefix = uri_of_someone rpc session_id Master in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" - prefix Constants.pool_patch_upload_uri (Ref.string_of session_id) (Ref.string_of task_id) in - let _ = debug "trying to post patch to uri:%s" uri in - HttpPut (filename, uri) in - let result = track_http_operation fd rpc session_id make_command "host patch upload" in - let patch_ref = Ref.of_string result in - let patch_uuid = Client.Pool_patch.get_uuid rpc session_id patch_ref in - marshal fd (Command (Print patch_uuid)) + let filename = List.assoc "file-name" params in + let make_command task_id = + let prefix = uri_of_someone rpc session_id Master in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" + prefix Constants.pool_patch_upload_uri (Ref.string_of session_id) (Ref.string_of task_id) in + let _ = debug "trying to post patch to 
uri:%s" uri in + HttpPut (filename, uri) in + let result = track_http_operation fd rpc session_id make_command "host patch upload" in + let patch_ref = Ref.of_string result in + let patch_uuid = Client.Pool_patch.get_uuid rpc session_id patch_ref in + marshal fd (Command (Print patch_uuid)) let update_upload fd printer rpc session_id params = - let filename = List.assoc "file-name" params in - let host_uuid = List.assoc "host-uuid" params in - let host = Client.Host.get_by_uuid rpc session_id host_uuid in - let make_command task_id = - let prefix = uri_of_someone rpc session_id (SpecificHost host) in - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" - prefix Constants.oem_patch_stream_uri (Ref.string_of session_id) (Ref.string_of task_id) in - let _ = debug "trying to post patch to uri:%s" uri in - HttpPut (filename, uri) - in - let result = track_http_operation fd rpc session_id make_command "host patch upload" in - marshal fd (Command (Print result)) + let filename = List.assoc "file-name" params in + let host_uuid = List.assoc "host-uuid" params in + let host = Client.Host.get_by_uuid rpc session_id host_uuid in + let make_command task_id = + let prefix = uri_of_someone rpc session_id (SpecificHost host) in + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" + prefix Constants.oem_patch_stream_uri (Ref.string_of session_id) (Ref.string_of task_id) in + let _ = debug "trying to post patch to uri:%s" uri in + HttpPut (filename, uri) + in + let result = track_http_operation fd rpc session_id make_command "host patch upload" in + marshal fd (Command (Print result)) let patch_clean printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in - Client.Pool_patch.clean rpc session_id patch_ref + let uuid = List.assoc "uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in + Client.Pool_patch.clean rpc session_id patch_ref let 
patch_pool_clean printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in - Client.Pool_patch.pool_clean rpc session_id patch_ref + let uuid = List.assoc "uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in + Client.Pool_patch.pool_clean rpc session_id patch_ref let patch_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in - Client.Pool_patch.destroy rpc session_id patch_ref + let uuid = List.assoc "uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id uuid in + Client.Pool_patch.destroy rpc session_id patch_ref let patch_apply printer rpc session_id params = - let patch_uuid = List.assoc "uuid" params in - let host_uuid = List.assoc "host-uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in - let host_ref = Client.Host.get_by_uuid rpc session_id host_uuid in - let result = Client.Pool_patch.apply rpc session_id patch_ref host_ref in - printer (Cli_printer.PList [ result ]) + let patch_uuid = List.assoc "uuid" params in + let host_uuid = List.assoc "host-uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in + let host_ref = Client.Host.get_by_uuid rpc session_id host_uuid in + let result = Client.Pool_patch.apply rpc session_id patch_ref host_ref in + printer (Cli_printer.PList [ result ]) let patch_precheck printer rpc session_id params = - let patch_uuid = List.assoc "uuid" params in - let host_uuid = List.assoc "host-uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in - let host_ref = Client.Host.get_by_uuid rpc session_id host_uuid in - let result = Client.Pool_patch.precheck rpc session_id patch_ref host_ref in - printer (Cli_printer.PList [ result ]) + let patch_uuid = List.assoc "uuid" params in + 
let host_uuid = List.assoc "host-uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in + let host_ref = Client.Host.get_by_uuid rpc session_id host_uuid in + let result = Client.Pool_patch.precheck rpc session_id patch_ref host_ref in + printer (Cli_printer.PList [ result ]) let patch_pool_apply printer rpc session_id params = - let patch_uuid = List.assoc "uuid" params in - let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in - Client.Pool_patch.pool_apply rpc session_id patch_ref + let patch_uuid = List.assoc "uuid" params in + let patch_ref = Client.Pool_patch.get_by_uuid rpc session_id patch_uuid in + Client.Pool_patch.pool_apply rpc session_id patch_ref let host_logs_download fd printer rpc session_id params = - let op n host = - let filename = if List.mem_assoc "file-name" params then List.assoc "file-name" params - else - let tm = Unix.gmtime (Unix.time ()) in - Printf.sprintf "logs-%d-%d-%dT%02d%02d%02dZ" - (tm.Unix.tm_year + 1900) (tm.Unix.tm_mon + 1) tm.Unix.tm_mday - tm.Unix.tm_hour tm.Unix.tm_min tm.Unix.tm_sec in - let prefix = - let uuid = safe_get_field (field_lookup host.fields "uuid") in - let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in - uri_of_someone rpc session_id someone in - let filesuffix = - if n=1 then "" else "-"^(safe_get_field (field_lookup host.fields "name-label")) - in - let make_command task_id = - let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix - Constants.host_logs_download_uri (Ref.string_of session_id) (Ref.string_of task_id) in - HttpGet (filename^filesuffix, uri) in - ignore(track_http_operation fd rpc session_id make_command "host logs download") - in - ignore(do_host_op rpc session_id op params ["file-name"]) + let op n host = + let filename = if List.mem_assoc "file-name" params then List.assoc "file-name" params + else + let tm = Unix.gmtime (Unix.time ()) in + Printf.sprintf 
"logs-%d-%d-%dT%02d%02d%02dZ" + (tm.Unix.tm_year + 1900) (tm.Unix.tm_mon + 1) tm.Unix.tm_mday + tm.Unix.tm_hour tm.Unix.tm_min tm.Unix.tm_sec in + let prefix = + let uuid = safe_get_field (field_lookup host.fields "uuid") in + let someone = try SpecificHost (Client.Host.get_by_uuid rpc session_id uuid) with _ -> Master in + uri_of_someone rpc session_id someone in + let filesuffix = + if n=1 then "" else "-"^(safe_get_field (field_lookup host.fields "name-label")) + in + let make_command task_id = + let uri = Printf.sprintf "%s%s?session_id=%s&task_id=%s" prefix + Constants.host_logs_download_uri (Ref.string_of session_id) (Ref.string_of task_id) in + HttpGet (filename^filesuffix, uri) in + ignore(track_http_operation fd rpc session_id make_command "host logs download") + in + ignore(do_host_op rpc session_id op params ["file-name"]) let host_is_in_emergency_mode printer rpc session_id params = - let mode = Client.Host.is_in_emergency_mode ~rpc ~session_id in - printer (Cli_printer.PMsg (Printf.sprintf "%b" mode)) + let mode = Client.Host.is_in_emergency_mode ~rpc ~session_id in + printer (Cli_printer.PMsg (Printf.sprintf "%b" mode)) let host_emergency_management_reconfigure printer rpc session_id params = - let interface = List.assoc "interface" params in - Client.Host.local_management_reconfigure rpc session_id interface + let interface = List.assoc "interface" params in + Client.Host.local_management_reconfigure rpc session_id interface let host_emergency_ha_disable printer rpc session_id params = - let force = get_bool_param params "force" in - if not force then failwith "This operation is extremely dangerous and may cause data loss. This operation must be forced (use --force)."; - Client.Host.emergency_ha_disable rpc session_id + let force = get_bool_param params "force" in + if not force then failwith "This operation is extremely dangerous and may cause data loss. 
This operation must be forced (use --force)."; + Client.Host.emergency_ha_disable rpc session_id let host_management_reconfigure printer rpc session_id params = - let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in - Client.Host.management_reconfigure rpc session_id pif + let pif = Client.PIF.get_by_uuid rpc session_id (List.assoc "pif-uuid" params) in + Client.Host.management_reconfigure rpc session_id pif let host_management_disable printer rpc session_id params = - Client.Host.management_disable rpc session_id + Client.Host.management_disable rpc session_id let host_signal_networking_change printer rpc session_id params = - Client.Host.signal_networking_change rpc session_id + Client.Host.signal_networking_change rpc session_id let host_notify printer rpc session_id params = - let ty = List.assoc "type" params in - let args = List.assoc_default "params" params "" in - Client.Host.notify rpc session_id ty args + let ty = List.assoc "type" params in + let args = List.assoc_default "params" params "" in + Client.Host.notify rpc session_id ty args let host_syslog_reconfigure printer rpc session_id params = - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in - Client.Host.syslog_reconfigure rpc session_id host + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in + Client.Host.syslog_reconfigure rpc session_id host let host_send_debug_keys printer rpc session_id params = - let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in - let keys = List.assoc "keys" params in - Client.Host.send_debug_keys rpc session_id host keys + let host = Client.Host.get_by_uuid rpc session_id (List.assoc "host-uuid" params) in + let keys = List.assoc "keys" params in + Client.Host.send_debug_keys rpc session_id host keys (* let host_introduce printer rpc session_id params = @@ -4405,9 +4405,9 @@ let host_send_debug_keys printer rpc session_id params = *) let 
task_cancel printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let task = Client.Task.get_by_uuid rpc session_id uuid in - Client.Task.cancel rpc session_id task + let uuid = List.assoc "uuid" params in + let task = Client.Task.get_by_uuid rpc session_id uuid in + Client.Task.cancel rpc session_id task (* let alert_create printer rpc session_id params = @@ -4447,242 +4447,242 @@ let task_cancel printer rpc session_id params = *) let subject_add printer rpc session_id params = - let subject_name = List.assoc "subject-name" params in - (* let's try to resolve the subject_name to a subject_id using the external directory *) - let subject_identifier = Client.Auth.get_subject_identifier ~rpc ~session_id ~subject_name in - (* obtains a list of name-value pairs with info about the subject from the external directory *) - let subject_info = Client.Auth.get_subject_information_from_identifier ~rpc ~session_id ~subject_identifier in - (* now we've got enough information to create our new subject in the pool *) - let subject_ref = Client.Subject.create ~rpc ~session_id ~subject_identifier ~other_config:subject_info in - let subject_uuid = Client.Subject.get_uuid rpc session_id subject_ref in - printer (Cli_printer.PList [subject_uuid]) + let subject_name = List.assoc "subject-name" params in + (* let's try to resolve the subject_name to a subject_id using the external directory *) + let subject_identifier = Client.Auth.get_subject_identifier ~rpc ~session_id ~subject_name in + (* obtains a list of name-value pairs with info about the subject from the external directory *) + let subject_info = Client.Auth.get_subject_information_from_identifier ~rpc ~session_id ~subject_identifier in + (* now we've got enough information to create our new subject in the pool *) + let subject_ref = Client.Subject.create ~rpc ~session_id ~subject_identifier ~other_config:subject_info in + let subject_uuid = Client.Subject.get_uuid rpc session_id subject_ref in + printer 
(Cli_printer.PList [subject_uuid]) let subject_remove printer rpc session_id params = - (* we are removing by subject-uuid *) - let subject_uuid = List.assoc "subject-uuid" params in - let subject = Client.Subject.get_by_uuid ~rpc ~session_id ~uuid:subject_uuid in - Client.Subject.destroy ~rpc ~session_id ~self:subject + (* we are removing by subject-uuid *) + let subject_uuid = List.assoc "subject-uuid" params in + let subject = Client.Subject.get_by_uuid ~rpc ~session_id ~uuid:subject_uuid in + Client.Subject.destroy ~rpc ~session_id ~self:subject let subject_role_common rpc session_id params = - let role_uuid = List.assoc_default "role-uuid" params "" in - let role_name = List.assoc_default "role-name" params "" in - if role_uuid="" && role_name="" - then failwith "Required parameter not found: role-uuid or role-name" - else - if role_uuid<>"" && role_name<>"" - then failwith "Parameters role-uuid and role-name cannot be used together" - else - let subject_uuid = List.assoc "uuid" params in - let role = - if role_uuid<>"" - then Client.Role.get_by_uuid ~rpc ~session_id ~uuid:role_uuid - else begin - let roles = (Client.Role.get_by_name_label ~rpc ~session_id ~label:role_name) in - if List.length roles > 0 - then List.hd roles (* names are unique, there's either 0 or 1*) - else Ref.null (*role not found* raise (Api_errors.Server_error (Api_errors.role_not_found, []))*) - end - in - let subject = Client.Subject.get_by_uuid ~rpc ~session_id ~uuid:subject_uuid in - (subject,role) + let role_uuid = List.assoc_default "role-uuid" params "" in + let role_name = List.assoc_default "role-name" params "" in + if role_uuid="" && role_name="" + then failwith "Required parameter not found: role-uuid or role-name" + else + if role_uuid<>"" && role_name<>"" + then failwith "Parameters role-uuid and role-name cannot be used together" + else + let subject_uuid = List.assoc "uuid" params in + let role = + if role_uuid<>"" + then Client.Role.get_by_uuid ~rpc ~session_id 
~uuid:role_uuid + else begin + let roles = (Client.Role.get_by_name_label ~rpc ~session_id ~label:role_name) in + if List.length roles > 0 + then List.hd roles (* names are unique, there's either 0 or 1*) + else Ref.null (*role not found* raise (Api_errors.Server_error (Api_errors.role_not_found, []))*) + end + in + let subject = Client.Subject.get_by_uuid ~rpc ~session_id ~uuid:subject_uuid in + (subject,role) let subject_role_add printer rpc session_id params = - let (subject,role) = subject_role_common rpc session_id params in - Client.Subject.add_to_roles ~rpc ~session_id ~self:subject ~role + let (subject,role) = subject_role_common rpc session_id params in + Client.Subject.add_to_roles ~rpc ~session_id ~self:subject ~role let subject_role_remove printer rpc session_id params = - let (subject,role) = subject_role_common rpc session_id params in - Client.Subject.remove_from_roles ~rpc ~session_id ~self:subject ~role + let (subject,role) = subject_role_common rpc session_id params in + Client.Subject.remove_from_roles ~rpc ~session_id ~self:subject ~role let audit_log_get fd printer rpc session_id params = - let filename = List.assoc "filename" params in - let since = - if List.mem_assoc "since" params - then (* make sure since has a reasonable length *) - let unsanitized_since = List.assoc "since" params in - if String.length unsanitized_since > 255 - then String.sub unsanitized_since 0 255 - else unsanitized_since - else "" - in - let label = Printf.sprintf "audit-log-get%sinto file %s" - (if since="" then " " else Printf.sprintf " (since \"%s\") " since) - (if String.length filename <= 255 - then filename (* make sure filename has a reasonable length in the logs *) - else String.sub filename 0 255 - ) - in - let query = - if since="" then "" - else Printf.sprintf "since=%s" (Http.urlencode since) - in - download_file_with_task - fd rpc session_id filename Constants.audit_log_uri query label label + let filename = List.assoc "filename" params in + let since = 
+ if List.mem_assoc "since" params + then (* make sure since has a reasonable length *) + let unsanitized_since = List.assoc "since" params in + if String.length unsanitized_since > 255 + then String.sub unsanitized_since 0 255 + else unsanitized_since + else "" + in + let label = Printf.sprintf "audit-log-get%sinto file %s" + (if since="" then " " else Printf.sprintf " (since \"%s\") " since) + (if String.length filename <= 255 + then filename (* make sure filename has a reasonable length in the logs *) + else String.sub filename 0 255 + ) + in + let query = + if since="" then "" + else Printf.sprintf "since=%s" (Http.urlencode since) + in + download_file_with_task + fd rpc session_id filename Constants.audit_log_uri query label label (* RBAC 2.0 only let role_create printer rpc session_id params = -(*let id = List.assoc "id" params in*) + (*let id = List.assoc "id" params in*) let name = List.assoc "name" params in ignore (Client.Role.create ~rpc ~session_id ~name ~description:"" ~permissions:[] ~is_basic:false ~is_complete:false) - *) +*) let session_subject_identifier_list printer rpc session_id params = - let subject_identifiers = Client.Session.get_all_subject_identifiers ~rpc ~session_id in - let table_of_subject_identifiers subject_identifier = - [ "subject-identifier ( RO)", subject_identifier ] - in - let all = List.map table_of_subject_identifiers subject_identifiers in - printer (Cli_printer.PTable all) + let subject_identifiers = Client.Session.get_all_subject_identifiers ~rpc ~session_id in + let table_of_subject_identifiers subject_identifier = + [ "subject-identifier ( RO)", subject_identifier ] + in + let all = List.map table_of_subject_identifiers subject_identifiers in + printer (Cli_printer.PTable all) let session_subject_identifier_logout printer rpc session_id params = - let subject_identifier = List.assoc "subject-identifier" params in - Client.Session.logout_subject_identifier ~rpc ~session_id ~subject_identifier + let subject_identifier = 
List.assoc "subject-identifier" params in + Client.Session.logout_subject_identifier ~rpc ~session_id ~subject_identifier let session_subject_identifier_logout_all printer rpc session_id params = - let subject_identifiers = Client.Session.get_all_subject_identifiers ~rpc ~session_id in - List.iter (fun subject_identifier -> Client.Session.logout_subject_identifier ~rpc ~session_id ~subject_identifier) subject_identifiers + let subject_identifiers = Client.Session.get_all_subject_identifiers ~rpc ~session_id in + List.iter (fun subject_identifier -> Client.Session.logout_subject_identifier ~rpc ~session_id ~subject_identifier) subject_identifiers let secret_create printer rpc session_id params = - let value = List.assoc "value" params in - let other_config = read_map_params "other-config" params in - let ref = Client.Secret.create ~rpc ~session_id ~value ~other_config in - let uuid = Client.Secret.get_uuid ~rpc ~session_id ~self:ref in - printer (Cli_printer.PList [uuid]) + let value = List.assoc "value" params in + let other_config = read_map_params "other-config" params in + let ref = Client.Secret.create ~rpc ~session_id ~value ~other_config in + let uuid = Client.Secret.get_uuid ~rpc ~session_id ~self:ref in + printer (Cli_printer.PList [uuid]) let secret_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let ref = Client.Secret.get_by_uuid ~rpc ~session_id ~uuid in - Client.Secret.destroy ~rpc ~session_id ~self:ref + let uuid = List.assoc "uuid" params in + let ref = Client.Secret.get_by_uuid ~rpc ~session_id ~uuid in + Client.Secret.destroy ~rpc ~session_id ~self:ref let vm_appliance_create printer rpc session_id params = - let name_label = List.assoc "name-label" params in - let name_description = - if List.mem_assoc "name-description" params then - List.assoc "name-description" params - else "" - in - let ref = Client.VM_appliance.create ~rpc ~session_id ~name_label ~name_description in - let uuid = 
Client.VM_appliance.get_uuid ~rpc ~session_id ~self:ref in - printer (Cli_printer.PList [uuid]) + let name_label = List.assoc "name-label" params in + let name_description = + if List.mem_assoc "name-description" params then + List.assoc "name-description" params + else "" + in + let ref = Client.VM_appliance.create ~rpc ~session_id ~name_label ~name_description in + let uuid = Client.VM_appliance.get_uuid ~rpc ~session_id ~self:ref in + printer (Cli_printer.PList [uuid]) let vm_appliance_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in - Client.VM_appliance.destroy ~rpc ~session_id ~self:ref + let uuid = List.assoc "uuid" params in + let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in + Client.VM_appliance.destroy ~rpc ~session_id ~self:ref let vm_appliance_start printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let paused = get_bool_param params "paused" in - let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in - Client.VM_appliance.start ~rpc ~session_id ~self:ref ~paused + let uuid = List.assoc "uuid" params in + let paused = get_bool_param params "paused" in + let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in + Client.VM_appliance.start ~rpc ~session_id ~self:ref ~paused let vm_appliance_shutdown printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let force = get_bool_param params "force" in - let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in - if force then - Client.VM_appliance.hard_shutdown ~rpc ~session_id ~self:ref - else - Client.VM_appliance.clean_shutdown ~rpc ~session_id ~self:ref + let uuid = List.assoc "uuid" params in + let force = get_bool_param params "force" in + let ref = Client.VM_appliance.get_by_uuid ~rpc ~session_id ~uuid in + if force then + Client.VM_appliance.hard_shutdown ~rpc ~session_id ~self:ref + else + 
Client.VM_appliance.clean_shutdown ~rpc ~session_id ~self:ref let vm_appliance_recover printer rpc session_id params = - let force = get_bool_param params "force" in - let uuid = List.assoc "uuid" params in - with_database_vdi rpc session_id params - (fun database_session -> - let appliance = Client.VM_appliance.get_by_uuid ~rpc ~session_id:database_session ~uuid in - Client.VM_appliance.recover ~rpc ~session_id:database_session ~self:appliance ~session_to:session_id ~force) + let force = get_bool_param params "force" in + let uuid = List.assoc "uuid" params in + with_database_vdi rpc session_id params + (fun database_session -> + let appliance = Client.VM_appliance.get_by_uuid ~rpc ~session_id:database_session ~uuid in + Client.VM_appliance.recover ~rpc ~session_id:database_session ~self:appliance ~session_to:session_id ~force) let vm_appliance_assert_can_be_recovered printer rpc session_id params = - let uuid = List.assoc "uuid" params in - with_database_vdi rpc session_id params - (fun database_session -> - let appliance = Client.VM_appliance.get_by_uuid ~rpc ~session_id:database_session ~uuid in - Client.VM_appliance.assert_can_be_recovered ~rpc ~session_id:database_session ~self:appliance ~session_to:session_id) + let uuid = List.assoc "uuid" params in + with_database_vdi rpc session_id params + (fun database_session -> + let appliance = Client.VM_appliance.get_by_uuid ~rpc ~session_id:database_session ~uuid in + Client.VM_appliance.assert_can_be_recovered ~rpc ~session_id:database_session ~self:appliance ~session_to:session_id) let gpu_group_create printer rpc session_id params = - let name_label = List.assoc "name-label" params in - let name_description = - try List.assoc "name-description" params - with Not_found -> "" - in - let gpu_group = - Client.GPU_group.create ~rpc ~session_id - ~name_label ~name_description ~other_config:[] - in - let uuid = Client.GPU_group.get_uuid ~rpc ~session_id ~self:gpu_group in - printer (Cli_printer.PList [uuid]) + let 
name_label = List.assoc "name-label" params in + let name_description = + try List.assoc "name-description" params + with Not_found -> "" + in + let gpu_group = + Client.GPU_group.create ~rpc ~session_id + ~name_label ~name_description ~other_config:[] + in + let uuid = Client.GPU_group.get_uuid ~rpc ~session_id ~self:gpu_group in + printer (Cli_printer.PList [uuid]) let gpu_group_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let gpu_group = Client.GPU_group.get_by_uuid ~rpc ~session_id ~uuid in - Client.GPU_group.destroy ~rpc ~session_id ~self:gpu_group + let uuid = List.assoc "uuid" params in + let gpu_group = Client.GPU_group.get_by_uuid ~rpc ~session_id ~uuid in + Client.GPU_group.destroy ~rpc ~session_id ~self:gpu_group let gpu_group_get_remaining_capacity printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vgpu_type_uuid = List.assoc "vgpu-type-uuid" params in - let gpu_group = Client.GPU_group.get_by_uuid ~rpc ~session_id ~uuid in - let vgpu_type = - Client.VGPU_type.get_by_uuid ~rpc ~session_id ~uuid:vgpu_type_uuid - in - let result = Client.GPU_group.get_remaining_capacity ~rpc ~session_id - ~self:gpu_group ~vgpu_type in - printer (Cli_printer.PMsg (Int64.to_string result)) + let uuid = List.assoc "uuid" params in + let vgpu_type_uuid = List.assoc "vgpu-type-uuid" params in + let gpu_group = Client.GPU_group.get_by_uuid ~rpc ~session_id ~uuid in + let vgpu_type = + Client.VGPU_type.get_by_uuid ~rpc ~session_id ~uuid:vgpu_type_uuid + in + let result = Client.GPU_group.get_remaining_capacity ~rpc ~session_id + ~self:gpu_group ~vgpu_type in + printer (Cli_printer.PMsg (Int64.to_string result)) let vgpu_create printer rpc session_id params = - let device = if List.mem_assoc "device" params then List.assoc "device" params else "0" in - let gpu_group_uuid = List.assoc "gpu-group-uuid" params in - let vm_uuid=List.assoc "vm-uuid" params in - let vM=Client.VM.get_by_uuid rpc session_id vm_uuid in - 
let gPU_group=Client.GPU_group.get_by_uuid rpc session_id gpu_group_uuid in - let _type = - if List.mem_assoc "vgpu-type-uuid" params - then Client.VGPU_type.get_by_uuid rpc session_id (List.assoc "vgpu-type-uuid" params) - else Ref.null - in - let vgpu = Client.VGPU.create ~rpc ~session_id ~device ~gPU_group ~vM ~other_config:[] ~_type in - let uuid = Client.VGPU.get_uuid rpc session_id vgpu in - printer (Cli_printer.PList [uuid]) + let device = if List.mem_assoc "device" params then List.assoc "device" params else "0" in + let gpu_group_uuid = List.assoc "gpu-group-uuid" params in + let vm_uuid=List.assoc "vm-uuid" params in + let vM=Client.VM.get_by_uuid rpc session_id vm_uuid in + let gPU_group=Client.GPU_group.get_by_uuid rpc session_id gpu_group_uuid in + let _type = + if List.mem_assoc "vgpu-type-uuid" params + then Client.VGPU_type.get_by_uuid rpc session_id (List.assoc "vgpu-type-uuid" params) + else Ref.null + in + let vgpu = Client.VGPU.create ~rpc ~session_id ~device ~gPU_group ~vM ~other_config:[] ~_type in + let uuid = Client.VGPU.get_uuid rpc session_id vgpu in + printer (Cli_printer.PList [uuid]) let vgpu_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let vgpu = Client.VGPU.get_by_uuid rpc session_id uuid in - Client.VGPU.destroy rpc session_id vgpu + let uuid = List.assoc "uuid" params in + let vgpu = Client.VGPU.get_by_uuid rpc session_id uuid in + Client.VGPU.destroy rpc session_id vgpu let dr_task_create printer rpc session_id params = - let _type = List.assoc "type" params in - let device_config = parse_device_config params in - let whitelist = if List.mem_assoc "sr-whitelist" params then String.split ',' (List.assoc "sr-whitelist" params) else [] in - let dr_task = Client.DR_task.create ~rpc ~session_id ~_type ~device_config ~whitelist in - let uuid = Client.DR_task.get_uuid ~rpc ~session_id ~self:dr_task in - printer (Cli_printer.PList [uuid]) + let _type = List.assoc "type" params in + let device_config = 
parse_device_config params in + let whitelist = if List.mem_assoc "sr-whitelist" params then String.split ',' (List.assoc "sr-whitelist" params) else [] in + let dr_task = Client.DR_task.create ~rpc ~session_id ~_type ~device_config ~whitelist in + let uuid = Client.DR_task.get_uuid ~rpc ~session_id ~self:dr_task in + printer (Cli_printer.PList [uuid]) let dr_task_destroy printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let ref = Client.DR_task.get_by_uuid ~rpc ~session_id ~uuid in - Client.DR_task.destroy ~rpc ~session_id ~self:ref + let uuid = List.assoc "uuid" params in + let ref = Client.DR_task.get_by_uuid ~rpc ~session_id ~uuid in + Client.DR_task.destroy ~rpc ~session_id ~self:ref let pgpu_enable_dom0_access printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let ref = Client.PGPU.get_by_uuid rpc session_id uuid in - let result = Client.PGPU.enable_dom0_access rpc session_id ref in - printer (Cli_printer.PMsg (Record_util.pgpu_dom0_access_to_string result)) + let uuid = List.assoc "uuid" params in + let ref = Client.PGPU.get_by_uuid rpc session_id uuid in + let result = Client.PGPU.enable_dom0_access rpc session_id ref in + printer (Cli_printer.PMsg (Record_util.pgpu_dom0_access_to_string result)) let pgpu_disable_dom0_access printer rpc session_id params = - let uuid = List.assoc "uuid" params in - let ref = Client.PGPU.get_by_uuid rpc session_id uuid in - let result = Client.PGPU.disable_dom0_access rpc session_id ref in - printer (Cli_printer.PMsg (Record_util.pgpu_dom0_access_to_string result)) + let uuid = List.assoc "uuid" params in + let ref = Client.PGPU.get_by_uuid rpc session_id uuid in + let result = Client.PGPU.disable_dom0_access rpc session_id ref in + printer (Cli_printer.PMsg (Record_util.pgpu_dom0_access_to_string result)) let lvhd_enable_thin_provisioning printer rpc session_id params = - let sr_uuid = List.assoc "sr-uuid" params in - let initial_allocation = Record_util.bytes_of_string 
"initial-allocation" (List.assoc "initial-allocation" params) in - let allocation_quantum = Record_util.bytes_of_string "allocation-quantum" (List.assoc "allocation-quantum" params) in - ignore( - do_host_op rpc session_id (fun _ host -> - let host_ref = host.getref () in - let sr_ref = Client.SR.get_by_uuid rpc session_id sr_uuid in - Client.LVHD.enable_thin_provisioning rpc session_id host_ref sr_ref initial_allocation allocation_quantum - ) params ["sr-uuid"; "initial-allocation";"allocation-quantum"] - ) + let sr_uuid = List.assoc "sr-uuid" params in + let initial_allocation = Record_util.bytes_of_string "initial-allocation" (List.assoc "initial-allocation" params) in + let allocation_quantum = Record_util.bytes_of_string "allocation-quantum" (List.assoc "allocation-quantum" params) in + ignore( + do_host_op rpc session_id (fun _ host -> + let host_ref = host.getref () in + let sr_ref = Client.SR.get_by_uuid rpc session_id sr_uuid in + Client.LVHD.enable_thin_provisioning rpc session_id host_ref sr_ref initial_allocation allocation_quantum + ) params ["sr-uuid"; "initial-allocation";"allocation-quantum"] + ) diff --git a/ocaml/xapi/cli_operations_geneva.ml b/ocaml/xapi/cli_operations_geneva.ml index 912d8460ec7..db96da76457 100644 --- a/ocaml/xapi/cli_operations_geneva.ml +++ b/ocaml/xapi/cli_operations_geneva.ml @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** +(** * @group Command-Line Interface (CLI) - *) - +*) + (* Backwards compatible CLI operations *) (* These are mostly list functions - operations that do things *) @@ -29,7 +29,7 @@ open Xstringext open Pervasiveext open Unixext open Client -open Records +open Records module D=Debug.Make(struct let name="cli" end) open D @@ -40,51 +40,51 @@ let failwith str = raise (Cli_util.Cli_failure str) let powerstate vm_r = let co = vm_r.API.vM_current_operations in let s = vm_r.API.vM_power_state in - let op_to_string (_,s) = - match s with - | `clone -> "CLONING" - | `start -> "STARTING" - | `clean_shutdown -> "SHUTTING_DOWN" - | `hard_shutdown -> "SHUTTING_DOWN" - | `clean_reboot -> "REBOOTING" - | `hard_reboot -> "REBOOTING" - | `suspend -> "SUSPENDING" - | `resume -> "RESUMING" - | `export -> "EXPORTING" - | _ -> "" + let op_to_string (_,s) = + match s with + | `clone -> "CLONING" + | `start -> "STARTING" + | `clean_shutdown -> "SHUTTING_DOWN" + | `hard_shutdown -> "SHUTTING_DOWN" + | `clean_reboot -> "REBOOTING" + | `hard_reboot -> "REBOOTING" + | `suspend -> "SUSPENDING" + | `resume -> "RESUMING" + | `export -> "EXPORTING" + | _ -> "" in let state = String.concat " " (List.map op_to_string co) in if state <> "" then state else match s with - | `Halted -> "DOWN" - | `Paused -> "PAUSED" - | `Running -> "UP" - | `Suspended -> "SUSPENDED" + | `Halted -> "DOWN" + | `Paused -> "PAUSED" + | `Running -> "UP" + | `Suspended -> "SUSPENDED" (* Functions to get objects from the database in convenient ways *) let get_vm_records rpc session_id = let vms = Client.VM.get_all_records_where rpc session_id "true" in let vmrs = List.map snd vms in - List.filter (fun vm_r -> not vm_r.API.vM_is_a_template) vmrs + List.filter (fun vm_r -> not vm_r.API.vM_is_a_template) vmrs let get_template_records rpc session_id = let vms = Client.VM.get_all_records_where rpc session_id "true" in let vmrs = List.map snd vms in - List.filter (fun vm_r -> vm_r.API.vM_is_a_template) vmrs + List.filter 
(fun vm_r -> vm_r.API.vM_is_a_template) vmrs let get_patch_by_name_or_id rpc session_id params = let patches = Client.Host_patch.get_all_records_where rpc session_id "true"in let filter_fun = - if List.mem_assoc "patch-name" params - then + if List.mem_assoc "patch-name" params + then let name = List.assoc "patch-name" params in function (_, x) -> (x.API.host_patch_name_label = name) else - if List.mem_assoc "patch-id" params - then - let id = List.assoc "patch-id" params in - function (_, x) -> (x.API.host_patch_uuid = id) + if List.mem_assoc "patch-id" params + then + let id = List.assoc "patch-id" params in + function (_, x) -> (x.API.host_patch_uuid = id) else raise (Failure "Either a patch-name or a patch-id must be specified") in let patches = List.filter filter_fun patches in @@ -116,16 +116,16 @@ let host_template_list printer rpc session_id params = let host_sr_list printer rpc session_id params = let srs = Client.SR.get_all rpc session_id in let srs = List.map (fun sr -> (sr,Client.SR.get_record rpc session_id sr)) srs in - let recs = + let recs = List.map (fun (_,sr) -> - ["NAME",sr.API.sR_name_label; - "uuid",sr.API.sR_uuid; - "active","true"; - "devices",""]) srs + ["NAME",sr.API.sR_name_label; + "uuid",sr.API.sR_uuid; + "active","true"; + "devices",""]) srs in printer (Cli_printer.PTable recs) - + let host_param_list printer rpc session_id params = let host = Cli_operations.get_host_from_session rpc session_id in let host_r = Client.Host.get_record rpc session_id host in @@ -134,14 +134,14 @@ let host_param_list printer rpc session_id params = "Xen version",""; "Installed",""; "Product version/build", - (List.assoc "product_version" host_r.API.host_software_version)^"/"^ - (List.assoc "build_number" host_r.API.host_software_version); + (List.assoc "product_version" host_r.API.host_software_version)^"/"^ + (List.assoc "build_number" host_r.API.host_software_version); "Sockets per node",string_of_int (List.length host_r.API.host_host_CPUs); "Cores per 
socket",""; "Threads per core",""]] in printer (Cli_printer.PTable recs) -let host_password_set _ rpc session_id params = +let host_password_set _ rpc session_id params = let old_pwd = List.assoc "password" params and new_pwd = List.assoc "new-password" params in Client.Session.change_password rpc session_id old_pwd new_pwd @@ -166,10 +166,10 @@ let host_cpu_list printer rpc session_id params = let cpus = Client.Host.get_host_CPUs rpc session_id this_host in let recs = List.map (fun cpu -> - let cpu = Client.Host_cpu.get_record rpc session_id cpu in - [("CPU"^(Int64.to_string cpu.API.host_cpu_number),cpu.API.host_cpu_modelname); - ("Vendor",cpu.API.host_cpu_vendor); - ("Speed",Int64.to_string cpu.API.host_cpu_speed)]) cpus in + let cpu = Client.Host_cpu.get_record rpc session_id cpu in + [("CPU"^(Int64.to_string cpu.API.host_cpu_number),cpu.API.host_cpu_modelname); + ("Vendor",cpu.API.host_cpu_vendor); + ("Speed",Int64.to_string cpu.API.host_cpu_speed)]) cpus in printer (Cli_printer.PTable recs) let host_cd_list printer rpc session_id params = @@ -178,40 +178,40 @@ let host_cd_list printer rpc session_id params = let cd_vdis = List.flatten (List.map (fun (sr,sr_record) -> Client.SR.get_VDIs rpc session_id sr) cd_srs) in let records = List.map (fun vdi -> - let vdi_rec = Client.VDI.get_record rpc session_id vdi in - [("CD",vdi_rec.API.vDI_name_label); - ("Description",vdi_rec.API.vDI_name_description); - ("Location","")]) cd_vdis in - printer (Cli_printer.PTable records) + let vdi_rec = Client.VDI.get_record rpc session_id vdi in + [("CD",vdi_rec.API.vDI_name_label); + ("Description",vdi_rec.API.vDI_name_description); + ("Location","")]) cd_vdis in + printer (Cli_printer.PTable records) -let host_patch_list printer rpc session_id params = +let host_patch_list printer rpc session_id params = let patches = Client.Host_patch.get_all_records_where rpc session_id "true" in let records = List.map - (fun (patch, patch_rec) -> - [("PATCH", 
patch_rec.API.host_patch_name_label); - ("uuid", patch_rec.API.host_patch_uuid); - ("applied on", if patch_rec.API.host_patch_applied - then Date.to_string patch_rec.API.host_patch_timestamp_applied - else "never")]) patches in + (fun (patch, patch_rec) -> + [("PATCH", patch_rec.API.host_patch_name_label); + ("uuid", patch_rec.API.host_patch_uuid); + ("applied on", if patch_rec.API.host_patch_applied + then Date.to_string patch_rec.API.host_patch_timestamp_applied + else "never")]) patches in printer (Cli_printer.PTable records) -let host_patch_remove printer rpc session_id params = +let host_patch_remove printer rpc session_id params = let p = get_patch_by_name_or_id rpc session_id params in Client.Host_patch.destroy rpc session_id (fst p) -let host_patch_upload fd printer rpc session_id params = +let host_patch_upload fd printer rpc session_id params = let name = List.assoc "patch-file" params in Cli_operations.patch_upload fd printer rpc session_id [ "file-name", name ] -let host_patch_apply printer rpc session_id params = +let host_patch_apply printer rpc session_id params = let p = get_patch_by_name_or_id rpc session_id params in Cli_operations.patch_apply printer rpc session_id [ "uuid", (snd p).API.host_patch_uuid ] let get_disks rpc session_id vm = let vbds = Client.VM.get_VBDs rpc session_id vm in List.map (fun vbd -> (Client.VBD.get_record rpc session_id vbd, - Client.VDI.get_record rpc session_id (Client.VBD.get_VDI rpc session_id vbd))) vbds - + Client.VDI.get_record rpc session_id (Client.VBD.get_VDI rpc session_id vbd))) vbds + let vm_disk_list printer rpc session_id params = let op vm = let vm = vm.getref () in @@ -239,11 +239,11 @@ let vm_disk_setqos printer rpc session_id params = Client.VBD.set_qos_algorithm_type rpc session_id vbd "ionice"; (* remove the key if it's already there *) (try - Client.VBD.remove_from_qos_algorithm_params rpc session_id vbd "class"; - with _ -> ()); + Client.VBD.remove_from_qos_algorithm_params rpc session_id vbd 
"class"; + with _ -> ()); Client.VBD.add_to_qos_algorithm_params rpc session_id vbd "class" qos with - _ -> failwith "Disk not found" + _ -> failwith "Disk not found" in ignore(Cli_operations.do_vm_op printer rpc session_id op params []) @@ -264,16 +264,16 @@ let vm_cd_list printer rpc session_id params = let get_vm_params rpc session_id vm = let vm_record = Records.vm_record rpc session_id vm in let power_state = powerstate (Client.VM.get_record rpc session_id vm) in - let vcpus = + let vcpus = if power_state="running" || power_state="suspended" then Records.safe_get_field (Records.field_lookup vm_record.Records.fields "VCPUs-number") else Records.safe_get_field (Records.field_lookup vm_record.Records.fields "VCPUs-max") in - (* memory_set refers to the balloon target, memory_max is the boot-time max *) + (* memory_set refers to the balloon target, memory_max is the boot-time max *) let memory_set = Records.safe_get_field (Records.field_lookup vm_record.Records.fields "memory-dynamic-max") in - let vcpu_params = - try + let vcpu_params = + try match (Records.field_lookup vm_record.Records.fields "VCPUs-params").Records.get_map with Some f -> f () | None -> [] with _ -> [] in @@ -301,15 +301,15 @@ let vm_param_list printer rpc session_id params = ignore(Cli_operations.do_vm_op printer rpc session_id op params []) let vm_param_get printer rpc session_id params = - let op vm = + let op vm = let vm=vm.getref () in let allparams = get_vm_params rpc session_id vm in let param_name=List.assoc "param-name" params in - let result = + let result = try - List.filter (fun (k,v) -> k=param_name) allparams + List.filter (fun (k,v) -> k=param_name) allparams with - _ -> ["error","Parameter not found"] + _ -> ["error","Parameter not found"] in printer (Cli_printer.PTable [result]) in @@ -321,101 +321,101 @@ let vm_param_set printer rpc session_id params = let param = List.assoc "param-name" params in let value = List.assoc "param-value" params in match param with - | "name" -> 
Client.VM.set_name_label rpc session_id vm value - | "description" -> Client.VM.set_name_description rpc session_id vm value - | "vcpus" -> Client.VM.set_VCPUs_at_startup rpc session_id vm (Int64.of_string value) - | "memory_set" -> - if Client.VM.get_power_state rpc session_id vm <> `Halted - then failwith "Cannot modify memory_set when VM is running"; - let bytes = Int64.shift_left (Int64.of_string value) 20 in - Client.VM.set_memory_dynamic_max rpc session_id vm bytes; - Client.VM.set_memory_dynamic_min rpc session_id vm bytes - | "auto_poweron" -> () - | "boot_params" -> Client.VM.set_PV_args rpc session_id vm value - | "on_crash" -> Client.VM.set_actions_after_crash rpc session_id vm (Record_util.string_to_on_crash_behaviour value) - | "sched_credit_weight" -> - (try Client.VM.remove_from_VCPUs_params rpc session_id vm "weight" with _ -> ()); - Client.VM.add_to_VCPUs_params rpc session_id vm "weight" value - | "sched_credit_cap" -> - (try Client.VM.remove_from_VCPUs_params rpc session_id vm "cap" with _ -> ()); - Client.VM.add_to_VCPUs_params rpc session_id vm "cap" value - | _ -> failwith "Unknown parameter" + | "name" -> Client.VM.set_name_label rpc session_id vm value + | "description" -> Client.VM.set_name_description rpc session_id vm value + | "vcpus" -> Client.VM.set_VCPUs_at_startup rpc session_id vm (Int64.of_string value) + | "memory_set" -> + if Client.VM.get_power_state rpc session_id vm <> `Halted + then failwith "Cannot modify memory_set when VM is running"; + let bytes = Int64.shift_left (Int64.of_string value) 20 in + Client.VM.set_memory_dynamic_max rpc session_id vm bytes; + Client.VM.set_memory_dynamic_min rpc session_id vm bytes + | "auto_poweron" -> () + | "boot_params" -> Client.VM.set_PV_args rpc session_id vm value + | "on_crash" -> Client.VM.set_actions_after_crash rpc session_id vm (Record_util.string_to_on_crash_behaviour value) + | "sched_credit_weight" -> + (try Client.VM.remove_from_VCPUs_params rpc session_id vm "weight" with _ 
-> ()); + Client.VM.add_to_VCPUs_params rpc session_id vm "weight" value + | "sched_credit_cap" -> + (try Client.VM.remove_from_VCPUs_params rpc session_id vm "cap" with _ -> ()); + Client.VM.add_to_VCPUs_params rpc session_id vm "cap" value + | _ -> failwith "Unknown parameter" in ignore(Cli_operations.do_vm_op printer rpc session_id op params []) - + let vm_install fd printer rpc session_id params = let template_name = List.assoc "template-name" params in let name = List.assoc "name" params in let description = try List.assoc "description" params with _ -> "" in let vcpus = try Some (Int64.of_string (List.assoc "vcpus" params)) with _ -> None in - let memory_set = - try Some (Int64.shift_left (Int64.of_string (List.assoc "memory_set" params)) 20) + let memory_set = + try Some (Int64.shift_left (Int64.of_string (List.assoc "memory_set" params)) 20) with _ -> None in let boot_params = try Some (List.assoc "boot_params" params) with _ -> None in let _ (* auto_poweron *) = try Some (bool_of_string (List.assoc "auto_poweron" params)) with _ -> None in let templates = get_template_records rpc session_id in let template = List.filter (fun t -> t.API.vM_name_label = template_name) templates in - match template with - [t] -> - marshal fd (Command (Print "Initiating install...")); - let new_vm = Client.VM.clone rpc session_id (Client.VM.get_by_uuid rpc session_id (t.API.vM_uuid)) name in - let uuid = Client.VM.get_uuid rpc session_id new_vm in - - (* Add VIFs to any network that has 'auto_add_to_VM' set to true *) - let nets = Client.Network.get_all rpc session_id in - let filtered_nets = List.filter (fun net -> try bool_of_string (List.assoc "auto_add_to_VM" (Client.Network.get_other_config rpc session_id net)) with _ -> false) nets in - - let device=ref 0 in - let add_vif net = - let mac = Record_util.random_mac_local () in - marshal fd (Command (Print ("Adding VIF, device "^(string_of_int !device)^" to network '"^(Client.Network.get_name_label rpc session_id net)^"' 
mac="^mac))); - ignore(Client.VIF.create rpc session_id (string_of_int !device) net new_vm mac 1500L [] "" [] `network_default [] [] ); - device := !device + 1 - in - List.iter add_vif filtered_nets; - - ignore(may (fun vcpus -> Client.VM.set_VCPUs_max rpc session_id new_vm vcpus) vcpus); - ignore(may (fun vcpus -> Client.VM.set_VCPUs_at_startup rpc session_id new_vm vcpus) vcpus); - ignore(may (fun memory_set -> Client.VM.set_memory_dynamic_max rpc session_id new_vm memory_set) memory_set); - ignore(may (fun memory_set -> Client.VM.set_memory_static_max rpc session_id new_vm memory_set) memory_set); - ignore(may (fun memory_set -> Client.VM.set_memory_dynamic_min rpc session_id new_vm memory_set) memory_set); - ignore(may (fun boot_params -> Client.VM.set_PV_args rpc session_id new_vm boot_params) boot_params); - ignore(Client.VM.set_name_description rpc session_id new_vm description); - - let sr_uuid = match get_default_sr_uuid rpc session_id with - | Some sr_uuid -> sr_uuid - | None -> failwith "Failed to find a Pool default_SR and no override was provided" in - rewrite_provisioning_xml rpc session_id new_vm sr_uuid; - - Client.VM.provision rpc session_id new_vm; - (* Geneva doesn't start VMs automatically on install *) - (* + match template with + [t] -> + marshal fd (Command (Print "Initiating install...")); + let new_vm = Client.VM.clone rpc session_id (Client.VM.get_by_uuid rpc session_id (t.API.vM_uuid)) name in + let uuid = Client.VM.get_uuid rpc session_id new_vm in + + (* Add VIFs to any network that has 'auto_add_to_VM' set to true *) + let nets = Client.Network.get_all rpc session_id in + let filtered_nets = List.filter (fun net -> try bool_of_string (List.assoc "auto_add_to_VM" (Client.Network.get_other_config rpc session_id net)) with _ -> false) nets in + + let device=ref 0 in + let add_vif net = + let mac = Record_util.random_mac_local () in + marshal fd (Command (Print ("Adding VIF, device "^(string_of_int !device)^" to network 
'"^(Client.Network.get_name_label rpc session_id net)^"' mac="^mac))); + ignore(Client.VIF.create rpc session_id (string_of_int !device) net new_vm mac 1500L [] "" [] `network_default [] [] ); + device := !device + 1 + in + List.iter add_vif filtered_nets; + + ignore(may (fun vcpus -> Client.VM.set_VCPUs_max rpc session_id new_vm vcpus) vcpus); + ignore(may (fun vcpus -> Client.VM.set_VCPUs_at_startup rpc session_id new_vm vcpus) vcpus); + ignore(may (fun memory_set -> Client.VM.set_memory_dynamic_max rpc session_id new_vm memory_set) memory_set); + ignore(may (fun memory_set -> Client.VM.set_memory_static_max rpc session_id new_vm memory_set) memory_set); + ignore(may (fun memory_set -> Client.VM.set_memory_dynamic_min rpc session_id new_vm memory_set) memory_set); + ignore(may (fun boot_params -> Client.VM.set_PV_args rpc session_id new_vm boot_params) boot_params); + ignore(Client.VM.set_name_description rpc session_id new_vm description); + + let sr_uuid = match get_default_sr_uuid rpc session_id with + | Some sr_uuid -> sr_uuid + | None -> failwith "Failed to find a Pool default_SR and no override was provided" in + rewrite_provisioning_xml rpc session_id new_vm sr_uuid; + + Client.VM.provision rpc session_id new_vm; + (* Geneva doesn't start VMs automatically on install *) + (* Client.VM.start rpc session_id new_vm false true; *) - (* We wait for the PV bootloader switcheroo *) - marshal fd (Command (Print ("New VM uuid: "^uuid))); - let record_matches record = - (List.assoc "uuid" record) () = uuid && - (List.assoc "PV-bootloader" record) () <> "installer" - in - Cli_operations.event_wait_gen rpc session_id "vm" record_matches; - marshal fd (Command (Print ("[DONE]"))) - | _ -> failwith "Template not found" + (* We wait for the PV bootloader switcheroo *) + marshal fd (Command (Print ("New VM uuid: "^uuid))); + let record_matches record = + (List.assoc "uuid" record) () = uuid && + (List.assoc "PV-bootloader" record) () <> "installer" + in + 
Cli_operations.event_wait_gen rpc session_id "vm" record_matches; + marshal fd (Command (Print ("[DONE]"))) + | _ -> failwith "Template not found" let host_bridge_list vbridge printer rpc session_id params = let filterfn = if vbridge then not else fun b -> b in let networks = Client.Network.get_all rpc session_id in let pbridges = List.filter (fun net -> filterfn (List.length (Client.Network.get_PIFs rpc session_id net) > 0)) networks in - let bridge_to_printer_record pbridge = + let bridge_to_printer_record pbridge = let pifs = Client.Network.get_PIFs rpc session_id pbridge in let other_config = Client.Network.get_other_config rpc session_id pbridge in - let name = try List.assoc "geneva-name" other_config with _ -> Client.Network.get_bridge rpc session_id pbridge in + let name = try List.assoc "geneva-name" other_config with _ -> Client.Network.get_bridge rpc session_id pbridge in [((if vbridge then "Virtual bridge" else "Physical bridge"),name); ("Description",Client.Network.get_name_description rpc session_id pbridge)] @ - (if not vbridge then [("NIC",if vbridge then "" else Client.PIF.get_device rpc session_id (List.hd pifs))] else []) @ - [("VLAN",if vbridge then "(null)" else Int64.to_string (Client.PIF.get_VLAN rpc session_id (List.hd pifs))); - ("Auto add to VM",try List.assoc "auto_add_to_VM" other_config with _ -> "false")] + (if not vbridge then [("NIC",if vbridge then "" else Client.PIF.get_device rpc session_id (List.hd pifs))] else []) @ + [("VLAN",if vbridge then "(null)" else Int64.to_string (Client.PIF.get_VLAN rpc session_id (List.hd pifs))); + ("Auto add to VM",try List.assoc "auto_add_to_VM" other_config with _ -> "false")] in printer (Cli_printer.PTable (List.map bridge_to_printer_record pbridges)) @@ -424,20 +424,20 @@ let host_vbridge_add printer rpc session_id params = let autoadd = List.assoc "auto-vm-add" params in let desc = try List.assoc "vbridge-description" params with _ -> "" in ignore(Client.Network.create rpc session_id name desc 
1500L - (if autoadd="true" then [("auto_add_to_VM",autoadd);("geneva-name",name)] else [("geneva-name",name)]) []) + (if autoadd="true" then [("auto_add_to_VM",autoadd);("geneva-name",name)] else [("geneva-name",name)]) []) let host_vbridge_remove printer rpc session_id params = let name = List.assoc "vbridge-name" params in let networks = Client.Network.get_all rpc session_id in - let net = List.filter (fun net -> - let other_config = Client.Network.get_other_config rpc session_id net in - if List.mem_assoc "geneva-name" other_config then - List.assoc "geneva-name" other_config = name - else - Client.Network.get_bridge rpc session_id net = name) networks in + let net = List.filter (fun net -> + let other_config = Client.Network.get_other_config rpc session_id net in + if List.mem_assoc "geneva-name" other_config then + List.assoc "geneva-name" other_config = name + else + Client.Network.get_bridge rpc session_id net = name) networks in match net with - [n] -> Client.Network.destroy rpc session_id n - | _ -> failwith "Multiple networks found!" + [n] -> Client.Network.destroy rpc session_id n + | _ -> failwith "Multiple networks found!" 
let vdi_param_set printer rpc session_id params = let vdi = List.assoc "vdi" params in @@ -445,11 +445,11 @@ let vdi_param_set printer rpc session_id params = let param_value=List.assoc "param-value" params in let vdi_ref = Client.VDI.get_by_uuid rpc session_id vdi in match param_name with - "name-label" -> Client.VDI.set_name_label rpc session_id vdi_ref param_value - | "name-description" -> Client.VDI.set_name_description rpc session_id vdi_ref param_value - | "read-only" -> Client.VDI.set_read_only rpc session_id vdi_ref (bool_of_string param_value) - | "sharable" -> Client.VDI.set_sharable rpc session_id vdi_ref (bool_of_string param_value) - | _ -> failwith ("Unknown param "^param_name) + "name-label" -> Client.VDI.set_name_label rpc session_id vdi_ref param_value + | "name-description" -> Client.VDI.set_name_description rpc session_id vdi_ref param_value + | "read-only" -> Client.VDI.set_read_only rpc session_id vdi_ref (bool_of_string param_value) + | "sharable" -> Client.VDI.set_sharable rpc session_id vdi_ref (bool_of_string param_value) + | _ -> failwith ("Unknown param "^param_name) let vm_vif_add printer rpc session_id params = @@ -458,37 +458,37 @@ let vm_vif_add printer rpc session_id params = let vif_name = List.assoc "vif-name" params in let bridge = List.assoc "bridge-name" params in let mac = List.assoc "mac" params in - + (* the name encodes the device - VIFs should be called 'ethX' or nicX in geneva/zurich *) let device = - if String.startswith "eth" vif_name + if String.startswith "eth" vif_name then String.sub vif_name 3 (String.length vif_name - 3) else if String.startswith "nic" vif_name then String.sub vif_name 3 (String.length vif_name - 3) else failwith "VIF names must be of the form ethX or nicX" in - + (* find the bridge *) let networks = Client.Network.get_all rpc session_id in let filter net = let other_config = Client.Network.get_other_config rpc session_id net in try - List.assoc "geneva-name" other_config = bridge + List.assoc 
"geneva-name" other_config = bridge with - _ -> Client.Network.get_bridge rpc session_id net = bridge + _ -> Client.Network.get_bridge rpc session_id net = bridge in let net = List.filter filter networks in match net with - | [] -> failwith "Bridge not found" - | n::ns -> - begin - let vif = Client.VIF.create rpc session_id device n vm mac 1500L [] "" [] `network_default [] [] in - if List.mem_assoc "rate" params then - (Client.VIF.set_qos_algorithm_type rpc session_id vif "ratelimit"; - Client.VIF.add_to_qos_algorithm_params rpc session_id vif "kbs" (List.assoc "rate" params)) - end - in - ignore(Cli_operations.do_vm_op printer rpc session_id op params []) + | [] -> failwith "Bridge not found" + | n::ns -> + begin + let vif = Client.VIF.create rpc session_id device n vm mac 1500L [] "" [] `network_default [] [] in + if List.mem_assoc "rate" params then + (Client.VIF.set_qos_algorithm_type rpc session_id vif "ratelimit"; + Client.VIF.add_to_qos_algorithm_params rpc session_id vif "kbs" (List.assoc "rate" params)) + end + in + ignore(Cli_operations.do_vm_op printer rpc session_id op params []) let vm_vif_list printer rpc session_id params = let op vm = @@ -500,26 +500,26 @@ let vm_vif_list printer rpc session_id params = let name = (if is_hvm then "nic" else "eth")^(Client.VIF.get_device rpc session_id vif) in let mac = Client.VIF.get_MAC rpc session_id vif in let bridge_other_config = Client.Network.get_other_config rpc session_id (Client.VIF.get_network rpc session_id vif) in - let bridge = - try List.assoc "geneva-name" bridge_other_config - with _ -> Client.Network.get_bridge rpc session_id (Client.VIF.get_network rpc session_id vif) + let bridge = + try List.assoc "geneva-name" bridge_other_config + with _ -> Client.Network.get_bridge rpc session_id (Client.VIF.get_network rpc session_id vif) in let ip = - try - let networks = Client.VM_guest_metrics.get_networks rpc session_id (Client.VM.get_guest_metrics rpc session_id vm) in - List.assoc 
((Client.VIF.get_device rpc session_id vif)^"/ip") networks - with - _ -> "not available [guest must be on with XenSource tools installed]" - in + try + let networks = Client.VM_guest_metrics.get_networks rpc session_id (Client.VM.get_guest_metrics rpc session_id vm) in + List.assoc ((Client.VIF.get_device rpc session_id vif)^"/ip") networks + with + _ -> "not available [guest must be on with XenSource tools installed]" + in [("name",name); ("mac",mac); ("ip",ip); ("vbridge",bridge); ("rate","")] - in + in printer (Cli_printer.PTable (List.map vif_to_record vifs)) in - ignore(Cli_operations.do_vm_op printer rpc session_id op params []) + ignore(Cli_operations.do_vm_op printer rpc session_id op params []) let vm_vif_remove printer rpc session_id params = let op vm = @@ -528,7 +528,7 @@ let vm_vif_remove printer rpc session_id params = let vifs = List.filter (fun vif -> Client.VIF.get_VM rpc session_id vif = vm) vifs in let vif_name = List.assoc "vif-name" params in let device = - if String.startswith "eth" vif_name + if String.startswith "eth" vif_name then String.sub vif_name 3 (String.length vif_name - 3) else if String.startswith "nic" vif_name then String.sub vif_name 3 (String.length vif_name - 3) @@ -536,14 +536,14 @@ let vm_vif_remove printer rpc session_id params = in let vif = List.filter (fun vif -> Client.VIF.get_device rpc session_id vif = device) vifs in match vif with - v::vs -> Client.VIF.destroy rpc session_id v - | _ -> failwith "Cannot find VIF" - in - ignore(Cli_operations.do_vm_op printer rpc session_id op params []) + v::vs -> Client.VIF.destroy rpc session_id v + | _ -> failwith "Cannot find VIF" + in + ignore(Cli_operations.do_vm_op printer rpc session_id op params []) (* - let recs = + let recs = List.map (fun (_,v) -> (["NAME",v.API.vM_name_label; "uuid",v.API.vM_uuid; @@ -557,10 +557,10 @@ let vm_vif_remove printer rpc session_id params = ("description" , vm_record.API.vM_name_description ); ("vcpus", Int64.to_string 
(vm_record.API.vM_VCPUs_number )); ("memory_set", Int64.to_string (Int64.shift_right (vm_record.API.vM_memory_dynamic_max ) 20)); - ("auto_power_on", string_of_bool (vm_record.API.vM_auto_power_on))]] + ("auto_power_on", string_of_bool (vm_record.API.vM_auto_power_on))]] *) - + (* Cmdtable *) let cmdtable_data : (string*cmd_spec) list = @@ -572,7 +572,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-shutdown", { reqd=[]; @@ -596,7 +596,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_password_set; flags=[]; - }; + }; "host-license-add", { reqd=[]; @@ -604,7 +604,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-license-list", { reqd=[]; @@ -612,7 +612,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-pif-list", { reqd=[]; @@ -620,7 +620,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: List the PIFs on the host"; implementation=No_fd host_pif_list; flags=[]; - }; + }; "host-cpu-list", { reqd=[]; @@ -628,7 +628,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_cpu_list; flags=[]; - }; + }; "host-vbridge-list", { reqd=[]; @@ -636,7 +636,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd (host_bridge_list true); flags=[]; - }; + }; "host-pbridge-list", { reqd=[]; @@ -644,7 +644,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd (host_bridge_list false); flags=[]; - }; + }; "host-cd-list", { reqd=[]; @@ -652,7 +652,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_cd_list; flags=[]; - }; + }; "host-vbridge-add", { reqd=["vbridge-name";"auto-vm-add"]; @@ -660,7 +660,7 @@ 
let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_vbridge_add; flags=[]; - }; + }; "host-vbridge-remove", { reqd=["vbridge-name"]; @@ -668,7 +668,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_vbridge_remove; flags=[]; - }; + }; "host-sr-set", { reqd=[]; @@ -676,7 +676,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-loglevel-set", { reqd=[]; @@ -684,7 +684,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-logs-download", { reqd=[]; @@ -692,7 +692,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-patch-list", { reqd=[]; @@ -700,7 +700,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_patch_list; flags=[]; - }; + }; "host-patch-remove", { @@ -709,7 +709,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_patch_remove; flags=[]; - }; + }; "host-patch-upload", { reqd=["patch-file"]; @@ -717,7 +717,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=With_fd host_patch_upload; flags=[]; - }; + }; "host-patch-apply", { reqd=[]; @@ -725,7 +725,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd host_patch_apply; flags=[]; - }; + }; "host-backup", { reqd=[]; @@ -733,7 +733,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-restore", { reqd=[]; @@ -741,7 +741,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-crash-list", { reqd=[]; @@ -749,7 +749,7 @@ let cmdtable_data : (string*cmd_spec) list 
= help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-crash-del", { reqd=[]; @@ -757,7 +757,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-crash-upload", { reqd=[]; @@ -765,7 +765,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-bugreport-upload", { reqd=[]; @@ -773,7 +773,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "host-license-add", { reqd=["license-file"]; @@ -781,9 +781,9 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=With_fd Cli_operations.host_license_add; flags=[]; - }; + }; "host-vm-list", - { + { reqd=[]; optn=[]; help="COMPAT MODE: List the hosts on the server"; @@ -791,7 +791,7 @@ let cmdtable_data : (string*cmd_spec) list = flags=[]; }; "host-template-list", - { + { reqd=[]; optn=[]; help="COMPAT MODE: List the templates on the server"; @@ -821,7 +821,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=With_fd vm_install; flags=[]; - }; + }; "vm-uninstall", { reqd=[]; @@ -829,7 +829,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=With_fd Cli_operations.vm_uninstall; flags=[]; - }; + }; "vm-clone", { reqd=[]; @@ -837,7 +837,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_clone; flags=[]; - }; + }; "vm-shutdown", { reqd=[]; @@ -845,7 +845,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_shutdown; flags=[]; - }; + }; "vm-start", { reqd=[]; @@ -853,7 +853,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_start; flags=[]; - }; + }; "vm-suspend", { reqd=[]; @@ -861,7 +861,7 
@@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_suspend; flags=[]; - }; + }; "vm-resume", { reqd=[]; @@ -869,7 +869,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_resume; flags=[]; - }; + }; "vm-reboot", { reqd=[]; @@ -877,7 +877,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_reboot; flags=[]; - }; + }; "vm-disk-list", { reqd=[]; @@ -885,7 +885,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_disk_list; flags=[]; - }; + }; "vm-disk-add", { reqd=["disk-name";"disk-size"]; @@ -893,7 +893,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_disk_add; flags=[]; - }; + }; "vm-disk-remove", { reqd=[]; @@ -901,7 +901,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_disk_remove; flags=[]; - }; + }; "vm-disk-resize", { reqd=[]; @@ -909,7 +909,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_disk_resize; flags=[]; - }; + }; "vm-disk-setqos", { reqd=[]; @@ -917,7 +917,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_disk_setqos; flags=[]; - }; + }; "vm-cd-list", { reqd=[]; @@ -925,7 +925,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_cd_list; flags=[]; - }; + }; "vm-cd-add", { reqd=[]; @@ -933,7 +933,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_cd_add; flags=[]; - }; + }; "vm-cd-remove", { reqd=[]; @@ -941,7 +941,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd Cli_operations.vm_cd_remove; flags=[]; - }; + }; "vm-cd-change", { reqd=[]; @@ -949,7 +949,7 @@ let 
cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "vm-vif-list", { reqd=[]; @@ -957,7 +957,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_vif_list; flags=[]; - }; + }; "vm-vif-add", { reqd=["vif-name";"mac";"bridge-name"]; @@ -965,7 +965,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_vif_add; flags=[]; - }; + }; "vm-vif-remove", { reqd=["vif-name"]; @@ -973,7 +973,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_vif_remove; flags=[]; - }; + }; "vm-param-list", { reqd=[]; @@ -981,7 +981,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_param_list; flags=[]; - }; + }; "vm-param-get", { reqd=["param-name"]; @@ -989,7 +989,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_param_get; flags=[]; - }; + }; "vm-param-set", { reqd=[]; @@ -997,7 +997,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd vm_param_set; flags=[]; - }; + }; "vm-export", { reqd=[]; @@ -1005,7 +1005,7 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; "vm-import", { reqd=[]; @@ -1013,8 +1013,8 @@ let cmdtable_data : (string*cmd_spec) list = help="COMPAT MODE: "; implementation=No_fd not_implemented; flags=[]; - }; + }; ] - + diff --git a/ocaml/xapi/cli_printer.ml b/ocaml/xapi/cli_printer.ml index 629ae3e38c4..92c70de6164 100644 --- a/ocaml/xapi/cli_printer.ml +++ b/ocaml/xapi/cli_printer.ml @@ -11,21 +11,21 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** +(** * @group Command-Line Interface (CLI) - *) - +*) + open Cli_util open Cli_protocol type record = (string*string) list type printval = - | PMsg of string - | PTable of record list - | PList of string list - | PStderr of string - + | PMsg of string + | PTable of record list + | PList of string list + | PStderr of string + type print_fn = printval -> unit let pad_string s len = @@ -37,57 +37,57 @@ let pad_rhs s len = s^(String.make (if n>0 then n else 0) ' ') let rec multi_line_record r = - let maxlen = 4 + List.fold_left max 0 (List.map (fun (a,b) -> String.length a) r) in - let indent fs = List.map (fun (f,v)->(pad_string f maxlen,v)) fs in - let r = - match r with - ((k,v)::fs) -> ((pad_rhs k maxlen),v)::(indent fs) - | _ -> r in - (String.concat "\n" (List.map (fun (f,v)->f^": "^v) r))^"\n" + let maxlen = 4 + List.fold_left max 0 (List.map (fun (a,b) -> String.length a) r) in + let indent fs = List.map (fun (f,v)->(pad_string f maxlen,v)) fs in + let r = + match r with + ((k,v)::fs) -> ((pad_rhs k maxlen),v)::(indent fs) + | _ -> r in + (String.concat "\n" (List.map (fun (f,v)->f^": "^v) r))^"\n" (* Used to escape commas in --minimal mode *) -let escape_commas x = - (* Escaping rules: *) - let rules = [ ',', "\\,"; (* , -> \, *) - '\\', "\\\\" (* \ -> \\ *) - ] in - Stdext.Xstringext.String.escaped ~rules x +let escape_commas x = + (* Escaping rules: *) + let rules = [ ',', "\\,"; (* , -> \, *) + '\\', "\\\\" (* \ -> \\ *) + ] in + Stdext.Xstringext.String.escaped ~rules x let make_printer sock minimal = - let buffer = ref [] in + let buffer = ref [] in - let multi_line_xapi_minimal pval = - match pval with - | (PTable rs) -> - if (List.length rs > 0) && (List.length (List.hd rs) > 0) then - let names = List.map (fun r -> snd (List.hd r)) rs in - let escaped_names = List.map escape_commas names in - buffer := (String.concat "," escaped_names) :: !buffer - | (PList ss) -> - let escaped_ss = List.map escape_commas ss in - buffer := (String.concat "," 
escaped_ss) :: !buffer - | _ -> - () - in + let multi_line_xapi_minimal pval = + match pval with + | (PTable rs) -> + if (List.length rs > 0) && (List.length (List.hd rs) > 0) then + let names = List.map (fun r -> snd (List.hd r)) rs in + let escaped_names = List.map escape_commas names in + buffer := (String.concat "," escaped_names) :: !buffer + | (PList ss) -> + let escaped_ss = List.map escape_commas ss in + buffer := (String.concat "," escaped_ss) :: !buffer + | _ -> + () + in - let multi_line_xapi pval = - match pval with - | (PTable rs) -> - List.iter (fun l -> marshal sock (Command (Print (l ^ "\n")))) (List.map multi_line_record rs) - | (PList ss) -> - List.iter (fun l -> marshal sock (Command (Print (l)))) ss - | (PMsg ss) -> - marshal sock (Command (Print ss)) - | (PStderr ss) -> - marshal sock (Command (PrintStderr (ss ^ "\n"))) - in + let multi_line_xapi pval = + match pval with + | (PTable rs) -> + List.iter (fun l -> marshal sock (Command (Print (l ^ "\n")))) (List.map multi_line_record rs) + | (PList ss) -> + List.iter (fun l -> marshal sock (Command (Print (l)))) ss + | (PMsg ss) -> + marshal sock (Command (Print ss)) + | (PStderr ss) -> + marshal sock (Command (PrintStderr (ss ^ "\n"))) + in - let minimal_flush () = - marshal sock (Command(Print (String.concat "," (!buffer)))) - in + let minimal_flush () = + marshal sock (Command(Print (String.concat "," (!buffer)))) + in - let flush () = - () - in + let flush () = + () + in - if minimal then (multi_line_xapi_minimal, minimal_flush) else (multi_line_xapi, flush) + if minimal then (multi_line_xapi_minimal, minimal_flush) else (multi_line_xapi, flush) diff --git a/ocaml/xapi/cli_progress_bar.ml b/ocaml/xapi/cli_progress_bar.ml index bf881185894..4e2d361b731 100644 --- a/ocaml/xapi/cli_progress_bar.ml +++ b/ocaml/xapi/cli_progress_bar.ml @@ -49,10 +49,10 @@ module Make(T: Floatable) = struct int_of_float (T.(to_float value /. (to_float t.max_value) *. 
(float_of_int (t.width - prefix - suffix)))) let hms secs = - let h = secs / 3600 in - let m = (secs mod 3600) / 60 in - let s = secs mod 60 in - Printf.sprintf "%02d:%02d:%02d" h m s + let h = secs / 3600 in + let m = (secs mod 3600) / 60 in + let s = secs mod 60 in + Printf.sprintf "%02d:%02d:%02d" h m s let eta t = let time_so_far = Unix.gettimeofday () -. t.start_time in diff --git a/ocaml/xapi/cli_protocol.ml b/ocaml/xapi/cli_protocol.ml index e3b2dd7ac6d..fdebd584ae2 100644 --- a/ocaml/xapi/cli_protocol.ml +++ b/ocaml/xapi/cli_protocol.ml @@ -11,9 +11,9 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Command-Line Interface (CLI) - *) +*) (** Used to ensure that we actually are talking to a thin CLI server *) let major = 0 @@ -26,36 +26,36 @@ let prefix = "XenSource thin CLI protocol" (** Command sent by the server to the client. If the command is "Save" then the server waits for "OK" from the client and then streams a list of data chunks to the client. 
*) -type command = - | Print of string - | Debug of string (* debug message to optionally display *) - | Load of string (* filename *) - | HttpGet of string * string (* filename * path *) - | HttpPut of string * string (* filename * path *) - | HttpConnect of string (* path *) - | Prompt (* request the user enter some text *) - | Exit of int (* exit with a success or failure code *) - | Error of string * string list (* code params *) - | PrintStderr of string (* print something to stderr *) +type command = + | Print of string + | Debug of string (* debug message to optionally display *) + | Load of string (* filename *) + | HttpGet of string * string (* filename * path *) + | HttpPut of string * string (* filename * path *) + | HttpConnect of string (* path *) + | Prompt (* request the user enter some text *) + | Exit of int (* exit with a success or failure code *) + | Error of string * string list (* code params *) + | PrintStderr of string (* print something to stderr *) (** In response to a server command, the client sends one of these. If the command was "Load" or "Prompt" then the client sends a list of data chunks. *) -type response = - | OK - | Wait - | Failed +type response = + | OK + | Wait + | Failed (** When streaming binary data, send in chunks with a known length and a special End marker at the end. 
*) -type blob_header = - | Chunk of int32 - | End +type blob_header = + | Chunk of int32 + | End -type message = - | Command of command - | Response of response - | Blob of blob_header +type message = + | Command of command + | Response of response + | Blob of blob_header (*****************************************************************************) (* Pretty-print functions *) @@ -73,9 +73,9 @@ let string_of_command = function | PrintStderr x -> "PrintStderr " ^ x let string_of_response = function - | OK -> "OK" - | Wait -> "Wait" - | Failed -> "Failed" + | OK -> "OK" + | Wait -> "Wait" + | Failed -> "Failed" let string_of_blob_header = function | Chunk x -> "Chunk " ^ (Int32.to_string x) @@ -89,10 +89,10 @@ let string_of_message = function (*****************************************************************************) (* Marshal/Unmarshal primitives *) -let marshal_int32 x = +let marshal_int32 x = let (>>) a b = Int32.shift_right_logical a b and (&&) a b = Int32.logand a b in - let a = (x >> 0) && 0xffl + let a = (x >> 0) && 0xffl and b = (x >> 8) && 0xffl and c = (x >> 16) && 0xffl and d = (x >> 24) && 0xffl in @@ -107,37 +107,37 @@ let marshal_int x = marshal_int32 (Int32.of_int x) let marshal_string x = marshal_int (String.length x) ^ x -let marshal_list f x = +let marshal_list f x = marshal_int (List.length x) ^ (String.concat "" (List.map f x)) type context = string * int (* offset *) -let unmarshal_int32 (s, offset) = +let unmarshal_int32 (s, offset) = let (<<<) a b = Int32.shift_left a b and (|||) a b = Int32.logor a b in - let a = Int32.of_int (int_of_char (s.[offset + 0])) - and b = Int32.of_int (int_of_char (s.[offset + 1])) - and c = Int32.of_int (int_of_char (s.[offset + 2])) + let a = Int32.of_int (int_of_char (s.[offset + 0])) + and b = Int32.of_int (int_of_char (s.[offset + 1])) + and c = Int32.of_int (int_of_char (s.[offset + 2])) and d = Int32.of_int (int_of_char (s.[offset + 3])) in (a <<< 0) ||| (b <<< 8) ||| (c <<< 16) ||| (d <<< 24), (s, 
offset + 4) -let unmarshal_int pos = +let unmarshal_int pos = let x, pos = unmarshal_int32 pos in Int32.to_int x, pos -let unmarshal_string pos = +let unmarshal_string pos = let len, (s, offset) = unmarshal_int pos in String.sub s offset len, (s, offset + len) -let unmarshal_list pos f = +let unmarshal_list pos f = let len, pos = unmarshal_int pos in let rec loop pos acc = function | 0 -> List.rev acc, pos - | n -> - let item, pos = f pos in - loop pos (item :: acc) (n - 1) in + | n -> + let item, pos = f pos in + loop pos (item :: acc) (n - 1) in loop pos [] len - + (*****************************************************************************) (* Marshal/Unmarshal higher-level messages *) @@ -157,131 +157,131 @@ let marshal_command = function | PrintStderr x -> marshal_int 16 ^ (marshal_string x) exception Unknown_tag of string * int - -let unmarshal_command pos = + +let unmarshal_command pos = let tag, pos = unmarshal_int pos in match tag with - | 0 -> let body, pos = unmarshal_string pos in Print body, pos - | 15 -> let body, pos = unmarshal_string pos in Debug body, pos - | 1 -> let body, pos = unmarshal_string pos in Load body, pos - | 12 -> - let a, pos = unmarshal_string pos in - let b, pos = unmarshal_string pos in - HttpGet(a, b), pos - | 13 -> - let a, pos = unmarshal_string pos in - let b, pos = unmarshal_string pos in - HttpPut(a, b), pos - | 3 -> Prompt, pos - | 4 -> let body, pos = unmarshal_int pos in Exit body, pos - | 17 -> - let a, pos = unmarshal_string pos in - HttpConnect(a), pos - | 14 -> - let code, pos = unmarshal_string pos in - let params, pos = unmarshal_list pos unmarshal_string in - Error(code, params), pos - | 16 -> let body, pos = unmarshal_string pos in PrintStderr body, pos - | n -> raise (Unknown_tag("command", n)) + | 0 -> let body, pos = unmarshal_string pos in Print body, pos + | 15 -> let body, pos = unmarshal_string pos in Debug body, pos + | 1 -> let body, pos = unmarshal_string pos in Load body, pos + | 12 -> + let a, pos = 
unmarshal_string pos in + let b, pos = unmarshal_string pos in + HttpGet(a, b), pos + | 13 -> + let a, pos = unmarshal_string pos in + let b, pos = unmarshal_string pos in + HttpPut(a, b), pos + | 3 -> Prompt, pos + | 4 -> let body, pos = unmarshal_int pos in Exit body, pos + | 17 -> + let a, pos = unmarshal_string pos in + HttpConnect(a), pos + | 14 -> + let code, pos = unmarshal_string pos in + let params, pos = unmarshal_list pos unmarshal_string in + Error(code, params), pos + | 16 -> let body, pos = unmarshal_string pos in PrintStderr body, pos + | n -> raise (Unknown_tag("command", n)) let marshal_response = function - | OK -> marshal_int 5 - | Wait -> marshal_int 18 + | OK -> marshal_int 5 + | Wait -> marshal_int 18 | Failed -> marshal_int 6 -let unmarshal_response pos = +let unmarshal_response pos = let tag, pos = unmarshal_int pos in match tag with - | 5 -> OK, pos - | 18 -> Wait, pos - | 6 -> Failed, pos - | n -> raise (Unknown_tag("response", n)) + | 5 -> OK, pos + | 18 -> Wait, pos + | 6 -> Failed, pos + | n -> raise (Unknown_tag("response", n)) let marshal_blob_header = function | Chunk x -> marshal_int 7 ^ (marshal_int32 x) | End -> marshal_int 8 -let unmarshal_blob_header pos = +let unmarshal_blob_header pos = let tag, pos = unmarshal_int pos in match tag with - | 7 -> let body, pos = unmarshal_int32 pos in Chunk body, pos - | 8 -> End, pos - | n -> raise (Unknown_tag("blob_header", n)) + | 7 -> let body, pos = unmarshal_int32 pos in Chunk body, pos + | 8 -> End, pos + | n -> raise (Unknown_tag("blob_header", n)) let marshal_message = function | Command x -> marshal_int 9 ^ (marshal_command x) | Response x -> marshal_int 10 ^ (marshal_response x) - | Blob x -> marshal_int 11 ^ (marshal_blob_header x) + | Blob x -> marshal_int 11 ^ (marshal_blob_header x) -let write_string (fd: Unix.file_descr) buf = +let write_string (fd: Unix.file_descr) buf = Stdext.Unixext.really_write fd buf 0 (String.length buf) (** Marshal a message to a file descriptor 
prefixing it with total header length *) -let marshal (fd: Unix.file_descr) x = +let marshal (fd: Unix.file_descr) x = let payload = marshal_message x in write_string fd (marshal_int (String.length payload)); write_string fd payload exception Unmarshal_failure of exn * string -let unmarshal_message pos = +let unmarshal_message pos = let tag, pos = unmarshal_int pos in match tag with - | 9 -> let body, pos = unmarshal_command pos in Command body, pos - | 10 -> let body, pos = unmarshal_response pos in Response body, pos - | 11 -> let body, pos = unmarshal_blob_header pos in Blob body, pos - | n -> raise (Unknown_tag("blob_header", n)) + | 9 -> let body, pos = unmarshal_command pos in Command body, pos + | 10 -> let body, pos = unmarshal_response pos in Response body, pos + | 11 -> let body, pos = unmarshal_blob_header pos in Blob body, pos + | n -> raise (Unknown_tag("blob_header", n)) (** Unmarshal a message from a file descriptor *) let unmarshal (fd: Unix.file_descr) = - let buf = Buffer.create 0 in - try - let head = Stdext.Unixext.try_read_string ~limit:4 fd in - Buffer.add_string buf head; - if String.length head < 4 then raise End_of_file; - let length, _ = unmarshal_int (head, 0) in - let body = Stdext.Unixext.try_read_string ~limit:length fd in - Buffer.add_string buf body; - if String.length body < length then raise End_of_file; - fst (unmarshal_message (body, 0)) - with e -> raise (Unmarshal_failure (e, Buffer.contents buf)) - -let marshal_protocol (fd: Unix.file_descr) = + let buf = Buffer.create 0 in + try + let head = Stdext.Unixext.try_read_string ~limit:4 fd in + Buffer.add_string buf head; + if String.length head < 4 then raise End_of_file; + let length, _ = unmarshal_int (head, 0) in + let body = Stdext.Unixext.try_read_string ~limit:length fd in + Buffer.add_string buf body; + if String.length body < length then raise End_of_file; + fst (unmarshal_message (body, 0)) + with e -> raise (Unmarshal_failure (e, Buffer.contents buf)) + +let 
marshal_protocol (fd: Unix.file_descr) = write_string fd (prefix ^ (marshal_int major) ^ (marshal_int minor)) exception Protocol_mismatch of string exception Not_a_cli_server let unmarshal_protocol (fd: Unix.file_descr) = - let buf = Buffer.create 0 in - try - let prefix_len = String.length prefix in - let prefix' = Stdext.Unixext.try_read_string ~limit:prefix_len fd in - Buffer.add_string buf prefix'; - if String.length prefix' < prefix_len then raise End_of_file; - if prefix' <> prefix then raise Not_a_cli_server; - let major_str = Stdext.Unixext.try_read_string ~limit:4 fd in - Buffer.add_string buf major_str; - if String.length major_str < 4 then raise End_of_file; - let minor_str = Stdext.Unixext.try_read_string ~limit:4 fd in - Buffer.add_string buf minor_str; - if String.length minor_str < 4 then raise End_of_file; - let major', _ = unmarshal_int (major_str, 0) in - let minor', _ = unmarshal_int (minor_str, 0) in - major', minor' - with e -> raise (Unmarshal_failure (e, Buffer.contents buf)) + let buf = Buffer.create 0 in + try + let prefix_len = String.length prefix in + let prefix' = Stdext.Unixext.try_read_string ~limit:prefix_len fd in + Buffer.add_string buf prefix'; + if String.length prefix' < prefix_len then raise End_of_file; + if prefix' <> prefix then raise Not_a_cli_server; + let major_str = Stdext.Unixext.try_read_string ~limit:4 fd in + Buffer.add_string buf major_str; + if String.length major_str < 4 then raise End_of_file; + let minor_str = Stdext.Unixext.try_read_string ~limit:4 fd in + Buffer.add_string buf minor_str; + if String.length minor_str < 4 then raise End_of_file; + let major', _ = unmarshal_int (major_str, 0) in + let minor', _ = unmarshal_int (minor_str, 0) in + major', minor' + with e -> raise (Unmarshal_failure (e, Buffer.contents buf)) (*****************************************************************************) (* Marshal/Unmarshal unit test *) -let marshal_unmarshal (a: message) = +let marshal_unmarshal (a: message) = let 
x = marshal_message a in let b, (s, offset) = unmarshal_message (x, 0) in - if a <> b - then failwith (Printf.sprintf "marshal_unmarshal failure: %s <> %s" - (string_of_message a) (string_of_message b)); + if a <> b + then failwith (Printf.sprintf "marshal_unmarshal failure: %s <> %s" + (string_of_message a) (string_of_message b)); if String.length x <> offset then failwith (Printf.sprintf "Failed to consume all data in marshal_unmarshal %s (length=%d offset=%d)" - (string_of_message a) (String.length x) offset) + (string_of_message a) (String.length x) offset) -let examples = +let examples = [ Command (Print "Hello there"); Command (Debug "this is debug output"); Command (Load "ova.xml"); diff --git a/ocaml/xapi/cli_util.ml b/ocaml/xapi/cli_util.ml index c1960e28b3a..1058373da6d 100644 --- a/ocaml/xapi/cli_util.ml +++ b/ocaml/xapi/cli_util.ml @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Command-Line Interface (CLI) - *) +*) open Sexplib.Std - + module D = Debug.Make(struct let name = "cli" end) open D @@ -32,51 +32,51 @@ let log_exn_continue msg f x = try f x with e -> debug "Ignoring exception: %s w exception Cli_failure of string (** call [callback task_record] on every update to the task, until it completes or fails *) -let track callback rpc (session_id:API.ref_session) task = +let track callback rpc (session_id:API.ref_session) task = let classes = [ "task" ] in - finally + finally (fun () -> let finished = ref false in while not(!finished) do - Client.Event.register ~rpc ~session_id ~classes; - try - (* Need to check once after registering to avoid a race *) - finished := Client.Task.get_status ~rpc ~session_id ~self:task <> `pending; - - while not(!finished) do - let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in - let events = List.map Event_helper.record_of_event events in - List.iter (function - | Event_helper.Task 
(t, Some t_rec) when t = task -> callback t_rec - | _ -> () - ) events; - let matches = function - | Event_helper.Task (t, Some t_rec) -> t = task && t_rec.API.task_status <> `pending - | _ -> false in - finished := List.fold_left (||) false (List.map matches events) - done - with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> - debug "Caught EVENTS_LOST; reregistering"; - Client.Event.unregister ~rpc ~session_id ~classes + Client.Event.register ~rpc ~session_id ~classes; + try + (* Need to check once after registering to avoid a race *) + finished := Client.Task.get_status ~rpc ~session_id ~self:task <> `pending; + + while not(!finished) do + let events = Event_types.events_of_rpc (Client.Event.next ~rpc ~session_id) in + let events = List.map Event_helper.record_of_event events in + List.iter (function + | Event_helper.Task (t, Some t_rec) when t = task -> callback t_rec + | _ -> () + ) events; + let matches = function + | Event_helper.Task (t, Some t_rec) -> t = task && t_rec.API.task_status <> `pending + | _ -> false in + finished := List.fold_left (||) false (List.map matches events) + done + with Api_errors.Server_error(code, _) when code = Api_errors.events_lost -> + debug "Caught EVENTS_LOST; reregistering"; + Client.Event.unregister ~rpc ~session_id ~classes done) (fun () -> Client.Event.unregister ~rpc ~session_id ~classes) let result_from_task rpc session_id remote_task = - match Client.Task.get_status rpc session_id remote_task with - | `cancelling | `cancelled -> - raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of remote_task ])) - | `pending -> - failwith "wait_for_task_completion failed; task is still pending" - | `success -> - () - | `failure -> - let error_info = Client.Task.get_error_info rpc session_id remote_task in - let trace = Client.Task.get_backtrace rpc session_id remote_task in - let exn = match error_info with - | code :: params -> Api_errors.Server_error(code, params) - | [] -> Failure 
(Printf.sprintf "Task failed but no error recorded: %s" (Ref.string_of remote_task)) in - Backtrace.(add exn (t_of_sexp (Sexplib.Sexp.of_string trace))); - raise exn + match Client.Task.get_status rpc session_id remote_task with + | `cancelling | `cancelled -> + raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of remote_task ])) + | `pending -> + failwith "wait_for_task_completion failed; task is still pending" + | `success -> + () + | `failure -> + let error_info = Client.Task.get_error_info rpc session_id remote_task in + let trace = Client.Task.get_backtrace rpc session_id remote_task in + let exn = match error_info with + | code :: params -> Api_errors.Server_error(code, params) + | [] -> Failure (Printf.sprintf "Task failed but no error recorded: %s" (Ref.string_of remote_task)) in + Backtrace.(add exn (t_of_sexp (Sexplib.Sexp.of_string trace))); + raise exn (** Use the event system to wait for a specific task to complete (succeed, failed or be cancelled) *) let wait_for_task_completion = track (fun _ -> ()) @@ -86,70 +86,70 @@ module P = Cli_progress_bar.Make(struct type t = float let to_float x = x end) let wait_for_task_completion_with_progress fd = let p = P.create 80 0. 1. 
in track (fun t -> - let progress_updated = P.update p t.API.task_progress in - if progress_updated then marshal fd (Command (PrintStderr (Printf.sprintf "\r%s" (P.string_of_bar p)))); - if t.API.task_status <> `pending then begin - marshal fd (Command (PrintStderr "\n")); - marshal fd (Command (PrintStderr (P.summarise p))) - end - ) + let progress_updated = P.update p t.API.task_progress in + if progress_updated then marshal fd (Command (PrintStderr (Printf.sprintf "\r%s" (P.string_of_bar p)))); + if t.API.task_status <> `pending then begin + marshal fd (Command (PrintStderr "\n")); + marshal fd (Command (PrintStderr (P.summarise p))) + end + ) let track_http_operation ?use_existing_task ?(progress_bar=false) fd rpc session_id (make_command: API.ref_task -> command) label = (* Need to associate the operation with a task so we can check for failure *) let task_id = match use_existing_task with None -> Client.Task.create rpc session_id label "" | Some t -> t in finally (fun () -> - marshal fd (Command (make_command task_id)); - let response = ref (Response Wait) in - let receive_heartbeats = Thread.create - (fun () -> while !response = Response Wait do response := unmarshal fd done) () in - (* Wait for the task to complete *) - (if progress_bar - then wait_for_task_completion_with_progress fd - else wait_for_task_completion) + marshal fd (Command (make_command task_id)); + let response = ref (Response Wait) in + let receive_heartbeats = Thread.create + (fun () -> while !response = Response Wait do response := unmarshal fd done) () in + (* Wait for the task to complete *) + (if progress_bar + then wait_for_task_completion_with_progress fd + else wait_for_task_completion) rpc session_id task_id; - Thread.join receive_heartbeats; - if !response = Response OK then begin - if Client.Task.get_status rpc session_id task_id = `success then begin - let result = Client.Task.get_result rpc session_id task_id in - debug "result was [%s]" result; - result - end else begin - let 
params = Client.Task.get_error_info rpc session_id task_id in - raise (Api_errors.Server_error(List.hd params, List.tl params)); - end + Thread.join receive_heartbeats; + if !response = Response OK then begin + if Client.Task.get_status rpc session_id task_id = `success then begin + let result = Client.Task.get_result rpc session_id task_id in + debug "result was [%s]" result; + result + end else begin + let params = Client.Task.get_error_info rpc session_id task_id in + raise (Api_errors.Server_error(List.hd params, List.tl params)); + end end else begin - debug "client-side reports failure"; - (* Debug info might have been written into the task, let's see if there is some *) - Thread.delay 1.; - (* Bit of a race here - we can't simply wait for the task to be completed since *) - (* 'response failed' doesn't indicate whether it managed to talk to the handler *) - (* or not, so we don't know if the handler got the task_id to complete. The *) - (* import/export commands get round this by setting a negative progress, and *) - (* using this as an indicator that the handler never got the task. All handlers *) - (* would need to use this mechanism if we want to check for it here. For now a *) - (* delay of 1 will do... *) - let params = Client.Task.get_error_info rpc session_id task_id in - if params = [] then - raise (Api_errors.Server_error(Api_errors.client_error, [])) - else - raise (Api_errors.Server_error(List.hd params, List.tl params)); + debug "client-side reports failure"; + (* Debug info might have been written into the task, let's see if there is some *) + Thread.delay 1.; + (* Bit of a race here - we can't simply wait for the task to be completed since *) + (* 'response failed' doesn't indicate whether it managed to talk to the handler *) + (* or not, so we don't know if the handler got the task_id to complete. 
The *) + (* import/export commands get round this by setting a negative progress, and *) + (* using this as an indicator that the handler never got the task. All handlers *) + (* would need to use this mechanism if we want to check for it here. For now a *) + (* delay of 1 will do... *) + let params = Client.Task.get_error_info rpc session_id task_id in + if params = [] then + raise (Api_errors.Server_error(Api_errors.client_error, [])) + else + raise (Api_errors.Server_error(List.hd params, List.tl params)); end) (fun () -> (* if we created our own task then destroy it again; if the task was supplied to us then don't destroy it -- - if clients pass a task in on the command-line then they are responsible for destroying *) + if clients pass a task in on the command-line then they are responsible for destroying *) match use_existing_task with - None -> log_exn_continue "destroying task" (fun x -> Client.Task.destroy rpc session_id x) task_id + None -> log_exn_continue "destroying task" (fun x -> Client.Task.destroy rpc session_id x) task_id | Some _ -> () ) (* Rewrite the provisioning XML fragment to create all disks on a new, specified SR *) -let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = +let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = let rewrite_xml xml newsrname = let rewrite_disk = function | Xml.Element("disk",params,[]) -> - Xml.Element("disk",List.map (fun (x,y) -> if x<>"sr" then (x,y) else ("sr",newsrname)) params,[]) + Xml.Element("disk",List.map (fun (x,y) -> if x<>"sr" then (x,y) else ("sr",newsrname)) params,[]) | x -> x in match xml with @@ -165,7 +165,7 @@ let rewrite_provisioning_xml rpc session_id new_vm sr_uuid = Client.VM.add_to_other_config rpc session_id new_vm "disks" (Xml.to_string newdisks) end -let get_default_sr_uuid rpc session_id = +let get_default_sr_uuid rpc session_id = let pool = List.hd (Client.Pool.get_all rpc session_id) in let sr = Client.Pool.get_default_SR rpc session_id pool in (try Some 
(Client.SR.get_uuid rpc session_id sr) (* throws an exception if not found *) @@ -174,9 +174,9 @@ let get_default_sr_uuid rpc session_id = (* Given a string that might be a ref, lookup ref in cache and print uuid/name-label where possible *) let ref_convert x = match Ref_index.lookup x with - None -> x - | Some ir -> - ir.Ref_index.uuid^(match ir.Ref_index.name_label with None->"" | Some x -> " ("^x^")") + None -> x + | Some ir -> + ir.Ref_index.uuid^(match ir.Ref_index.name_label with None->"" | Some x -> " ("^x^")") (* Marshal an API-style server-error *) @@ -184,9 +184,9 @@ let get_server_error code params = try let error = Hashtbl.find Datamodel.errors code in (* There ought to be a bijection between parameters mentioned in - datamodel.ml and those in the exception but this is unchecked and + datamodel.ml and those in the exception but this is unchecked and false in some cases, defined here. *) - let required = + let required = if code = Api_errors.vms_failed_to_cooperate then List.map (fun _ -> "VM") params else error.Datamodel_types.err_params in @@ -202,50 +202,50 @@ let get_server_error code params = with _ -> None -let server_error (code: string) (params: string list) sock = +let server_error (code: string) (params: string list) sock = begin match get_server_error code params with - | None -> - marshal sock (Command (Error(code, List.map ref_convert params))); - | Some (e, l) -> - marshal sock (Command (PrintStderr (e ^ "\n"))); - List.iter (fun pv -> marshal sock (Command (PrintStderr (pv ^ "\n")))) l; + | None -> + marshal sock (Command (Error(code, List.map ref_convert params))); + | Some (e, l) -> + marshal sock (Command (PrintStderr (e ^ "\n"))); + List.iter (fun pv -> marshal sock (Command (PrintStderr (pv ^ "\n")))) l; end -let user_says_yes fd = +let user_says_yes fd = marshal fd (Command (Print "Type 'yes' to continue")); marshal fd (Command (Prompt)); let response = match unmarshal fd with | Blob (Chunk len) -> - debug "Reading a chunk of %ld 
bytes" len; - Unixext.really_read_string fd (Int32.to_int len) + debug "Reading a chunk of %ld bytes" len; + Unixext.really_read_string fd (Int32.to_int len) | _ -> failwith "Protocol error" in begin match unmarshal fd with - | Blob End -> () - | _ -> failwith "Protocol error" + | Blob End -> () + | _ -> failwith "Protocol error" end; let result = String.lowercase (String.strip String.isspace response)="yes" in if not(result) then marshal fd (Command (Print ("Aborted (you typed: '"^response^"')"))); result -type someone = - | Master (** I want to talk to the master *) - | SpecificHost of API.ref_host (** I want to talk to [h] (who may be the master *) +type someone = + | Master (** I want to talk to the master *) + | SpecificHost of API.ref_host (** I want to talk to [h] (who may be the master *) -(** Return a uri prefix which will cause the CLI to talk to either the - master or to a specific host (which may be the master). This will - work even when the management interface is disabled. *) +(** Return a uri prefix which will cause the CLI to talk to either the + master or to a specific host (which may be the master). This will + work even when the management interface is disabled. 
*) let rec uri_of_someone rpc session_id = function - | Master -> - (* See ocaml/xe-cli/newcli.ml:parse_url *) - "" - | SpecificHost h -> - let pool = List.hd (Client.Pool.get_all rpc session_id) in - let pool_master = Client.Pool.get_master rpc session_id pool in - if h = pool_master - then uri_of_someone rpc session_id Master - else - let address = Client.Host.get_address rpc session_id h in - "https://" ^ address + | Master -> + (* See ocaml/xe-cli/newcli.ml:parse_url *) + "" + | SpecificHost h -> + let pool = List.hd (Client.Pool.get_all rpc session_id) in + let pool_master = Client.Pool.get_master rpc session_id pool in + if h = pool_master + then uri_of_someone rpc session_id Master + else + let address = Client.Host.get_address rpc session_id h in + "https://" ^ address diff --git a/ocaml/xapi/cluster_stack_constraints.ml b/ocaml/xapi/cluster_stack_constraints.ml index 4030ee788c2..c836e60269b 100644 --- a/ocaml/xapi/cluster_stack_constraints.ml +++ b/ocaml/xapi/cluster_stack_constraints.ml @@ -6,83 +6,83 @@ open D (* Check which cluster stack we can use based on the kinds of SRs that are attached *) let required_cluster_stack ~__context = - let constraints = List.map - (fun (_, rc) -> rc.API.sM_type, rc.API.sM_required_cluster_stack) - (Db.SM.get_all_records ~__context) - in - (* Check which PBDs are attached on the master (assume this is running on the master) *) - let localhost = Helpers.get_localhost ~__context in - let pbds = Db.PBD.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of localhost)), - Eq (Field "currently_attached", Literal "true") - )) in - (* Obtain constraints from the SR drivers. Each SR that has constraints - * returns a list of alternative cluster stacks, any one of which will - * work for the SR. 
*) - let required_stacks = List.filter_map (fun pbd -> - let sr = Db.PBD.get_SR ~__context ~self:pbd in - let sr_type = Db.SR.get_type ~__context ~self:sr in - if List.mem_assoc sr_type constraints then - match List.assoc sr_type constraints with - | [] -> None (* No constraints *) - | l -> Some l (* Any one of these will do *) - else begin - error "SR type not found in SM table."; - failwith "SR type not found in SM table." - end - ) pbds in - match required_stacks with - | [] -> None (* None of the attached SRs have constraints *) - | [stacks] -> - (* There is one SR with constraints; pick the first alternative. *) - Some (List.hd stacks) - | hd :: tl -> - (* There are multiple attached SRs with constraints. The intersection of - * the sets of alternatives captures which cluster stacks are possible. *) - match List.fold_left List.intersect hd tl with - | [] -> - (* This must be avoided by the PBD.plug code *) - error "Conflicting cluster stack demands."; - failwith "Conflicting cluster stack demands." - | stack :: _ -> - (* Multiple options; just pick the first one. *) - Some stack + let constraints = List.map + (fun (_, rc) -> rc.API.sM_type, rc.API.sM_required_cluster_stack) + (Db.SM.get_all_records ~__context) + in + (* Check which PBDs are attached on the master (assume this is running on the master) *) + let localhost = Helpers.get_localhost ~__context in + let pbds = Db.PBD.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of localhost)), + Eq (Field "currently_attached", Literal "true") + )) in + (* Obtain constraints from the SR drivers. Each SR that has constraints + * returns a list of alternative cluster stacks, any one of which will + * work for the SR. 
*) + let required_stacks = List.filter_map (fun pbd -> + let sr = Db.PBD.get_SR ~__context ~self:pbd in + let sr_type = Db.SR.get_type ~__context ~self:sr in + if List.mem_assoc sr_type constraints then + match List.assoc sr_type constraints with + | [] -> None (* No constraints *) + | l -> Some l (* Any one of these will do *) + else begin + error "SR type not found in SM table."; + failwith "SR type not found in SM table." + end + ) pbds in + match required_stacks with + | [] -> None (* None of the attached SRs have constraints *) + | [stacks] -> + (* There is one SR with constraints; pick the first alternative. *) + Some (List.hd stacks) + | hd :: tl -> + (* There are multiple attached SRs with constraints. The intersection of + * the sets of alternatives captures which cluster stacks are possible. *) + match List.fold_left List.intersect hd tl with + | [] -> + (* This must be avoided by the PBD.plug code *) + error "Conflicting cluster stack demands."; + failwith "Conflicting cluster stack demands." + | stack :: _ -> + (* Multiple options; just pick the first one. *) + Some stack (* Choose a cluster stack given the constraints. Use default stack if there are no constaints. 
*) let choose_cluster_stack ~__context = - match required_cluster_stack ~__context with - | Some stack -> stack - | None -> !Xapi_globs.cluster_stack_default + match required_cluster_stack ~__context with + | Some stack -> stack + | None -> !Xapi_globs.cluster_stack_default (* Check whether the given SR is compatible with the given cluster stack *) let assert_sr_compatible ~__context ~cluster_stack ~sr = - match Xha_scripts.get_supported_srs cluster_stack with - | None -> () (* No constraints *) - | Some srs -> - let sr_type = Db.SR.get_type ~__context ~self:sr in - if not (List.exists (fun x -> x = sr_type) srs) then - raise (Api_errors.Server_error (Api_errors.incompatible_statefile_sr, [sr_type])) + match Xha_scripts.get_supported_srs cluster_stack with + | None -> () (* No constraints *) + | Some srs -> + let sr_type = Db.SR.get_type ~__context ~self:sr in + if not (List.exists (fun x -> x = sr_type) srs) then + raise (Api_errors.Server_error (Api_errors.incompatible_statefile_sr, [sr_type])) (* Check whether we can attach the SR given the cluster stack that is currently in use *) let assert_cluster_stack_compatible ~__context sr = - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then begin - let current_stack = Db.Pool.get_ha_cluster_stack ~__context ~self:pool in - let sr_type = Db.SR.get_type ~__context ~self:sr in - let sms = Db.SM.get_refs_where ~__context ~expr:(Eq (Field "type", Literal sr_type)) in - match sms with - | sm :: _ -> - let constraints = Db.SM.get_required_cluster_stack ~__context ~self:sm in - (match constraints with - | [] -> () (* No constraints *) - | alternatives -> - if List.exists (fun x -> x = current_stack) alternatives then - () (* Constraints satisfied *) - else - raise (Api_errors.Server_error - (Api_errors.incompatible_cluster_stack_active, [String.concat "," alternatives])) - ) - | [] -> - error "SR type not found in SM table."; - failwith "SR type not found in SM table." 
- end + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then begin + let current_stack = Db.Pool.get_ha_cluster_stack ~__context ~self:pool in + let sr_type = Db.SR.get_type ~__context ~self:sr in + let sms = Db.SM.get_refs_where ~__context ~expr:(Eq (Field "type", Literal sr_type)) in + match sms with + | sm :: _ -> + let constraints = Db.SM.get_required_cluster_stack ~__context ~self:sm in + (match constraints with + | [] -> () (* No constraints *) + | alternatives -> + if List.exists (fun x -> x = current_stack) alternatives then + () (* Constraints satisfied *) + else + raise (Api_errors.Server_error + (Api_errors.incompatible_cluster_stack_active, [String.concat "," alternatives])) + ) + | [] -> + error "SR type not found in SM table."; + failwith "SR type not found in SM table." + end diff --git a/ocaml/xapi/config_file_sync.ml b/ocaml/xapi/config_file_sync.ml index e99b80c2742..61141c34662 100644 --- a/ocaml/xapi/config_file_sync.ml +++ b/ocaml/xapi/config_file_sync.ml @@ -26,11 +26,11 @@ type config = { password : string } with rpc let config_sync_version = 2 let config_sync_uri = - Filename.concat Constants.config_sync_uri (string_of_int config_sync_version) + Filename.concat Constants.config_sync_uri (string_of_int config_sync_version) let read_config_file () = match get_password superuser with - | None -> failwith "Couldn't get password" - | Some p -> { password = p } + | None -> failwith "Couldn't get password" + | Some p -> { password = p } let parse_config_string config = Jsonrpc.of_string config |> config_of_rpc @@ -41,7 +41,7 @@ let rewrite_config_files config = parse_config_string config |> write_config let write_to_fd s msg = Unix.write s msg 0 (String.length msg) |> ignore let transmit_config_files s = - read_config_file () |> rpc_of_config |> Jsonrpc.to_string |> write_to_fd s + read_config_file () |> rpc_of_config |> Jsonrpc.to_string |> write_to_fd s (* We still need to respect older XenServer hosts 
which are expecting the entire /etc/password file. We need to make sure we send the @@ -57,16 +57,16 @@ let config_file_sync_handler (req: Http.Request.t) s _ = debug "received request to write out dom0 config files"; Xapi_http.with_context "Syncing dom0 config files over HTTP" req s (fun __context -> - let uri = String.split '/' (req.Http.Request.uri) |> List.filter (fun x -> x <> "") in - req.Http.Request.close <- true; - debug "sending headers"; - Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); - match uri with - | [path; version] when current version -> + let uri = String.split '/' (req.Http.Request.uri) |> List.filter (fun x -> x <> "") in + req.Http.Request.close <- true; + debug "sending headers"; + Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); + match uri with + | [path; version] when current version -> debug "writing dom0 config files"; transmit_config_files s; debug "finished writing dom0 config files" - | _ -> + | _ -> debug "writing legacy dom0 config files"; legacy_transmit_passwd s; debug "finished writing legacy dom0 config files") @@ -74,17 +74,17 @@ let config_file_sync_handler (req: Http.Request.t) s _ = let fetch_config_files_internal ~master_address ~pool_secret = Server_helpers.exec_with_new_task "fetch_config_files" (fun __context -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let request = Xapi_http.http_request - ~cookie:[ "session_id", Ref.string_of session_id ] - Http.Get config_sync_uri in - let open Xmlrpc_client in - let transport = SSL (SSL.make (), master_address, !Xapi_globs.https_port) in - with_transport transport - (with_http request - (fun (response, fd) -> - Stdext.Unixext.string_of_fd fd)))) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let request = Xapi_http.http_request + ~cookie:[ "session_id", Ref.string_of session_id ] + Http.Get config_sync_uri in + let open Xmlrpc_client in + let transport = SSL (SSL.make (), master_address, !Xapi_globs.https_port) in 
+ with_transport transport + (with_http request + (fun (response, fd) -> + Stdext.Unixext.string_of_fd fd)))) (* Invoked on slave as a notification that config files may have changed. Slaves can use this to decide whether to sync the new config files if the hash is different from the @@ -98,7 +98,7 @@ let fetch_config_files ~master_address ~pool_secret = let fetch_config_files_on_slave_startup () = Server_helpers.exec_with_new_task "checking no other known hosts are masters" (fun __context -> - let master_address = Helpers.get_main_ip_address () in - let pool_secret = !Xapi_globs.pool_secret in - let config_files = fetch_config_files_internal ~master_address ~pool_secret in - rewrite_config_files config_files) + let master_address = Helpers.get_main_ip_address () in + let pool_secret = !Xapi_globs.pool_secret in + let config_files = fetch_config_files_internal ~master_address ~pool_secret in + rewrite_config_files config_files) diff --git a/ocaml/xapi/console.ml b/ocaml/xapi/console.ml index e42f2200916..86573e5922a 100644 --- a/ocaml/xapi/console.ml +++ b/ocaml/xapi/console.ml @@ -26,125 +26,125 @@ open D exception Failure type address = -| Port of int (* console is listening on localhost:port *) -| Path of string (* console is listening on a Unix domain socket *) + | Port of int (* console is listening on localhost:port *) + | Path of string (* console is listening on a Unix domain socket *) let string_of_address = function -| Port x -> "localhost:" ^ (string_of_int x) -| Path x -> "unix:" ^ x + | Port x -> "localhost:" ^ (string_of_int x) + | Path x -> "unix:" ^ x let address_of_console __context console : address option = - let vm = Db.Console.get_VM __context console in - let address_option = - if Db.VM.get_is_control_domain ~__context ~self:vm - then Some (Port (Db.Console.get_port ~__context ~self:console |> Int64.to_int)) - else begin - try - let open Xenops_interface in - let id = Xapi_xenops.id_of_vm ~__context ~self:vm in - let dbg = 
Context.string_of_task __context in - let open Xapi_xenops_queue in - let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in - let _, s = Client.VM.stat dbg id in - - let proto = match Db.Console.get_protocol __context console with - | `rfb -> Vm.Rfb - | `vt100 -> Vm.Vt100 - | `rdp -> failwith "No support for tunnelling RDP" in - let console = List.find (fun x -> x.Vm.protocol = proto) s.Vm.consoles in - Some (if console.Vm.path = "" then Port console.Vm.port else Path console.Vm.path) - with e -> - debug "%s" (Printexc.to_string e); - None - end - in - debug "VM %s console port: %s" (Ref.string_of vm) (Opt.default "None" (Opt.map (fun x -> "Some " ^ (string_of_address x)) address_option)); - address_option - -let real_proxy __context _ _ vnc_port s = - try - Http_svr.headers s (Http.http_200_ok ()); - let vnc_sock = match vnc_port with - | Port x -> Unixext.open_connection_fd "127.0.0.1" x - | Path x -> Unixext.open_connection_unix_fd x in - - (* Unixext.proxy closes fds itself so we must dup here *) - let s' = Unix.dup s in - debug "Connected; running proxy (between fds: %d and %d)" (Unixext.int_of_file_descr vnc_sock) (Unixext.int_of_file_descr s'); - Unixext.proxy vnc_sock s'; - debug "Proxy exited" - with - exn -> debug "error: %s" (ExnHelper.string_of_exn exn) - -let fake_proxy __context _ _ console s = + let vm = Db.Console.get_VM __context console in + let address_option = + if Db.VM.get_is_control_domain ~__context ~self:vm + then Some (Port (Db.Console.get_port ~__context ~self:console |> Int64.to_int)) + else begin + try + let open Xenops_interface in + let id = Xapi_xenops.id_of_vm ~__context ~self:vm in + let dbg = Context.string_of_task __context in + let open Xapi_xenops_queue in + let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in + let _, s = Client.VM.stat dbg id in + + let proto = match Db.Console.get_protocol __context console with + | `rfb -> Vm.Rfb + | `vt100 -> Vm.Vt100 + | `rdp -> 
failwith "No support for tunnelling RDP" in + let console = List.find (fun x -> x.Vm.protocol = proto) s.Vm.consoles in + Some (if console.Vm.path = "" then Port console.Vm.port else Path console.Vm.path) + with e -> + debug "%s" (Printexc.to_string e); + None + end + in + debug "VM %s console port: %s" (Ref.string_of vm) (Opt.default "None" (Opt.map (fun x -> "Some " ^ (string_of_address x)) address_option)); + address_option + +let real_proxy __context _ _ vnc_port s = + try + Http_svr.headers s (Http.http_200_ok ()); + let vnc_sock = match vnc_port with + | Port x -> Unixext.open_connection_fd "127.0.0.1" x + | Path x -> Unixext.open_connection_unix_fd x in + + (* Unixext.proxy closes fds itself so we must dup here *) + let s' = Unix.dup s in + debug "Connected; running proxy (between fds: %d and %d)" (Unixext.int_of_file_descr vnc_sock) (Unixext.int_of_file_descr s'); + Unixext.proxy vnc_sock s'; + debug "Proxy exited" + with + exn -> debug "error: %s" (ExnHelper.string_of_exn exn) + +let fake_proxy __context _ _ console s = Rfb_randomtest.server s let check_wsproxy () = - try - let pid = int_of_string (Unixext.string_of_file "/var/run/wsproxy.pid") in - Unix.kill pid 0; - true - with _ -> false + try + let pid = int_of_string (Unixext.string_of_file "/var/run/wsproxy.pid") in + Unix.kill pid 0; + true + with _ -> false let ensure_proxy_running () = - if check_wsproxy () then () else begin - ignore(Forkhelpers.execute_command_get_output "/opt/xensource/libexec/wsproxy" []); - Thread.delay 1.0; - end + if check_wsproxy () then () else begin + ignore(Forkhelpers.execute_command_get_output "/opt/xensource/libexec/wsproxy" []); + Thread.delay 1.0; + end let ws_proxy __context req protocol address s = let port = match address with - | Port p -> p - | Path _ -> - error "No implementation for web-sockets console proxy to a Unix domain socket"; - Http_svr.headers s (Http.http_501_method_not_implemented ()); - failwith "ws_proxy: not implemented" in + | Port p -> p + | 
Path _ -> + error "No implementation for web-sockets console proxy to a Unix domain socket"; + Http_svr.headers s (Http.http_501_method_not_implemented ()); + failwith "ws_proxy: not implemented" in ensure_proxy_running (); - let protocol = match protocol with + let protocol = match protocol with | `rfb -> "rfb" | `vt100 -> "vt100" | `rdp -> "rdp" in - let real_path = Filename.concat "/var/lib/xcp" "wsproxy" in - let sock = + let real_path = Filename.concat "/var/lib/xcp" "wsproxy" in + let sock = try Some (Fecomms.open_unix_domain_sock_client real_path) - with e -> + with e -> debug "Error connecting to wsproxy (%s)" (Printexc.to_string e); - Http_svr.headers s (Http.http_501_method_not_implemented ()); - None + Http_svr.headers s (Http.http_501_method_not_implemented ()); + None in (* Ensure we always close the socket *) - Pervasiveext.finally (fun () -> - let upgrade_successful = Opt.map (fun sock -> - try - let result = (sock,Some (Ws_helpers.upgrade req s)) in - result - with _ -> - (sock,None)) sock - in - - Opt.iter (function - | (sock,Some ty) -> begin - let wsprotocol = match ty with - | Ws_helpers.Hixie76 -> "hixie76" - | Ws_helpers.Hybi10 -> "hybi10" in - let message = Printf.sprintf "%s:%s:%d" wsprotocol protocol port in - let len = String.length message in - ignore(Unixext.send_fd sock message 0 len [] s) - end - | (sock,None) -> begin - Http_svr.headers s (Http.http_501_method_not_implemented ()) - end) upgrade_successful) + Pervasiveext.finally (fun () -> + let upgrade_successful = Opt.map (fun sock -> + try + let result = (sock,Some (Ws_helpers.upgrade req s)) in + result + with _ -> + (sock,None)) sock + in + + Opt.iter (function + | (sock,Some ty) -> begin + let wsprotocol = match ty with + | Ws_helpers.Hixie76 -> "hixie76" + | Ws_helpers.Hybi10 -> "hybi10" in + let message = Printf.sprintf "%s:%s:%d" wsprotocol protocol port in + let len = String.length message in + ignore(Unixext.send_fd sock message 0 len [] s) + end + | (sock,None) -> begin + 
Http_svr.headers s (Http.http_501_method_not_implemented ()) + end) upgrade_successful) (fun () -> - Opt.iter (fun sock -> Unix.close sock) sock) - - + Opt.iter (fun sock -> Unix.close sock) sock) + + -let default_console_of_vm ~__context ~self = +let default_console_of_vm ~__context ~self = try let consoles = Db.VM.get_consoles ~__context ~self in let protocols = List.map (fun self -> Db.Console.get_protocol ~__context ~self) consoles in @@ -153,15 +153,15 @@ let default_console_of_vm ~__context ~self = error "Failed to find default VNC console for VM"; raise Failure -let console_of_request __context req = +let console_of_request __context req = (* First check the request looks valid *) if not(List.mem_assoc "ref" req.Http.Request.query) && not(List.mem_assoc "uuid" req.Http.Request.query) then begin error "HTTP request for console forwarding lacked 'ref' or 'uuid' parameter"; raise Failure end; - let _ref = - if List.mem_assoc "uuid" req.Http.Request.query - then + let _ref = + if List.mem_assoc "uuid" req.Http.Request.query + then let uuid = List.assoc "uuid" req.Http.Request.query in (try Ref.string_of(Db.VM.get_by_uuid ~__context ~uuid) with _ -> Ref.string_of(Db.Console.get_by_uuid ~__context ~uuid)) @@ -171,40 +171,40 @@ let console_of_request __context req = default VNC console or it may be a console ref in which case we go for that. 
*) let db = Context.database_of __context in - let is_vm, is_console = - let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in - match DB.get_table_from_ref db _ref with - | Some c when c = Db_names.vm -> true, false - | Some c when c = Db_names.console -> false, true - | _ -> - error "%s is neither a VM ref or a console ref" _ref; - raise Failure in - - if is_vm then default_console_of_vm ~__context ~self:(Ref.of_string _ref) else (Ref.of_string _ref) - + let is_vm, is_console = + let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in + match DB.get_table_from_ref db _ref with + | Some c when c = Db_names.vm -> true, false + | Some c when c = Db_names.console -> false, true + | _ -> + error "%s is neither a VM ref or a console ref" _ref; + raise Failure in + + if is_vm then default_console_of_vm ~__context ~self:(Ref.of_string _ref) else (Ref.of_string _ref) + let rbac_check_for_control_domain __context (req:Request.t) console_id permission = - let is_control_domain = - let vm_id = Db.Console.get_VM ~__context ~self:console_id in - Db.VM.get_is_control_domain ~__context ~self:vm_id - in - if is_control_domain then - let extra_dmsg = Printf.sprintf "for host console %s" (Ref.string_of console_id) in - let session_id = Xapi_http.get_session_id req in - Rbac.check_with_new_task ~extra_dmsg session_id permission ~fn:Rbac.nofn - ~args:(Xapi_http.rbac_audit_params_of req) - -let check_vm_is_running_here __context console = + let is_control_domain = + let vm_id = Db.Console.get_VM ~__context ~self:console_id in + Db.VM.get_is_control_domain ~__context ~self:vm_id + in + if is_control_domain then + let extra_dmsg = Printf.sprintf "for host console %s" (Ref.string_of console_id) in + let session_id = Xapi_http.get_session_id req in + Rbac.check_with_new_task ~extra_dmsg session_id permission ~fn:Rbac.nofn + ~args:(Xapi_http.rbac_audit_params_of req) + +let check_vm_is_running_here __context console = let vm = Db.Console.get_VM ~__context 
~self:console in if Db.VM.get_power_state ~__context ~self:vm <> `Running then begin - error "VM %s (Console %s) has power_state <> Running" (Ref.string_of vm) (Ref.string_of console); - raise Failure + error "VM %s (Console %s) has power_state <> Running" (Ref.string_of vm) (Ref.string_of console); + raise Failure end; let localhost = Helpers.get_localhost ~__context in let resident_on = Db.VM.get_resident_on ~__context ~self:vm in if resident_on <> localhost then begin - error "VM %s (Console %s) has resident_on = %s <> localhost" (Ref.string_of vm) (Ref.string_of console) (Ref.string_of resident_on); - raise Failure + error "VM %s (Console %s) has resident_on = %s <> localhost" (Ref.string_of vm) (Ref.string_of console) (Ref.string_of resident_on); + raise Failure end (* GET /console_uri?ref=..... @@ -213,19 +213,19 @@ let handler proxy_fn (req: Request.t) s _ = req.Request.close <- true; Xapi_http.with_context "Connection to VM console" req s (fun __context -> - let console = console_of_request __context req in - (* only sessions with 'http/connect_console/host_console' permission *) - let protocol = Db.Console.get_protocol ~__context ~self:console in - (* can access dom0 host consoles *) - rbac_check_for_control_domain __context req console - Rbac_static.permission_http_connect_console_host_console.Db_actions.role_name_label; - - (* Check VM is actually running locally *) - check_vm_is_running_here __context console; - - match address_of_console __context console with - | Some vnc_port -> - proxy_fn __context req protocol vnc_port s - | None -> - Http_svr.headers s (Http.http_404_missing ()) - ) + let console = console_of_request __context req in + (* only sessions with 'http/connect_console/host_console' permission *) + let protocol = Db.Console.get_protocol ~__context ~self:console in + (* can access dom0 host consoles *) + rbac_check_for_control_domain __context req console + 
Rbac_static.permission_http_connect_console_host_console.Db_actions.role_name_label; + + (* Check VM is actually running locally *) + check_vm_is_running_here __context console; + + match address_of_console __context console with + | Some vnc_port -> + proxy_fn __context req protocol vnc_port s + | None -> + Http_svr.headers s (Http.http_404_missing ()) + ) diff --git a/ocaml/xapi/cpuid_helpers.ml b/ocaml/xapi/cpuid_helpers.ml index 2c726477b25..87475deef3a 100644 --- a/ocaml/xapi/cpuid_helpers.ml +++ b/ocaml/xapi/cpuid_helpers.ml @@ -21,31 +21,31 @@ module D=Debug.Make(struct let name="xapi" end) open D let string_of_features features = - Array.map (Printf.sprintf "%08Lx") features - |> Array.to_list - |> String.concat "-" + Array.map (Printf.sprintf "%08Lx") features + |> Array.to_list + |> String.concat "-" exception InvalidFeatureString of string let features_of_string str = - let scanf fmt s = Scanf.sscanf s fmt (fun x -> x) in - try Stringext.split ~on:'-' str - |> Array.of_list - |> Array.map (scanf "%08Lx%!") - with _ -> raise (InvalidFeatureString str) + let scanf fmt s = Scanf.sscanf s fmt (fun x -> x) in + try Stringext.split ~on:'-' str + |> Array.of_list + |> Array.map (scanf "%08Lx%!") + with _ -> raise (InvalidFeatureString str) (** If arr0 is shorter than arr1, extend arr0 with elements from arr1 up to the * length of arr1. Otherwise, truncate arr0 to the length of arr1. *) let extend arr0 arr1 = - let new_arr = Array.copy arr1 in - let len = min (Array.length arr0) (Array.length arr1) in - Array.blit arr0 0 new_arr 0 len; - new_arr + let new_arr = Array.copy arr1 in + let len = min (Array.length arr0) (Array.length arr1) in + Array.blit arr0 0 new_arr 0 len; + new_arr (** If arr is shorter than len elements, extend with zeros up to len elements. * Otherwise, truncate arr to len elements. 
*) let zero_extend arr len = - let zero_arr = Array.make len 0L in - extend arr zero_arr + let zero_arr = Array.make len 0L in + extend arr zero_arr (** Calculate the intersection of two feature sets. * Intersection with the empty set is treated as identity, so that intersection @@ -53,28 +53,28 @@ let zero_extend arr len = * If both sets are non-empty and of differing lengths, set is longer than the other, the shorter one is zero-extended to match it. * The returned set is the same length as the longer of the two arguments. *) let intersect left right = - match left, right with - | [| |], _ -> right - | _, [| |] -> left - | _, _ -> - let len_left = Array.length left in - let len_right = Array.length right in - - let out = Array.make (max len_left len_right) 0L in - - for i = 0 to (min len_left len_right) - 1 do - out.(i) <- Int64.logand left.(i) right.(i) - done; - out + match left, right with + | [| |], _ -> right + | _, [| |] -> left + | _, _ -> + let len_left = Array.length left in + let len_right = Array.length right in + + let out = Array.make (max len_left len_right) 0L in + + for i = 0 to (min len_left len_right) - 1 do + out.(i) <- Int64.logand left.(i) right.(i) + done; + out (** is_subset left right returns true if left is a subset of right *) let is_subset left right = - intersect left right = left + intersect left right = left -(** is_strict_subset left right returns true if left is a strict subset of right +(** is_strict_subset left right returns true if left is a strict subset of right (left is a subset of right, but left and right are not equal) *) let is_strict_subset left right = - (is_subset left right) && (left <> right) + (is_subset left right) && (left <> right) (** Field definitions for checked string map access *) let features_t = Map_check.pickler features_of_string string_of_features @@ -86,15 +86,15 @@ let socket_count = Map_check.(field "socket_count" int) let vendor = Map_check.(field "vendor" string) let get_flags_for_vm ~__context vm 
cpu_info = - let features_key = - if Helpers.will_boot_hvm ~__context ~self:vm then - cpu_info_features_hvm_key - else - cpu_info_features_pv_key - in - let vendor = List.assoc cpu_info_vendor_key cpu_info in - let features = List.assoc features_key cpu_info in - (vendor, features) + let features_key = + if Helpers.will_boot_hvm ~__context ~self:vm then + cpu_info_features_hvm_key + else + cpu_info_features_pv_key + in + let vendor = List.assoc cpu_info_vendor_key cpu_info in + let features = List.assoc features_key cpu_info in + (vendor, features) (** Upgrade a VM's feature set based on the host's one, if needed. * The output will be a feature set that is the same length as the host's @@ -104,107 +104,107 @@ let get_flags_for_vm ~__context vm cpu_info = * be certain about which host features it was using, so we'll extend the set * with all current host features. Otherwise we'll zero-extend. *) let upgrade_features ~__context ~vm host_features vm_features = - let len = Array.length vm_features in - let upgraded_features = - if len <= 4 then - let open Xapi_xenops_queue in - let dbg = Context.string_of_task __context in - let module Client = (val make_client (default_xenopsd ()): XENOPS) in - let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in - let vm_features' = Client.HOST.upgrade_cpu_features dbg vm_features is_hvm in - extend vm_features' host_features - else - zero_extend vm_features (Array.length host_features) - in - if vm_features <> upgraded_features then begin - debug "VM featureset upgraded from %s to %s" - (string_of_features vm_features) - (string_of_features upgraded_features); - end; - upgraded_features + let len = Array.length vm_features in + let upgraded_features = + if len <= 4 then + let open Xapi_xenops_queue in + let dbg = Context.string_of_task __context in + let module Client = (val make_client (default_xenopsd ()): XENOPS) in + let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in + let vm_features' = 
Client.HOST.upgrade_cpu_features dbg vm_features is_hvm in + extend vm_features' host_features + else + zero_extend vm_features (Array.length host_features) + in + if vm_features <> upgraded_features then begin + debug "VM featureset upgraded from %s to %s" + (string_of_features vm_features) + (string_of_features upgraded_features); + end; + upgraded_features let set_flags ~__context self vendor features = - let value = [ - cpu_info_vendor_key, vendor; - cpu_info_features_key, features; - ] in - debug "VM's CPU features set to: %s" features; - Db.VM.set_last_boot_CPU_flags ~__context ~self ~value + let value = [ + cpu_info_vendor_key, vendor; + cpu_info_features_key, features; + ] in + debug "VM's CPU features set to: %s" features; + Db.VM.set_last_boot_CPU_flags ~__context ~self ~value (* Reset last_boot_CPU_flags with the vendor and feature set. * On VM.start, the feature set is inherited from the pool level (PV or HVM) *) let reset_cpu_flags ~__context ~vm = - let pool_vendor, pool_features = - let pool = Helpers.get_pool ~__context in - let pool_cpu_info = Db.Pool.get_cpu_info ~__context ~self:pool in - get_flags_for_vm ~__context vm pool_cpu_info - in - set_flags ~__context vm pool_vendor pool_features - + let pool_vendor, pool_features = + let pool = Helpers.get_pool ~__context in + let pool_cpu_info = Db.Pool.get_cpu_info ~__context ~self:pool in + get_flags_for_vm ~__context vm pool_cpu_info + in + set_flags ~__context vm pool_vendor pool_features + (* Update last_boot_CPU_flags with the vendor and feature set. * On VM.resume or migrate, the field is kept intact, and upgraded if needed. 
*) let update_cpu_flags ~__context ~vm ~host = - let current_features = - let flags = Db.VM.get_last_boot_CPU_flags ~__context ~self:vm in - try - List.assoc cpu_info_features_key flags - with Not_found -> "" - in - debug "VM last boot CPU features: %s" current_features; - try - let host_vendor, host_features = - let host_cpu_info = Db.Host.get_cpu_info ~__context ~self:host in - get_flags_for_vm ~__context vm host_cpu_info - in - let new_features = upgrade_features ~__context ~vm - (features_of_string host_features) (features_of_string current_features) - |> string_of_features in - if new_features <> current_features then - set_flags ~__context vm host_vendor new_features - with Not_found -> - debug "Host does not have new levelling feature keys - not upgrading VM's flags" + let current_features = + let flags = Db.VM.get_last_boot_CPU_flags ~__context ~self:vm in + try + List.assoc cpu_info_features_key flags + with Not_found -> "" + in + debug "VM last boot CPU features: %s" current_features; + try + let host_vendor, host_features = + let host_cpu_info = Db.Host.get_cpu_info ~__context ~self:host in + get_flags_for_vm ~__context vm host_cpu_info + in + let new_features = upgrade_features ~__context ~vm + (features_of_string host_features) (features_of_string current_features) + |> string_of_features in + if new_features <> current_features then + set_flags ~__context vm host_vendor new_features + with Not_found -> + debug "Host does not have new levelling feature keys - not upgrading VM's flags" let get_host_compatibility_info ~__context ~vm ~host ~remote = - let cpu_info = - match remote with - | None -> Db.Host.get_cpu_info ~__context ~self:host - | Some (rpc, session_id) -> Client.Client.Host.get_cpu_info rpc session_id host - in - get_flags_for_vm ~__context vm cpu_info + let cpu_info = + match remote with + | None -> Db.Host.get_cpu_info ~__context ~self:host + | Some (rpc, session_id) -> Client.Client.Host.get_cpu_info rpc session_id host + in + 
get_flags_for_vm ~__context vm cpu_info (* Compare the CPU on which the given VM was last booted to the CPU of the given host. *) let assert_vm_is_compatible ~__context ~vm ~host ?remote () = - let fail msg = - raise (Api_errors.Server_error(Api_errors.vm_incompatible_with_this_host, - [Ref.string_of vm; Ref.string_of host; msg])) - in - if Db.VM.get_power_state ~__context ~self:vm <> `Halted then begin - try - let host_cpu_vendor, host_cpu_features = get_host_compatibility_info ~__context ~vm ~host ~remote in - let vm_cpu_info = Db.VM.get_last_boot_CPU_flags ~__context ~self:vm in - if List.mem_assoc cpu_info_vendor_key vm_cpu_info then begin - (* Check the VM was last booted on a CPU with the same vendor as this host's CPU. *) - let vm_cpu_vendor = List.assoc cpu_info_vendor_key vm_cpu_info in - debug "VM last booted on CPU of vendor %s; host CPUs are of vendor %s" vm_cpu_vendor host_cpu_vendor; - if vm_cpu_vendor <> host_cpu_vendor then - fail "VM last booted on a host which had a CPU from a different vendor." - end; - if List.mem_assoc cpu_info_features_key vm_cpu_info then begin - (* Check the VM was last booted on a CPU whose features are a subset of the features of this host's CPU. *) - let vm_cpu_features = List.assoc cpu_info_features_key vm_cpu_info in - debug "VM last booted on CPU with features %s; host CPUs have features %s" vm_cpu_features host_cpu_features; - let host_cpu_features' = host_cpu_features |> features_of_string in - let vm_cpu_features' = - vm_cpu_features - |> features_of_string - |> upgrade_features ~__context ~vm host_cpu_features' - in - if not (is_subset vm_cpu_features' host_cpu_features') then begin - debug "VM CPU features (%s) are not compatible with host CPU features (%s)\n" (string_of_features vm_cpu_features') (string_of_features host_cpu_features'); - fail "VM last booted on a CPU with features this host's CPU does not have." 
- end - end - with Not_found -> - debug "Host does not have new levelling feature keys - not comparing VM's flags" - end + let fail msg = + raise (Api_errors.Server_error(Api_errors.vm_incompatible_with_this_host, + [Ref.string_of vm; Ref.string_of host; msg])) + in + if Db.VM.get_power_state ~__context ~self:vm <> `Halted then begin + try + let host_cpu_vendor, host_cpu_features = get_host_compatibility_info ~__context ~vm ~host ~remote in + let vm_cpu_info = Db.VM.get_last_boot_CPU_flags ~__context ~self:vm in + if List.mem_assoc cpu_info_vendor_key vm_cpu_info then begin + (* Check the VM was last booted on a CPU with the same vendor as this host's CPU. *) + let vm_cpu_vendor = List.assoc cpu_info_vendor_key vm_cpu_info in + debug "VM last booted on CPU of vendor %s; host CPUs are of vendor %s" vm_cpu_vendor host_cpu_vendor; + if vm_cpu_vendor <> host_cpu_vendor then + fail "VM last booted on a host which had a CPU from a different vendor." + end; + if List.mem_assoc cpu_info_features_key vm_cpu_info then begin + (* Check the VM was last booted on a CPU whose features are a subset of the features of this host's CPU. *) + let vm_cpu_features = List.assoc cpu_info_features_key vm_cpu_info in + debug "VM last booted on CPU with features %s; host CPUs have features %s" vm_cpu_features host_cpu_features; + let host_cpu_features' = host_cpu_features |> features_of_string in + let vm_cpu_features' = + vm_cpu_features + |> features_of_string + |> upgrade_features ~__context ~vm host_cpu_features' + in + if not (is_subset vm_cpu_features' host_cpu_features') then begin + debug "VM CPU features (%s) are not compatible with host CPU features (%s)\n" (string_of_features vm_cpu_features') (string_of_features host_cpu_features'); + fail "VM last booted on a CPU with features this host's CPU does not have." 
+ end + end + with Not_found -> + debug "Host does not have new levelling feature keys - not comparing VM's flags" + end diff --git a/ocaml/xapi/create_misc.ml b/ocaml/xapi/create_misc.ml index a7d1f780e5f..291ff2a3345 100644 --- a/ocaml/xapi/create_misc.ml +++ b/ocaml/xapi/create_misc.ml @@ -13,7 +13,7 @@ *) (** Create miscellaneous DB records needed by both the real and fake servers. * @group Database Operations - *) +*) open Stdext open Fun @@ -29,20 +29,20 @@ module D=Debug.Make(struct let name="xapi" end) open D type host_info = { - name_label : string; - xen_verstring : string; - linux_verstring : string; - hostname : string; - uuid : string; - dom0_uuid : string; - oem_manufacturer : string option; - oem_model : string option; - oem_build_number : string option; - machine_serial_number: string option; - machine_serial_name: string option; - total_memory_mib: int64; - dom0_static_max: int64; - ssl_legacy: bool; + name_label : string; + xen_verstring : string; + linux_verstring : string; + hostname : string; + uuid : string; + dom0_uuid : string; + oem_manufacturer : string option; + oem_model : string option; + oem_build_number : string option; + machine_serial_number: string option; + machine_serial_name: string option; + total_memory_mib: int64; + dom0_static_max: int64; + ssl_legacy: bool; } (** The format of the response looks like @@ -52,15 +52,15 @@ type host_info = { * hp_1_1 | CHECKED * hp_2_1 | APPLIED * hp_3_2 | APPLIED *) -let make_xen_livepatch_list () = - let lines = try Xstringext.String.split '\n' (Helpers.get_process_output !Xapi_globs.xen_livepatch_list) with _ -> [] in - let patches = List.fold_left( - fun acc l -> - match List.map String.trim (Xstringext.String.split ~limit:2 '|' l) with - | [ key; "APPLIED" ] -> key :: acc - | _ -> acc; - )[] lines in - if List.length patches > 0 then Some(String.concat ", " patches) else None +let make_xen_livepatch_list () = + let lines = try Xstringext.String.split '\n' (Helpers.get_process_output 
!Xapi_globs.xen_livepatch_list) with _ -> [] in + let patches = List.fold_left( + fun acc l -> + match List.map String.trim (Xstringext.String.split ~limit:2 '|' l) with + | [ key; "APPLIED" ] -> key :: acc + | _ -> acc; + )[] lines in + if List.length patches > 0 then Some(String.concat ", " patches) else None (** The format of the response looks like * # kpatch list @@ -69,96 +69,96 @@ let make_xen_livepatch_list () = * kpatch_hp_2_1 * Installed patch modules: *) -let make_kpatch_list () = - let start_line = "Loaded patch modules:" in - let end_line = "Installed patch modules:" in - let lines = try Xstringext.String.split '\n' (Helpers.get_process_output !Xapi_globs.kpatch_list) with _ -> []in - let rec loop acc started = function - | [] -> acc - | line :: _ when line = end_line -> acc - | line :: rest when line = start_line -> loop acc true rest - | line :: rest -> - let line' = String.trim line in - if line' <> "" && started then - loop (line' :: acc) true rest - else - loop acc started rest - in - let patches = loop [] false lines in - if List.length patches > 0 then Some(String.concat ", " patches) else None +let make_kpatch_list () = + let start_line = "Loaded patch modules:" in + let end_line = "Installed patch modules:" in + let lines = try Xstringext.String.split '\n' (Helpers.get_process_output !Xapi_globs.kpatch_list) with _ -> []in + let rec loop acc started = function + | [] -> acc + | line :: _ when line = end_line -> acc + | line :: rest when line = start_line -> loop acc true rest + | line :: rest -> + let line' = String.trim line in + if line' <> "" && started then + loop (line' :: acc) true rest + else + loop acc started rest + in + let patches = loop [] false lines in + if List.length patches > 0 then Some(String.concat ", " patches) else None open Xstringext (* NB: this is dom0's view of the world, not Xen's. 
*) let read_dom0_memory_usage () = - try - let map = Balloon.parse_proc_xen_balloon () in - let lookup = fun x -> Opt.unbox (List.assoc x map) in - let keys = [Balloon._low_mem_balloon; Balloon._high_mem_balloon; Balloon._current_allocation] in - let values = List.map lookup keys in - let result = List.fold_left Int64.add 0L values in - Some (Int64.mul 1024L result) - with _ -> - None + try + let map = Balloon.parse_proc_xen_balloon () in + let lookup = fun x -> Opt.unbox (List.assoc x map) in + let keys = [Balloon._low_mem_balloon; Balloon._high_mem_balloon; Balloon._current_allocation] in + let values = List.map lookup keys in + let result = List.fold_left Int64.add 0L values in + Some (Int64.mul 1024L result) + with _ -> + None let read_localhost_info () = - let xen_verstring, total_memory_mib = - try - let xc = Xenctrl.interface_open () in - let v = Xenctrl.version xc in - Xenctrl.interface_close xc; - let open Xenctrl in - let xen_verstring = Printf.sprintf "%d.%d%s" v.major v.minor v.extra in - let total_memory_mib = - let open Xapi_xenops_queue in - let module Client = (val make_client (default_xenopsd ()) : XENOPS) in - Client.HOST.get_total_memory_mib "read_localhost_info" in - xen_verstring, total_memory_mib - with e -> - if Pool_role.is_unit_test () - then "0.0.0", 0L - else begin - warn "Failed to read xen version"; - match Balloon.get_memtotal () with - | None -> "unknown", 0L - | Some x -> "unknown", Int64.(div x (mul 1024L 1024L)) - end - and linux_verstring = - let verstring = ref "" in - let f line = - try verstring := List.nth (String.split ' ' line) 2 - with _ -> () in - Unixext.readfile_line f "/proc/version"; - !verstring - in - let me = Helpers.get_localhost_uuid () in - let lookup_inventory_nofail k = try Some (Xapi_inventory.lookup k) with _ -> None in - let this_host_name = Helpers.get_hostname() in - - let dom0_static_max = match read_dom0_memory_usage () with - | Some x -> x - | None -> - info "Failed to query balloon driver, assuming 
target = static_max"; - Int64.(mul total_memory_mib (mul 1024L 1024L)) in - { - name_label=this_host_name; - xen_verstring=xen_verstring; - linux_verstring=linux_verstring; - hostname=this_host_name; - uuid=me; - dom0_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid; - oem_manufacturer = lookup_inventory_nofail Xapi_inventory._oem_manufacturer; - oem_model = lookup_inventory_nofail Xapi_inventory._oem_model; - oem_build_number = lookup_inventory_nofail Xapi_inventory._oem_build_number; - machine_serial_number = lookup_inventory_nofail Xapi_inventory._machine_serial_number; - machine_serial_name = lookup_inventory_nofail Xapi_inventory._machine_serial_name; - total_memory_mib = total_memory_mib; - dom0_static_max = dom0_static_max; - ssl_legacy = try ( - bool_of_string ( - Xapi_inventory.lookup Xapi_inventory._stunnel_legacy ~default:"true") - ) with _ -> true; - } + let xen_verstring, total_memory_mib = + try + let xc = Xenctrl.interface_open () in + let v = Xenctrl.version xc in + Xenctrl.interface_close xc; + let open Xenctrl in + let xen_verstring = Printf.sprintf "%d.%d%s" v.major v.minor v.extra in + let total_memory_mib = + let open Xapi_xenops_queue in + let module Client = (val make_client (default_xenopsd ()) : XENOPS) in + Client.HOST.get_total_memory_mib "read_localhost_info" in + xen_verstring, total_memory_mib + with e -> + if Pool_role.is_unit_test () + then "0.0.0", 0L + else begin + warn "Failed to read xen version"; + match Balloon.get_memtotal () with + | None -> "unknown", 0L + | Some x -> "unknown", Int64.(div x (mul 1024L 1024L)) + end + and linux_verstring = + let verstring = ref "" in + let f line = + try verstring := List.nth (String.split ' ' line) 2 + with _ -> () in + Unixext.readfile_line f "/proc/version"; + !verstring + in + let me = Helpers.get_localhost_uuid () in + let lookup_inventory_nofail k = try Some (Xapi_inventory.lookup k) with _ -> None in + let this_host_name = Helpers.get_hostname() in + + let 
dom0_static_max = match read_dom0_memory_usage () with + | Some x -> x + | None -> + info "Failed to query balloon driver, assuming target = static_max"; + Int64.(mul total_memory_mib (mul 1024L 1024L)) in + { + name_label=this_host_name; + xen_verstring=xen_verstring; + linux_verstring=linux_verstring; + hostname=this_host_name; + uuid=me; + dom0_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid; + oem_manufacturer = lookup_inventory_nofail Xapi_inventory._oem_manufacturer; + oem_model = lookup_inventory_nofail Xapi_inventory._oem_model; + oem_build_number = lookup_inventory_nofail Xapi_inventory._oem_build_number; + machine_serial_number = lookup_inventory_nofail Xapi_inventory._machine_serial_number; + machine_serial_name = lookup_inventory_nofail Xapi_inventory._machine_serial_name; + total_memory_mib = total_memory_mib; + dom0_static_max = dom0_static_max; + ssl_legacy = try ( + bool_of_string ( + Xapi_inventory.lookup Xapi_inventory._stunnel_legacy ~default:"true") + ) with _ -> true; + } (** Returns the maximum of two values. *) let maximum x y = if x > y then x else y @@ -178,224 +178,224 @@ let (+++) = Int64.add (** It updates existing records if they are found, or else creates new *) (** records for any records that are missing. 
*) let rec ensure_domain_zero_records ~__context ~host (host_info: host_info) : unit = - maybe_upgrade_domain_zero_record ~__context ~host host_info; - let domain_zero_ref = ensure_domain_zero_record ~__context host_info in - ensure_domain_zero_console_record ~__context ~domain_zero_ref; - ensure_domain_zero_guest_metrics_record ~__context ~domain_zero_ref host_info; - ensure_domain_zero_shadow_record ~__context ~domain_zero_ref + maybe_upgrade_domain_zero_record ~__context ~host host_info; + let domain_zero_ref = ensure_domain_zero_record ~__context host_info in + ensure_domain_zero_console_record ~__context ~domain_zero_ref; + ensure_domain_zero_guest_metrics_record ~__context ~domain_zero_ref host_info; + ensure_domain_zero_shadow_record ~__context ~domain_zero_ref and maybe_upgrade_domain_zero_record ~__context ~host (host_info: host_info) = - try - let control_domain = Db.VM.get_by_uuid ~__context ~uuid:host_info.dom0_uuid in - if Db.Host.get_control_domain ~__context ~self:host = Ref.null then begin - debug "Setting control domain for host %s to %s" - (Ref.string_of host) (Ref.string_of control_domain); - Db.Host.set_control_domain ~__context ~self:host ~value:control_domain; - end - with Db_exn.Read_missing_uuid(_) -> () + try + let control_domain = Db.VM.get_by_uuid ~__context ~uuid:host_info.dom0_uuid in + if Db.Host.get_control_domain ~__context ~self:host = Ref.null then begin + debug "Setting control domain for host %s to %s" + (Ref.string_of host) (Ref.string_of control_domain); + Db.Host.set_control_domain ~__context ~self:host ~value:control_domain; + end + with Db_exn.Read_missing_uuid(_) -> () and ensure_domain_zero_record ~__context (host_info: host_info): [`VM] Ref.t = - let ref_lookup () = Helpers.get_domain_zero ~__context in - let ref_create () = Ref.make () in - let (domain_zero_ref, found) = - try ref_lookup (), true - with _ -> ref_create (), false in - if found - then update_domain_zero_record ~__context ~domain_zero_ref host_info - else 
create_domain_zero_record ~__context ~domain_zero_ref host_info; - domain_zero_ref + let ref_lookup () = Helpers.get_domain_zero ~__context in + let ref_create () = Ref.make () in + let (domain_zero_ref, found) = + try ref_lookup (), true + with _ -> ref_create (), false in + if found + then update_domain_zero_record ~__context ~domain_zero_ref host_info + else create_domain_zero_record ~__context ~domain_zero_ref host_info; + domain_zero_ref and ensure_domain_zero_console_record ~__context ~domain_zero_ref : unit = - let dom0_consoles = Db.VM.get_consoles ~__context ~self: domain_zero_ref in - let console_records_rfb = List.filter (fun x -> Db.Console.get_protocol ~__context ~self:x = `rfb) dom0_consoles in - let console_records_vt100 = List.filter (fun x -> Db.Console.get_protocol ~__context ~self:x = `vt100) dom0_consoles in - - match console_records_rfb, console_records_vt100 with - | [rfb], [vt100] -> - debug "1 RFB, 1 VT100 console found... ensuring correct port numbers"; - Db.Console.set_port ~__context ~self:rfb ~value:Xapi_globs.host_console_vncport; - Db.Console.set_port ~__context ~self:vt100 ~value:Xapi_globs.host_console_textport; - | _ -> - (* if there's not more than one console of each type then something strange is happening*) - create_domain_zero_console_record ~__context ~domain_zero_ref ~console_records_rfb ~console_records_vt100; + let dom0_consoles = Db.VM.get_consoles ~__context ~self: domain_zero_ref in + let console_records_rfb = List.filter (fun x -> Db.Console.get_protocol ~__context ~self:x = `rfb) dom0_consoles in + let console_records_vt100 = List.filter (fun x -> Db.Console.get_protocol ~__context ~self:x = `vt100) dom0_consoles in + + match console_records_rfb, console_records_vt100 with + | [rfb], [vt100] -> + debug "1 RFB, 1 VT100 console found... 
ensuring correct port numbers"; + Db.Console.set_port ~__context ~self:rfb ~value:Xapi_globs.host_console_vncport; + Db.Console.set_port ~__context ~self:vt100 ~value:Xapi_globs.host_console_textport; + | _ -> + (* if there's not more than one console of each type then something strange is happening*) + create_domain_zero_console_record ~__context ~domain_zero_ref ~console_records_rfb ~console_records_vt100; and ensure_domain_zero_guest_metrics_record ~__context ~domain_zero_ref (host_info: host_info) : unit = - if not (Db.is_valid_ref __context (Db.VM.get_metrics ~__context ~self:domain_zero_ref)) then - begin - debug "Domain 0 record does not have associated guest metrics record. Creating now"; - let metrics_ref = Ref.make() in - create_domain_zero_guest_metrics_record ~__context ~domain_zero_metrics_ref:metrics_ref ~memory_constraints:(create_domain_zero_memory_constraints host_info) - ~vcpus:(calculate_domain_zero_vcpu_count ~__context); - Db.VM.set_metrics ~__context ~self:domain_zero_ref ~value:metrics_ref - end + if not (Db.is_valid_ref __context (Db.VM.get_metrics ~__context ~self:domain_zero_ref)) then + begin + debug "Domain 0 record does not have associated guest metrics record. Creating now"; + let metrics_ref = Ref.make() in + create_domain_zero_guest_metrics_record ~__context ~domain_zero_metrics_ref:metrics_ref ~memory_constraints:(create_domain_zero_memory_constraints host_info) + ~vcpus:(calculate_domain_zero_vcpu_count ~__context); + Db.VM.set_metrics ~__context ~self:domain_zero_ref ~value:metrics_ref + end and ensure_domain_zero_shadow_record ~__context ~domain_zero_ref : unit = - (* Always create a new shadow record. *) - let domain_zero_record = Db.VM.get_record ~__context ~self:domain_zero_ref in - Helpers.set_boot_record ~__context ~self:domain_zero_ref domain_zero_record + (* Always create a new shadow record. 
*) + let domain_zero_record = Db.VM.get_record ~__context ~self:domain_zero_ref in + Helpers.set_boot_record ~__context ~self:domain_zero_ref domain_zero_record and create_domain_zero_record ~__context ~domain_zero_ref (host_info: host_info) : unit = - (* Determine domain 0 memory constraints. *) - let memory = create_domain_zero_memory_constraints host_info in - (* Determine information about the host machine. *) - let domarch = - let i = Int64.of_nativeint (Int64.to_nativeint 0xffffffffL) in - if i > 0L then "x64" else "x32" in - let localhost = Helpers.get_localhost ~__context in - (* Read the control domain uuid from the inventory file *) - let uuid = host_info.dom0_uuid in - (* FIXME: Assume dom0 has 1 vCPU per Host_cpu for now *) - let vcpus = calculate_domain_zero_vcpu_count ~__context in - let metrics = Ref.make () in - (* Now create the database record. *) - Db.VM.create ~__context ~ref:domain_zero_ref - ~name_label:("Control domain on host: " ^ host_info.hostname) ~uuid - ~name_description:"The domain which manages physical devices and manages other domains" - ~hVM_boot_policy:"" ~hVM_boot_params:[] ~hVM_shadow_multiplier:1. 
~platform:[] ~pCI_bus:"" - ~pV_args:"" ~pV_ramdisk:"" ~pV_kernel:"" ~pV_bootloader:"" ~pV_bootloader_args:"" ~pV_legacy_args:"" - ~actions_after_crash:`destroy ~actions_after_reboot:`destroy ~actions_after_shutdown:`destroy - ~allowed_operations:[] ~current_operations:[] ~blocked_operations:[] ~power_state:`Running - ~vCPUs_max:(Int64.of_int vcpus) ~vCPUs_at_startup:(Int64.of_int vcpus) ~vCPUs_params:[] - ~memory_overhead:0L - ~memory_static_min:memory.static_min ~memory_dynamic_min:memory.dynamic_min ~memory_target:memory.target - ~memory_static_max:memory.static_max ~memory_dynamic_max:memory.dynamic_max - ~resident_on:localhost ~scheduled_to_be_resident_on:Ref.null ~affinity:localhost ~suspend_VDI:Ref.null - ~is_control_domain:true ~is_a_template:false ~domid:0L ~domarch - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null ~transportable_snapshot_id:"" - ~snapshot_info:[] ~snapshot_metadata:"" - ~parent:Ref.null - ~other_config:[] ~blobs:[] ~xenstore_data:[] ~tags:[] ~user_version:1L - ~ha_restart_priority:"" ~ha_always_run:false ~recommendations:"" - ~last_boot_CPU_flags:[] ~last_booted_record:"" - ~guest_metrics:Ref.null ~metrics - ~bios_strings:[] ~protection_policy:Ref.null - ~is_snapshot_from_vmpp:false - ~appliance:Ref.null - ~start_delay:0L - ~shutdown_delay:0L - ~order:0L - ~suspend_SR:Ref.null - ~version:0L - ~generation_id:"" - ~hardware_platform_version:0L - ~has_vendor_device:false - ~requires_reboot:false - ; - Db.Host.set_control_domain ~__context ~self:localhost ~value:domain_zero_ref; - Xapi_vm_helpers.update_memory_overhead ~__context ~vm:domain_zero_ref + (* Determine domain 0 memory constraints. *) + let memory = create_domain_zero_memory_constraints host_info in + (* Determine information about the host machine. 
*) + let domarch = + let i = Int64.of_nativeint (Int64.to_nativeint 0xffffffffL) in + if i > 0L then "x64" else "x32" in + let localhost = Helpers.get_localhost ~__context in + (* Read the control domain uuid from the inventory file *) + let uuid = host_info.dom0_uuid in + (* FIXME: Assume dom0 has 1 vCPU per Host_cpu for now *) + let vcpus = calculate_domain_zero_vcpu_count ~__context in + let metrics = Ref.make () in + (* Now create the database record. *) + Db.VM.create ~__context ~ref:domain_zero_ref + ~name_label:("Control domain on host: " ^ host_info.hostname) ~uuid + ~name_description:"The domain which manages physical devices and manages other domains" + ~hVM_boot_policy:"" ~hVM_boot_params:[] ~hVM_shadow_multiplier:1. ~platform:[] ~pCI_bus:"" + ~pV_args:"" ~pV_ramdisk:"" ~pV_kernel:"" ~pV_bootloader:"" ~pV_bootloader_args:"" ~pV_legacy_args:"" + ~actions_after_crash:`destroy ~actions_after_reboot:`destroy ~actions_after_shutdown:`destroy + ~allowed_operations:[] ~current_operations:[] ~blocked_operations:[] ~power_state:`Running + ~vCPUs_max:(Int64.of_int vcpus) ~vCPUs_at_startup:(Int64.of_int vcpus) ~vCPUs_params:[] + ~memory_overhead:0L + ~memory_static_min:memory.static_min ~memory_dynamic_min:memory.dynamic_min ~memory_target:memory.target + ~memory_static_max:memory.static_max ~memory_dynamic_max:memory.dynamic_max + ~resident_on:localhost ~scheduled_to_be_resident_on:Ref.null ~affinity:localhost ~suspend_VDI:Ref.null + ~is_control_domain:true ~is_a_template:false ~domid:0L ~domarch + ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null ~transportable_snapshot_id:"" + ~snapshot_info:[] ~snapshot_metadata:"" + ~parent:Ref.null + ~other_config:[] ~blobs:[] ~xenstore_data:[] ~tags:[] ~user_version:1L + ~ha_restart_priority:"" ~ha_always_run:false ~recommendations:"" + ~last_boot_CPU_flags:[] ~last_booted_record:"" + ~guest_metrics:Ref.null ~metrics + ~bios_strings:[] ~protection_policy:Ref.null + ~is_snapshot_from_vmpp:false + 
~appliance:Ref.null + ~start_delay:0L + ~shutdown_delay:0L + ~order:0L + ~suspend_SR:Ref.null + ~version:0L + ~generation_id:"" + ~hardware_platform_version:0L + ~has_vendor_device:false + ~requires_reboot:false + ; + Db.Host.set_control_domain ~__context ~self:localhost ~value:domain_zero_ref; + Xapi_vm_helpers.update_memory_overhead ~__context ~vm:domain_zero_ref and create_domain_zero_console_record_with_protocol ~__context ~domain_zero_ref ~dom0_console_protocol = - let console_ref = Ref.make () in - let address = Db.Host.get_address ~__context ~self: (Helpers.get_localhost ~__context) in - let location = Printf.sprintf "https://%s%s?ref=%s" address Constants.console_uri (Ref.string_of domain_zero_ref) in - let port = match dom0_console_protocol with - |`rfb -> Xapi_globs.host_console_vncport - |`vt100 -> Xapi_globs.host_console_textport in - Db.Console.create ~__context ~ref: console_ref - ~uuid: (Uuid.to_string (Uuid.make_uuid ())) - ~protocol: dom0_console_protocol - ~location - ~vM: domain_zero_ref - ~other_config:[] - ~port + let console_ref = Ref.make () in + let address = Db.Host.get_address ~__context ~self: (Helpers.get_localhost ~__context) in + let location = Printf.sprintf "https://%s%s?ref=%s" address Constants.console_uri (Ref.string_of domain_zero_ref) in + let port = match dom0_console_protocol with + |`rfb -> Xapi_globs.host_console_vncport + |`vt100 -> Xapi_globs.host_console_textport in + Db.Console.create ~__context ~ref: console_ref + ~uuid: (Uuid.to_string (Uuid.make_uuid ())) + ~protocol: dom0_console_protocol + ~location + ~vM: domain_zero_ref + ~other_config:[] + ~port and create_domain_zero_console_record ~__context ~domain_zero_ref ~console_records_rfb ~console_records_vt100 = - if List.length console_records_rfb <> 1 - then begin - List.iter (fun console -> Db.Console.destroy ~__context ~self: console ) console_records_rfb ; - create_domain_zero_console_record_with_protocol ~__context ~domain_zero_ref ~dom0_console_protocol: `rfb ; - 
end; - if List.length console_records_vt100 <> 1 - then begin - List.iter (fun console -> Db.Console.destroy ~__context ~self: console ) console_records_vt100 ; - create_domain_zero_console_record_with_protocol ~__context ~domain_zero_ref ~dom0_console_protocol: `vt100 ; - end + if List.length console_records_rfb <> 1 + then begin + List.iter (fun console -> Db.Console.destroy ~__context ~self: console ) console_records_rfb ; + create_domain_zero_console_record_with_protocol ~__context ~domain_zero_ref ~dom0_console_protocol: `rfb ; + end; + if List.length console_records_vt100 <> 1 + then begin + List.iter (fun console -> Db.Console.destroy ~__context ~self: console ) console_records_vt100 ; + create_domain_zero_console_record_with_protocol ~__context ~domain_zero_ref ~dom0_console_protocol: `vt100 ; + end and create_domain_zero_guest_metrics_record ~__context ~domain_zero_metrics_ref ~memory_constraints ~vcpus : unit = - let rec mkints = function - | 0 -> [] - | n -> (mkints (n - 1) @ [n]) in - Db.VM_metrics.create - ~__context - ~ref:domain_zero_metrics_ref - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~memory_actual: memory_constraints.target - ~vCPUs_utilisation:(List.map (fun x -> Int64.of_int x, 0.) (mkints vcpus)) - ~vCPUs_number:(Int64.of_int vcpus) - ~vCPUs_CPU:[] - ~vCPUs_params:[] - ~vCPUs_flags:[] - ~state:[] - ~start_time:Date.never - ~install_time:Date.never - ~last_updated:Date.never - ~other_config:[] - ~hvm:false - ~nomigrate:false - ~nested_virt:false - ; + let rec mkints = function + | 0 -> [] + | n -> (mkints (n - 1) @ [n]) in + Db.VM_metrics.create + ~__context + ~ref:domain_zero_metrics_ref + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~memory_actual: memory_constraints.target + ~vCPUs_utilisation:(List.map (fun x -> Int64.of_int x, 0.) 
(mkints vcpus)) + ~vCPUs_number:(Int64.of_int vcpus) + ~vCPUs_CPU:[] + ~vCPUs_params:[] + ~vCPUs_flags:[] + ~state:[] + ~start_time:Date.never + ~install_time:Date.never + ~last_updated:Date.never + ~other_config:[] + ~hvm:false + ~nomigrate:false + ~nested_virt:false + ; and update_domain_zero_record ~__context ~domain_zero_ref (host_info: host_info) : unit = - (* Write the updated memory constraints to the database, if the VM is not - marked as requiring reboot. *) - let constraints_in_db = Vm_memory_constraints.get ~__context ~vm_ref:domain_zero_ref in - let constraints = create_domain_zero_memory_constraints host_info in - if not (Xapi_host_helpers.Host_requires_reboot.get ()) then begin - let constraints = - (* Only update static_min if it is unset (i.e. 0) *) - if constraints_in_db.static_min > 0L then - {constraints with static_min = constraints_in_db.static_min} - else - constraints - in - Vm_memory_constraints.set ~__context ~vm_ref:domain_zero_ref ~constraints; - Db.VM.set_requires_reboot ~__context ~self:domain_zero_ref ~value:false - end; - let localhost = Helpers.get_localhost ~__context in - Helpers.update_domain_zero_name ~__context localhost host_info.hostname + (* Write the updated memory constraints to the database, if the VM is not + marked as requiring reboot. *) + let constraints_in_db = Vm_memory_constraints.get ~__context ~vm_ref:domain_zero_ref in + let constraints = create_domain_zero_memory_constraints host_info in + if not (Xapi_host_helpers.Host_requires_reboot.get ()) then begin + let constraints = + (* Only update static_min if it is unset (i.e. 
0) *) + if constraints_in_db.static_min > 0L then + {constraints with static_min = constraints_in_db.static_min} + else + constraints + in + Vm_memory_constraints.set ~__context ~vm_ref:domain_zero_ref ~constraints; + Db.VM.set_requires_reboot ~__context ~self:domain_zero_ref ~value:false + end; + let localhost = Helpers.get_localhost ~__context in + Helpers.update_domain_zero_name ~__context localhost host_info.hostname and create_domain_zero_memory_constraints (host_info: host_info) : Vm_memory_constraints.t = - try - match Memory_client.Client.get_domain_zero_policy "create_misc" with - | Memory_interface.Fixed_size x -> - { - static_min = x; static_max = x; - dynamic_min = x; dynamic_max = x; - target = x; - } - | Memory_interface.Auto_balloon(low, high) -> - { - static_min = low; static_max = high; - dynamic_min = low; dynamic_max = high; - target = high; - } - with e -> - if Pool_role.is_unit_test () - then - { - static_min = 0L; static_max = 0L; - dynamic_min = 0L; dynamic_max = 0L; - target = 0L; - } - else raise e + try + match Memory_client.Client.get_domain_zero_policy "create_misc" with + | Memory_interface.Fixed_size x -> + { + static_min = x; static_max = x; + dynamic_min = x; dynamic_max = x; + target = x; + } + | Memory_interface.Auto_balloon(low, high) -> + { + static_min = low; static_max = high; + dynamic_min = low; dynamic_max = high; + target = high; + } + with e -> + if Pool_role.is_unit_test () + then + { + static_min = 0L; static_max = 0L; + dynamic_min = 0L; dynamic_max = 0L; + target = 0L; + } + else raise e and calculate_domain_zero_vcpu_count ~__context : int = - List.length (Db.Host.get_host_CPUs ~__context ~self:(Helpers.get_localhost ~__context)) + List.length (Db.Host.get_host_CPUs ~__context ~self:(Helpers.get_localhost ~__context)) open Db_filter (** Create a record for the "root" user if it doesn't exist already *) let create_root_user ~__context = - let fullname = "superuser" - and short_name = "root" - and uuid = Uuid.to_string 
(Uuid.make_uuid ()) - and ref = Ref.make () in + let fullname = "superuser" + and short_name = "root" + and uuid = Uuid.to_string (Uuid.make_uuid ()) + and ref = Ref.make () in - let all = Db.User.get_records_where ~__context ~expr:(Eq(Field "short_name", Literal short_name)) in - if all = [] then Db.User.create ~__context ~ref ~fullname ~short_name ~uuid ~other_config:[] + let all = Db.User.get_records_where ~__context ~expr:(Eq(Field "short_name", Literal short_name)) in + if all = [] then Db.User.create ~__context ~ref ~fullname ~short_name ~uuid ~other_config:[] let get_xapi_verstring () = - Printf.sprintf "%d.%d" Xapi_globs.version_major Xapi_globs.version_minor + Printf.sprintf "%d.%d" Xapi_globs.version_major Xapi_globs.version_minor (** Create assoc list of Supplemental-Pack information. * The package information is taking from the [XS-REPOSITORY] XML file in the package @@ -408,246 +408,246 @@ let get_xapi_verstring () = * when the linux pack (now [xs:linux]) is present (alongside the new key). * The [package-linux] key is now deprecated and will be removed in the next version. 
*) let make_packs_info () = - try - let packs = Sys.readdir !Xapi_globs.packs_dir in - let get_pack_details fname = - try - let xml = Xml.parse_file (!Xapi_globs.packs_dir ^ "/" ^ fname ^ "/XS-REPOSITORY") in - match xml with - | Xml.Element (name, attr, children) -> - let originator = List.assoc "originator" attr in - let name = List.assoc "name" attr in - let version = List.assoc "version" attr in - let build = - if List.mem_assoc "build" attr then Some (List.assoc "build" attr) - else None - in - let homogeneous = - if List.mem_assoc "enforce-homogeneity" attr && - (List.assoc "enforce-homogeneity" attr) = "true" then true - else false - in - let description = match children with - | Xml.Element(_, _, (Xml.PCData s) :: _) :: _ -> s - | _ -> failwith "error with parsing pack data" - in - let param_name = originator ^ ":" ^ name in - let value = description ^ ", version " ^ version ^ - (match build with - | Some build -> ", build " ^ build - | None -> "") ^ - (if homogeneous then ", homogeneous" - else "") - in - let kv = [(param_name, value)] in - if originator = "xs" && name = "linux" then - (* CA-29040: put old linux-pack key in there for backwards compatibility *) - ["package-linux", "installed"] @ kv - else - kv - | _ -> failwith "error while parsing pack data!" - with _ -> debug "error while parsing pack data for %s!" 
fname; [] - in - Array.fold_left (fun l fname -> get_pack_details fname @ l) [] packs - with _ -> [] + try + let packs = Sys.readdir !Xapi_globs.packs_dir in + let get_pack_details fname = + try + let xml = Xml.parse_file (!Xapi_globs.packs_dir ^ "/" ^ fname ^ "/XS-REPOSITORY") in + match xml with + | Xml.Element (name, attr, children) -> + let originator = List.assoc "originator" attr in + let name = List.assoc "name" attr in + let version = List.assoc "version" attr in + let build = + if List.mem_assoc "build" attr then Some (List.assoc "build" attr) + else None + in + let homogeneous = + if List.mem_assoc "enforce-homogeneity" attr && + (List.assoc "enforce-homogeneity" attr) = "true" then true + else false + in + let description = match children with + | Xml.Element(_, _, (Xml.PCData s) :: _) :: _ -> s + | _ -> failwith "error with parsing pack data" + in + let param_name = originator ^ ":" ^ name in + let value = description ^ ", version " ^ version ^ + (match build with + | Some build -> ", build " ^ build + | None -> "") ^ + (if homogeneous then ", homogeneous" + else "") + in + let kv = [(param_name, value)] in + if originator = "xs" && name = "linux" then + (* CA-29040: put old linux-pack key in there for backwards compatibility *) + ["package-linux", "installed"] @ kv + else + kv + | _ -> failwith "error while parsing pack data!" + with _ -> debug "error while parsing pack data for %s!" 
fname; [] + in + Array.fold_left (fun l fname -> get_pack_details fname @ l) [] packs + with _ -> [] (** Create a complete assoc list of version information *) let make_software_version ~__context = - let dbg = Context.string_of_task __context in - let option_to_list k o = match o with None -> [] | Some x -> [ k, x ] in - let info = read_localhost_info () in - let v6_version = - (* Best-effort attempt to read the date-based version from v6d *) - try - match V6client.get_version "make_software_version" with - | "" -> [] - | dbv -> ["dbv", dbv] - with Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> - [] - in - Xapi_globs.software_version () @ - v6_version @ - [ - "xapi", get_xapi_verstring (); - "xen", info.xen_verstring; - "linux", info.linux_verstring; - "xencenter_min", Xapi_globs.xencenter_min_verstring; - "xencenter_max", Xapi_globs.xencenter_max_verstring; - "network_backend", Network_interface.string_of_kind (Net.Bridge.get_kind dbg ()); - ] @ - (option_to_list "oem_manufacturer" info.oem_manufacturer) @ - (option_to_list "oem_model" info.oem_model) @ - (option_to_list "oem_build_number" info.oem_build_number) @ - (option_to_list "machine_serial_number" info.machine_serial_number) @ - (option_to_list "machine_serial_name" info.machine_serial_name) @ - (option_to_list "xen_livepatches" (make_xen_livepatch_list ())) @ - (option_to_list "kernel_livepatches" (make_kpatch_list ())) @ - make_packs_info () + let dbg = Context.string_of_task __context in + let option_to_list k o = match o with None -> [] | Some x -> [ k, x ] in + let info = read_localhost_info () in + let v6_version = + (* Best-effort attempt to read the date-based version from v6d *) + try + match V6client.get_version "make_software_version" with + | "" -> [] + | dbv -> ["dbv", dbv] + with Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> + [] + in + Xapi_globs.software_version () @ + v6_version @ + [ + "xapi", get_xapi_verstring (); + "xen", 
info.xen_verstring; + "linux", info.linux_verstring; + "xencenter_min", Xapi_globs.xencenter_min_verstring; + "xencenter_max", Xapi_globs.xencenter_max_verstring; + "network_backend", Network_interface.string_of_kind (Net.Bridge.get_kind dbg ()); + ] @ + (option_to_list "oem_manufacturer" info.oem_manufacturer) @ + (option_to_list "oem_model" info.oem_model) @ + (option_to_list "oem_build_number" info.oem_build_number) @ + (option_to_list "machine_serial_number" info.machine_serial_number) @ + (option_to_list "machine_serial_name" info.machine_serial_name) @ + (option_to_list "xen_livepatches" (make_xen_livepatch_list ())) @ + (option_to_list "kernel_livepatches" (make_kpatch_list ())) @ + make_packs_info () let create_software_version ~__context = - let software_version = make_software_version ~__context in - let host = Helpers.get_localhost ~__context in - Db.Host.set_software_version ~__context ~self:host ~value:software_version + let software_version = make_software_version ~__context in + let host = Helpers.get_localhost ~__context in + Db.Host.set_software_version ~__context ~self:host ~value:software_version let create_host_cpu ~__context = - let open Xapi_xenops_queue in - let open Map_check in - let open Cpuid_helpers in - let module Client = (val make_client (default_xenopsd ()) : XENOPS) in - let dbg = Context.string_of_task __context in - let stat = Client.HOST.stat dbg in - - let open Xenops_interface.Host in - let cpu = [ - "cpu_count", string_of_int stat.cpu_info.cpu_count; - "socket_count", string_of_int stat.cpu_info.socket_count; - "vendor", stat.cpu_info.vendor; - "speed", stat.cpu_info.speed; - "modelname", stat.cpu_info.modelname; - "family", stat.cpu_info.family; - "model", stat.cpu_info.model; - "stepping", stat.cpu_info.stepping; - "flags", stat.cpu_info.flags; - (* To support VMs migrated from hosts which do not support CPU levelling v2, - set the "features" key to what it would be on such hosts. 
*) - "features", Cpuid_helpers.string_of_features stat.cpu_info.features_oldstyle; - "features_pv", Cpuid_helpers.string_of_features stat.cpu_info.features_pv; - "features_hvm", Cpuid_helpers.string_of_features stat.cpu_info.features_hvm; - ] in - let host = Helpers.get_localhost ~__context in - let old_cpu_info = Db.Host.get_cpu_info ~__context ~self:host in - debug "create_host_cpuinfo: setting host cpuinfo: socket_count=%d, cpu_count=%d, features_hvm=%s, features_pv=%s" - (Map_check.getf Cpuid_helpers.socket_count cpu) - (Map_check.getf Cpuid_helpers.cpu_count cpu) - (Map_check.getf Cpuid_helpers.features_hvm cpu |> string_of_features) - (Map_check.getf Cpuid_helpers.features_pv cpu |> string_of_features); - Db.Host.set_cpu_info ~__context ~self:host ~value:cpu; - - let before = getf ~default:[||] features_hvm old_cpu_info in - let after = stat.cpu_info.features_hvm in - if before <> after && before <> [||] then begin - let lost = is_strict_subset (intersect before after) before in - let gained = is_strict_subset (intersect before after) after in - let body = Printf.sprintf "The CPU features of host have changed.%s%s" - (if lost then " Some features have gone away." else "") - (if gained then " Some features were added." else "") - in - info "%s" body; - - if not (Helpers.rolling_upgrade_in_progress ~__context) then - let (name, priority) = if lost then Api_messages.host_cpu_features_down else Api_messages.host_cpu_features_up in - let obj_uuid = Db.Host.get_uuid ~__context ~self:host in - Helpers.call_api_functions ~__context (fun rpc session_id -> - ignore (XenAPI.Message.create rpc session_id name priority `Host obj_uuid body) - ) - end; - - (* Recreate all Host_cpu objects *) - - (* Not all /proc/cpuinfo files contain MHz information. 
*) - let speed = try Int64.of_float (float_of_string stat.cpu_info.speed) with _ -> 0L in - let model = try Int64.of_string stat.cpu_info.model with _ -> 0L in - let family = try Int64.of_string stat.cpu_info.family with _ -> 0L in - - (* Recreate all Host_cpu objects *) - let host_cpus = List.filter (fun (_, s) -> s.API.host_cpu_host = host) (Db.Host_cpu.get_all_records ~__context) in - List.iter (fun (r, _) -> Db.Host_cpu.destroy ~__context ~self:r) host_cpus; - for i = 0 to stat.cpu_info.cpu_count - 1 - do - let uuid = Uuid.to_string (Uuid.make_uuid ()) - and ref = Ref.make () in - debug "Creating CPU %d: %s" i uuid; - ignore (Db.Host_cpu.create ~__context ~ref ~uuid ~host ~number:(Int64.of_int i) - ~vendor:stat.cpu_info.vendor ~speed ~modelname:stat.cpu_info.modelname - ~utilisation:0. ~flags:stat.cpu_info.flags ~stepping:stat.cpu_info.stepping ~model ~family - ~features:"" ~other_config:[]) - done + let open Xapi_xenops_queue in + let open Map_check in + let open Cpuid_helpers in + let module Client = (val make_client (default_xenopsd ()) : XENOPS) in + let dbg = Context.string_of_task __context in + let stat = Client.HOST.stat dbg in + + let open Xenops_interface.Host in + let cpu = [ + "cpu_count", string_of_int stat.cpu_info.cpu_count; + "socket_count", string_of_int stat.cpu_info.socket_count; + "vendor", stat.cpu_info.vendor; + "speed", stat.cpu_info.speed; + "modelname", stat.cpu_info.modelname; + "family", stat.cpu_info.family; + "model", stat.cpu_info.model; + "stepping", stat.cpu_info.stepping; + "flags", stat.cpu_info.flags; + (* To support VMs migrated from hosts which do not support CPU levelling v2, + set the "features" key to what it would be on such hosts. 
*) + "features", Cpuid_helpers.string_of_features stat.cpu_info.features_oldstyle; + "features_pv", Cpuid_helpers.string_of_features stat.cpu_info.features_pv; + "features_hvm", Cpuid_helpers.string_of_features stat.cpu_info.features_hvm; + ] in + let host = Helpers.get_localhost ~__context in + let old_cpu_info = Db.Host.get_cpu_info ~__context ~self:host in + debug "create_host_cpuinfo: setting host cpuinfo: socket_count=%d, cpu_count=%d, features_hvm=%s, features_pv=%s" + (Map_check.getf Cpuid_helpers.socket_count cpu) + (Map_check.getf Cpuid_helpers.cpu_count cpu) + (Map_check.getf Cpuid_helpers.features_hvm cpu |> string_of_features) + (Map_check.getf Cpuid_helpers.features_pv cpu |> string_of_features); + Db.Host.set_cpu_info ~__context ~self:host ~value:cpu; + + let before = getf ~default:[||] features_hvm old_cpu_info in + let after = stat.cpu_info.features_hvm in + if before <> after && before <> [||] then begin + let lost = is_strict_subset (intersect before after) before in + let gained = is_strict_subset (intersect before after) after in + let body = Printf.sprintf "The CPU features of host have changed.%s%s" + (if lost then " Some features have gone away." else "") + (if gained then " Some features were added." else "") + in + info "%s" body; + + if not (Helpers.rolling_upgrade_in_progress ~__context) then + let (name, priority) = if lost then Api_messages.host_cpu_features_down else Api_messages.host_cpu_features_up in + let obj_uuid = Db.Host.get_uuid ~__context ~self:host in + Helpers.call_api_functions ~__context (fun rpc session_id -> + ignore (XenAPI.Message.create rpc session_id name priority `Host obj_uuid body) + ) + end; + + (* Recreate all Host_cpu objects *) + + (* Not all /proc/cpuinfo files contain MHz information. 
*) + let speed = try Int64.of_float (float_of_string stat.cpu_info.speed) with _ -> 0L in + let model = try Int64.of_string stat.cpu_info.model with _ -> 0L in + let family = try Int64.of_string stat.cpu_info.family with _ -> 0L in + + (* Recreate all Host_cpu objects *) + let host_cpus = List.filter (fun (_, s) -> s.API.host_cpu_host = host) (Db.Host_cpu.get_all_records ~__context) in + List.iter (fun (r, _) -> Db.Host_cpu.destroy ~__context ~self:r) host_cpus; + for i = 0 to stat.cpu_info.cpu_count - 1 + do + let uuid = Uuid.to_string (Uuid.make_uuid ()) + and ref = Ref.make () in + debug "Creating CPU %d: %s" i uuid; + ignore (Db.Host_cpu.create ~__context ~ref ~uuid ~host ~number:(Int64.of_int i) + ~vendor:stat.cpu_info.vendor ~speed ~modelname:stat.cpu_info.modelname + ~utilisation:0. ~flags:stat.cpu_info.flags ~stepping:stat.cpu_info.stepping ~model ~family + ~features:"" ~other_config:[]) + done let create_pool_cpuinfo ~__context = - let open Map_check in - let open Cpuid_helpers in - - let all_host_cpus = List.map - (fun (_, s) -> s.API.host_cpu_info) - (Db.Host.get_all_records ~__context) in - - let merge pool host = - try - pool - |> setf vendor (getf vendor host) - |> setf cpu_count ((getf cpu_count host) + (getf cpu_count pool)) - |> setf socket_count ((getf socket_count host) + (getf socket_count pool)) - |> setf features_pv (Cpuid_helpers.intersect (getf features_pv host) (getf features_pv pool)) - |> setf features_hvm (Cpuid_helpers.intersect (getf features_hvm host) (getf features_hvm pool)) - with Not_found -> - (* If the host doesn't have all the keys we expect, assume that we - are in the middle of an RPU and it has not yet been upgraded, so - it should be ignored when calculating the pool level *) - pool - in - - let zero = ["vendor", ""; "socket_count", "0"; "cpu_count", "0"; "features_pv", ""; "features_hvm", ""] in - let pool_cpuinfo = List.fold_left merge zero all_host_cpus in - let pool = Helpers.get_pool ~__context in - let old_cpuinfo = 
Db.Pool.get_cpu_info ~__context ~self:pool in - debug "create_pool_cpuinfo: setting pool cpuinfo: socket_count=%d, cpu_count=%d, features_hvm=%s, features_pv=%s" - (Map_check.getf Cpuid_helpers.socket_count pool_cpuinfo) - (Map_check.getf Cpuid_helpers.cpu_count pool_cpuinfo) - (Map_check.getf Cpuid_helpers.features_hvm pool_cpuinfo |> string_of_features) - (Map_check.getf Cpuid_helpers.features_pv pool_cpuinfo |> string_of_features); - Db.Pool.set_cpu_info ~__context ~self:pool ~value:pool_cpuinfo; - - let before = getf ~default:[||] features_hvm old_cpuinfo in - let after = getf ~default:[||] features_hvm pool_cpuinfo in - if before <> after && before <> [||] then begin - let lost = is_strict_subset (intersect before after) before in - let gained = is_strict_subset (intersect before after) after in - let body = Printf.sprintf "The pool-level CPU features have changed.%s%s" - (if lost then " Some features have gone away." else "") - (if gained then " Some features were added." else "") - in - info "%s" body; - - if not (Helpers.rolling_upgrade_in_progress ~__context) && List.length all_host_cpus > 1 then - let (name, priority) = if lost then Api_messages.pool_cpu_features_down else Api_messages.pool_cpu_features_up in - let obj_uuid = Db.Pool.get_uuid ~__context ~self:pool in - Helpers.call_api_functions ~__context (fun rpc session_id -> - ignore (XenAPI.Message.create rpc session_id name priority `Pool obj_uuid body) - ) - end - + let open Map_check in + let open Cpuid_helpers in + + let all_host_cpus = List.map + (fun (_, s) -> s.API.host_cpu_info) + (Db.Host.get_all_records ~__context) in + + let merge pool host = + try + pool + |> setf vendor (getf vendor host) + |> setf cpu_count ((getf cpu_count host) + (getf cpu_count pool)) + |> setf socket_count ((getf socket_count host) + (getf socket_count pool)) + |> setf features_pv (Cpuid_helpers.intersect (getf features_pv host) (getf features_pv pool)) + |> setf features_hvm (Cpuid_helpers.intersect (getf 
features_hvm host) (getf features_hvm pool)) + with Not_found -> + (* If the host doesn't have all the keys we expect, assume that we + are in the middle of an RPU and it has not yet been upgraded, so + it should be ignored when calculating the pool level *) + pool + in + + let zero = ["vendor", ""; "socket_count", "0"; "cpu_count", "0"; "features_pv", ""; "features_hvm", ""] in + let pool_cpuinfo = List.fold_left merge zero all_host_cpus in + let pool = Helpers.get_pool ~__context in + let old_cpuinfo = Db.Pool.get_cpu_info ~__context ~self:pool in + debug "create_pool_cpuinfo: setting pool cpuinfo: socket_count=%d, cpu_count=%d, features_hvm=%s, features_pv=%s" + (Map_check.getf Cpuid_helpers.socket_count pool_cpuinfo) + (Map_check.getf Cpuid_helpers.cpu_count pool_cpuinfo) + (Map_check.getf Cpuid_helpers.features_hvm pool_cpuinfo |> string_of_features) + (Map_check.getf Cpuid_helpers.features_pv pool_cpuinfo |> string_of_features); + Db.Pool.set_cpu_info ~__context ~self:pool ~value:pool_cpuinfo; + + let before = getf ~default:[||] features_hvm old_cpuinfo in + let after = getf ~default:[||] features_hvm pool_cpuinfo in + if before <> after && before <> [||] then begin + let lost = is_strict_subset (intersect before after) before in + let gained = is_strict_subset (intersect before after) after in + let body = Printf.sprintf "The pool-level CPU features have changed.%s%s" + (if lost then " Some features have gone away." else "") + (if gained then " Some features were added." 
else "") + in + info "%s" body; + + if not (Helpers.rolling_upgrade_in_progress ~__context) && List.length all_host_cpus > 1 then + let (name, priority) = if lost then Api_messages.pool_cpu_features_down else Api_messages.pool_cpu_features_up in + let obj_uuid = Db.Pool.get_uuid ~__context ~self:pool in + Helpers.call_api_functions ~__context (fun rpc session_id -> + ignore (XenAPI.Message.create rpc session_id name priority `Pool obj_uuid body) + ) + end + let create_chipset_info ~__context = - let host = Helpers.get_localhost ~__context in - let current_info = Db.Host.get_chipset_info ~__context ~self:host in - let iommu = - try - let xc = Xenctrl.interface_open () in - Xenctrl.interface_close xc; - let open Xapi_xenops_queue in - let module Client = (val make_client (default_xenopsd ()) : XENOPS) in - let dbg = Context.string_of_task __context in - let xen_dmesg = Client.HOST.get_console_data dbg in - if String.has_substr xen_dmesg "I/O virtualisation enabled" then - "true" - else if String.has_substr xen_dmesg "I/O virtualisation disabled" then - "false" - else if List.mem_assoc "iommu" current_info then - List.assoc "iommu" current_info - else - "false" - with _ -> - warn "Not running on xen; assuming I/O virtualization disabled"; - "false" in - let info = ["iommu", iommu] in - Db.Host.set_chipset_info ~__context ~self:host ~value:info + let host = Helpers.get_localhost ~__context in + let current_info = Db.Host.get_chipset_info ~__context ~self:host in + let iommu = + try + let xc = Xenctrl.interface_open () in + Xenctrl.interface_close xc; + let open Xapi_xenops_queue in + let module Client = (val make_client (default_xenopsd ()) : XENOPS) in + let dbg = Context.string_of_task __context in + let xen_dmesg = Client.HOST.get_console_data dbg in + if String.has_substr xen_dmesg "I/O virtualisation enabled" then + "true" + else if String.has_substr xen_dmesg "I/O virtualisation disabled" then + "false" + else if List.mem_assoc "iommu" current_info then + 
List.assoc "iommu" current_info + else + "false" + with _ -> + warn "Not running on xen; assuming I/O virtualization disabled"; + "false" in + let info = ["iommu", iommu] in + Db.Host.set_chipset_info ~__context ~self:host ~value:info let create_patches_requiring_reboot_info ~__context ~host = let patch_uuids = try Stdext.Listext.List.setify (Stdext.Unixext.read_lines !Xapi_globs.reboot_required_hfxs) with _ -> [] in - let patches = List.fold_left (fun acc uuid -> - try - (Db.Pool_patch.get_by_uuid ~__context ~uuid) :: acc - with _ -> warn "Invalid Pool_patch UUID [%s]" uuid; acc - ) [] patch_uuids in + let patches = List.fold_left (fun acc uuid -> + try + (Db.Pool_patch.get_by_uuid ~__context ~uuid) :: acc + with _ -> warn "Invalid Pool_patch UUID [%s]" uuid; acc + ) [] patch_uuids in Db.Host.set_patches_requiring_reboot ~__context ~self:host ~value:patches diff --git a/ocaml/xapi/create_networks.ml b/ocaml/xapi/create_networks.ml index 37ffd409c03..4b22eb2256f 100644 --- a/ocaml/xapi/create_networks.ml +++ b/ocaml/xapi/create_networks.ml @@ -17,33 +17,33 @@ open D let internal_management_network_name = "Host internal management network" let internal_management_network_desc = "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI" -let internal_management_network_oc = - [ - Xapi_globs.is_guest_installer_network, "true"; (* for backward compat *) - Xapi_globs.is_host_internal_management_network, "true"; - "ip_begin", "169.254.0.1"; - "ip_end", "169.254.255.254"; - "netmask", "255.255.0.0" - ] +let internal_management_network_oc = + [ + Xapi_globs.is_guest_installer_network, "true"; (* for backward compat *) + Xapi_globs.is_host_internal_management_network, "true"; + "ip_begin", "169.254.0.1"; + "ip_end", "169.254.255.254"; + "netmask", "255.255.0.0" + ] (* We use a well-known name for the internal management interface *) let internal_management_bridge = "xenapi" let create_guest_installer_network ~__context = - 
try - (* We've already got one; check if it has got the right bridge name and fix if needed *) - let net = Helpers.get_host_internal_management_network ~__context in - if Db.Network.get_bridge ~__context ~self:net <> internal_management_bridge then - Db.Network.set_bridge ~__context ~self:net ~value:internal_management_bridge - with _ -> - (* It is not there yet; make one *) - (* The new "host internal management network" is created with both other_config keys *) - let h' = Xapi_network.create ~__context ~name_label:internal_management_network_name - ~name_description:internal_management_network_desc ~mTU:1500L - ~other_config:internal_management_network_oc ~tags:[] in - Db.Network.set_bridge ~__context ~self:h' ~value:internal_management_bridge; - debug "Created new host internal management network: %s" (Ref.string_of h') + try + (* We've already got one; check if it has got the right bridge name and fix if needed *) + let net = Helpers.get_host_internal_management_network ~__context in + if Db.Network.get_bridge ~__context ~self:net <> internal_management_bridge then + Db.Network.set_bridge ~__context ~self:net ~value:internal_management_bridge + with _ -> + (* It is not there yet; make one *) + (* The new "host internal management network" is created with both other_config keys *) + let h' = Xapi_network.create ~__context ~name_label:internal_management_network_name + ~name_description:internal_management_network_desc ~mTU:1500L + ~other_config:internal_management_network_oc ~tags:[] in + Db.Network.set_bridge ~__context ~self:h' ~value:internal_management_bridge; + debug "Created new host internal management network: %s" (Ref.string_of h') -let create_networks_localhost () = +let create_networks_localhost () = Server_helpers.exec_with_new_task "creating networks" (fun __context-> create_guest_installer_network ~__context) diff --git a/ocaml/xapi/create_networks.mli b/ocaml/xapi/create_networks.mli index 87ce873d90f..521af360440 100644 --- 
a/ocaml/xapi/create_networks.mli +++ b/ocaml/xapi/create_networks.mli @@ -13,7 +13,7 @@ *) (** Built-in networks. * @group Networking - *) +*) (** The name_label of the internal management network *) val internal_management_network_name : string diff --git a/ocaml/xapi/create_storage.ml b/ocaml/xapi/create_storage.ml index addfae2c7b6..ec5e0f8f69f 100644 --- a/ocaml/xapi/create_storage.ml +++ b/ocaml/xapi/create_storage.ml @@ -13,8 +13,8 @@ *) (** Use the API to register a set of default SRs with the server. * @group Storage - *) - +*) + open Client module D=Debug.Make(struct let name="xapi" end) open D @@ -28,44 +28,44 @@ let plug_all_pbds __context = let result = ref true in List.iter (fun (self, pbd_record) -> - try - if pbd_record.API.pBD_currently_attached - then debug "Not replugging PBD %s: already plugged in" (Ref.string_of self) - else Xapi_pbd.plug ~__context ~self - with e -> - result := false; - error "Could not plug in pbd '%s': %s" (Db.PBD.get_uuid ~__context ~self) (Printexc.to_string e)) + try + if pbd_record.API.pBD_currently_attached + then debug "Not replugging PBD %s: already plugged in" (Ref.string_of self) + else Xapi_pbd.plug ~__context ~self + with e -> + result := false; + error "Could not plug in pbd '%s': %s" (Db.PBD.get_uuid ~__context ~self) (Printexc.to_string e)) my_pbds; - !result + !result let plug_unplugged_pbds __context = let my_pbds = Helpers.get_my_pbds __context in List.iter (fun (self, pbd_record) -> - try - if pbd_record.API.pBD_currently_attached - then debug "Not replugging PBD %s: already plugged in" (Ref.string_of self) - else Xapi_pbd.plug ~__context ~self - with e -> debug "Could not plug in pbd '%s': %s" (Db.PBD.get_uuid ~__context ~self) (Printexc.to_string e)) + try + if pbd_record.API.pBD_currently_attached + then debug "Not replugging PBD %s: already plugged in" (Ref.string_of self) + else Xapi_pbd.plug ~__context ~self + with e -> debug "Could not plug in pbd '%s': %s" (Db.PBD.get_uuid ~__context ~self) 
(Printexc.to_string e)) my_pbds (* Create a PBD which connects this host to the SR, if one doesn't already exist *) -let maybe_create_pbd rpc session_id sr device_config me = - let pbds = Client.SR.get_PBDs rpc session_id sr in - let pbds = List.filter (fun self -> Client.PBD.get_host rpc session_id self = me) pbds in - (* Check not more than 1 pbd in the database *) - let pbds = - if List.length pbds > 1 - then begin - (* shouldn't happen... delete all but first pbd to make db consistent again *) - List.iter (fun pbd->Client.PBD.destroy rpc session_id pbd) (List.tl pbds); - [List.hd pbds] - end - else pbds - in - if List.length pbds = 0 (* If there's no PBD, create it *) - then Client.PBD.create ~rpc ~session_id ~host:me ~sR:sr ~device_config ~other_config:[] - else List.hd pbds (* Otherwise, return the current one *) +let maybe_create_pbd rpc session_id sr device_config me = + let pbds = Client.SR.get_PBDs rpc session_id sr in + let pbds = List.filter (fun self -> Client.PBD.get_host rpc session_id self = me) pbds in + (* Check not more than 1 pbd in the database *) + let pbds = + if List.length pbds > 1 + then begin + (* shouldn't happen... 
delete all but first pbd to make db consistent again *) + List.iter (fun pbd->Client.PBD.destroy rpc session_id pbd) (List.tl pbds); + [List.hd pbds] + end + else pbds + in + if List.length pbds = 0 (* If there's no PBD, create it *) + then Client.PBD.create ~rpc ~session_id ~host:me ~sR:sr ~device_config ~other_config:[] + else List.hd pbds (* Otherwise, return the current one *) let create_storage (me: API.ref_host) rpc session_id __context : unit = let create_pbds_for_shared_srs () = @@ -85,10 +85,10 @@ let create_storage (me: API.ref_host) rpc session_id __context : unit = List.iter (fun s -> try ignore (maybe_create_pbd_for_shared_sr s) with _ -> ()) shared_sr_refs in - let other_config = + let other_config = try let pool = Helpers.get_pool ~__context in - Db.Pool.get_other_config ~__context ~self:pool + Db.Pool.get_other_config ~__context ~self:pool with _ -> [] in @@ -106,10 +106,10 @@ let create_storage (me: API.ref_host) rpc session_id __context : unit = Xapi_alert.add ~msg:Api_messages.pbd_plug_failed_on_server_start ~cls:`Host ~obj_uuid ~body:""; end; Xapi_host_helpers.consider_enabling_host ~__context - + let create_storage_localhost rpc session_id : unit = Server_helpers.exec_with_new_task "creating storage" (fun context-> let me = Helpers.get_localhost ~__context:context in - create_storage me rpc session_id context) + create_storage me rpc session_id context) diff --git a/ocaml/xapi/daemon_manager.ml b/ocaml/xapi/daemon_manager.ml index 99dedcf0dee..4ba56ffe690 100644 --- a/ocaml/xapi/daemon_manager.ml +++ b/ocaml/xapi/daemon_manager.ml @@ -17,92 +17,92 @@ open Stdext.Threadext module IntSet = Set.Make(struct type t = int let compare = compare end) type daemon_check = - | Pidfile of string - | Function of (unit -> bool) + | Pidfile of string + | Function of (unit -> bool) type daemon_state = [ - `unmanaged | - (** No threads which care about the state of the daemon are running. 
*) - `should_start | - (** Daemon should be started when the last thread exits - with_daemon_stopped. *) - `should_not_start - (** Daemon should not be started when the last thread exits - with_daemon_stopped. *) + `unmanaged | + (** No threads which care about the state of the daemon are running. *) + `should_start | + (** Daemon should be started when the last thread exits + with_daemon_stopped. *) + `should_not_start + (** Daemon should not be started when the last thread exits + with_daemon_stopped. *) ] (** Tristate value for representing the state of a daemon we want to manage. *) module type DAEMON = sig - val check : daemon_check + val check : daemon_check - val start : unit -> unit + val start : unit -> unit - val stop : unit -> unit + val stop : unit -> unit end module Make(D : DAEMON) = struct - let registered_threads = ref IntSet.empty - - let register_thread_nolock id = - registered_threads := (IntSet.add id !registered_threads) - - let deregister_thread_nolock id = - registered_threads := (IntSet.remove id !registered_threads) - - let are_threads_registered_nolock () = - not (IntSet.is_empty !registered_threads) - - let daemon_state : daemon_state ref = ref `unmanaged - let m = Mutex.create () - - let is_running () = - match D.check with - | Pidfile file -> begin - try - let pid = Stdext.Unixext.string_of_file file |> String.trim |> int_of_string in - Unix.kill pid 0; - true - with _ -> false - end - | Function f -> f () - - let start = D.start - - let stop ?timeout () = - match timeout with - | Some t -> begin - let start = Unix.gettimeofday () in - try D.stop () - with e -> - while (Unix.gettimeofday () -. start < t) && (is_running ()) do - Thread.delay 1.0 - done; - if is_running () then raise e - end - | None -> D.stop () - - let with_daemon_stopped ?timeout f = - let thread_id = Thread.(id (self ())) in - (* Stop the daemon if it's running, then register this thread. 
*) - Mutex.execute m - (fun () -> - begin - match is_running (), !daemon_state with - | true, _ -> (daemon_state := `should_start; stop ?timeout ()) - | false, `unmanaged -> daemon_state := `should_not_start - | false, _ -> () - end; - register_thread_nolock thread_id); - Stdext.Pervasiveext.finally - f - (* Deregister this thread, and if there are no more threads registered, - * start the daemon if it was running in the first place. *) - (fun () -> - Mutex.execute m - (fun () -> - deregister_thread_nolock thread_id; - match are_threads_registered_nolock (), !daemon_state with - | true, _ -> () - | false, `should_start -> (start (); daemon_state := `unmanaged) - | false, _ -> daemon_state := `unmanaged)) + let registered_threads = ref IntSet.empty + + let register_thread_nolock id = + registered_threads := (IntSet.add id !registered_threads) + + let deregister_thread_nolock id = + registered_threads := (IntSet.remove id !registered_threads) + + let are_threads_registered_nolock () = + not (IntSet.is_empty !registered_threads) + + let daemon_state : daemon_state ref = ref `unmanaged + let m = Mutex.create () + + let is_running () = + match D.check with + | Pidfile file -> begin + try + let pid = Stdext.Unixext.string_of_file file |> String.trim |> int_of_string in + Unix.kill pid 0; + true + with _ -> false + end + | Function f -> f () + + let start = D.start + + let stop ?timeout () = + match timeout with + | Some t -> begin + let start = Unix.gettimeofday () in + try D.stop () + with e -> + while (Unix.gettimeofday () -. start < t) && (is_running ()) do + Thread.delay 1.0 + done; + if is_running () then raise e + end + | None -> D.stop () + + let with_daemon_stopped ?timeout f = + let thread_id = Thread.(id (self ())) in + (* Stop the daemon if it's running, then register this thread. 
*) + Mutex.execute m + (fun () -> + begin + match is_running (), !daemon_state with + | true, _ -> (daemon_state := `should_start; stop ?timeout ()) + | false, `unmanaged -> daemon_state := `should_not_start + | false, _ -> () + end; + register_thread_nolock thread_id); + Stdext.Pervasiveext.finally + f + (* Deregister this thread, and if there are no more threads registered, + * start the daemon if it was running in the first place. *) + (fun () -> + Mutex.execute m + (fun () -> + deregister_thread_nolock thread_id; + match are_threads_registered_nolock (), !daemon_state with + | true, _ -> () + | false, `should_start -> (start (); daemon_state := `unmanaged) + | false, _ -> daemon_state := `unmanaged)) end diff --git a/ocaml/xapi/daemon_manager.mli b/ocaml/xapi/daemon_manager.mli index f50173f5df0..f9e52d1fdd9 100644 --- a/ocaml/xapi/daemon_manager.mli +++ b/ocaml/xapi/daemon_manager.mli @@ -13,31 +13,31 @@ *) type daemon_check = - | Pidfile of string - (** Check whether the daemon is running by reading a pidfile, and checking - that the PID points to a running process. *) - | Function of (unit -> bool) - (** Generic user-defined check, *) + | Pidfile of string + (** Check whether the daemon is running by reading a pidfile, and checking + that the PID points to a running process. *) + | Function of (unit -> bool) + (** Generic user-defined check, *) module type DAEMON = sig - val check : daemon_check - (** A way to check whether the daemon is running. *) + val check : daemon_check + (** A way to check whether the daemon is running. *) - val start : unit -> unit - (** Function which will start the daemon. *) + val start : unit -> unit + (** Function which will start the daemon. *) - val stop : unit -> unit - (** Function which will stop the daemon. *) + val stop : unit -> unit + (** Function which will stop the daemon. 
*) end module Make : functor (D : DAEMON) -> sig - val with_daemon_stopped : ?timeout:float -> (unit -> 'a) -> 'a - (** If the daemon is running, stop it while [f] runs and restart it once [f] - has returned. If multiple threads call [with_daemon_stopped] in parallel, - the daemon will not be restarted until all threads have left [f]. + val with_daemon_stopped : ?timeout:float -> (unit -> 'a) -> 'a + (** If the daemon is running, stop it while [f] runs and restart it once [f] + has returned. If multiple threads call [with_daemon_stopped] in parallel, + the daemon will not be restarted until all threads have left [f]. - If [timeout] is set, [with_daemon_stopped] will catch any exceptions from - [stop ()] and keep checking whether the daemon is running, until [timeout] - expires. If the daemon is still running after [timeout], the original - exception will be thrown. *) + If [timeout] is set, [with_daemon_stopped] will catch any exceptions from + [stop ()] and keep checking whether the daemon is running, until [timeout] + expires. If the daemon is still running after [timeout], the original + exception will be thrown. 
*) end diff --git a/ocaml/xapi/db.ml b/ocaml/xapi/db.ml index e2025bc6a54..6a675ba6ebc 100644 --- a/ocaml/xapi/db.ml +++ b/ocaml/xapi/db.ml @@ -13,10 +13,10 @@ *) (** * @group Database Operations - *) - +*) + include Db_actions.DB_Action let is_valid_ref __context r = - let t = Context.database_of __context in - let module DB = (val (Db_cache.get t) : Db_interface.DB_ACCESS) in - DB.is_valid_ref t (Ref.string_of r) + let t = Context.database_of __context in + let module DB = (val (Db_cache.get t) : Db_interface.DB_ACCESS) in + DB.is_valid_ref t (Ref.string_of r) diff --git a/ocaml/xapi/db_gc.ml b/ocaml/xapi/db_gc.ml index b98bc5d4d75..9518cb529de 100644 --- a/ocaml/xapi/db_gc.ml +++ b/ocaml/xapi/db_gc.ml @@ -13,8 +13,8 @@ *) (** * @group Database Operations - *) - +*) + open API open Stdext open Fun @@ -40,8 +40,8 @@ let _shutting_down = "shutting-down" let valid_ref x = Db.is_valid_ref x let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_record = - let db = Context.database_of __context in - let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in + let db = Context.database_of __context in + let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in let all_refs = get_all ~__context in let do_gc ref = let print_valid b = if b then "valid" else "INVALID" in @@ -49,36 +49,36 @@ let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_reco let ref_1_valid = valid_ref1 record in let ref_2_valid = valid_ref2 record in - if not (ref_1_valid && ref_2_valid) then - begin - let table,reference,valid1,valid2 = - (match DB.get_table_from_ref db (Ref.string_of ref) with - None -> "UNKNOWN CLASS" - | Some c -> c), - (Ref.string_of ref), - (print_valid ref_1_valid), - (print_valid ref_2_valid) in - debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." 
table reference valid1 valid2; - delete_record ~__context ~self:ref - end in + if not (ref_1_valid && ref_2_valid) then + begin + let table,reference,valid1,valid2 = + (match DB.get_table_from_ref db (Ref.string_of ref) with + None -> "UNKNOWN CLASS" + | Some c -> c), + (Ref.string_of ref), + (print_valid ref_1_valid), + (print_valid ref_2_valid) in + debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." table reference valid1 valid2; + delete_record ~__context ~self:ref + end in List.iter do_gc all_refs let gc_VGPU_types ~__context = - (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types - * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) - let open Db_filter_types in - let garbage = Db.VGPU_type.get_records_where ~__context - ~expr:(And ((Eq (Field "VGPUs", Literal "()")), - (Eq (Field "supported_on_PGPUs", Literal "()")))) in - match garbage with - | [] -> () - | _ -> - debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" - (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); - List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage + (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types + * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) + let open Db_filter_types in + let garbage = Db.VGPU_type.get_records_where ~__context + ~expr:(And ((Eq (Field "VGPUs", Literal "()")), + (Eq (Field "supported_on_PGPUs", Literal "()")))) in + match garbage with + | [] -> () + | _ -> + debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" + (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); + List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage let gc_PIFs ~__context = - gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) + gc_connector ~__context Db.PIF.get_all 
Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) (fun ~__context ~self -> (* We need to destroy the PIF, it's metrics and any VLAN/bond records that this PIF was a master of. *) (* bonds/tunnels_to_gc is actually a list which is either empty (not part of a bond/tunnel) @@ -96,21 +96,21 @@ let gc_PIFs ~__context = List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; Db.PIF.destroy ~__context ~self) let gc_VBDs ~__context = - gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) + gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) (fun ~__context ~self -> - (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) - if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then - begin - Db.VBD.set_VDI ~__context ~self ~value:Ref.null; - Db.VBD.set_empty ~__context ~self ~value:true; - debug "VBD corresponds to CD. Record preserved but set to empty"; - end - else - begin - let metrics = Db.VBD.get_metrics ~__context ~self in - (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.VBD.destroy ~__context ~self; - end) + (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) + if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then + begin + Db.VBD.set_VDI ~__context ~self ~value:Ref.null; + Db.VBD.set_empty ~__context ~self ~value:true; + debug "VBD corresponds to CD. 
Record preserved but set to empty"; + end + else + begin + let metrics = Db.VBD.get_metrics ~__context ~self in + (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.VBD.destroy ~__context ~self; + end) let gc_crashdumps ~__context = gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record @@ -127,31 +127,31 @@ let gc_VGPUs ~__context = Db.VGPU.destroy ~__context ~self) let gc_PGPUs ~__context = - let pgpus = Db.PGPU.get_all ~__context in - (* Go through the list of PGPUs, destroying any with an invalid host ref. - * Keep a list of groups which contained PGPUs which were destroyed. *) - let affected_groups = - List.fold_left - (fun acc pgpu -> - if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) - then begin - let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in - Db.PGPU.destroy ~__context ~self:pgpu; - debug "GCed PGPU %s" (Ref.string_of pgpu); - group :: acc - end else - acc) - [] pgpus - |> List.filter (valid_ref __context) - |> List.setify - in - (* Update enabled/supported VGPU types on the groups which contained the - * destroyed PGPUs. *) - List.iter - (fun group -> - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) - affected_groups + let pgpus = Db.PGPU.get_all ~__context in + (* Go through the list of PGPUs, destroying any with an invalid host ref. + * Keep a list of groups which contained PGPUs which were destroyed. *) + let affected_groups = + List.fold_left + (fun acc pgpu -> + if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) + then begin + let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in + Db.PGPU.destroy ~__context ~self:pgpu; + debug "GCed PGPU %s" (Ref.string_of pgpu); + group :: acc + end else + acc) + [] pgpus + |> List.filter (valid_ref __context) + |> List.setify + in + (* Update enabled/supported VGPU types on the groups which contained the + * destroyed PGPUs. 
*) + List.iter + (fun group -> + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) + affected_groups let gc_PBDs ~__context = gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy @@ -159,41 +159,41 @@ let gc_Host_patches ~__context = gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy let gc_host_cpus ~__context = let host_cpus = Db.Host_cpu.get_all ~__context in - List.iter - (fun hcpu -> - if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then - Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus + List.iter + (fun hcpu -> + if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then + Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus let gc_host_metrics ~__context = - let all_host_metrics = Db.Host_metrics.get_all ~__context in - let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in - let host_metrics = metrics (Db.Host.get_all ~__context) in - List.iter - (fun hmetric-> - if not (List.mem hmetric host_metrics) then - Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics + let all_host_metrics = Db.Host_metrics.get_all ~__context in + let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in + let host_metrics = metrics (Db.Host.get_all ~__context) in + List.iter + (fun hmetric-> + if not (List.mem hmetric host_metrics) then + Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics (* If the SR record is missing, delete the VDI record *) -let gc_VDIs ~__context = +let gc_VDIs ~__context = let all_srs = Db.SR.get_all ~__context in List.iter (fun vdi -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if not(List.mem sr 
all_srs) then begin - debug "GCed VDI %s" (Ref.string_of vdi); - Db.VDI.destroy ~__context ~self:vdi - end) (Db.VDI.get_all ~__context) + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if not(List.mem sr all_srs) then begin + debug "GCed VDI %s" (Ref.string_of vdi); + Db.VDI.destroy ~__context ~self:vdi + end) (Db.VDI.get_all ~__context) let gc_consoles ~__context = - List.iter (fun console -> - if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) - then begin - Db.Console.destroy ~__context ~self:console; - debug "GCed console %s" (Ref.string_of console); - end - ) (Db.Console.get_all ~__context) + List.iter (fun console -> + if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) + then begin + Db.Console.destroy ~__context ~self:console; + debug "GCed console %s" (Ref.string_of console); + end + ) (Db.Console.get_all ~__context) let already_sent_clock_skew_warnings = Hashtbl.create 10 -let detect_clock_skew ~__context host skew = +let detect_clock_skew ~__context host skew = (* Send one message if we exceed the max_clock_skew *) if skew > Xapi_globs.max_clock_skew && not(Hashtbl.mem already_sent_clock_skew_warnings host) then begin error "Sending clock_skew_detected message since the skew with host %s (%s) is greater than the limit (%.2f > %.2f)" @@ -209,7 +209,7 @@ let detect_clock_skew ~__context host skew = (* If we are under half the max skew then re-arm the message sender *) if skew < Xapi_globs.max_clock_skew /. 2. then Hashtbl.remove already_sent_clock_skew_warnings host - + (* Master compares the database with the in-memory host heartbeat table and sets the live flag accordingly. 
Called with the use_host_heartbeat_for_liveness_m and use_host_heartbeat_for_liveness is true (ie non-HA mode) *) let check_host_liveness ~__context = @@ -221,52 +221,52 @@ let check_host_liveness ~__context = let localhost = try Helpers.get_localhost ~__context with _ -> Ref.null in (* Look for "true->false" transition on Host_metrics.live *) - let check_host host = + let check_host host = if host <> localhost then begin - try - let hmetric = Db.Host.get_metrics ~__context ~self:host in - let live = Db.Host_metrics.get_live ~__context ~self:hmetric in - (* See if the host is using the new HB mechanism, if so we'll use that *) - let new_heartbeat_time = - try - Mutex.execute host_table_m (fun () -> Hashtbl.find host_heartbeat_table host) - with _ -> 0.0 (* never *) - in - let old_heartbeat_time = - if rum && (Version.platform_version () <> (Helpers.version_string_of ~__context (Helpers.LocalObject host))) then - (debug "Host %s considering using metrics last update time as heartbeat" (Ref.string_of host); - Date.to_float (Db.Host_metrics.get_last_updated ~__context ~self:hmetric)) - else 0.0 in - (* Use whichever value is the most recent to determine host liveness *) - let host_time = max old_heartbeat_time new_heartbeat_time in - - let now = Unix.gettimeofday () in - (* we can now compare 'host_time' with 'now' *) - - if now -. host_time < !Xapi_globs.host_assumed_dead_interval then begin - (* From the heartbeat PoV the host looks alive. We try to (i) minimise database sets; and (ii) - avoid toggling the host back to live if it has been marked as shutting_down. 
*) - Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m - (fun () -> - let shutting_down = List.exists (fun x -> x=host) !Xapi_globs.hosts_which_are_shutting_down in - if not live && not shutting_down then begin - Db.Host_metrics.set_live ~__context ~self:hmetric ~value:true; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - end - ) - end else begin - if live then begin - debug "Assuming host is offline since the heartbeat/metrics haven't been updated for %.2f seconds; setting live to false" (now -. host_time); - Xapi_hooks.host_pre_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__assume_failed; - Db.Host_metrics.set_live ~__context ~self:hmetric ~value:false; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host; - Xapi_hooks.host_post_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__assume_failed; - end - end; - (* Check for clock skew *) - detect_clock_skew ~__context host (try Mutex.execute host_table_m (fun () -> Hashtbl.find host_skew_table host) with _ -> 0.) + try + let hmetric = Db.Host.get_metrics ~__context ~self:host in + let live = Db.Host_metrics.get_live ~__context ~self:hmetric in + (* See if the host is using the new HB mechanism, if so we'll use that *) + let new_heartbeat_time = + try + Mutex.execute host_table_m (fun () -> Hashtbl.find host_heartbeat_table host) + with _ -> 0.0 (* never *) + in + let old_heartbeat_time = + if rum && (Version.platform_version () <> (Helpers.version_string_of ~__context (Helpers.LocalObject host))) then + (debug "Host %s considering using metrics last update time as heartbeat" (Ref.string_of host); + Date.to_float (Db.Host_metrics.get_last_updated ~__context ~self:hmetric)) + else 0.0 in + (* Use whichever value is the most recent to determine host liveness *) + let host_time = max old_heartbeat_time new_heartbeat_time in + + let now = Unix.gettimeofday () in + (* we can now compare 'host_time' with 'now' *) + + if now -. 
host_time < !Xapi_globs.host_assumed_dead_interval then begin + (* From the heartbeat PoV the host looks alive. We try to (i) minimise database sets; and (ii) + avoid toggling the host back to live if it has been marked as shutting_down. *) + Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m + (fun () -> + let shutting_down = List.exists (fun x -> x=host) !Xapi_globs.hosts_which_are_shutting_down in + if not live && not shutting_down then begin + Db.Host_metrics.set_live ~__context ~self:hmetric ~value:true; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + end + ) + end else begin + if live then begin + debug "Assuming host is offline since the heartbeat/metrics haven't been updated for %.2f seconds; setting live to false" (now -. host_time); + Xapi_hooks.host_pre_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__assume_failed; + Db.Host_metrics.set_live ~__context ~self:hmetric ~value:false; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host; + Xapi_hooks.host_post_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__assume_failed; + end + end; + (* Check for clock skew *) + detect_clock_skew ~__context host (try Mutex.execute host_table_m (fun () -> Hashtbl.find host_skew_table host) with _ -> 0.) 
with exn -> - debug "Ignoring exception inspecting metrics of host %s: %s" (Ref.string_of host) (ExnHelper.string_of_exn exn) + debug "Ignoring exception inspecting metrics of host %s: %s" (Ref.string_of host) (ExnHelper.string_of_exn exn) end in let all_hosts = Db.Host.get_all ~__context in @@ -274,25 +274,25 @@ let check_host_liveness ~__context = let timeout_sessions_common ~__context sessions limit session_group = let unused_sessions = List.filter - (fun (x, _) -> - let rec is_session_unused s = - if (s=Ref.null) then true (* top of session tree *) - else - try (* if no session s, assume default value true=unused *) - let tasks = (Db.Session.get_tasks ~__context ~self:s) in - let parent = (Db.Session.get_parent ~__context ~self:s) in - (List.for_all - (fun t -> TaskHelper.status_is_completed - (* task might not exist anymore, assume completed in this case *) - (try Db.Task.get_status ~__context ~self:t with _->`success) - ) - tasks - ) - && (is_session_unused parent) - with _->true - in is_session_unused x - ) - sessions + (fun (x, _) -> + let rec is_session_unused s = + if (s=Ref.null) then true (* top of session tree *) + else + try (* if no session s, assume default value true=unused *) + let tasks = (Db.Session.get_tasks ~__context ~self:s) in + let parent = (Db.Session.get_parent ~__context ~self:s) in + (List.for_all + (fun t -> TaskHelper.status_is_completed + (* task might not exist anymore, assume completed in this case *) + (try Db.Task.get_status ~__context ~self:t with _->`success) + ) + tasks + ) + && (is_session_unused parent) + with _->true + in is_session_unused x + ) + sessions in (* Only keep a list of (ref, last_active, uuid) *) let disposable_sessions = List.map (fun (x, y) -> x, Date.to_float y.Db_actions.session_last_active, y.Db_actions.session_uuid) unused_sessions in @@ -300,20 +300,20 @@ let timeout_sessions_common ~__context sessions limit session_group = let threshold_time = Unix.time () -. 
!Xapi_globs.inactive_session_timeout in let young, old = List.partition (fun (_, y, _) -> y > threshold_time) disposable_sessions in (* If there are too many young sessions then we need to delete the oldest *) - let lucky, unlucky = + let lucky, unlucky = if List.length young <= limit then young, [] (* keep them all *) - else + else (* Need to reverse sort by last active and drop the oldest *) List.chop limit (List.sort (fun (_,a, _) (_,b, _) -> compare b a) young) in - let cancel doc sessions = + let cancel doc sessions = List.iter (fun (s, active, uuid) -> - debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; - Xapi_session.destroy_db_session ~__context ~self:s - ) sessions in + debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; + Xapi_session.destroy_db_session ~__context ~self:s + ) sessions in (* Only the 'lucky' survive: the 'old' and 'unlucky' are destroyed *) - if unlucky <> [] + if unlucky <> [] then debug "Number of disposable sessions in group '%s' in database (%d/%d) exceeds limit (%d): will delete the oldest" session_group (List.length disposable_sessions) (List.length sessions) limit; cancel (Printf.sprintf "Timed out session in group '%s' because of its age" session_group) old; cancel (Printf.sprintf "Timed out session in group '%s' because max number of sessions was exceeded" session_group) unlucky @@ -321,123 +321,123 @@ let timeout_sessions_common ~__context sessions limit session_group = let last_session_log_time = ref None let timeout_sessions ~__context = - let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in - - let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in - let use_root_auth_name s = 
s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in - let anon_sessions, named_sessions = List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in - let session_groups = Hashtbl.create 37 in - List.iter (function (_, s) as rs -> - let key = if use_root_auth_name s then `Orig s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in - let current_sessions = - try Hashtbl.find session_groups key - with Not_found -> [] in - Hashtbl.replace session_groups key (rs :: current_sessions) - ) named_sessions; - - let should_log = match !last_session_log_time with - | None -> true - | Some t -> Unix.time () -. t > 600.0 (* Every 10 mins, dump session stats *) - in - - if should_log then begin - last_session_log_time := Some (Unix.time ()); - let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in - debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" - (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings - end; - - begin - Hashtbl.iter - (fun key ss -> match key with - | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) - | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) - session_groups; - timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; - timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; + let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in + + let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in + let use_root_auth_name s = s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in + let anon_sessions, named_sessions = 
List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in + let session_groups = Hashtbl.create 37 in + List.iter (function (_, s) as rs -> + let key = if use_root_auth_name s then `Orig s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in + let current_sessions = + try Hashtbl.find session_groups key + with Not_found -> [] in + Hashtbl.replace session_groups key (rs :: current_sessions) + ) named_sessions; + + let should_log = match !last_session_log_time with + | None -> true + | Some t -> Unix.time () -. t > 600.0 (* Every 10 mins, dump session stats *) + in + + if should_log then begin + last_session_log_time := Some (Unix.time ()); + let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in + debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" + (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings + end; + + begin + Hashtbl.iter + (fun key ss -> match key with + | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) + | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) + session_groups; + timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; + timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; end let probation_pending_tasks = Hashtbl.create 53 let timeout_tasks ~__context = - let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in - let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in - let oldest_pending_time = Unix.time() -. 
!Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in - - let completed, pending = - List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) - all_tasks in - - (* Any task that was incomplete at the point someone called Task.destroy - will have `destroy in its current_operations. If they're now complete, - we can Kill these immediately *) - let completed_destroyable, completed_gcable = - List.partition - (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) - completed in - - List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; - - let completed_old, completed_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time) - completed_gcable in - - let pending_old, pending_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time) - pending in - - let pending_old_run, pending_old_hung = - List.partition - (fun (_, t) -> - try - let pre_progress = - Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in - t.Db_actions.task_progress -. pre_progress > min_float - with Not_found -> true) - pending_old in - - let () = - Hashtbl.clear probation_pending_tasks; - List.iter - (fun (_, t) -> - Hashtbl.add probation_pending_tasks - t.Db_actions.task_uuid t.Db_actions.task_progress) - pending_old in - - let old = pending_old_hung @ completed_old in - let young = pending_old_run @ pending_young @ completed_young in + let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in + let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in + let oldest_pending_time = Unix.time() -. 
!Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in + + let completed, pending = + List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) + all_tasks in + + (* Any task that was incomplete at the point someone called Task.destroy + will have `destroy in its current_operations. If they're now complete, + we can Kill these immediately *) + let completed_destroyable, completed_gcable = + List.partition + (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) + completed in + + List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; + + let completed_old, completed_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_finished < oldest_completed_time) + completed_gcable in + + let pending_old, pending_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_created < oldest_pending_time) + pending in + + let pending_old_run, pending_old_hung = + List.partition + (fun (_, t) -> + try + let pre_progress = + Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in + t.Db_actions.task_progress -. 
pre_progress > min_float + with Not_found -> true) + pending_old in + + let () = + Hashtbl.clear probation_pending_tasks; + List.iter + (fun (_, t) -> + Hashtbl.add probation_pending_tasks + t.Db_actions.task_uuid t.Db_actions.task_progress) + pending_old in + + let old = pending_old_hung @ completed_old in + let young = pending_old_run @ pending_young @ completed_young in (* If there are still too many young tasks then we'll try to delete some completed ones *) - let lucky, unlucky = + let lucky, unlucky = if List.length young <= Xapi_globs.max_tasks then young, [] (* keep them all *) - else + else (* Compute how many we'd like to delete *) let overflow = List.length young - Xapi_globs.max_tasks in (* We only consider deleting completed tasks *) - let completed, pending = List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in + let completed, pending = List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in (* Sort the completed tasks so we delete oldest tasks in preference *) let completed = - List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in + List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in (* From the completes set, choose up to 'overflow' *) - let unlucky, lucky = - if List.length completed > overflow - then List.chop overflow completed - else completed, [] in (* not enough to delete, oh well *) + let unlucky, lucky = + if List.length completed > overflow + then List.chop overflow completed + else completed, [] in (* not enough to delete, oh well *) (* Keep all pending and any which were not chosen from the completed set *) pending @ lucky, unlucky in (* Cancel the 'old' and 'unlucky' *) List.iter (fun (x, y) -> - if not (TaskHelper.status_is_completed y.Db_actions.task_status) - then warn 
"GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; - TaskHelper.destroy ~__context x - ) (old @ unlucky); + if not (TaskHelper.status_is_completed y.Db_actions.task_status) + then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; + TaskHelper.destroy ~__context x + ) (old @ unlucky); if List.length lucky > Xapi_globs.max_tasks then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks @@ -456,181 +456,181 @@ let timeout_alerts ~__context = and mark the Pool with an other_config key if we are in a rolling upgrade mode. If we detect the beginning or end of a rolling upgrade, call out to an external script. *) let detect_rolling_upgrade ~__context = - try - (* If my platform version is different to any host (including myself) then we're in a rolling upgrade mode *) - (* NB: it is critical this code runs once in the master of a pool of one before the dbsync, since this - is the only time at which the master's Version will be out of sync with its database record *) - let actually_in_progress = Helpers.pool_has_different_host_platform_versions ~__context in - (* Check the current state of the Pool as indicated by the Pool.other_config:rolling_upgrade_in_progress *) - let pools = Db.Pool.get_all ~__context in - match pools with - | [] -> - debug "Ignoring absence of pool record in detect_rolling_upgrade: this is expected on first boot" - | pool :: _ -> - let pool_says_in_progress = - List.mem_assoc Xapi_globs.rolling_upgrade_in_progress (Db.Pool.get_other_config ~__context ~self:pool) in - (* Resynchronise *) - if actually_in_progress <> pool_says_in_progress then begin - let platform_versions = List.map (fun host -> Helpers.version_string_of ~__context (Helpers.LocalObject host)) (Db.Host.get_all ~__context) in - debug "xapi platform version = %s; host platform versions = [ %s ]" - (Version.platform_version ()) (String.concat "; " platform_versions); - - warn 
"Pool thinks rolling upgrade%s in progress but Host version numbers indicate otherwise; correcting" - (if pool_says_in_progress then "" else " not"); - (if actually_in_progress - then Db.Pool.add_to_other_config ~__context ~self:pool ~key:Xapi_globs.rolling_upgrade_in_progress ~value:"true" - else begin - Db.Pool.remove_from_other_config ~__context ~self:pool ~key:Xapi_globs.rolling_upgrade_in_progress; - List.iter (fun vm -> Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm) (Db.VM.get_all ~__context) - end); - (* Call out to an external script to allow external actions to be performed *) - let rolling_upgrade_script_hook = !Xapi_globs.rolling_upgrade_script_hook in - if (try Unix.access rolling_upgrade_script_hook [ Unix.X_OK ]; true with _ -> false) then begin - let args = if actually_in_progress then [ "start" ] else [ "stop" ] in - debug "Executing rolling_upgrade script: %s %s" - rolling_upgrade_script_hook (String.concat " " args); - ignore(Forkhelpers.execute_command_get_output rolling_upgrade_script_hook args) - end; - (* Call in to internal xapi upgrade code *) - if actually_in_progress - then Xapi_upgrade.start () - else Xapi_upgrade.stop () - end - with exn -> - warn "Ignoring error in detect_rolling_upgrade: %s" (ExnHelper.string_of_exn exn) + try + (* If my platform version is different to any host (including myself) then we're in a rolling upgrade mode *) + (* NB: it is critical this code runs once in the master of a pool of one before the dbsync, since this + is the only time at which the master's Version will be out of sync with its database record *) + let actually_in_progress = Helpers.pool_has_different_host_platform_versions ~__context in + (* Check the current state of the Pool as indicated by the Pool.other_config:rolling_upgrade_in_progress *) + let pools = Db.Pool.get_all ~__context in + match pools with + | [] -> + debug "Ignoring absence of pool record in detect_rolling_upgrade: this is expected on first boot" + | pool :: _ 
-> + let pool_says_in_progress = + List.mem_assoc Xapi_globs.rolling_upgrade_in_progress (Db.Pool.get_other_config ~__context ~self:pool) in + (* Resynchronise *) + if actually_in_progress <> pool_says_in_progress then begin + let platform_versions = List.map (fun host -> Helpers.version_string_of ~__context (Helpers.LocalObject host)) (Db.Host.get_all ~__context) in + debug "xapi platform version = %s; host platform versions = [ %s ]" + (Version.platform_version ()) (String.concat "; " platform_versions); + + warn "Pool thinks rolling upgrade%s in progress but Host version numbers indicate otherwise; correcting" + (if pool_says_in_progress then "" else " not"); + (if actually_in_progress + then Db.Pool.add_to_other_config ~__context ~self:pool ~key:Xapi_globs.rolling_upgrade_in_progress ~value:"true" + else begin + Db.Pool.remove_from_other_config ~__context ~self:pool ~key:Xapi_globs.rolling_upgrade_in_progress; + List.iter (fun vm -> Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm) (Db.VM.get_all ~__context) + end); + (* Call out to an external script to allow external actions to be performed *) + let rolling_upgrade_script_hook = !Xapi_globs.rolling_upgrade_script_hook in + if (try Unix.access rolling_upgrade_script_hook [ Unix.X_OK ]; true with _ -> false) then begin + let args = if actually_in_progress then [ "start" ] else [ "stop" ] in + debug "Executing rolling_upgrade script: %s %s" + rolling_upgrade_script_hook (String.concat " " args); + ignore(Forkhelpers.execute_command_get_output rolling_upgrade_script_hook args) + end; + (* Call in to internal xapi upgrade code *) + if actually_in_progress + then Xapi_upgrade.start () + else Xapi_upgrade.stop () + end + with exn -> + warn "Ignoring error in detect_rolling_upgrade: %s" (ExnHelper.string_of_exn exn) (* A host has asked to tickle its heartbeat to keep it alive (if we're using that mechanism for host liveness). 
*) let tickle_heartbeat ~__context host stuff = - (* debug "Tickling heartbeat for host: %s stuff = [ %s ]" (Ref.string_of host) (String.concat ";" (List.map (fun (a, b) -> a ^ "=" ^ b) stuff)); *) - let use_host_heartbeat_for_liveness = - Mutex.execute use_host_heartbeat_for_liveness_m - (fun () -> !use_host_heartbeat_for_liveness) in - - Mutex.execute host_table_m - (fun () -> - (* When a host is going down it will send a negative heartbeat *) - if List.mem_assoc _shutting_down stuff then begin - Hashtbl.remove host_skew_table host; - let reason = Xapi_hooks.reason__clean_shutdown in - if use_host_heartbeat_for_liveness - then Xapi_host_helpers.mark_host_as_dead ~__context ~host ~reason - end else begin - let now = Unix.gettimeofday () in - Hashtbl.replace host_heartbeat_table host now; - (* compute the clock skew for later analysis *) - if List.mem_assoc _time stuff then begin - try - let slave = float_of_string (List.assoc _time stuff) in - let skew = abs_float (now -. slave) in - Hashtbl.replace host_skew_table host skew - with _ -> () - end - end - ); - [] + (* debug "Tickling heartbeat for host: %s stuff = [ %s ]" (Ref.string_of host) (String.concat ";" (List.map (fun (a, b) -> a ^ "=" ^ b) stuff)); *) + let use_host_heartbeat_for_liveness = + Mutex.execute use_host_heartbeat_for_liveness_m + (fun () -> !use_host_heartbeat_for_liveness) in + + Mutex.execute host_table_m + (fun () -> + (* When a host is going down it will send a negative heartbeat *) + if List.mem_assoc _shutting_down stuff then begin + Hashtbl.remove host_skew_table host; + let reason = Xapi_hooks.reason__clean_shutdown in + if use_host_heartbeat_for_liveness + then Xapi_host_helpers.mark_host_as_dead ~__context ~host ~reason + end else begin + let now = Unix.gettimeofday () in + Hashtbl.replace host_heartbeat_table host now; + (* compute the clock skew for later analysis *) + if List.mem_assoc _time stuff then begin + try + let slave = float_of_string (List.assoc _time stuff) in + let skew 
= abs_float (now -. slave) in + Hashtbl.replace host_skew_table host skew + with _ -> () + end + end + ); + [] let gc_messages ~__context = Xapi_message.gc ~__context let single_pass () = - Server_helpers.exec_with_new_task "DB GC" - (fun __context -> - Db_lock.with_lock - (fun () -> - let time_one (name, f) = - Stats.time_this (Printf.sprintf "Db_gc: %s" name) - (fun () -> f ~__context) - in - (* do VDIs first because this will *) - (* cause some VBDs to be affected *) - List.iter time_one [ - "VDIs", gc_VDIs; - "PIFs", gc_PIFs; - "VBDs", gc_VBDs; - "crashdumps", gc_crashdumps; - "VIFs", gc_VIFs; - "PBDs", gc_PBDs; - "VGPUs", gc_VGPUs; - "PGPUs", gc_PGPUs; - "VGPU_types", gc_VGPU_types; - "Host patches", gc_Host_patches; - "Host CPUs", gc_host_cpus; - "Host metrics", gc_host_metrics; - "Tasks", timeout_tasks; - "Sessions", timeout_sessions; - "Messages", gc_messages; - "Consoles", gc_consoles; - (* timeout_alerts; *) - (* CA-29253: wake up all blocked clients *) - "Heartbeat", Xapi_event.heartbeat; - ] - ); - Mutex.execute use_host_heartbeat_for_liveness_m - (fun () -> - if !use_host_heartbeat_for_liveness - then check_host_liveness ~__context); - (* Note that we don't hold the DB lock, because we *) - (* want to use the CLI from external script hooks: *) - detect_rolling_upgrade ~__context) + Server_helpers.exec_with_new_task "DB GC" + (fun __context -> + Db_lock.with_lock + (fun () -> + let time_one (name, f) = + Stats.time_this (Printf.sprintf "Db_gc: %s" name) + (fun () -> f ~__context) + in + (* do VDIs first because this will *) + (* cause some VBDs to be affected *) + List.iter time_one [ + "VDIs", gc_VDIs; + "PIFs", gc_PIFs; + "VBDs", gc_VBDs; + "crashdumps", gc_crashdumps; + "VIFs", gc_VIFs; + "PBDs", gc_PBDs; + "VGPUs", gc_VGPUs; + "PGPUs", gc_PGPUs; + "VGPU_types", gc_VGPU_types; + "Host patches", gc_Host_patches; + "Host CPUs", gc_host_cpus; + "Host metrics", gc_host_metrics; + "Tasks", timeout_tasks; + "Sessions", timeout_sessions; + "Messages", 
gc_messages; + "Consoles", gc_consoles; + (* timeout_alerts; *) + (* CA-29253: wake up all blocked clients *) + "Heartbeat", Xapi_event.heartbeat; + ] + ); + Mutex.execute use_host_heartbeat_for_liveness_m + (fun () -> + if !use_host_heartbeat_for_liveness + then check_host_liveness ~__context); + (* Note that we don't hold the DB lock, because we *) + (* want to use the CLI from external script hooks: *) + detect_rolling_upgrade ~__context) let start_db_gc_thread() = Thread.create (fun ()-> - Debug.with_thread_named "db_gc" - (fun () -> - while (true) do - try - Thread.delay db_GC_TIMER; - single_pass () - with e -> debug "Exception in DB GC thread: %s" (ExnHelper.string_of_exn e) - done - ) () + Debug.with_thread_named "db_gc" + (fun () -> + while (true) do + try + Thread.delay db_GC_TIMER; + single_pass () + with e -> debug "Exception in DB GC thread: %s" (ExnHelper.string_of_exn e) + done + ) () ) () let send_one_heartbeat ~__context ?(shutting_down=false) rpc session_id = - let localhost = Helpers.get_localhost ~__context in - let time = Unix.gettimeofday () +. (if Xapi_fist.insert_clock_skew () then Xapi_globs.max_clock_skew *. 2. else 0.) in - let stuff = [ - _time, string_of_float time - ] @ (if shutting_down then [ _shutting_down, "true" ] else []) - in - let (_: (string*string) list) = Client.Client.Host.tickle_heartbeat rpc session_id localhost stuff in - () - (* debug "Master responded with [ %s ]" (String.concat ";" (List.map (fun (a, b) -> a ^ "=" ^ b) response)); *) + let localhost = Helpers.get_localhost ~__context in + let time = Unix.gettimeofday () +. (if Xapi_fist.insert_clock_skew () then Xapi_globs.max_clock_skew *. 2. else 0.) 
in + let stuff = [ + _time, string_of_float time + ] @ (if shutting_down then [ _shutting_down, "true" ] else []) + in + let (_: (string*string) list) = Client.Client.Host.tickle_heartbeat rpc session_id localhost stuff in + () +(* debug "Master responded with [ %s ]" (String.concat ";" (List.map (fun (a, b) -> a ^ "=" ^ b) response)); *) let start_heartbeat_thread() = Debug.with_thread_named "heartbeat" (fun () -> - Server_helpers.exec_with_new_task "Heartbeat" (fun __context -> - let localhost = Helpers.get_localhost __context in - let master = Helpers.get_master ~__context in - let address = Db.Host.get_address ~__context ~self:master in - - if localhost=master then () else begin - - while (true) do - try - Helpers.call_emergency_mode_functions address - (fun rpc session_id -> - while(true) do - try - send_one_heartbeat ~__context rpc session_id; - Thread.delay !Xapi_globs.host_heartbeat_interval - with - | (Api_errors.Server_error (x,y)) as e -> - if x=Api_errors.session_invalid - then raise e - else debug "Caught exception in heartbeat thread: %s" (ExnHelper.string_of_exn e); - | e -> - debug "Caught exception in heartbeat thread: %s" (ExnHelper.string_of_exn e); - done) - with - | Api_errors.Server_error(code, params) when code = Api_errors.session_authentication_failed -> - debug "Master did not recognise our pool secret: we must be pointing at the wrong master. 
Restarting."; - exit Xapi_globs.restart_return_code - | e -> - debug "Caught %s - logging in again" (ExnHelper.string_of_exn e); - Thread.delay !Xapi_globs.host_heartbeat_interval; - done - end) - ) () + Server_helpers.exec_with_new_task "Heartbeat" (fun __context -> + let localhost = Helpers.get_localhost __context in + let master = Helpers.get_master ~__context in + let address = Db.Host.get_address ~__context ~self:master in + + if localhost=master then () else begin + + while (true) do + try + Helpers.call_emergency_mode_functions address + (fun rpc session_id -> + while(true) do + try + send_one_heartbeat ~__context rpc session_id; + Thread.delay !Xapi_globs.host_heartbeat_interval + with + | (Api_errors.Server_error (x,y)) as e -> + if x=Api_errors.session_invalid + then raise e + else debug "Caught exception in heartbeat thread: %s" (ExnHelper.string_of_exn e); + | e -> + debug "Caught exception in heartbeat thread: %s" (ExnHelper.string_of_exn e); + done) + with + | Api_errors.Server_error(code, params) when code = Api_errors.session_authentication_failed -> + debug "Master did not recognise our pool secret: we must be pointing at the wrong master. Restarting."; + exit Xapi_globs.restart_return_code + | e -> + debug "Caught %s - logging in again" (ExnHelper.string_of_exn e); + Thread.delay !Xapi_globs.host_heartbeat_interval; + done + end) + ) () diff --git a/ocaml/xapi/dbsync.ml b/ocaml/xapi/dbsync.ml index 13b5bc936d4..721a9e8ef9b 100644 --- a/ocaml/xapi/dbsync.ml +++ b/ocaml/xapi/dbsync.ml @@ -13,8 +13,8 @@ *) (** * @group Main Loop and Start-up - *) - +*) + open Printf module D=Debug.Make(struct let name="dbsync" end) @@ -23,7 +23,7 @@ open D (* Update the database to reflect current state. Called for both start of day and after an agent restart. 
*) -let resync_dom0_config_files() = +let resync_dom0_config_files() = try debug "resyncing dom0 config files if necessary"; Config_file_sync.fetch_config_files_on_slave_startup () @@ -33,40 +33,40 @@ let resync_dom0_config_files() = in Miami resulted in these not being created by default. We recreate them here for compatability. Note that from MidnightRide onwards the metrics will always exist and we can delete this code. *) let create_host_metrics ~__context = - List.iter + List.iter (fun self -> let m = Db.Host.get_metrics ~__context ~self in if not(Db.is_valid_ref __context m) then begin - debug "Creating missing Host_metrics object for Host: %s" (Db.Host.get_uuid ~__context ~self); - let r = Ref.make () in - Db.Host_metrics.create ~__context ~ref:r - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~live:false - ~memory_total:0L ~memory_free:0L ~last_updated:Stdext.Date.never ~other_config:[]; - Db.Host.set_metrics ~__context ~self ~value:r + debug "Creating missing Host_metrics object for Host: %s" (Db.Host.get_uuid ~__context ~self); + let r = Ref.make () in + Db.Host_metrics.create ~__context ~ref:r + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~live:false + ~memory_total:0L ~memory_free:0L ~last_updated:Stdext.Date.never ~other_config:[]; + Db.Host.set_metrics ~__context ~self ~value:r end) (Db.Host.get_all ~__context) let update_env () = Server_helpers.exec_with_new_task "dbsync (update_env)" ~task_in_database:true (fun __context -> - let other_config = - match Db.Pool.get_all ~__context with - | [ pool ] -> - Db.Pool.get_other_config ~__context ~self:pool - | [] -> - (* Happens before the pool object has been created *) - [] - | _ -> - error "Multiple pool objects detected -- this should never happen"; - [] in + let other_config = + match Db.Pool.get_all ~__context with + | [ pool ] -> + Db.Pool.get_other_config ~__context ~self:pool + | [] -> + (* Happens before the pool object has been created *) + [] + | _ -> + error "Multiple pool objects detected -- this 
should never happen"; + [] in if Pool_role.is_master () then create_host_metrics ~__context; Dbsync_slave.update_env __context other_config; if Pool_role.is_master () then Dbsync_master.update_env __context; (* we sync dom0 config files on slaves; however, we don't want - to do this in dbsync_slave since we want the master to have - been set on the pool record before we run it [otherwise we - try and sync config files from the old master if someone's - done a pool.designate_new_master!] *) + to do this in dbsync_slave since we want the master to have + been set on the pool record before we run it [otherwise we + try and sync config files from the old master if someone's + done a pool.designate_new_master!] *) if not (Pool_role.is_master ()) then resync_dom0_config_files(); ) diff --git a/ocaml/xapi/dbsync_master.ml b/ocaml/xapi/dbsync_master.ml index 3d774632ddd..c78ddee007a 100644 --- a/ocaml/xapi/dbsync_master.ml +++ b/ocaml/xapi/dbsync_master.ml @@ -13,8 +13,8 @@ *) (** * @group Main Loop and Start-up - *) - +*) + module D=Debug.Make(struct let name="dbsync" end) open D @@ -25,81 +25,81 @@ open Client (* create pool record (if master and not one already there) *) let create_pool_record ~__context = - let pools = Db.Pool.get_all ~__context in - if pools=[] then - Db.Pool.create ~__context ~ref:(Ref.make()) ~uuid:(Uuid.to_string (Uuid.make_uuid())) - ~name_label:"" ~name_description:"" ~master:(Helpers.get_localhost ~__context) - ~default_SR:Ref.null ~suspend_image_SR:Ref.null ~crash_dump_SR:Ref.null - ~ha_enabled:false ~ha_configuration:[] ~ha_statefiles:[] - ~ha_host_failures_to_tolerate:0L ~ha_plan_exists_for:0L ~ha_allow_overcommit:false ~ha_overcommitted:false ~blobs:[] ~tags:[] ~gui_config:[] ~health_check_config:[] - ~wlb_url:"" ~wlb_username:"" ~wlb_password:Ref.null ~wlb_enabled:false ~wlb_verify_cert:false - ~redo_log_enabled:false ~redo_log_vdi:Ref.null ~vswitch_controller:"" ~restrictions:[] - ~current_operations:[] ~allowed_operations:[] - 
~other_config:[ - Xapi_globs.memory_ratio_hvm; - Xapi_globs.memory_ratio_pv; - ] - ~ha_cluster_stack:"xhad" - ~guest_agent_config:[] - ~cpu_info:[] ~policy_no_vendor_device:false ~live_patching_disabled:false + let pools = Db.Pool.get_all ~__context in + if pools=[] then + Db.Pool.create ~__context ~ref:(Ref.make()) ~uuid:(Uuid.to_string (Uuid.make_uuid())) + ~name_label:"" ~name_description:"" ~master:(Helpers.get_localhost ~__context) + ~default_SR:Ref.null ~suspend_image_SR:Ref.null ~crash_dump_SR:Ref.null + ~ha_enabled:false ~ha_configuration:[] ~ha_statefiles:[] + ~ha_host_failures_to_tolerate:0L ~ha_plan_exists_for:0L ~ha_allow_overcommit:false ~ha_overcommitted:false ~blobs:[] ~tags:[] ~gui_config:[] ~health_check_config:[] + ~wlb_url:"" ~wlb_username:"" ~wlb_password:Ref.null ~wlb_enabled:false ~wlb_verify_cert:false + ~redo_log_enabled:false ~redo_log_vdi:Ref.null ~vswitch_controller:"" ~restrictions:[] + ~current_operations:[] ~allowed_operations:[] + ~other_config:[ + Xapi_globs.memory_ratio_hvm; + Xapi_globs.memory_ratio_pv; + ] + ~ha_cluster_stack:"xhad" + ~guest_agent_config:[] + ~cpu_info:[] ~policy_no_vendor_device:false ~live_patching_disabled:false let set_master_ip ~__context = let ip = match (Helpers.get_management_ip_addr ~__context) with - Some ip -> ip - | None -> - (error "Cannot read master IP address. Check the control interface has an IP address"; "") in + Some ip -> ip + | None -> + (error "Cannot read master IP address. Check the control interface has an IP address"; "") in let host = Helpers.get_localhost ~__context in - Db.Host.set_address ~__context ~self:host ~value:ip + Db.Host.set_address ~__context ~self:host ~value:ip (* NB the master doesn't use the heartbeat mechanism to track its own liveness so we must make sure that live starts out as true because it will never be updated. 
*) -let set_master_live ~__context = +let set_master_live ~__context = let host = Helpers.get_localhost ~__context in let metrics = Db.Host.get_metrics ~__context ~self:host in debug "Setting Host_metrics.live to true for localhost"; Db.Host_metrics.set_live ~__context ~self:metrics ~value:true let set_master_pool_reference ~__context = - let pool = Helpers.get_pool ~__context in - Db.Pool.set_master ~__context ~self:pool ~value:(Helpers.get_localhost ~__context) + let pool = Helpers.get_pool ~__context in + Db.Pool.set_master ~__context ~self:pool ~value:(Helpers.get_localhost ~__context) let refresh_console_urls ~__context = - List.iter (fun console -> - Helpers.log_exn_continue (Printf.sprintf "Updating console: %s" (Ref.string_of console)) (fun () -> - let vm = Db.Console.get_VM ~__context ~self:console in - let host = Db.VM.get_resident_on ~__context ~self:vm in - let url_should_be = match Db.Host.get_address ~__context ~self:host with - | "" -> "" - | address -> - Printf.sprintf "https://%s%s?ref=%s" address Constants.console_uri (Ref.string_of console) in - Db.Console.set_location ~__context ~self:console ~value:url_should_be - ) () - ) (Db.Console.get_all ~__context) + List.iter (fun console -> + Helpers.log_exn_continue (Printf.sprintf "Updating console: %s" (Ref.string_of console)) (fun () -> + let vm = Db.Console.get_VM ~__context ~self:console in + let host = Db.VM.get_resident_on ~__context ~self:vm in + let url_should_be = match Db.Host.get_address ~__context ~self:host with + | "" -> "" + | address -> + Printf.sprintf "https://%s%s?ref=%s" address Constants.console_uri (Ref.string_of console) in + Db.Console.set_location ~__context ~self:console ~value:url_should_be + ) () + ) (Db.Console.get_all ~__context) (** CA-15449: after a pool restore database VMs which were running on slaves now have dangling resident_on fields. If these are control domains we destroy them, otherwise we reset them to Halted. 
*) let reset_vms_running_on_missing_hosts ~__context = List.iter (fun vm -> - let vm_r = Db.VM.get_record ~__context ~self:vm in - let valid_resident_on = Db.is_valid_ref __context vm_r.API.vM_resident_on in - if (not valid_resident_on) && (vm_r.API.vM_power_state = `Running) then begin - let msg = Printf.sprintf "Resetting VM uuid '%s' to Halted because VM.resident_on refers to a Host which is no longer in the Pool" vm_r.API.vM_uuid in - info "%s" msg; - Helpers.log_exn_continue msg (fun () -> Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted) () - end) (Db.VM.get_all ~__context) + let vm_r = Db.VM.get_record ~__context ~self:vm in + let valid_resident_on = Db.is_valid_ref __context vm_r.API.vM_resident_on in + if (not valid_resident_on) && (vm_r.API.vM_power_state = `Running) then begin + let msg = Printf.sprintf "Resetting VM uuid '%s' to Halted because VM.resident_on refers to a Host which is no longer in the Pool" vm_r.API.vM_uuid in + info "%s" msg; + Helpers.log_exn_continue msg (fun () -> Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted) () + end) (Db.VM.get_all ~__context) (** Release 'locks' on VMs in the Halted state: ie {VBD,VIF}.{currently_attached,reserved} Note that the {allowed,current}_operations fields are non-persistent so blanked on *master* startup (not slave) No allowed_operations are recomputed here: this work is performed later in a non-critical thread. 
- *) +*) let release_locks ~__context = (* non-running VMs should have their VBD.current_operations cleared: *) let vms = List.filter (fun self -> Db.VM.get_power_state ~__context ~self = `Halted) (Db.VM.get_all ~__context) in - List.iter (fun vm -> - List.iter (fun self -> - Xapi_vbd_helpers.clear_current_operations ~__context ~self) - (Db.VM.get_VBDs ~__context ~self:vm)) vms; + List.iter (fun vm -> + List.iter (fun self -> + Xapi_vbd_helpers.clear_current_operations ~__context ~self) + (Db.VM.get_VBDs ~__context ~self:vm)) vms; (* Resets the current operations of all Halted VMs *) List.iter (fun self -> Xapi_vm_lifecycle.force_state_reset ~__context ~self ~value:`Halted) vms; (* All VMs should have their scheduled_to_be_resident_on field cleared *) @@ -107,126 +107,126 @@ let release_locks ~__context = (Db.VM.get_all ~__context) let create_tools_sr __context name_label name_description sr_introduce maybe_create_pbd = - let create_magic_sr name_label name_description other_config = - (* Create a new SR and PBD record *) - (* N.b. dbsync_slave is called _before_ this, so we can't rely on the PBD creating code in there - to make the PBD for the shared tools SR *) - let sr = - sr_introduce - ~uuid:(Uuid.to_string (Uuid.make_uuid())) - ~name_label ~name_description - ~_type:"iso" ~content_type:"iso" ~shared:true ~sm_config:[] - in - Db.SR.set_other_config ~__context ~self:sr ~value:other_config; - Db.SR.set_is_tools_sr ~__context ~self:sr ~value:true; - (* Master has created this shared SR, lets make PBDs for all of the slaves too. Nb. 
device-config is same for all hosts *) - let device_config = [ - "path", !Xapi_globs.tools_sr_dir; (* for ffs *) - "location", !Xapi_globs.tools_sr_dir; (* for legacy iso *) - "legacy_mode", "true" - ] in - let hosts = Db.Host.get_all ~__context in - List.iter (fun host -> ignore (maybe_create_pbd sr device_config host)) hosts; - sr - in - let other_config = [ - Xapi_globs.xensource_internal, "true"; - Xapi_globs.tools_sr_tag, "true"; - Xapi_globs.i18n_key, "xenserver-tools"; - (Xapi_globs.i18n_original_value_prefix ^ "name_label"), name_label; - (Xapi_globs.i18n_original_value_prefix ^ "name_description"), name_description - ] in - let sr = - let tools_srs = List.filter (fun self -> Db.SR.get_is_tools_sr ~__context ~self) (Db.SR.get_all ~__context) in - match tools_srs with - | sr :: others -> - (* Let there be only one Tools SR *) - List.iter (fun self -> Db.SR.destroy ~__context ~self) others; - sr - | [] -> - (* First check if there is an SR with the old tags on it, which needs upgrading (set is_tools_sr). *) - (* We cannot do this in xapi_db_upgrade, because that runs later. 
*) - let old_srs = - List.filter (fun self -> - let other_config = Db.SR.get_other_config ~__context ~self in - (List.mem_assoc Xapi_globs.tools_sr_tag other_config) || - (List.mem_assoc Xapi_globs.xensource_internal other_config) - ) (Db.SR.get_all ~__context) - in - match old_srs with - | sr :: _ -> - Db.SR.set_is_tools_sr ~__context ~self:sr ~value:true; sr - | [] -> - create_magic_sr name_label name_description other_config - in - (* Ensure fields are up-to-date *) - Db.SR.set_name_label ~__context ~self:sr ~value:name_label; - Db.SR.set_name_description ~__context ~self:sr ~value:name_description; - let other_config = - (* Keep any existing keys/value pair besides the required ones *) - let oc = Db.SR.get_other_config ~__context ~self:sr in - let keys = List.map fst other_config in - let extra = List.filter (fun (k, _) -> not (List.mem k keys)) oc in - extra @ other_config - in - Db.SR.set_other_config ~__context ~self:sr ~value:other_config + let create_magic_sr name_label name_description other_config = + (* Create a new SR and PBD record *) + (* N.b. dbsync_slave is called _before_ this, so we can't rely on the PBD creating code in there + to make the PBD for the shared tools SR *) + let sr = + sr_introduce + ~uuid:(Uuid.to_string (Uuid.make_uuid())) + ~name_label ~name_description + ~_type:"iso" ~content_type:"iso" ~shared:true ~sm_config:[] + in + Db.SR.set_other_config ~__context ~self:sr ~value:other_config; + Db.SR.set_is_tools_sr ~__context ~self:sr ~value:true; + (* Master has created this shared SR, lets make PBDs for all of the slaves too. Nb. 
device-config is same for all hosts *) + let device_config = [ + "path", !Xapi_globs.tools_sr_dir; (* for ffs *) + "location", !Xapi_globs.tools_sr_dir; (* for legacy iso *) + "legacy_mode", "true" + ] in + let hosts = Db.Host.get_all ~__context in + List.iter (fun host -> ignore (maybe_create_pbd sr device_config host)) hosts; + sr + in + let other_config = [ + Xapi_globs.xensource_internal, "true"; + Xapi_globs.tools_sr_tag, "true"; + Xapi_globs.i18n_key, "xenserver-tools"; + (Xapi_globs.i18n_original_value_prefix ^ "name_label"), name_label; + (Xapi_globs.i18n_original_value_prefix ^ "name_description"), name_description + ] in + let sr = + let tools_srs = List.filter (fun self -> Db.SR.get_is_tools_sr ~__context ~self) (Db.SR.get_all ~__context) in + match tools_srs with + | sr :: others -> + (* Let there be only one Tools SR *) + List.iter (fun self -> Db.SR.destroy ~__context ~self) others; + sr + | [] -> + (* First check if there is an SR with the old tags on it, which needs upgrading (set is_tools_sr). *) + (* We cannot do this in xapi_db_upgrade, because that runs later. 
*) + let old_srs = + List.filter (fun self -> + let other_config = Db.SR.get_other_config ~__context ~self in + (List.mem_assoc Xapi_globs.tools_sr_tag other_config) || + (List.mem_assoc Xapi_globs.xensource_internal other_config) + ) (Db.SR.get_all ~__context) + in + match old_srs with + | sr :: _ -> + Db.SR.set_is_tools_sr ~__context ~self:sr ~value:true; sr + | [] -> + create_magic_sr name_label name_description other_config + in + (* Ensure fields are up-to-date *) + Db.SR.set_name_label ~__context ~self:sr ~value:name_label; + Db.SR.set_name_description ~__context ~self:sr ~value:name_description; + let other_config = + (* Keep any existing keys/value pair besides the required ones *) + let oc = Db.SR.get_other_config ~__context ~self:sr in + let keys = List.map fst other_config in + let extra = List.filter (fun (k, _) -> not (List.mem k keys)) oc in + extra @ other_config + in + Db.SR.set_other_config ~__context ~self:sr ~value:other_config let create_tools_sr_noexn __context = - let name_label = Xapi_globs.tools_sr_name () in - let name_description = Xapi_globs.tools_sr_description () in - Helpers.call_api_functions ~__context (fun rpc session_id -> - let sr_introduce = Client.SR.introduce ~rpc ~session_id in - let maybe_create_pbd = Create_storage.maybe_create_pbd rpc session_id in - Helpers.log_exn_continue "creating tools SR" (fun () -> - create_tools_sr __context name_label name_description sr_introduce maybe_create_pbd - ) () - ) + let name_label = Xapi_globs.tools_sr_name () in + let name_description = Xapi_globs.tools_sr_description () in + Helpers.call_api_functions ~__context (fun rpc session_id -> + let sr_introduce = Client.SR.introduce ~rpc ~session_id in + let maybe_create_pbd = Create_storage.maybe_create_pbd rpc session_id in + Helpers.log_exn_continue "creating tools SR" (fun () -> + create_tools_sr __context name_label name_description sr_introduce maybe_create_pbd + ) () + ) let ensure_vm_metrics_records_exist __context = - List.iter (fun 
vm -> - let m = Db.VM.get_metrics ~__context ~self:vm in - if not(Db.is_valid_ref __context m) then begin - info "Regenerating missing VM_metrics record for VM %s" (Ref.string_of vm); - let m = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.VM_metrics.create - ~__context ~ref:m - ~uuid - ~vCPUs_number:0L - ~vCPUs_utilisation:[] - ~memory_actual:0L - ~vCPUs_CPU:[] - ~vCPUs_params:[] - ~vCPUs_flags:[] - ~start_time:Stdext.Date.never - ~install_time:Stdext.Date.never - ~state: [] - ~last_updated:(Stdext.Date.of_float 0.) - ~other_config:[] - ~hvm:false - ~nested_virt:false - ~nomigrate:false - ; - Db.VM.set_metrics ~__context ~self:vm ~value:m - end) (Db.VM.get_all __context) + List.iter (fun vm -> + let m = Db.VM.get_metrics ~__context ~self:vm in + if not(Db.is_valid_ref __context m) then begin + info "Regenerating missing VM_metrics record for VM %s" (Ref.string_of vm); + let m = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.VM_metrics.create + ~__context ~ref:m + ~uuid + ~vCPUs_number:0L + ~vCPUs_utilisation:[] + ~memory_actual:0L + ~vCPUs_CPU:[] + ~vCPUs_params:[] + ~vCPUs_flags:[] + ~start_time:Stdext.Date.never + ~install_time:Stdext.Date.never + ~state: [] + ~last_updated:(Stdext.Date.of_float 0.) + ~other_config:[] + ~hvm:false + ~nested_virt:false + ~nomigrate:false + ; + Db.VM.set_metrics ~__context ~self:vm ~value:m + end) (Db.VM.get_all __context) let ensure_vm_metrics_records_exist_noexn __context = Helpers.log_exn_continue "ensuring VM_metrics flags exist" ensure_vm_metrics_records_exist __context let destroy_invalid_pool_patches ~__context = - let is_valid_pool_patch patch = - (* If patch has been applied to at least one host, then it is valid. *) - if (Db.Pool_patch.get_host_patches ~__context ~self:patch) <> [] then true - (* If patch hasn't been applied to any host, but we can still apply it, then it is valid. *) - (* File needs to exist in the master's filesystem for us to be able to apply it. 
*) - else if (Sys.file_exists (Db.Pool_patch.get_filename ~__context ~self:patch)) then true - else false - in - let pool_patches = Db.Pool_patch.get_all ~__context in - List.iter - (fun patch -> - if not (is_valid_pool_patch patch) - then Db.Pool_patch.destroy ~__context ~self:patch) - pool_patches + let is_valid_pool_patch patch = + (* If patch has been applied to at least one host, then it is valid. *) + if (Db.Pool_patch.get_host_patches ~__context ~self:patch) <> [] then true + (* If patch hasn't been applied to any host, but we can still apply it, then it is valid. *) + (* File needs to exist in the master's filesystem for us to be able to apply it. *) + else if (Sys.file_exists (Db.Pool_patch.get_filename ~__context ~self:patch)) then true + else false + in + let pool_patches = Db.Pool_patch.get_all ~__context in + List.iter + (fun patch -> + if not (is_valid_pool_patch patch) + then Db.Pool_patch.destroy ~__context ~self:patch) + pool_patches (* Update the database to reflect current state. Called for both start of day and after an agent restart. *) @@ -255,5 +255,5 @@ let update_env __context = create_tools_sr_noexn __context; - ensure_vm_metrics_records_exist_noexn __context; - destroy_invalid_pool_patches ~__context + ensure_vm_metrics_records_exist_noexn __context; + destroy_invalid_pool_patches ~__context diff --git a/ocaml/xapi/dbsync_slave.ml b/ocaml/xapi/dbsync_slave.ml index 6fe0ec15d8c..40bea5c3ea7 100644 --- a/ocaml/xapi/dbsync_slave.ml +++ b/ocaml/xapi/dbsync_slave.ml @@ -13,7 +13,7 @@ *) (** Code to bring the database up-to-date when a host starts up. * @group Main Loop and Start-up - *) +*) module Rrdd = Rrd_client.Client @@ -38,8 +38,8 @@ let ( // ) = Int64.div let get_my_ip_addr ~__context = match (Helpers.get_management_ip_addr ~__context) with - Some ip -> ip - | None -> (error "Cannot read IP address. Check the control interface has an IP address"; "") + Some ip -> ip + | None -> (error "Cannot read IP address. 
Check the control interface has an IP address"; "") let create_localhost ~__context info = @@ -47,14 +47,14 @@ let create_localhost ~__context info = let me = try Some (Db.Host.get_by_uuid ~__context ~uuid:info.uuid) with _ -> None in (* me = None on firstboot only *) if me = None - then - let (_: API.ref_host) = - Xapi_host.create ~__context ~uuid:info.uuid ~name_label:info.hostname ~name_description:"" - ~hostname:info.hostname ~address:ip - ~external_auth_type:"" ~external_auth_service_name:"" ~external_auth_configuration:[] - ~license_params:[] ~edition:"" ~license_server:["address", "localhost"; "port", "27000"] - ~local_cache_sr:Ref.null ~chipset_info:[] ~ssl_legacy:info.ssl_legacy - in () + then + let (_: API.ref_host) = + Xapi_host.create ~__context ~uuid:info.uuid ~name_label:info.hostname ~name_description:"" + ~hostname:info.hostname ~address:ip + ~external_auth_type:"" ~external_auth_service_name:"" ~external_auth_configuration:[] + ~license_params:[] ~edition:"" ~license_server:["address", "localhost"; "port", "27000"] + ~local_cache_sr:Ref.null ~chipset_info:[] ~ssl_legacy:info.ssl_legacy + in () (* TODO cat /proc/stat for btime ? *) let get_start_time () = @@ -67,12 +67,12 @@ let get_start_time () = let uptime = List.hd uptime in let uptime = float_of_string uptime in let boot_time = Date.of_float (now -. uptime) in - debug " system booted at %s" (Date.to_string boot_time); - boot_time + debug " system booted at %s" (Date.to_string boot_time); + boot_time with - e -> - debug "Calculating boot time failed with '%s'" (ExnHelper.string_of_exn e); - Date.never + e -> + debug "Calculating boot time failed with '%s'" (ExnHelper.string_of_exn e); + Date.never (** Update the information in the Host structure *) (* not sufficient just to fill in this data on create time [Xen caps may change if VT enabled in BIOS etc.] 
*) @@ -82,104 +82,104 @@ let refresh_localhost_info ~__context info = (* Xapi_ha_flags.resync_host_armed_flag __context host; *) debug "Updating host software_version and patches_requiring_reboot"; - Create_misc.create_patches_requiring_reboot_info ~__context ~host; - Create_misc.create_software_version ~__context; - Db.Host.set_API_version_major ~__context ~self:host ~value:Xapi_globs.api_version_major; - Db.Host.set_API_version_minor ~__context ~self:host ~value:Xapi_globs.api_version_minor; - Db.Host.set_virtual_hardware_platform_versions ~__context ~self:host ~value:Xapi_globs.host_virtual_hardware_platform_versions; - Db.Host.set_hostname ~__context ~self:host ~value:info.hostname; - let caps = try + Create_misc.create_patches_requiring_reboot_info ~__context ~host; + Create_misc.create_software_version ~__context; + Db.Host.set_API_version_major ~__context ~self:host ~value:Xapi_globs.api_version_major; + Db.Host.set_API_version_minor ~__context ~self:host ~value:Xapi_globs.api_version_minor; + Db.Host.set_virtual_hardware_platform_versions ~__context ~self:host ~value:Xapi_globs.host_virtual_hardware_platform_versions; + Db.Host.set_hostname ~__context ~self:host ~value:info.hostname; + let caps = try String.split ' ' (Xenctrl.with_intf (fun xc -> Xenctrl.version_capabilities xc)) with _ -> warn "Unable to query hypervisor capabilities"; [] in - Db.Host.set_capabilities ~__context ~self:host ~value:caps; - Db.Host.set_address ~__context ~self:host ~value:(get_my_ip_addr ~__context); + Db.Host.set_capabilities ~__context ~self:host ~value:caps; + Db.Host.set_address ~__context ~self:host ~value:(get_my_ip_addr ~__context); - let boot_time_key = "boot_time" in - let boot_time_value = string_of_float (Date.to_float (get_start_time ())) in + let boot_time_key = "boot_time" in + let boot_time_value = string_of_float (Date.to_float (get_start_time ())) in - Db.Host.remove_from_other_config ~__context ~self:host ~key:boot_time_key; - Db.Host.add_to_other_config 
~__context ~self:host ~key:boot_time_key ~value:boot_time_value; + Db.Host.remove_from_other_config ~__context ~self:host ~key:boot_time_key; + Db.Host.add_to_other_config ~__context ~self:host ~key:boot_time_key ~value:boot_time_value; - let agent_start_key = "agent_start_time" in - let agent_start_time = string_of_float (Unix.time ()) in + let agent_start_key = "agent_start_time" in + let agent_start_time = string_of_float (Unix.time ()) in - Db.Host.remove_from_other_config ~__context ~self:host ~key:agent_start_key; - Db.Host.add_to_other_config ~__context ~self:host ~key:agent_start_key ~value:agent_start_time; + Db.Host.remove_from_other_config ~__context ~self:host ~key:agent_start_key; + Db.Host.add_to_other_config ~__context ~self:host ~key:agent_start_key ~value:agent_start_time; - (* Register whether we have local storage or not *) + (* Register whether we have local storage or not *) - if not (Helpers.local_storage_exists ()) then begin - Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage; - Db.Host.add_to_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage ~value:"true" - end else - Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage + if not (Helpers.local_storage_exists ()) then begin + Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage; + Db.Host.add_to_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage ~value:"true" + end else + Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.host_no_local_storage (*************** update database tools ******************) (** Record host memory properties in database *) let record_host_memory_properties ~__context = - let self = !Xapi_globs.localhost_ref in - let total_memory_bytes = - try - let xc = Xenctrl.interface_open () in - Xenctrl.interface_close xc; (* we're on xen *) - let dbg = Context.string_of_task __context in - 
let open Xapi_xenops_queue in - let module Client = (val make_client (default_xenopsd ()): XENOPS) in - let mib = Client.HOST.get_total_memory_mib dbg in - Int64.mul 1024L (Int64.mul 1024L mib) - with _ -> - warn "Failed to detect xen, querying /proc/meminfo"; - begin match Balloon.get_memtotal () with - | None -> 0L - | Some x -> Int64.(div x (mul 1024L 1024L)) - end in - - let metrics = Db.Host.get_metrics ~__context ~self in - Db.Host_metrics.set_memory_total ~__context ~self:metrics ~value:total_memory_bytes; - let boot_memory_file = Xapi_globs.initial_host_free_memory_file in - let boot_memory_string = - try - Some (Unixext.string_of_file boot_memory_file) - with e -> - warn "Could not read host free memory file. This may prevent \ - VMs from being started on this host. (%s)" (Printexc.to_string e); - None in - maybe - (fun boot_memory_string -> - let boot_memory_bytes = Int64.of_string boot_memory_string in - (* Host memory overhead comes from multiple sources: *) - (* 1. obvious overhead: (e.g. Xen, crash kernel). *) - (* appears as used memory. *) - (* 2. non-obvious overhead: (e.g. low memory emergency pool) *) - (* appears as free memory but can't be used in practice. 
*) - let obvious_overhead_memory_bytes = - total_memory_bytes -- boot_memory_bytes in - let nonobvious_overhead_memory_kib = - try - Memory_client.Client.get_host_reserved_memory "dbsync" - with e -> - error "Failed to contact ballooning service: \ - host memory overhead may be too small (%s)" - (Printexc.to_string e); - 0L - in - let nonobvious_overhead_memory_bytes = - Int64.mul 1024L nonobvious_overhead_memory_kib in - Db.Host.set_boot_free_mem ~__context ~self - ~value:boot_memory_bytes; - Db.Host.set_memory_overhead ~__context ~self ~value: - (obvious_overhead_memory_bytes ++ nonobvious_overhead_memory_bytes); - ) - boot_memory_string + let self = !Xapi_globs.localhost_ref in + let total_memory_bytes = + try + let xc = Xenctrl.interface_open () in + Xenctrl.interface_close xc; (* we're on xen *) + let dbg = Context.string_of_task __context in + let open Xapi_xenops_queue in + let module Client = (val make_client (default_xenopsd ()): XENOPS) in + let mib = Client.HOST.get_total_memory_mib dbg in + Int64.mul 1024L (Int64.mul 1024L mib) + with _ -> + warn "Failed to detect xen, querying /proc/meminfo"; + begin match Balloon.get_memtotal () with + | None -> 0L + | Some x -> Int64.(div x (mul 1024L 1024L)) + end in + + let metrics = Db.Host.get_metrics ~__context ~self in + Db.Host_metrics.set_memory_total ~__context ~self:metrics ~value:total_memory_bytes; + let boot_memory_file = Xapi_globs.initial_host_free_memory_file in + let boot_memory_string = + try + Some (Unixext.string_of_file boot_memory_file) + with e -> + warn "Could not read host free memory file. This may prevent \ + VMs from being started on this host. (%s)" (Printexc.to_string e); + None in + maybe + (fun boot_memory_string -> + let boot_memory_bytes = Int64.of_string boot_memory_string in + (* Host memory overhead comes from multiple sources: *) + (* 1. obvious overhead: (e.g. Xen, crash kernel). *) + (* appears as used memory. *) + (* 2. non-obvious overhead: (e.g. 
low memory emergency pool) *) + (* appears as free memory but can't be used in practice. *) + let obvious_overhead_memory_bytes = + total_memory_bytes -- boot_memory_bytes in + let nonobvious_overhead_memory_kib = + try + Memory_client.Client.get_host_reserved_memory "dbsync" + with e -> + error "Failed to contact ballooning service: \ + host memory overhead may be too small (%s)" + (Printexc.to_string e); + 0L + in + let nonobvious_overhead_memory_bytes = + Int64.mul 1024L nonobvious_overhead_memory_kib in + Db.Host.set_boot_free_mem ~__context ~self + ~value:boot_memory_bytes; + Db.Host.set_memory_overhead ~__context ~self ~value: + (obvious_overhead_memory_bytes ++ nonobvious_overhead_memory_bytes); + ) + boot_memory_string (* -- used this for testing uniqueness constraints executed on slave do not kill connection. Committing commented out vsn of this because it might be useful again.. -let test_uniqueness_doesnt_kill_us ~__context = - let duplicate_uuid = Uuid.to_string (Uuid.make_uuid()) in + let test_uniqueness_doesnt_kill_us ~__context = + let duplicate_uuid = Uuid.to_string (Uuid.make_uuid()) in Db.Network.create ~__context ~ref:(Ref.make()) ~uuid:duplicate_uuid ~current_operations:[] ~allowed_operations:[] ~name_label:"Test uniqueness constraint" @@ -196,32 +196,32 @@ let test_uniqueness_doesnt_kill_us ~__context = (** Make sure the PIF we're using as a management interface is marked as attached otherwise we might blow it away by accident *) (* CA-23803: - * As well as marking the management interface as attached, mark any other important + * As well as marking the management interface as attached, mark any other important * interface (defined by what is brought up before xapi starts) as attached too. * For example, this will prevent needless glitches in storage interfaces. 
- *) +*) let resynchronise_pif_params ~__context = - let localhost = Helpers.get_localhost ~__context in + let localhost = Helpers.get_localhost ~__context in - (* Determine all bridges that are currently up, and ask the master to sync the currently_attached - * fields on all my PIFs *) - Helpers.call_api_functions ~__context (fun rpc session_id -> - let dbg = Context.string_of_task __context in - let bridges = Net.Bridge.get_all dbg () in - Client.Host.sync_pif_currently_attached rpc session_id localhost bridges - ); + (* Determine all bridges that are currently up, and ask the master to sync the currently_attached + * fields on all my PIFs *) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let dbg = Context.string_of_task __context in + let bridges = Net.Bridge.get_all dbg () in + Client.Host.sync_pif_currently_attached rpc session_id localhost bridges + ); - (* sync management *) - Xapi_pif.update_management_flags ~__context ~host:localhost; + (* sync management *) + Xapi_pif.update_management_flags ~__context ~host:localhost; - (* sync MACs and MTUs *) - Xapi_pif.refresh_all ~__context ~host:localhost; + (* sync MACs and MTUs *) + Xapi_pif.refresh_all ~__context ~host:localhost; - (* Ensure that all DHCP PIFs have their IP address updated in the DB *) - Helpers.update_pif_addresses ~__context + (* Ensure that all DHCP PIFs have their IP address updated in the DB *) + Helpers.update_pif_addresses ~__context (** Update the database to reflect current state. Called for both start of day and after - an agent restart. *) + an agent restart. 
*) let update_env __context sync_keys = (* Helper function to allow us to switch off particular types of syncing *) @@ -229,13 +229,13 @@ let update_env __context sync_keys = let task_id = Context.get_task_id __context in Db.Task.remove_from_other_config ~__context ~self:task_id ~key:"sync_operation"; Db.Task.add_to_other_config ~__context ~self:task_id ~key:"sync_operation" ~value:key; - let skip_sync = + let skip_sync = try - List.assoc key sync_keys = Xapi_globs.sync_switch_off + List.assoc key sync_keys = Xapi_globs.sync_switch_off with _ -> false in let disabled_in_config_file = List.mem key !Xapi_globs.disable_dbsync_for in - begin + begin if (not skip_sync) && (not disabled_in_config_file) then (debug "Sync: %s" key; f ()) else debug "Skipping sync keyed: %s" key @@ -248,55 +248,55 @@ let update_env __context sync_keys = let info = Create_misc.read_localhost_info () in (* create localhost record if doesn't already exist *) - switched_sync Xapi_globs.sync_create_localhost (fun () -> - debug "creating localhost"; - create_localhost ~__context info; - ); + switched_sync Xapi_globs.sync_create_localhost (fun () -> + debug "creating localhost"; + create_localhost ~__context info; + ); (* record who we are in xapi_globs *) Xapi_globs.localhost_ref := Helpers.get_localhost ~__context; - switched_sync Xapi_globs.sync_set_cache_sr (fun () -> - try - let cache_sr = Db.Host.get_local_cache_sr ~__context ~self:(Helpers.get_localhost ~__context) in - let cache_sr_uuid = Db.SR.get_uuid ~__context ~self:cache_sr in - Db.SR.set_local_cache_enabled ~__context ~self:cache_sr ~value:true; - log_and_ignore_exn (fun () -> Rrdd.set_cache_sr ~sr_uuid:cache_sr_uuid) - with _ -> log_and_ignore_exn Rrdd.unset_cache_sr - ); - - switched_sync Xapi_globs.sync_load_rrd (fun () -> - (* Load the host rrd *) - Rrdd_proxy.Deprecated.load_rrd ~__context - ~uuid:(Helpers.get_localhost_uuid ()) - ); + switched_sync Xapi_globs.sync_set_cache_sr (fun () -> + try + let cache_sr = 
Db.Host.get_local_cache_sr ~__context ~self:(Helpers.get_localhost ~__context) in + let cache_sr_uuid = Db.SR.get_uuid ~__context ~self:cache_sr in + Db.SR.set_local_cache_enabled ~__context ~self:cache_sr ~value:true; + log_and_ignore_exn (fun () -> Rrdd.set_cache_sr ~sr_uuid:cache_sr_uuid) + with _ -> log_and_ignore_exn Rrdd.unset_cache_sr + ); + + switched_sync Xapi_globs.sync_load_rrd (fun () -> + (* Load the host rrd *) + Rrdd_proxy.Deprecated.load_rrd ~__context + ~uuid:(Helpers.get_localhost_uuid ()) + ); (* maybe record host memory properties in database *) switched_sync Xapi_globs.sync_record_host_memory_properties (fun () -> - record_host_memory_properties ~__context; - ); + record_host_memory_properties ~__context; + ); switched_sync Xapi_globs.sync_create_host_cpu (fun () -> - debug "creating cpu"; - Create_misc.create_host_cpu ~__context; - ); + debug "creating cpu"; + Create_misc.create_host_cpu ~__context; + ); let localhost = Helpers.get_localhost ~__context in switched_sync Xapi_globs.sync_create_domain_zero (fun () -> - debug "creating domain 0"; - Create_misc.ensure_domain_zero_records ~__context ~host:localhost info; - ); + debug "creating domain 0"; + Create_misc.ensure_domain_zero_records ~__context ~host:localhost info; + ); switched_sync Xapi_globs.sync_crashdump_resynchronise (fun () -> - debug "resynchronising host crashdumps"; - Xapi_host_crashdump.resynchronise ~__context ~host:localhost; - ); + debug "resynchronising host crashdumps"; + Xapi_host_crashdump.resynchronise ~__context ~host:localhost; + ); switched_sync Xapi_globs.sync_pbds (fun () -> - debug "resynchronising host PBDs"; - Storage_access.resynchronise_pbds ~__context ~pbds:(Db.Host.get_PBDs ~__context ~self:localhost); - ); + debug "resynchronising host PBDs"; + Storage_access.resynchronise_pbds ~__context ~pbds:(Db.Host.get_PBDs ~__context ~self:localhost); + ); (* debug "resynchronising db with host physical interfaces"; @@ -304,53 +304,53 @@ let update_env __context 
sync_keys = *) switched_sync Xapi_globs.sync_pif_params (fun () -> - debug "resynchronising PIF params"; - resynchronise_pif_params ~__context; - ); + debug "resynchronising PIF params"; + resynchronise_pif_params ~__context; + ); switched_sync Xapi_globs.sync_patch_update_db (fun () -> - debug "checking patch status"; - Xapi_pool_patch.update_db ~__context - ); - + debug "checking patch status"; + Xapi_pool_patch.update_db ~__context + ); + switched_sync Xapi_globs.sync_bios_strings (fun () -> - debug "get BIOS strings on startup"; - let current_bios_strings = Bios_strings.get_host_bios_strings ~__context in - let db_host_bios_strings = Db.Host.get_bios_strings ~__context ~self:localhost in - - if current_bios_strings <> db_host_bios_strings then - begin - debug "BIOS strings obtained from the host and that present in DB are different. Updating BIOS strings in xapi-db."; - Db.Host.set_bios_strings ~__context ~self:localhost ~value:current_bios_strings - end - ); + debug "get BIOS strings on startup"; + let current_bios_strings = Bios_strings.get_host_bios_strings ~__context in + let db_host_bios_strings = Db.Host.get_bios_strings ~__context ~self:localhost in + + if current_bios_strings <> db_host_bios_strings then + begin + debug "BIOS strings obtained from the host and that present in DB are different. Updating BIOS strings in xapi-db."; + Db.Host.set_bios_strings ~__context ~self:localhost ~value:current_bios_strings + end + ); (* CA-35549: In a pool rolling upgrade, the master will detect the end of upgrade when the software versions - of all the hosts are the same. It will then assume that (for example) per-host patch records have - been tidied up and attempt to delete orphaned pool-wide patch records. *) + of all the hosts are the same. It will then assume that (for example) per-host patch records have + been tidied up and attempt to delete orphaned pool-wide patch records. 
*) (* refresh host info fields *) - switched_sync Xapi_globs.sync_host_display (fun () -> - Xapi_host.sync_display ~__context ~host:localhost - ); + switched_sync Xapi_globs.sync_host_display (fun () -> + Xapi_host.sync_display ~__context ~host:localhost + ); - switched_sync Xapi_globs.sync_refresh_localhost_info (fun () -> - refresh_localhost_info ~__context info; - ); + switched_sync Xapi_globs.sync_refresh_localhost_info (fun () -> + refresh_localhost_info ~__context info; + ); switched_sync Xapi_globs.sync_local_vdi_activations (fun () -> - Storage_access.refresh_local_vdi_activations ~__context; - ); + Storage_access.refresh_local_vdi_activations ~__context; + ); switched_sync Xapi_globs.sync_chipset_info (fun () -> - Create_misc.create_chipset_info ~__context; - ); + Create_misc.create_chipset_info ~__context; + ); switched_sync Xapi_globs.sync_pci_devices (fun () -> - Xapi_pci.update_pcis ~__context ~host:localhost; - ); + Xapi_pci.update_pcis ~__context ~host:localhost; + ); switched_sync Xapi_globs.sync_gpus (fun () -> - Xapi_pgpu.update_gpus ~__context ~host:localhost; - ); + Xapi_pgpu.update_gpus ~__context ~host:localhost; + ); diff --git a/ocaml/xapi/debug_populate.ml b/ocaml/xapi/debug_populate.ml index 32a202046b8..6a83a2f4cfe 100644 --- a/ocaml/xapi/debug_populate.ml +++ b/ocaml/xapi/debug_populate.ml @@ -24,9 +24,9 @@ let rec make_srs __context i = begin let uuid = Uuid.to_string (Uuid.make_uuid()) in let sr_ref = Xapi_sr.introduce ~__context ~uuid:uuid ~name_label:("SR-"^(string_of_int i)) - ~name_description:"Dummy data" ~_type:"ext" ~content_type:"dummy" ~shared:true ~sm_config:[] in - srs := sr_ref :: !srs; - make_srs __context (i-1) + ~name_description:"Dummy data" ~_type:"ext" ~content_type:"dummy" ~shared:true ~sm_config:[] in + srs := sr_ref :: !srs; + make_srs __context (i-1) end let rec make_networks __context i = @@ -34,46 +34,46 @@ let rec make_networks __context i = else begin let nw_ref = - Xapi_network.create ~__context 
~name_label:("Network-"^(string_of_int i)) ~name_description:"dummy" ~mTU:1500L ~other_config:[] ~tags:[] in - nws := nw_ref :: !nws; - make_networks __context (i-1) + Xapi_network.create ~__context ~name_label:("Network-"^(string_of_int i)) ~name_description:"dummy" ~mTU:1500L ~other_config:[] ~tags:[] in + nws := nw_ref :: !nws; + make_networks __context (i-1) end let get_random lr = let l = List.length !lr in let n = Random.int l in - List.nth !lr n + List.nth !lr n let rec make_vdis_and_vbds __context vmref i = - if i=0 then () - else - begin - let uuid = Uuid.to_string (Uuid.make_uuid()) in - let vm_uuid = Db.VM.get_uuid ~self:vmref ~__context in - let name_label = "VDI-"^(string_of_int i)^"-for-VM-"^vm_uuid in - let name_description = "dummy" in - let sR = get_random srs in - let _type = `user in - let read_only = false in - let other_config = [] in - let location = vm_uuid ^ (string_of_int i) in - let xenstore_data = [] in - let sm_config = [] in - let managed = true in - let virtual_size = 1L in - let physical_utilisation = 1L in - let metadata_of_pool = Ref.null in - let is_a_snapshot = false in - let snapshot_time = Stdext.Date.never in - let snapshot_of = Ref.null in - let sharable = false in - let vdi = Xapi_vdi.pool_introduce -~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of in + if i=0 then () + else + begin + let uuid = Uuid.to_string (Uuid.make_uuid()) in + let vm_uuid = Db.VM.get_uuid ~self:vmref ~__context in + let name_label = "VDI-"^(string_of_int i)^"-for-VM-"^vm_uuid in + let name_description = "dummy" in + let sR = get_random srs in + let _type = `user in + let read_only = false in + let other_config = [] in + let location = vm_uuid ^ (string_of_int i) in + let xenstore_data = [] in + let sm_config = [] in + let managed = true in + let virtual_size = 1L in + 
let physical_utilisation = 1L in + let metadata_of_pool = Ref.null in + let is_a_snapshot = false in + let snapshot_time = Stdext.Date.never in + let snapshot_of = Ref.null in + let sharable = false in + let vdi = Xapi_vdi.pool_introduce + ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of in let _ = - Xapi_vbd.create ~__context ~vM:vmref ~vDI:vdi ~userdevice:(string_of_int i) ~bootable:true ~mode:`RW ~_type:`Disk ~empty:false + Xapi_vbd.create ~__context ~vM:vmref ~vDI:vdi ~userdevice:(string_of_int i) ~bootable:true ~mode:`RW ~_type:`Disk ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] in - make_vdis_and_vbds __context vmref (i-1) + make_vdis_and_vbds __context vmref (i-1) end let rec make_vifs __context vmref i = @@ -81,7 +81,7 @@ let rec make_vifs __context vmref i = else begin ignore(Xapi_vif.create ~__context ~device:(string_of_int i) ~network:(get_random nws) ~vM:vmref - ~mAC:"de:ad:be:ef:99:88" ~mTU:Int64.zero ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]); + ~mAC:"de:ad:be:ef:99:88" ~mTU:Int64.zero ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[]); make_vifs __context vmref (i-1) end @@ -91,10 +91,10 @@ let rec make_vms __context template i vdis_per_vm = else begin let vmref = Xapi_vm.clone ~__context ~vm:template ~new_name:("VM-"^(string_of_int i)) in - Db.VM.set_is_a_template ~__context ~self:vmref ~value:false; - make_vdis_and_vbds __context vmref vdis_per_vm; - make_vifs __context vmref 2; - make_vms __context template (i-1) vdis_per_vm + Db.VM.set_is_a_template ~__context ~self:vmref ~value:false; + make_vdis_and_vbds __context vmref vdis_per_vm; + make_vifs __context vmref 2; + make_vms 
__context template (i-1) vdis_per_vm end let make_tasks __context tasks = @@ -103,7 +103,7 @@ let make_tasks __context tasks = let len = List.length l in let i = Random.int len in try List.nth l i with exn -> List.hd l - in + in let all_vms = Db.VM.get_all ~__context in let all_vbds = Db.VBD.get_all ~__context in let all_vdis = Db.VDI.get_all ~__context in @@ -113,34 +113,34 @@ let make_tasks __context tasks = do let mode = Random.int 6 in let label = match mode with - | 0 -> "VM" - | 1 -> "VBD" - | 2 -> "VDI" - | 3 -> "VIF" - | 4 -> "SR" - | _ -> "other" in + | 0 -> "VM" + | 1 -> "VBD" + | 2 -> "VDI" + | 3 -> "VIF" + | 4 -> "SR" + | _ -> "other" in let task = Xapi_task.create ~__context ~label ~description:(create_description label) in let taskid = Ref.string_of task in try - match mode with - | 0 -> - let self = pick_random all_vms in - Db.VM.add_to_current_operations ~__context ~self ~key:taskid ~value:`import - | 1 -> - let self = pick_random all_vbds in - Db.VBD.add_to_current_operations ~__context ~self ~key:taskid ~value:`unplug - | 2 -> - let self = pick_random all_vdis in - Db.VDI.add_to_current_operations ~__context ~self ~key:taskid ~value:`clone - | 3 -> - let self = pick_random all_vifs in - Db.VIF.add_to_current_operations ~__context ~self ~key:taskid ~value:`plug - | 4 -> - let self = pick_random all_srs in - Db.SR.add_to_current_operations ~__context ~self ~key:taskid ~value:`scan - | _ -> - () + match mode with + | 0 -> + let self = pick_random all_vms in + Db.VM.add_to_current_operations ~__context ~self ~key:taskid ~value:`import + | 1 -> + let self = pick_random all_vbds in + Db.VBD.add_to_current_operations ~__context ~self ~key:taskid ~value:`unplug + | 2 -> + let self = pick_random all_vdis in + Db.VDI.add_to_current_operations ~__context ~self ~key:taskid ~value:`clone + | 3 -> + let self = pick_random all_vifs in + Db.VIF.add_to_current_operations ~__context ~self ~key:taskid ~value:`plug + | 4 -> + let self = pick_random all_srs in + 
Db.SR.add_to_current_operations ~__context ~self ~key:taskid ~value:`scan + | _ -> + () with _ -> () done diff --git a/ocaml/xapi/export.ml b/ocaml/xapi/export.ml index 2cce0fba419..3c514dfc7d5 100644 --- a/ocaml/xapi/export.ml +++ b/ocaml/xapi/export.ml @@ -13,27 +13,27 @@ *) (** Code to output a subset of database records, marshalled in XMLRPC format * @group Import and Export - *) +*) (** The general plan: - + Walk around the database and select the objects you want (see 'create_table') + + Walk around the database and select the objects you want (see 'create_table') and make a table mapping internal ref -> fresh external references. It would - be nice to generate a visitor thingimy for this. - + Select all the objects from each class, filter the subset you want (ie those whose - reference exists as a key in the table) and convert them into instances of the + be nice to generate a visitor thingimy for this. + + Select all the objects from each class, filter the subset you want (ie those whose + reference exists as a key in the table) and convert them into instances of the intermediate record 'type obj' via the functions make_{vm,sr,vbd,vif,network}. 
- The created 'obj record' includes the class name as a string (from the datamodel), - the fresh reference and the output of 'get_record' marshalled using the standard - XMLRPC functions with all the references converted either to the fresh external refs - or NULL (so we aim not to export dangling pointers) - + Write out one big XML file containing an XMLRPC struct which has keys: - version -> a structure of system version info (API versions, internal build numbers) - state -> an XMLRPC array of XMLRPC serialised 'obj' records (see 'xmlrpc_of_obj') - *) + The created 'obj record' includes the class name as a string (from the datamodel), + the fresh reference and the output of 'get_record' marshalled using the standard + XMLRPC functions with all the references converted either to the fresh external refs + or NULL (so we aim not to export dangling pointers) + + Write out one big XML file containing an XMLRPC struct which has keys: + version -> a structure of system version info (API versions, internal build numbers) + state -> an XMLRPC array of XMLRPC serialised 'obj' records (see 'xmlrpc_of_obj') + *) (** The specific plan for VM export: - Walk over the datamodel and mark VIFs, Networks connected to the VIFs, VBDs, VDIs connected - to the VBDs, SRs connected to the VDIs (and maybe a suspend image?). *) + Walk over the datamodel and mark VIFs, Networks connected to the VIFs, VBDs, VDIs connected + to the VBDs, SRs connected to the VDIs (and maybe a suspend image?). 
*) open Importexport open Stdext @@ -43,7 +43,7 @@ module D=Debug.Make(struct let name="export" end) open D -let make_id = +let make_id = let counter = ref 0 in fun () -> let this = !counter in @@ -51,92 +51,92 @@ let make_id = "Ref:" ^ (string_of_int this) let rec update_table ~__context ~include_snapshots ~preserve_power_state ~include_vhd_parents ~table vm = - let add r = - if not (Hashtbl.mem table (Ref.string_of r)) then - Hashtbl.add table (Ref.string_of r)(make_id ()) in - - let rec add_vdi v = - add v; - let r = Db.VDI.get_record ~__context ~self:v in - add r.API.vDI_SR; - if include_vhd_parents then begin - let sm_config = r.API.vDI_sm_config in - if List.mem_assoc Xapi_globs.vhd_parent sm_config then begin - let parent_uuid = List.assoc Xapi_globs.vhd_parent sm_config in - try - let parent_ref = Db.VDI.get_by_uuid ~__context ~uuid:parent_uuid in - (* Only recurse if we haven't already seen this VDI *) - if not (Hashtbl.mem table (Ref.string_of parent_ref)) - then add_vdi parent_ref - with _ -> - warn "VM.export_metadata: lookup of parent VDI %s failed" parent_uuid - end - end - in - - if Db.is_valid_ref __context vm && not (Hashtbl.mem table (Ref.string_of vm)) then begin - add vm; - let vm = Db.VM.get_record ~__context ~self:vm in - - List.iter - (fun vif -> - if Db.is_valid_ref __context vif then begin - add vif; - let vif = Db.VIF.get_record ~__context ~self:vif in - add vif.API.vIF_network - end) - vm.API.vM_VIFs; - - List.iter - (fun vbd -> - if Db.is_valid_ref __context vbd then begin - add vbd; - let vbd = Db.VBD.get_record ~__context ~self:vbd in - if not(vbd.API.vBD_empty) - then add_vdi vbd.API.vBD_VDI - end) - vm.API.vM_VBDs; - - List.iter - (fun vgpu -> - if Db.is_valid_ref __context vgpu then begin - add vgpu; - let vgpu = Db.VGPU.get_record ~__context ~self:vgpu in - add vgpu.API.vGPU_type; - add vgpu.API.vGPU_GPU_group - end) - vm.API.vM_VGPUs; - - (* If we need to include snapshots, update the table for VMs in the 'snapshots' field *) - if 
include_snapshots then - List.iter - (fun snap -> update_table ~__context ~include_snapshots:false ~preserve_power_state ~include_vhd_parents ~table snap) - vm.API.vM_snapshots; - - (* If VM is suspended then add the suspend_VDI *) - let vdi = vm.API.vM_suspend_VDI in - if preserve_power_state && vm.API.vM_power_state = `Suspended && Db.is_valid_ref __context vdi - then add_vdi vdi; - - (* Add also the guest metrics *) - add vm.API.vM_guest_metrics; - - (* Add the hosts links *) - add vm.API.vM_resident_on; - add vm.API.vM_affinity; - - (* Add the parent VM *) - if include_snapshots && Db.is_valid_ref __context vm.API.vM_parent - then update_table ~__context ~include_snapshots:false ~preserve_power_state ~include_vhd_parents ~table vm.API.vM_parent - end + let add r = + if not (Hashtbl.mem table (Ref.string_of r)) then + Hashtbl.add table (Ref.string_of r)(make_id ()) in + + let rec add_vdi v = + add v; + let r = Db.VDI.get_record ~__context ~self:v in + add r.API.vDI_SR; + if include_vhd_parents then begin + let sm_config = r.API.vDI_sm_config in + if List.mem_assoc Xapi_globs.vhd_parent sm_config then begin + let parent_uuid = List.assoc Xapi_globs.vhd_parent sm_config in + try + let parent_ref = Db.VDI.get_by_uuid ~__context ~uuid:parent_uuid in + (* Only recurse if we haven't already seen this VDI *) + if not (Hashtbl.mem table (Ref.string_of parent_ref)) + then add_vdi parent_ref + with _ -> + warn "VM.export_metadata: lookup of parent VDI %s failed" parent_uuid + end + end + in + + if Db.is_valid_ref __context vm && not (Hashtbl.mem table (Ref.string_of vm)) then begin + add vm; + let vm = Db.VM.get_record ~__context ~self:vm in + + List.iter + (fun vif -> + if Db.is_valid_ref __context vif then begin + add vif; + let vif = Db.VIF.get_record ~__context ~self:vif in + add vif.API.vIF_network + end) + vm.API.vM_VIFs; + + List.iter + (fun vbd -> + if Db.is_valid_ref __context vbd then begin + add vbd; + let vbd = Db.VBD.get_record ~__context ~self:vbd in + if 
not(vbd.API.vBD_empty) + then add_vdi vbd.API.vBD_VDI + end) + vm.API.vM_VBDs; + + List.iter + (fun vgpu -> + if Db.is_valid_ref __context vgpu then begin + add vgpu; + let vgpu = Db.VGPU.get_record ~__context ~self:vgpu in + add vgpu.API.vGPU_type; + add vgpu.API.vGPU_GPU_group + end) + vm.API.vM_VGPUs; + + (* If we need to include snapshots, update the table for VMs in the 'snapshots' field *) + if include_snapshots then + List.iter + (fun snap -> update_table ~__context ~include_snapshots:false ~preserve_power_state ~include_vhd_parents ~table snap) + vm.API.vM_snapshots; + + (* If VM is suspended then add the suspend_VDI *) + let vdi = vm.API.vM_suspend_VDI in + if preserve_power_state && vm.API.vM_power_state = `Suspended && Db.is_valid_ref __context vdi + then add_vdi vdi; + + (* Add also the guest metrics *) + add vm.API.vM_guest_metrics; + + (* Add the hosts links *) + add vm.API.vM_resident_on; + add vm.API.vM_affinity; + + (* Add the parent VM *) + if include_snapshots && Db.is_valid_ref __context vm.API.vM_parent + then update_table ~__context ~include_snapshots:false ~preserve_power_state ~include_vhd_parents ~table vm.API.vM_parent + end (** Walk the graph of objects and update the table of Ref -> ids for each object we wish to include in the output. Other object references will be purged. 
*) let create_table () = - Hashtbl.create 10 + Hashtbl.create 10 (** Convert an internal reference into an external one or NULL *) -let lookup table r = +let lookup table r = if not(Hashtbl.mem table r) then Ref.null else Ref.of_string (Hashtbl.find table r) (** Convert a list of internal references into external references, filtering out NULLs *) @@ -144,248 +144,248 @@ let filter table rs = List.filter (fun x -> x <> Ref.null) (List.map (lookup tab (** Convert a Host to an obj *) let make_host table __context self = - let host = Db.Host.get_record ~__context ~self in - let host = { host with - API.host_PIFs = []; - API.host_PBDs = []; - API.host_PGPUs = []; - API.host_PCIs = []; - API.host_host_CPUs = []; - API.host_license_params = []; - API.host_blobs = []; - API.host_external_auth_type = ""; - API.host_external_auth_service_name = ""; - API.host_external_auth_configuration = []; - API.host_metrics = Ref.null; - API.host_patches = []; - API.host_crashdumps = []; - API.host_logging = []; - API.host_supported_bootloaders = []; - API.host_cpu_configuration = []; - API.host_other_config = []; - API.host_capabilities = []; - API.host_software_version = []; - API.host_sched_policy = ""; - API.host_ha_statefiles = []; - API.host_ha_network_peers = []; - API.host_tags = []; - API.host_crash_dump_sr = lookup table (Ref.string_of host.API.host_crash_dump_sr); - API.host_suspend_image_sr = lookup table (Ref.string_of host.API.host_suspend_image_sr); - API.host_resident_VMs = List.filter ((<>) Ref.null) (List.map (fun vm -> lookup table (Ref.string_of vm)) host.API.host_resident_VMs) } in - { cls = Datamodel._host; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.host_t host } + let host = Db.Host.get_record ~__context ~self in + let host = { host with + API.host_PIFs = []; + API.host_PBDs = []; + API.host_PGPUs = []; + API.host_PCIs = []; + API.host_host_CPUs = []; + API.host_license_params = []; + API.host_blobs = []; + 
API.host_external_auth_type = ""; + API.host_external_auth_service_name = ""; + API.host_external_auth_configuration = []; + API.host_metrics = Ref.null; + API.host_patches = []; + API.host_crashdumps = []; + API.host_logging = []; + API.host_supported_bootloaders = []; + API.host_cpu_configuration = []; + API.host_other_config = []; + API.host_capabilities = []; + API.host_software_version = []; + API.host_sched_policy = ""; + API.host_ha_statefiles = []; + API.host_ha_network_peers = []; + API.host_tags = []; + API.host_crash_dump_sr = lookup table (Ref.string_of host.API.host_crash_dump_sr); + API.host_suspend_image_sr = lookup table (Ref.string_of host.API.host_suspend_image_sr); + API.host_resident_VMs = List.filter ((<>) Ref.null) (List.map (fun vm -> lookup table (Ref.string_of vm)) host.API.host_resident_VMs) } in + { cls = Datamodel._host; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.host_t host } (** Convert a VM reference to an obj *) -let make_vm ?(with_snapshot_metadata=false) ~preserve_power_state table __context self = +let make_vm ?(with_snapshot_metadata=false) ~preserve_power_state table __context self = let vm = Db.VM.get_record ~__context ~self in let vm = { vm with - API.vM_power_state = if preserve_power_state then vm.API.vM_power_state else `Halted; - API.vM_suspend_VDI = if preserve_power_state then lookup table (Ref.string_of vm.API.vM_suspend_VDI) else Ref.null; - API.vM_is_a_snapshot = if with_snapshot_metadata then vm.API.vM_is_a_snapshot else false; - API.vM_snapshot_of = - if with_snapshot_metadata - then lookup table (Ref.string_of vm.API.vM_snapshot_of) - else Ref.null; - API.vM_snapshots = if with_snapshot_metadata then vm.API.vM_snapshots else []; - API.vM_snapshot_time = if with_snapshot_metadata then vm.API.vM_snapshot_time else Date.never; - API.vM_transportable_snapshot_id = if with_snapshot_metadata then vm.API.vM_transportable_snapshot_id else ""; - API.vM_parent = - if 
with_snapshot_metadata - then lookup table (Ref.string_of vm.API.vM_parent) - else Ref.null; - API.vM_current_operations = []; - API.vM_allowed_operations = []; - API.vM_VIFs = filter table (List.map Ref.string_of vm.API.vM_VIFs); - API.vM_VBDs = filter table (List.map Ref.string_of vm.API.vM_VBDs); - API.vM_VGPUs = filter table (List.map Ref.string_of vm.API.vM_VGPUs); - API.vM_crash_dumps = []; - API.vM_VTPMs = []; - API.vM_resident_on = lookup table (Ref.string_of vm.API.vM_resident_on); - API.vM_affinity = lookup table (Ref.string_of vm.API.vM_affinity); - API.vM_consoles = []; - API.vM_metrics = Ref.null; - API.vM_guest_metrics = lookup table (Ref.string_of vm.API.vM_guest_metrics); - API.vM_protection_policy = Ref.null; - API.vM_bios_strings = vm.API.vM_bios_strings; - API.vM_blobs = [];} in - { cls = Datamodel._vm; - id = Ref.string_of (lookup table (Ref.string_of self)); + API.vM_power_state = if preserve_power_state then vm.API.vM_power_state else `Halted; + API.vM_suspend_VDI = if preserve_power_state then lookup table (Ref.string_of vm.API.vM_suspend_VDI) else Ref.null; + API.vM_is_a_snapshot = if with_snapshot_metadata then vm.API.vM_is_a_snapshot else false; + API.vM_snapshot_of = + if with_snapshot_metadata + then lookup table (Ref.string_of vm.API.vM_snapshot_of) + else Ref.null; + API.vM_snapshots = if with_snapshot_metadata then vm.API.vM_snapshots else []; + API.vM_snapshot_time = if with_snapshot_metadata then vm.API.vM_snapshot_time else Date.never; + API.vM_transportable_snapshot_id = if with_snapshot_metadata then vm.API.vM_transportable_snapshot_id else ""; + API.vM_parent = + if with_snapshot_metadata + then lookup table (Ref.string_of vm.API.vM_parent) + else Ref.null; + API.vM_current_operations = []; + API.vM_allowed_operations = []; + API.vM_VIFs = filter table (List.map Ref.string_of vm.API.vM_VIFs); + API.vM_VBDs = filter table (List.map Ref.string_of vm.API.vM_VBDs); + API.vM_VGPUs = filter table (List.map Ref.string_of 
vm.API.vM_VGPUs); + API.vM_crash_dumps = []; + API.vM_VTPMs = []; + API.vM_resident_on = lookup table (Ref.string_of vm.API.vM_resident_on); + API.vM_affinity = lookup table (Ref.string_of vm.API.vM_affinity); + API.vM_consoles = []; + API.vM_metrics = Ref.null; + API.vM_guest_metrics = lookup table (Ref.string_of vm.API.vM_guest_metrics); + API.vM_protection_policy = Ref.null; + API.vM_bios_strings = vm.API.vM_bios_strings; + API.vM_blobs = [];} in + { cls = Datamodel._vm; + id = Ref.string_of (lookup table (Ref.string_of self)); snapshot = API.Legacy.To.vM_t vm } (** Convert a guest-metrics reference to an obj *) let make_gm table __context self = - let gm = Db.VM_guest_metrics.get_record ~__context ~self in - { cls = Datamodel._vm_guest_metrics; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.vM_guest_metrics_t gm } + let gm = Db.VM_guest_metrics.get_record ~__context ~self in + { cls = Datamodel._vm_guest_metrics; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.vM_guest_metrics_t gm } (** Convert a VIF reference to an obj *) -let make_vif table ~preserve_power_state __context self = +let make_vif table ~preserve_power_state __context self = let vif = Db.VIF.get_record ~__context ~self in - let vif = { vif with - API.vIF_currently_attached = if preserve_power_state then vif.API.vIF_currently_attached else false; - API.vIF_network = lookup table (Ref.string_of vif.API.vIF_network); - API.vIF_VM = lookup table (Ref.string_of vif.API.vIF_VM); - API.vIF_metrics = Ref.null; - API.vIF_current_operations = []; - API.vIF_allowed_operations = []; - } in - { cls = Datamodel._vif; - id = Ref.string_of (lookup table (Ref.string_of self)); + let vif = { vif with + API.vIF_currently_attached = if preserve_power_state then vif.API.vIF_currently_attached else false; + API.vIF_network = lookup table (Ref.string_of vif.API.vIF_network); + API.vIF_VM = lookup table (Ref.string_of vif.API.vIF_VM); + 
API.vIF_metrics = Ref.null; + API.vIF_current_operations = []; + API.vIF_allowed_operations = []; + } in + { cls = Datamodel._vif; + id = Ref.string_of (lookup table (Ref.string_of self)); snapshot = API.Legacy.To.vIF_t vif } (** Convert a Network reference to an obj *) -let make_network table __context self = +let make_network table __context self = let net = Db.Network.get_record ~__context ~self in - let net = { net with - API.network_VIFs = filter table (List.map Ref.string_of net.API.network_VIFs); - API.network_PIFs = []; - API.network_current_operations = []; - API.network_allowed_operations = []; - } in - { cls = Datamodel._network; - id = Ref.string_of (lookup table (Ref.string_of self)); + let net = { net with + API.network_VIFs = filter table (List.map Ref.string_of net.API.network_VIFs); + API.network_PIFs = []; + API.network_current_operations = []; + API.network_allowed_operations = []; + } in + { cls = Datamodel._network; + id = Ref.string_of (lookup table (Ref.string_of self)); snapshot = API.Legacy.To.network_t net } (** Convert a VBD reference to an obj *) -let make_vbd table ~preserve_power_state __context self = +let make_vbd table ~preserve_power_state __context self = let vbd = Db.VBD.get_record ~__context ~self in - let vbd = { vbd with - API.vBD_currently_attached = if preserve_power_state then vbd.API.vBD_currently_attached else false; - API.vBD_VDI = lookup table (Ref.string_of vbd.API.vBD_VDI); - API.vBD_VM = lookup table (Ref.string_of vbd.API.vBD_VM); - API.vBD_metrics = Ref.null; - API.vBD_current_operations = []; - API.vBD_allowed_operations = []; - } in - { cls = Datamodel._vbd; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.vBD_t vbd } + let vbd = { vbd with + API.vBD_currently_attached = if preserve_power_state then vbd.API.vBD_currently_attached else false; + API.vBD_VDI = lookup table (Ref.string_of vbd.API.vBD_VDI); + API.vBD_VM = lookup table (Ref.string_of vbd.API.vBD_VM); + 
API.vBD_metrics = Ref.null; + API.vBD_current_operations = []; + API.vBD_allowed_operations = []; + } in + { cls = Datamodel._vbd; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.vBD_t vbd } (** Convert a VDI reference to an obj *) -let make_vdi table __context self = +let make_vdi table __context self = let vdi = Db.VDI.get_record ~__context ~self in - let vdi = { vdi with - API.vDI_VBDs = filter table (List.map Ref.string_of vdi.API.vDI_VBDs); - API.vDI_crash_dumps = []; - API.vDI_SR = lookup table (Ref.string_of vdi.API.vDI_SR); - API.vDI_current_operations = []; - API.vDI_allowed_operations = []; - } in - { cls = Datamodel._vdi; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.vDI_t vdi } + let vdi = { vdi with + API.vDI_VBDs = filter table (List.map Ref.string_of vdi.API.vDI_VBDs); + API.vDI_crash_dumps = []; + API.vDI_SR = lookup table (Ref.string_of vdi.API.vDI_SR); + API.vDI_current_operations = []; + API.vDI_allowed_operations = []; + } in + { cls = Datamodel._vdi; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.vDI_t vdi } (** Convert a SR reference to an obj *) -let make_sr table __context self = +let make_sr table __context self = let sr = Db.SR.get_record ~__context ~self in - let sr = { sr with - API.sR_VDIs = filter table (List.map Ref.string_of sr.API.sR_VDIs); - API.sR_PBDs = []; - API.sR_current_operations = []; - API.sR_allowed_operations = []; - } in - { cls = Datamodel._sr; - id = Ref.string_of (lookup table (Ref.string_of self)); + let sr = { sr with + API.sR_VDIs = filter table (List.map Ref.string_of sr.API.sR_VDIs); + API.sR_PBDs = []; + API.sR_current_operations = []; + API.sR_allowed_operations = []; + } in + { cls = Datamodel._sr; + id = Ref.string_of (lookup table (Ref.string_of self)); snapshot = API.Legacy.To.sR_t sr; - } + } (** Convert a VGPU_type reference to an obj *) let make_vgpu_type table __context self = - let 
vgpu_type = Db.VGPU_type.get_record ~__context ~self in - { - cls = Datamodel._vgpu_type; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.vGPU_type_t vgpu_type - } + let vgpu_type = Db.VGPU_type.get_record ~__context ~self in + { + cls = Datamodel._vgpu_type; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.vGPU_type_t vgpu_type + } (** Convert a VGPU reference to an obj *) -let make_vgpu table ~preserve_power_state __context self = - let vgpu = Db.VGPU.get_record ~__context ~self in - let vgpu = { vgpu with - API.vGPU_currently_attached = if preserve_power_state then vgpu.API.vGPU_currently_attached else false; - API.vGPU_GPU_group = lookup table (Ref.string_of vgpu.API.vGPU_GPU_group); - API.vGPU_type = lookup table (Ref.string_of vgpu.API.vGPU_type); - API.vGPU_VM = lookup table (Ref.string_of vgpu.API.vGPU_VM); - } in - { - cls = Datamodel._vgpu; - id = Ref.string_of (lookup table (Ref.string_of self)); - snapshot = API.Legacy.To.vGPU_t vgpu - } +let make_vgpu table ~preserve_power_state __context self = + let vgpu = Db.VGPU.get_record ~__context ~self in + let vgpu = { vgpu with + API.vGPU_currently_attached = if preserve_power_state then vgpu.API.vGPU_currently_attached else false; + API.vGPU_GPU_group = lookup table (Ref.string_of vgpu.API.vGPU_GPU_group); + API.vGPU_type = lookup table (Ref.string_of vgpu.API.vGPU_type); + API.vGPU_VM = lookup table (Ref.string_of vgpu.API.vGPU_VM); + } in + { + cls = Datamodel._vgpu; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.vGPU_t vgpu + } (** Convert a GPU_group reference to an obj *) -let make_gpu_group table __context self = - let group = Db.GPU_group.get_record ~__context ~self in - let group = { group with - API.gPU_group_VGPUs = filter table (List.map Ref.string_of group.API.gPU_group_VGPUs); - API.gPU_group_PGPUs = []; - } in - { - cls = Datamodel._gpu_group; - id = Ref.string_of (lookup table 
(Ref.string_of self)); - snapshot = API.Legacy.To.gPU_group_t group - } - -let make_all ~with_snapshot_metadata ~preserve_power_state table __context = - let filter table rs = List.filter (fun x -> lookup table (Ref.string_of x) <> Ref.null) rs in - let hosts = List.map (make_host table __context) (filter table (Db.Host.get_all ~__context)) in - let vms = List.map (make_vm ~with_snapshot_metadata ~preserve_power_state table __context) (filter table (Db.VM.get_all ~__context)) in - let gms = List.map (make_gm table __context) (filter table (Db.VM_guest_metrics.get_all ~__context)) in - let vbds = List.map (make_vbd ~preserve_power_state table __context) (filter table (Db.VBD.get_all ~__context)) in - let vifs = List.map (make_vif ~preserve_power_state table __context) (filter table (Db.VIF.get_all ~__context)) in - let nets = List.map (make_network table __context) (filter table (Db.Network.get_all ~__context)) in - let vdis = List.map (make_vdi table __context) (filter table (Db.VDI.get_all ~__context)) in - let srs = List.map (make_sr table __context) (filter table (Db.SR.get_all ~__context)) in - let vgpu_types = List.map (make_vgpu_type table __context) (filter table (Db.VGPU_type.get_all ~__context)) in - let vgpus = List.map (make_vgpu ~preserve_power_state table __context) (filter table (Db.VGPU.get_all ~__context)) in - let gpu_groups = List.map (make_gpu_group table __context) (filter table (Db.GPU_group.get_all ~__context)) in - hosts @ vms @ gms @ vbds @ vifs @ nets @ vdis @ srs @ vgpu_types @ vgpus @ gpu_groups +let make_gpu_group table __context self = + let group = Db.GPU_group.get_record ~__context ~self in + let group = { group with + API.gPU_group_VGPUs = filter table (List.map Ref.string_of group.API.gPU_group_VGPUs); + API.gPU_group_PGPUs = []; + } in + { + cls = Datamodel._gpu_group; + id = Ref.string_of (lookup table (Ref.string_of self)); + snapshot = API.Legacy.To.gPU_group_t group + } + +let make_all ~with_snapshot_metadata 
~preserve_power_state table __context = + let filter table rs = List.filter (fun x -> lookup table (Ref.string_of x) <> Ref.null) rs in + let hosts = List.map (make_host table __context) (filter table (Db.Host.get_all ~__context)) in + let vms = List.map (make_vm ~with_snapshot_metadata ~preserve_power_state table __context) (filter table (Db.VM.get_all ~__context)) in + let gms = List.map (make_gm table __context) (filter table (Db.VM_guest_metrics.get_all ~__context)) in + let vbds = List.map (make_vbd ~preserve_power_state table __context) (filter table (Db.VBD.get_all ~__context)) in + let vifs = List.map (make_vif ~preserve_power_state table __context) (filter table (Db.VIF.get_all ~__context)) in + let nets = List.map (make_network table __context) (filter table (Db.Network.get_all ~__context)) in + let vdis = List.map (make_vdi table __context) (filter table (Db.VDI.get_all ~__context)) in + let srs = List.map (make_sr table __context) (filter table (Db.SR.get_all ~__context)) in + let vgpu_types = List.map (make_vgpu_type table __context) (filter table (Db.VGPU_type.get_all ~__context)) in + let vgpus = List.map (make_vgpu ~preserve_power_state table __context) (filter table (Db.VGPU.get_all ~__context)) in + let gpu_groups = List.map (make_gpu_group table __context) (filter table (Db.GPU_group.get_all ~__context)) in + hosts @ vms @ gms @ vbds @ vifs @ nets @ vdis @ srs @ vgpu_types @ vgpus @ gpu_groups open Xapi_globs (* on normal export, do not include snapshot metadata; - on metadata-export, include snapshots fields of the exported VM as well as the VM records of VMs + on metadata-export, include snapshots fields of the exported VM as well as the VM records of VMs which are snapshots of the exported VM. 
*) let vm_metadata ~with_snapshot_metadata ~preserve_power_state ~include_vhd_parents ~__context ~vms = let table = create_table () in List.iter (update_table ~__context ~include_snapshots:with_snapshot_metadata ~preserve_power_state ~include_vhd_parents ~table) vms; let objects = make_all ~with_snapshot_metadata ~preserve_power_state table __context in let header = { version = this_version __context; - objects = objects } in + objects = objects } in let ova_xml = Xml.to_bigbuffer (xmlrpc_of_header header) in table, ova_xml let string_of_vm ~__context vm = - try Printf.sprintf "'%s' ('%s')" - (Db.VM.get_uuid ~__context ~self:vm) - (Db.VM.get_name_label ~__context ~self:vm) - with _ -> "invalid" + try Printf.sprintf "'%s' ('%s')" + (Db.VM.get_uuid ~__context ~self:vm) + (Db.VM.get_name_label ~__context ~self:vm) + with _ -> "invalid" (** Export a VM's metadata only *) let export_metadata ~__context ~with_snapshot_metadata ~preserve_power_state ~include_vhd_parents ~vms s = - begin match vms with - | [] -> failwith "need to specify at least one VM" - | [vm] -> info "VM.export_metadata: VM = %s; with_snapshot_metadata = '%b'; include_vhd_parents = '%b'; preserve_power_state = '%s" - (string_of_vm ~__context vm) - with_snapshot_metadata - include_vhd_parents - (string_of_bool preserve_power_state) - | vms -> info "VM.export_metadata: VM = %s; with_snapshot_metadata = '%b'; preserve_power_state = '%s" - (String.concat ", " (List.map (string_of_vm ~__context) vms)) - with_snapshot_metadata - (string_of_bool preserve_power_state) end; - - let _, ova_xml = vm_metadata ~with_snapshot_metadata ~preserve_power_state ~include_vhd_parents ~__context ~vms in - let hdr = Tar_unix.Header.make Xva.xml_filename (Bigbuffer.length ova_xml) in - Tar_unix.write_block hdr (fun s -> Bigbuffer.to_fct ova_xml (fun frag -> Unixext.really_write_string s frag)) s; - Tar_unix.write_end s + begin match vms with + | [] -> failwith "need to specify at least one VM" + | [vm] -> info 
"VM.export_metadata: VM = %s; with_snapshot_metadata = '%b'; include_vhd_parents = '%b'; preserve_power_state = '%s" + (string_of_vm ~__context vm) + with_snapshot_metadata + include_vhd_parents + (string_of_bool preserve_power_state) + | vms -> info "VM.export_metadata: VM = %s; with_snapshot_metadata = '%b'; preserve_power_state = '%s" + (String.concat ", " (List.map (string_of_vm ~__context) vms)) + with_snapshot_metadata + (string_of_bool preserve_power_state) end; + + let _, ova_xml = vm_metadata ~with_snapshot_metadata ~preserve_power_state ~include_vhd_parents ~__context ~vms in + let hdr = Tar_unix.Header.make Xva.xml_filename (Bigbuffer.length ova_xml) in + Tar_unix.write_block hdr (fun s -> Bigbuffer.to_fct ova_xml (fun frag -> Unixext.really_write_string s frag)) s; + Tar_unix.write_end s let export refresh_session __context rpc session_id s vm_ref preserve_power_state = info "VM.export: VM = %s; preserve_power_state = '%s'" - (string_of_vm ~__context vm_ref) - (string_of_bool preserve_power_state); + (string_of_vm ~__context vm_ref) + (string_of_bool preserve_power_state); let table, ova_xml = vm_metadata ~with_snapshot_metadata:false ~preserve_power_state ~include_vhd_parents:false ~__context ~vms:[vm_ref] in @@ -401,7 +401,7 @@ let export refresh_session __context rpc session_id s vm_ref preserve_power_stat let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) vbds in (* Don't forget the suspend VDI (if we allow export of suspended VMs) *) let vdis = match Db.VM.get_power_state ~__context ~self:vm_ref with - | `Suspended when preserve_power_state -> Db.VM.get_suspend_VDI ~__context ~self:vm_ref :: vdis + | `Suspended when preserve_power_state -> Db.VM.get_suspend_VDI ~__context ~self:vm_ref :: vdis | _ -> vdis in let vdis = List.filter (fun self -> Db.SR.get_content_type ~__context ~self:(Db.VDI.get_SR ~__context ~self) <> "iso") vdis in let vdis = List.filter (fun vdi -> Hashtbl.mem table (Ref.string_of vdi)) vdis in @@ -417,202 +417,202 
@@ open Http open Client let lock_vm ~__context ~vm ~task_id op = - (* Note slight race here because we haven't got the master lock *) - Xapi_vm_lifecycle.assert_operation_valid ~__context ~self:vm ~op ~strict:true; - (* ... small race lives here ... *) - Db.VM.add_to_current_operations ~__context ~self:vm ~key:task_id ~value:op; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm + (* Note slight race here because we haven't got the master lock *) + Xapi_vm_lifecycle.assert_operation_valid ~__context ~self:vm ~op ~strict:true; + (* ... small race lives here ... *) + Db.VM.add_to_current_operations ~__context ~self:vm ~key:task_id ~value:op; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm let unlock_vm ~__context ~vm ~task_id = - Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm + Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm -let with_vm_locked ~__context ~vm ~task_id op f = - lock_vm ~__context ~vm ~task_id op; - finally f - (fun () -> unlock_vm ~__context ~vm ~task_id) +let with_vm_locked ~__context ~vm ~task_id op f = + lock_vm ~__context ~vm ~task_id op; + finally f + (fun () -> unlock_vm ~__context ~vm ~task_id) -let vm_from_request ~__context (req: Request.t) = +let vm_from_request ~__context (req: Request.t) = if List.mem_assoc "ref" req.Request.query - then Ref.of_string (List.assoc "ref" req.Request.query) - else + then Ref.of_string (List.assoc "ref" req.Request.query) + else let uuid = List.assoc "uuid" req.Request.query in - Helpers.call_api_functions - ~__context (fun rpc session_id -> Client.VM.get_by_uuid rpc session_id uuid) + Helpers.call_api_functions + ~__context (fun rpc session_id -> Client.VM.get_by_uuid rpc session_id uuid) let bool_from_request ~__context (req: Request.t) default k = - if List.mem_assoc k req.Request.query - then 
bool_of_string (List.assoc k req.Request.query) - else default + if List.mem_assoc k req.Request.query + then bool_of_string (List.assoc k req.Request.query) + else default -let export_all_vms_from_request ~__context (req: Request.t) = - bool_from_request ~__context req false "all" +let export_all_vms_from_request ~__context (req: Request.t) = + bool_from_request ~__context req false "all" -let include_vhd_parents_from_request ~__context (req: Request.t) = - bool_from_request ~__context req false "include_vhd_parents" +let include_vhd_parents_from_request ~__context (req: Request.t) = + bool_from_request ~__context req false "include_vhd_parents" let export_snapshots_from_request ~__context (req: Request.t) = - bool_from_request ~__context req true "export_snapshots" - -let metadata_handler (req: Request.t) s _ = - debug "metadata_handler called"; - req.Request.close <- true; - - (* Xapi_http.with_context always completes the task at the end *) - Xapi_http.with_context "VM.export_metadata" req s - (fun __context -> - let include_vhd_parents = include_vhd_parents_from_request ~__context req in - let export_all = export_all_vms_from_request ~__context req in - let export_snapshots = export_snapshots_from_request ~__context req in - - (* Get the VM refs. In case of exporting the metadata of a particular VM, return a singleton list containing the vm ref. *) - (* In case of exporting all the VMs metadata, get all the VM records which are not default templates. 
*) - let vm_refs = - if export_all then begin - let is_default_template vm = - vm.API.vM_is_a_template - && (List.mem_assoc Xapi_globs.default_template_key vm.API.vM_other_config) - && ((List.assoc Xapi_globs.default_template_key vm.API.vM_other_config) = "true") in - let all_vms = Db.VM.get_all_records ~__context in - let interesting_vms = List.filter (fun (_, vm) -> - not (is_default_template vm) - && (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:vm.API.vM_uuid))) - ) all_vms in - List.map fst interesting_vms - end else - [vm_from_request ~__context req] - in - - if not export_all && Db.VM.get_is_a_snapshot ~__context ~self:(List.hd vm_refs) then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, [ "Exporting metadata of a snapshot is not allowed" ])); - - let task_id = Ref.string_of (Context.get_task_id __context) in - let read_fd, write_fd = Unix.pipe () in - let export_error = ref None in - let writer_thread = Thread.create (fun () -> - (* lock all the VMs before exporting their metadata *) - List.iter (fun vm -> lock_vm ~__context ~vm ~task_id `metadata_export) vm_refs; - try - finally - (fun () -> export_metadata ~with_snapshot_metadata:export_snapshots ~preserve_power_state:true ~include_vhd_parents ~__context ~vms:vm_refs write_fd) - (fun () -> - Unix.close write_fd; - List.iter (fun vm -> unlock_vm ~__context ~vm ~task_id) vm_refs) - with e -> - export_error := Some e) - () - in - let tar_data = Unixext.string_of_fd read_fd in - Thread.join writer_thread; - Unix.close read_fd; - match !export_error with - | None -> begin - let content_length = String.length tar_data in - - let headers = Http.http_200_ok ~keep_alive:false ~version:"1.0" () @ - [ Http.Hdr.task_id ^ ": " ^ task_id; - "Server: "^Xapi_globs.xapi_user_agent; - content_type; - "Content-Length: "^(string_of_int content_length); - "Content-Disposition: attachment; filename=\"export.xva\""] in - - Http_svr.headers s headers; - - 
Unixext.really_write_string s tar_data - end - | Some e -> begin - let response_string = Http.Response.(to_wire_string internal_error) in - Unixext.really_write_string s response_string; - error - "Caught %s while exporting metadata - responding with HTTP 500" - (Printexc.to_string e); - raise e - end - ) + bool_from_request ~__context req true "export_snapshots" + +let metadata_handler (req: Request.t) s _ = + debug "metadata_handler called"; + req.Request.close <- true; + + (* Xapi_http.with_context always completes the task at the end *) + Xapi_http.with_context "VM.export_metadata" req s + (fun __context -> + let include_vhd_parents = include_vhd_parents_from_request ~__context req in + let export_all = export_all_vms_from_request ~__context req in + let export_snapshots = export_snapshots_from_request ~__context req in + + (* Get the VM refs. In case of exporting the metadata of a particular VM, return a singleton list containing the vm ref. *) + (* In case of exporting all the VMs metadata, get all the VM records which are not default templates. 
*) + let vm_refs = + if export_all then begin + let is_default_template vm = + vm.API.vM_is_a_template + && (List.mem_assoc Xapi_globs.default_template_key vm.API.vM_other_config) + && ((List.assoc Xapi_globs.default_template_key vm.API.vM_other_config) = "true") in + let all_vms = Db.VM.get_all_records ~__context in + let interesting_vms = List.filter (fun (_, vm) -> + not (is_default_template vm) + && (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:vm.API.vM_uuid))) + ) all_vms in + List.map fst interesting_vms + end else + [vm_from_request ~__context req] + in + + if not export_all && Db.VM.get_is_a_snapshot ~__context ~self:(List.hd vm_refs) then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, [ "Exporting metadata of a snapshot is not allowed" ])); + + let task_id = Ref.string_of (Context.get_task_id __context) in + let read_fd, write_fd = Unix.pipe () in + let export_error = ref None in + let writer_thread = Thread.create (fun () -> + (* lock all the VMs before exporting their metadata *) + List.iter (fun vm -> lock_vm ~__context ~vm ~task_id `metadata_export) vm_refs; + try + finally + (fun () -> export_metadata ~with_snapshot_metadata:export_snapshots ~preserve_power_state:true ~include_vhd_parents ~__context ~vms:vm_refs write_fd) + (fun () -> + Unix.close write_fd; + List.iter (fun vm -> unlock_vm ~__context ~vm ~task_id) vm_refs) + with e -> + export_error := Some e) + () + in + let tar_data = Unixext.string_of_fd read_fd in + Thread.join writer_thread; + Unix.close read_fd; + match !export_error with + | None -> begin + let content_length = String.length tar_data in + + let headers = Http.http_200_ok ~keep_alive:false ~version:"1.0" () @ + [ Http.Hdr.task_id ^ ": " ^ task_id; + "Server: "^Xapi_globs.xapi_user_agent; + content_type; + "Content-Length: "^(string_of_int content_length); + "Content-Disposition: attachment; filename=\"export.xva\""] in + + Http_svr.headers s headers; + + 
Unixext.really_write_string s tar_data + end + | Some e -> begin + let response_string = Http.Response.(to_wire_string internal_error) in + Unixext.really_write_string s response_string; + error + "Caught %s while exporting metadata - responding with HTTP 500" + (Printexc.to_string e); + raise e + end + ) let handler (req: Request.t) s _ = debug "export handler"; req.Request.close <- true; (* First things first, let's make sure that the request has a valid session or username/password *) - + Xapi_http.assert_credentials_ok "VM.export" ~http_action:"get_export" req s; - + let use_compression = List.mem_assoc Constants.use_compression req.Request.query && List.assoc Constants.use_compression req.Request.query = "true" in debug "Using compression: %b" use_compression; (* Perform the SR reachability check using a fresh context/task because we don't want to complete the task in the forwarding case *) - - Server_helpers.exec_with_new_task "VM.export" - (fun __context -> + + Server_helpers.exec_with_new_task "VM.export" + (fun __context -> (* The VM Ref *) let vm_ref = vm_from_request ~__context req in let localhost = Helpers.get_localhost ~__context in let host_ok = check_vm_host_SRs ~__context vm_ref localhost in - + if not host_ok (* redirect *) - then - begin - try - (* We do this outside the Xapi_http.with_context below since that will complete the *) - (* task when it exits, and we don't want to do that *) - - let host = find_host_for_VM ~__context vm_ref in - - let address = Db.Host.get_address ~__context ~self:host in - let url = Printf.sprintf "https://%s%s?%s" address req.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Request.query)) in - info "export VM = %s redirecting to: %s" (Ref.string_of vm_ref) url; - let headers = Http.http_302_redirect url in - Http_svr.headers s headers; - with - | Api_errors.Server_error (a,b) as e -> - error "Caught exception in export handler: %s" (ExnHelper.string_of_exn e); - (* If there's no host that can see 
the SRs, then it's actually our responsibility *) - (* to complete the task *) - let task_id = - let all = req.Request.cookie @ req.Request.query in - if List.mem_assoc "task_id" all - then Some (Ref.of_string (List.assoc "task_id" all)) - else None in - begin match task_id with - | None -> Server_helpers.exec_with_new_task "export" ~task_in_database:true (fun __context -> TaskHelper.failed ~__context e) - | Some task_id -> Server_helpers.exec_with_forwarded_task task_id (fun __context -> TaskHelper.failed ~__context e) - end - | e -> - error "Caught exception in export handler: %s" (Printexc.to_string e); - raise e - end + then + begin + try + (* We do this outside the Xapi_http.with_context below since that will complete the *) + (* task when it exits, and we don't want to do that *) + + let host = find_host_for_VM ~__context vm_ref in + + let address = Db.Host.get_address ~__context ~self:host in + let url = Printf.sprintf "https://%s%s?%s" address req.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Request.query)) in + info "export VM = %s redirecting to: %s" (Ref.string_of vm_ref) url; + let headers = Http.http_302_redirect url in + Http_svr.headers s headers; + with + | Api_errors.Server_error (a,b) as e -> + error "Caught exception in export handler: %s" (ExnHelper.string_of_exn e); + (* If there's no host that can see the SRs, then it's actually our responsibility *) + (* to complete the task *) + let task_id = + let all = req.Request.cookie @ req.Request.query in + if List.mem_assoc "task_id" all + then Some (Ref.of_string (List.assoc "task_id" all)) + else None in + begin match task_id with + | None -> Server_helpers.exec_with_new_task "export" ~task_in_database:true (fun __context -> TaskHelper.failed ~__context e) + | Some task_id -> Server_helpers.exec_with_forwarded_task task_id (fun __context -> TaskHelper.failed ~__context e) + end + | e -> + error "Caught exception in export handler: %s" (Printexc.to_string e); + raise e + end 
else - (* Xapi_http.with_context always completes the task at the end *) - begin - debug "Doing xapi_http.with_context now..."; - Xapi_http.with_context "VM.export" req s - (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> - - (* This is the signal to say we've taken responsibility from the CLI server for completing the task *) - (* The GUI can deal with this itself, but the CLI is complicated by the thin cli/cli server split *) - TaskHelper.set_progress ~__context 0.0; - let refresh_session = Xapi_session.consider_touching_session rpc session_id in - let task_id = Ref.string_of (Context.get_task_id __context) in - let preserve_power_state = - let all = req.Request.cookie @ req.Request.query in - List.mem_assoc "preserve_power_state" all && bool_of_string (List.assoc "preserve_power_state" all) in - let headers = Http.http_200_ok ~keep_alive:false ~version:"1.0" () @ - [ Http.Hdr.task_id ^ ": " ^ task_id; - "Server: "^Xapi_globs.xapi_user_agent; - content_type; - "Content-Disposition: attachment; filename=\"export.xva\""] in - - with_vm_locked ~__context ~vm:vm_ref ~task_id `export - (fun () -> - Http_svr.headers s headers; - let go fd = export refresh_session __context rpc session_id fd vm_ref preserve_power_state in - if use_compression - then Gzip.compress s go - else go s - ) - - (* Exceptions are handled by Xapi_http.with_context *) - )) - end + (* Xapi_http.with_context always completes the task at the end *) + begin + debug "Doing xapi_http.with_context now..."; + Xapi_http.with_context "VM.export" req s + (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> + + (* This is the signal to say we've taken responsibility from the CLI server for completing the task *) + (* The GUI can deal with this itself, but the CLI is complicated by the thin cli/cli server split *) + TaskHelper.set_progress ~__context 0.0; + let refresh_session = Xapi_session.consider_touching_session rpc session_id in + let task_id = 
Ref.string_of (Context.get_task_id __context) in + let preserve_power_state = + let all = req.Request.cookie @ req.Request.query in + List.mem_assoc "preserve_power_state" all && bool_of_string (List.assoc "preserve_power_state" all) in + let headers = Http.http_200_ok ~keep_alive:false ~version:"1.0" () @ + [ Http.Hdr.task_id ^ ": " ^ task_id; + "Server: "^Xapi_globs.xapi_user_agent; + content_type; + "Content-Disposition: attachment; filename=\"export.xva\""] in + + with_vm_locked ~__context ~vm:vm_ref ~task_id `export + (fun () -> + Http_svr.headers s headers; + let go fd = export refresh_session __context rpc session_id fd vm_ref preserve_power_state in + if use_compression + then Gzip.compress s go + else go s + ) + + (* Exceptions are handled by Xapi_http.with_context *) + )) + end ) diff --git a/ocaml/xapi/export_raw_vdi.ml b/ocaml/xapi/export_raw_vdi.ml index 672efd57289..025af9b6b0c 100644 --- a/ocaml/xapi/export_raw_vdi.ml +++ b/ocaml/xapi/export_raw_vdi.ml @@ -13,81 +13,81 @@ *) (** HTTP handler for exporting a raw VDI. 
* @group Import and Export - *) +*) module D = Debug.Make(struct let name="export_raw_vdi" end) open D let localhost_handler rpc session_id vdi (req: Http.Request.t) (s: Unix.file_descr) = - req.Http.Request.close <- true; - Xapi_http.with_context "Exporting raw VDI" req s - (fun __context -> - let task_id = Context.get_task_id __context in - match Importexport.Format.of_req req with - | `Unknown x -> - error "export_raw_vdi task_id = %s; vdi = %s; unknown disk format = %s" - (Ref.string_of task_id) (Ref.string_of vdi) x; - TaskHelper.failed ~__context (Api_errors.Server_error(Api_errors.internal_error, ["Unknown format " ^ x])); - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()) - | `Ok format -> - (* Suggest this filename to the client: *) - let filename = Importexport.Format.filename ~__context vdi format in - let content_type = Importexport.Format.content_type format in - debug "export_raw_vdi task_id = %s; vdi = %s; format = %s; content-type = %s; filename = %s" - (Ref.string_of task_id) (Ref.string_of vdi) (Importexport.Format.to_string format) content_type filename; - let copy base_path path = - let headers = Http.http_200_ok ~keep_alive:false () @ [ - Http.Hdr.task_id ^ ":" ^ (Ref.string_of task_id); - Http.Hdr.content_type ^ ":" ^ content_type; - Http.Hdr.content_disposition ^ ": attachment; filename=\"" ^ filename ^ "\"" - ] in - Http_svr.headers s headers; - try - debug "Copying VDI contents..."; - Vhd_tool_wrapper.send ?relative_to:base_path (Vhd_tool_wrapper.update_task_progress __context) - "none" (Importexport.Format.to_string format) s path ""; - debug "Copying VDI complete."; - with Unix.Unix_error(Unix.EIO, _, _) -> - raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O errors"])) in - begin - try - Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RO - (fun path -> - match Importexport.base_vdi_of_req ~__context req with - | Some base_vdi -> - Sm_fs_ops.with_block_attached_device __context rpc 
session_id base_vdi `RO - (fun base_path -> copy (Some base_path) path) - | None -> copy None path - ) - with e -> - Backtrace.is_important e; - TaskHelper.failed ~__context e; - raise e - end - ) + req.Http.Request.close <- true; + Xapi_http.with_context "Exporting raw VDI" req s + (fun __context -> + let task_id = Context.get_task_id __context in + match Importexport.Format.of_req req with + | `Unknown x -> + error "export_raw_vdi task_id = %s; vdi = %s; unknown disk format = %s" + (Ref.string_of task_id) (Ref.string_of vdi) x; + TaskHelper.failed ~__context (Api_errors.Server_error(Api_errors.internal_error, ["Unknown format " ^ x])); + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()) + | `Ok format -> + (* Suggest this filename to the client: *) + let filename = Importexport.Format.filename ~__context vdi format in + let content_type = Importexport.Format.content_type format in + debug "export_raw_vdi task_id = %s; vdi = %s; format = %s; content-type = %s; filename = %s" + (Ref.string_of task_id) (Ref.string_of vdi) (Importexport.Format.to_string format) content_type filename; + let copy base_path path = + let headers = Http.http_200_ok ~keep_alive:false () @ [ + Http.Hdr.task_id ^ ":" ^ (Ref.string_of task_id); + Http.Hdr.content_type ^ ":" ^ content_type; + Http.Hdr.content_disposition ^ ": attachment; filename=\"" ^ filename ^ "\"" + ] in + Http_svr.headers s headers; + try + debug "Copying VDI contents..."; + Vhd_tool_wrapper.send ?relative_to:base_path (Vhd_tool_wrapper.update_task_progress __context) + "none" (Importexport.Format.to_string format) s path ""; + debug "Copying VDI complete."; + with Unix.Unix_error(Unix.EIO, _, _) -> + raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O errors"])) in + begin + try + Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RO + (fun path -> + match Importexport.base_vdi_of_req ~__context req with + | Some base_vdi -> + Sm_fs_ops.with_block_attached_device __context rpc 
session_id base_vdi `RO + (fun base_path -> copy (Some base_path) path) + | None -> copy None path + ) + with e -> + Backtrace.is_important e; + TaskHelper.failed ~__context e; + raise e + end + ) let export_raw vdi (req: Http.Request.t) (s: Unix.file_descr) _ = - (* Check the SR is reachable (in a fresh task context) *) - Server_helpers.exec_with_new_task "VDI.export_raw_vdi" - (fun __context -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - debug "Checking whether localhost can see SR: %s" (Ref.string_of sr); - if (Importexport.check_sr_availability ~__context sr) - then localhost_handler rpc session_id vdi req s - else - let host = Importexport.find_host_for_sr ~__context sr in - let address = Db.Host.get_address ~__context ~self:host in - Importexport.return_302_redirect req s address - ) - ) + (* Check the SR is reachable (in a fresh task context) *) + Server_helpers.exec_with_new_task "VDI.export_raw_vdi" + (fun __context -> + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let sr = Db.VDI.get_SR ~__context ~self:vdi in + debug "Checking whether localhost can see SR: %s" (Ref.string_of sr); + if (Importexport.check_sr_availability ~__context sr) + then localhost_handler rpc session_id vdi req s + else + let host = Importexport.find_host_for_sr ~__context sr in + let address = Db.Host.get_address ~__context ~self:host in + Importexport.return_302_redirect req s address + ) + ) let handler (req: Http.Request.t) (s: Unix.file_descr) _ = - debug "export_raw_vdi handler"; - Xapi_http.assert_credentials_ok "VDI.export_raw" ~http_action:"get_export_raw_vdi" req s; + debug "export_raw_vdi handler"; + Xapi_http.assert_credentials_ok "VDI.export_raw" ~http_action:"get_export_raw_vdi" req s; - Server_helpers.exec_with_new_task "VDI.export_raw_vdi" - (fun __context -> - export_raw (Importexport.vdi_of_req ~__context req) req s () - ) + Server_helpers.exec_with_new_task 
"VDI.export_raw_vdi" + (fun __context -> + export_raw (Importexport.vdi_of_req ~__context req) req s () + ) diff --git a/ocaml/xapi/features.ml b/ocaml/xapi/features.ml index 441df767e0a..3e8fcc85bec 100644 --- a/ocaml/xapi/features.ml +++ b/ocaml/xapi/features.ml @@ -9,7 +9,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. - *) +*) module D = Debug.Make(struct let name="license" end) open D @@ -17,83 +17,83 @@ open D (* Features and restrictions *) type feature = - | VLAN - | QoS - | Shared_storage - | Netapp - | Equalogic - | Pooling - | HA - | Marathon - | Email - | Performance - | WLB - | RBAC - | DMC - | Checkpoint - | CPU_masking - | Connection - | No_platform_filter - | No_nag_dialog - | VMPR - | IntelliCache - | GPU - | DR - | VIF_locking - | Storage_motion - | VGPU - | Integrated_GPU - | VSS - | Guest_agent_auto_update - | PCI_device_for_auto_update - | Xen_motion - | Guest_ip_setting - | AD - | Ssl_legacy_switch - | Nested_virt - | Live_patching - with rpc + | VLAN + | QoS + | Shared_storage + | Netapp + | Equalogic + | Pooling + | HA + | Marathon + | Email + | Performance + | WLB + | RBAC + | DMC + | Checkpoint + | CPU_masking + | Connection + | No_platform_filter + | No_nag_dialog + | VMPR + | IntelliCache + | GPU + | DR + | VIF_locking + | Storage_motion + | VGPU + | Integrated_GPU + | VSS + | Guest_agent_auto_update + | PCI_device_for_auto_update + | Xen_motion + | Guest_ip_setting + | AD + | Ssl_legacy_switch + | Nested_virt + | Live_patching +with rpc type orientation = Positive | Negative let keys_of_features = - [ - VLAN, ("restrict_vlan", Negative, "VLAN"); - QoS, ("restrict_qos", Negative, "QoS"); - Shared_storage, ("restrict_pool_attached_storage", Negative, "SStorage"); - Netapp, ("restrict_netapp", Negative, "NTAP"); - Equalogic, ("restrict_equalogic", Negative, "EQL"); - Pooling, ("restrict_pooling", 
Negative, "Pool"); - HA, ("enable_xha", Positive, "XHA"); - Marathon, ("restrict_marathon", Negative, "MTC"); - Email, ("restrict_email_alerting", Negative, "email"); - Performance, ("restrict_historical_performance", Negative, "perf"); - WLB, ("restrict_wlb", Negative, "WLB"); - RBAC, ("restrict_rbac", Negative, "RBAC"); - DMC, ("restrict_dmc", Negative, "DMC"); - Checkpoint, ("restrict_checkpoint", Negative, "chpt"); - CPU_masking, ("restrict_cpu_masking", Negative, "Mask"); - Connection, ("restrict_connection", Negative, "Cnx"); - No_platform_filter, ("platform_filter", Negative, "Plat"); - No_nag_dialog, ("regular_nag_dialog", Negative, "nonag"); - VMPR, ("restrict_vmpr", Negative, "VMPR"); - IntelliCache, ("restrict_intellicache", Negative, "IntelliCache"); - GPU, ("restrict_gpu", Negative, "GPU"); - DR, ("restrict_dr", Negative, "DR"); - VIF_locking, ("restrict_vif_locking", Negative, "VIFLock"); - Storage_motion, ("restrict_storage_xen_motion", Negative, "SXM"); - VGPU, ("restrict_vgpu", Negative, "vGPU"); - Integrated_GPU, ("restrict_integrated_gpu_passthrough", Negative, "iGPU"); - VSS, ("restrict_vss", Negative, "VSS"); - Guest_agent_auto_update, ("restrict_guest_agent_auto_update", Negative, "GAAU"); - PCI_device_for_auto_update, ("restrict_pci_device_for_auto_update", Negative, "PciAU"); - Xen_motion, ("restrict_xen_motion", Negative, "XenMotion"); - Guest_ip_setting, ("restrict_guest_ip_setting", Negative, "GuestIP"); - AD, ("restrict_ad", Negative, "AD"); - Ssl_legacy_switch, ("restrict_ssl_legacy_switch", Negative, "Ssl_legacy_switch"); - Nested_virt, ("restrict_nested_virt", Negative, "Nested_virt"); - Live_patching, ("restrict_live_patching", Negative, "Live_patching"); - ] + [ + VLAN, ("restrict_vlan", Negative, "VLAN"); + QoS, ("restrict_qos", Negative, "QoS"); + Shared_storage, ("restrict_pool_attached_storage", Negative, "SStorage"); + Netapp, ("restrict_netapp", Negative, "NTAP"); + Equalogic, ("restrict_equalogic", Negative, "EQL"); + 
Pooling, ("restrict_pooling", Negative, "Pool"); + HA, ("enable_xha", Positive, "XHA"); + Marathon, ("restrict_marathon", Negative, "MTC"); + Email, ("restrict_email_alerting", Negative, "email"); + Performance, ("restrict_historical_performance", Negative, "perf"); + WLB, ("restrict_wlb", Negative, "WLB"); + RBAC, ("restrict_rbac", Negative, "RBAC"); + DMC, ("restrict_dmc", Negative, "DMC"); + Checkpoint, ("restrict_checkpoint", Negative, "chpt"); + CPU_masking, ("restrict_cpu_masking", Negative, "Mask"); + Connection, ("restrict_connection", Negative, "Cnx"); + No_platform_filter, ("platform_filter", Negative, "Plat"); + No_nag_dialog, ("regular_nag_dialog", Negative, "nonag"); + VMPR, ("restrict_vmpr", Negative, "VMPR"); + IntelliCache, ("restrict_intellicache", Negative, "IntelliCache"); + GPU, ("restrict_gpu", Negative, "GPU"); + DR, ("restrict_dr", Negative, "DR"); + VIF_locking, ("restrict_vif_locking", Negative, "VIFLock"); + Storage_motion, ("restrict_storage_xen_motion", Negative, "SXM"); + VGPU, ("restrict_vgpu", Negative, "vGPU"); + Integrated_GPU, ("restrict_integrated_gpu_passthrough", Negative, "iGPU"); + VSS, ("restrict_vss", Negative, "VSS"); + Guest_agent_auto_update, ("restrict_guest_agent_auto_update", Negative, "GAAU"); + PCI_device_for_auto_update, ("restrict_pci_device_for_auto_update", Negative, "PciAU"); + Xen_motion, ("restrict_xen_motion", Negative, "XenMotion"); + Guest_ip_setting, ("restrict_guest_ip_setting", Negative, "GuestIP"); + AD, ("restrict_ad", Negative, "AD"); + Ssl_legacy_switch, ("restrict_ssl_legacy_switch", Negative, "Ssl_legacy_switch"); + Nested_virt, ("restrict_nested_virt", Negative, "Nested_virt"); + Live_patching, ("restrict_live_patching", Negative, "Live_patching"); + ] (* A list of features that must be considered "enabled" by `of_assoc_list` if the feature string is missing from the list. 
These are existing features @@ -102,51 +102,51 @@ let keys_of_features = let enabled_when_unknown = [Xen_motion; AD] let name_of_feature f = - rpc_of_feature f |> Rpc.string_of_rpc + rpc_of_feature f |> Rpc.string_of_rpc let string_of_feature f = - let str, o, _ = List.assoc f keys_of_features in - str, o - + let str, o, _ = List.assoc f keys_of_features in + str, o + let feature_of_string str = - let f, (_, o, _) = List.find (fun (_, (k, _, _)) -> str = k) keys_of_features in - f, o + let f, (_, o, _) = List.find (fun (_, (k, _, _)) -> str = k) keys_of_features in + f, o let tag_of_feature f = - let _, _, tag = List.assoc f keys_of_features in - tag - + let _, _, tag = List.assoc f keys_of_features in + tag + let all_features = - List.map (fun (f, _) -> f) keys_of_features + List.map (fun (f, _) -> f) keys_of_features let to_compact_string (s: feature list) = - let get_tag f = - let tag = tag_of_feature f in - if List.mem f s then - tag - else - String.make (String.length tag) ' ' - in - let tags = List.map get_tag all_features in - String.concat " " tags - + let get_tag f = + let tag = tag_of_feature f in + if List.mem f s then + tag + else + String.make (String.length tag) ' ' + in + let tags = List.map get_tag all_features in + String.concat " " tags + let to_assoc_list (s: feature list) = - let get_map f = - let str, o = string_of_feature f in - let switch = List.mem f s in - let switch = string_of_bool (if o = Positive then switch else not switch) in - str, switch - in - List.map get_map all_features + let get_map f = + let str, o = string_of_feature f in + let switch = List.mem f s in + let switch = string_of_bool (if o = Positive then switch else not switch) in + str, switch + in + List.map get_map all_features let of_assoc_list l = - let get_feature f = - try - let str, o = string_of_feature f in - let v = bool_of_string (List.assoc str l) in - let v = if o = Positive then v else not v in - if v then Some f else None - with _ -> - if List.mem f 
enabled_when_unknown then Some f else None - in - Stdext.Listext.List.filter_map get_feature all_features + let get_feature f = + try + let str, o = string_of_feature f in + let v = bool_of_string (List.assoc str l) in + let v = if o = Positive then v else not v in + if v then Some f else None + with _ -> + if List.mem f enabled_when_unknown then Some f else None + in + Stdext.Listext.List.filter_map get_feature all_features diff --git a/ocaml/xapi/features.mli b/ocaml/xapi/features.mli index 57bdd1092e1..e225ba03a32 100644 --- a/ocaml/xapi/features.mli +++ b/ocaml/xapi/features.mli @@ -13,45 +13,45 @@ *) (** Module that controls feature restriction. * @group Licensing - *) +*) (** Features than can be enabled and disabled. *) type feature = - | VLAN (** Enable VLAN. Currently not used. *) - | QoS (** Enable QoS control. Currently not used. *) - | Shared_storage (** Enable shared storage. Currently not used? *) - | Netapp (** Enable use of NetApp SRs *) - | Equalogic (** Enable use of Equalogic SRs *) - | Pooling (** Enable pooling of hosts *) - | HA (** Enable High Availability (HA) *) - | Marathon (** Currently not used *) - | Email (** Enable email alerting *) - | Performance (** Used by XenCenter to restrict the performance graphs *) - | WLB (** Enable Workload Balancing (WLB) *) - | RBAC (** Enable Role-Based Access Control (RBAC) *) - | DMC (** Enable Dynamic Memory Control (DMC) *) - | Checkpoint (** Enable Checkpoint functionality *) - | CPU_masking (** Enable masking of CPU features *) - | Connection (** Used by XenCenter *) - | No_platform_filter (** Filter platform data *) - | No_nag_dialog (** Used by XenCenter *) - | VMPR (** Enable use of VM Protection and Recovery *) - | IntelliCache (** Enable use of IntelliCache feature *) - | GPU (** Enable use of GPU passthrough *) - | DR (** Enable disaster recovery *) - | VIF_locking (** Enable locking of VIFs to specific MAC addresses and IP addresses. 
*) - | Storage_motion (** Enable Storage XenMotion feature *) - | VGPU (** Enable use of virtual GPUs *) - | Integrated_GPU (** Enable use of integrated GPU passthrough *) - | VSS (** Enable use of VSS *) - | Guest_agent_auto_update (** Enable use of the Windows guest agent auto-update feature. *) - | PCI_device_for_auto_update (** Enable making new VMs with the PCI device that triggers Windows Update. *) - | Xen_motion (** Enable XenMotion feature *) - | Guest_ip_setting (** Enable use of Guest ip seting *) - | AD (** Enable use of Active Directory *) - | Ssl_legacy_switch (** Enable the control switch for SSL/TLS legacy-mode. *) - | Nested_virt (** Enable the use of nested virtualisation *) - | Live_patching (** Enable the use of live patching feature. *) + | VLAN (** Enable VLAN. Currently not used. *) + | QoS (** Enable QoS control. Currently not used. *) + | Shared_storage (** Enable shared storage. Currently not used? *) + | Netapp (** Enable use of NetApp SRs *) + | Equalogic (** Enable use of Equalogic SRs *) + | Pooling (** Enable pooling of hosts *) + | HA (** Enable High Availability (HA) *) + | Marathon (** Currently not used *) + | Email (** Enable email alerting *) + | Performance (** Used by XenCenter to restrict the performance graphs *) + | WLB (** Enable Workload Balancing (WLB) *) + | RBAC (** Enable Role-Based Access Control (RBAC) *) + | DMC (** Enable Dynamic Memory Control (DMC) *) + | Checkpoint (** Enable Checkpoint functionality *) + | CPU_masking (** Enable masking of CPU features *) + | Connection (** Used by XenCenter *) + | No_platform_filter (** Filter platform data *) + | No_nag_dialog (** Used by XenCenter *) + | VMPR (** Enable use of VM Protection and Recovery *) + | IntelliCache (** Enable use of IntelliCache feature *) + | GPU (** Enable use of GPU passthrough *) + | DR (** Enable disaster recovery *) + | VIF_locking (** Enable locking of VIFs to specific MAC addresses and IP addresses. 
*) + | Storage_motion (** Enable Storage XenMotion feature *) + | VGPU (** Enable use of virtual GPUs *) + | Integrated_GPU (** Enable use of integrated GPU passthrough *) + | VSS (** Enable use of VSS *) + | Guest_agent_auto_update (** Enable use of the Windows guest agent auto-update feature. *) + | PCI_device_for_auto_update (** Enable making new VMs with the PCI device that triggers Windows Update. *) + | Xen_motion (** Enable XenMotion feature *) + | Guest_ip_setting (** Enable use of Guest ip seting *) + | AD (** Enable use of Active Directory *) + | Ssl_legacy_switch (** Enable the control switch for SSL/TLS legacy-mode. *) + | Nested_virt (** Enable the use of nested virtualisation *) + | Live_patching (** Enable the use of live patching feature. *) (** Convert RPC into {!feature}s *) val feature_of_rpc : Rpc.t -> feature diff --git a/ocaml/xapi/fileserver.ml b/ocaml/xapi/fileserver.ml index 5bad4389a33..e85bea9b15e 100644 --- a/ocaml/xapi/fileserver.ml +++ b/ocaml/xapi/fileserver.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(* +(* * HTTP handler for serving files in the rt subdir *) @@ -24,36 +24,36 @@ module D = Debug.Make(struct let name="xapi" end) open D let escape uri = - String.escaped ~rules:[ '<', "<"; '>', ">"; '\'', "'"; '"', """; '&', "&" ] uri + String.escaped ~rules:[ '<', "<"; '>', ">"; '\'', "'"; '"', """; '&', "&" ] uri let missing uri = " \ - \ -404 Not Found \ - \ -

Not Found

\ -

The requested URL " ^ (escape uri) ^ " was not found on this server.

\ -
\ -
Xapi Server
\ -" + \ + 404 Not Found \ + \ +

Not Found

\ +

The requested URL " ^ (escape uri) ^ " was not found on this server.

\ +
\ +
Xapi Server
\ + " let get_extension filename = - try - let basename = Filename.basename filename in - let i = String.rindex basename '.' in - Some (String.sub basename (i + 1) (String.length basename - i - 1)) - with _ -> - None + try + let basename = Filename.basename filename in + let i = String.rindex basename '.' in + Some (String.sub basename (i + 1) (String.length basename - i - 1)) + with _ -> + None let application_octet_stream = "application/octet-stream" let mime_of_extension = function - | "html" | "htm" -> "text/html" - | "css" -> "text/css" - | "js" -> "application/javascript" - | "gif" -> "image/gif" - | "png" -> "image/png" - | "jpg" | "jpeg" -> "image/jpeg" - | _ -> application_octet_stream + | "html" | "htm" -> "text/html" + | "css" -> "text/css" + | "js" -> "application/javascript" + | "gif" -> "image/gif" + | "png" -> "image/png" + | "jpg" | "jpeg" -> "image/jpeg" + | _ -> application_octet_stream let send_file (uri_base: string) (dir: string) (req: Request.t) (bio: Buf_io.t) _ = let uri_base_len = String.length uri_base in @@ -67,19 +67,19 @@ let send_file (uri_base: string) (dir: string) (req: Request.t) (bio: Buf_io.t) (* remove any dodgy use of "." or ".." 
NB we don't prevent the use of symlinks *) let file_path = Stdext.Unixext.resolve_dot_and_dotdot file_path in - if not(String.startswith dir file_path) then begin + if not(String.startswith dir file_path) then begin debug "Rejecting request for file: %s (outside of directory %s)" file_path dir; - Http_svr.response_forbidden ~req s + Http_svr.response_forbidden ~req s end else begin let stat = Unix.stat file_path in (* if a directory, automatically add index.html *) let file_path = if stat.Unix.st_kind = Unix.S_DIR then file_path ^ "/index.html" else file_path in - + let mime_content_type = let open Stdext.Opt in - let ext = map String.lowercase (get_extension file_path) in - default application_octet_stream (map mime_of_extension ext) in + let ext = map String.lowercase (get_extension file_path) in + default application_octet_stream (map mime_of_extension ext) in Http_svr.response_file ~mime_content_type s file_path end with - _ -> Http_svr.response_missing s (missing uri) + _ -> Http_svr.response_missing s (missing uri) diff --git a/ocaml/xapi/fileupload.ml b/ocaml/xapi/fileupload.ml index 2ed3807e37b..4ebace1c2b8 100644 --- a/ocaml/xapi/fileupload.ml +++ b/ocaml/xapi/fileupload.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(* +(* * HTTP handler for file uploads (must have chunked encodings) * To provoke curl into using chunked encodings, feed it data from stdin eg * cat foo | curl -T - http://server:port/upload diff --git a/ocaml/xapi/hashtbl_xml.ml b/ocaml/xapi/hashtbl_xml.ml index 74707a890ca..327e7b2c673 100644 --- a/ocaml/xapi/hashtbl_xml.ml +++ b/ocaml/xapi/hashtbl_xml.ml @@ -35,10 +35,10 @@ let of_xml (input: Xmlm.input) = let db = Hashtbl.create 10 in let el (tag: Xmlm.tag) acc = match tag with | (_, "config"), attrs -> List.flatten acc - | (_, "row"), attrs -> - let key=List.assoc ("","key") attrs in - let value=List.assoc ("","value") attrs in - (key,value)::List.flatten acc + | (_, "row"), attrs -> + let key=List.assoc ("","key") attrs in + let value=List.assoc ("","value") attrs in + (key,value)::List.flatten acc | (ns, name), attrs -> raise (Unmarshall_error (Printf.sprintf "Unknown tag: (%s,%s)" ns name)) in let data str = [] in diff --git a/ocaml/xapi/helper_hostname.ml b/ocaml/xapi/helper_hostname.ml index c9e44de801b..68ea0b93227 100644 --- a/ocaml/xapi/helper_hostname.ml +++ b/ocaml/xapi/helper_hostname.ml @@ -20,25 +20,25 @@ let filter_newline s = if i=0 then 0 else let chr = String.get s i in - if chr='\n' || chr='\r' then count_newlines (i-1) - else i in + if chr='\n' || chr='\r' then count_newlines (i-1) + else i in let newline_end = count_newlines (l-1) in - String.sub s 0 (newline_end+1) + String.sub s 0 (newline_end+1) let _cached_hostname = ref "" let _cached_hostname_m = Mutex.create () -let get_hostname () = +let get_hostname () = Mutex.execute _cached_hostname_m (fun () -> if !_cached_hostname = "" then - _cached_hostname := - (try filter_newline (get_process_output "/bin/hostname") - with _ -> "unknown"); + _cached_hostname := + (try filter_newline (get_process_output "/bin/hostname") + with _ -> "unknown"); !_cached_hostname ) (* Fetch the hostname again in case it has changed beneath us *) -let reget_hostname () = +let reget_hostname () = 
Mutex.execute _cached_hostname_m (fun () -> _cached_hostname := ""); get_hostname () diff --git a/ocaml/xapi/helper_process.ml b/ocaml/xapi/helper_process.ml index b6d93dbf53e..d6b27c5d18f 100644 --- a/ocaml/xapi/helper_process.ml +++ b/ocaml/xapi/helper_process.ml @@ -16,14 +16,14 @@ let generic_handler cmd n = raise (Api_errors.Server_error (Api_errors.internal_error, [string_of_int n])) -exception Process_output_error of string -let get_process_output ?(handler=generic_handler) cmd = +exception Process_output_error of string +let get_process_output ?(handler=generic_handler) cmd = let args = Stdext.Xstringext.String.split ' ' cmd in try fst (Forkhelpers.execute_command_get_output (List.hd args) (List.tl args)) - with - | Forkhelpers.Spawn_internal_error(err,out,Unix.WEXITED n) -> - handler cmd n - | _ -> - raise (Process_output_error cmd) + with + | Forkhelpers.Spawn_internal_error(err,out,Unix.WEXITED n) -> + handler cmd n + | _ -> + raise (Process_output_error cmd) diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index 6bc9a6cdf96..d976d6a81bd 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -49,47 +49,47 @@ let rpc_fun : (Http.Request.t -> Unix.file_descr -> Rpc.call -> Rpc.response) op let get_rpc () = match !rpc_fun with - None -> failwith "No rpc set!" - | Some f -> f + None -> failwith "No rpc set!" + | Some f -> f (* !! FIXME - trap proper MISSINGREFERENCE exception when this has been defined *) (* !! FIXME(2) - this code could be shared with the CLI? 
*) let checknull f = try f() with - _ -> "" + _ -> "" let get_pool ~__context = List.hd (Db.Pool.get_all ~__context) let get_master ~__context = - Db.Pool.get_master ~__context ~self:(get_pool ~__context) + Db.Pool.get_master ~__context ~self:(get_pool ~__context) let get_primary_ip_addr ~__context iface primary_address_type = - if iface = "" then - None - else - try - let dbg = Context.string_of_task __context in - let addrs = match primary_address_type with - | `IPv4 -> Net.Interface.get_ipv4_addr dbg ~name:iface - | `IPv6 -> Net.Interface.get_ipv6_addr dbg ~name:iface - in - let addrs = List.map (fun (addr, _) -> Unix.string_of_inet_addr addr) addrs in - (* Filter out link-local addresses *) - let addrs = List.filter (fun addr -> String.sub addr 0 4 <> "fe80") addrs in - Some (List.hd addrs) - with _ -> None + if iface = "" then + None + else + try + let dbg = Context.string_of_task __context in + let addrs = match primary_address_type with + | `IPv4 -> Net.Interface.get_ipv4_addr dbg ~name:iface + | `IPv6 -> Net.Interface.get_ipv6_addr dbg ~name:iface + in + let addrs = List.map (fun (addr, _) -> Unix.string_of_inet_addr addr) addrs in + (* Filter out link-local addresses *) + let addrs = List.filter (fun addr -> String.sub addr 0 4 <> "fe80") addrs in + Some (List.hd addrs) + with _ -> None let get_management_ip_addr ~__context = - get_primary_ip_addr ~__context - (Xapi_inventory.lookup Xapi_inventory._management_interface) - (Record_util.primary_address_type_of_string (Xapi_inventory.lookup Xapi_inventory._management_address_type ~default:"ipv4")) + get_primary_ip_addr ~__context + (Xapi_inventory.lookup Xapi_inventory._management_interface) + (Record_util.primary_address_type_of_string (Xapi_inventory.lookup Xapi_inventory._management_address_type ~default:"ipv4")) let get_localhost_uuid () = Xapi_inventory.lookup Xapi_inventory._installation_uuid let get_localhost ~__context : API.ref_host = - let uuid = get_localhost_uuid () in - Db.Host.get_by_uuid 
~__context ~uuid + let uuid = get_localhost_uuid () in + Db.Host.get_by_uuid ~__context ~uuid (* Determine the gateway and DNS PIFs: * If one of the PIFs with IP has other_config:defaultroute=true, then @@ -98,149 +98,149 @@ let get_localhost ~__context : API.ref_host = * interface, pick a random PIF. * Similarly for the DNS PIF, but with other_config:peerdns. *) let determine_gateway_and_dns_ifs ~__context ?(management_interface : API.ref_PIF option) () = - let localhost = get_localhost ~__context in - let ip_pifs = Db.PIF.get_records_where ~__context - ~expr:(And (Eq (Field "host", Literal (Ref.string_of localhost)), - Not (Eq (Field "ip_configuration_mode", Literal "None")))) in - if ip_pifs = [] then - None, None - else - let gateway_pif, gateway_rc = - let oc = List.filter (fun (_, r) -> - List.mem_assoc "defaultroute" r.API.pIF_other_config && - List.assoc "defaultroute" r.API.pIF_other_config = "true" - ) ip_pifs in - match oc with - | (ref, rc) :: tl -> - if tl <> [] then - warn "multiple PIFs with other_config:defaultroute=true - choosing %s" rc.API.pIF_device; - (ref, rc) - | [] -> - match management_interface with - | Some pif -> let rc = Db.PIF.get_record ~__context ~self:pif in (pif, rc) - | None -> - let mgmt = List.filter (fun (_, r) -> r.API.pIF_management) ip_pifs in - match mgmt with - | (ref, rc) :: _ -> (ref, rc) - | [] -> - let (ref, rc) = List.hd ip_pifs in - warn "no gateway PIF found - choosing %s" rc.API.pIF_device; - (ref, rc) - in - let dns_pif, dns_rc = - let oc = List.filter (fun (_, r) -> - List.mem_assoc "peerdns" r.API.pIF_other_config && - List.assoc "peerdns" r.API.pIF_other_config = "true" - ) ip_pifs in - match oc with - | (ref, rc) :: tl -> - if tl <> [] then - warn "multiple PIFs with other_config:peerdns=true - choosing %s" rc.API.pIF_device; - (ref, rc) - | [] -> - match management_interface with - | Some pif -> let pif_rc = Db.PIF.get_record ~__context ~self:pif in (pif, pif_rc) - | None -> - let mgmt = List.filter (fun (_, 
r) -> r.API.pIF_management) ip_pifs in - match mgmt with - | (ref, rc) :: _ -> (ref, rc) - | [] -> - let (ref, rc) = List.hd ip_pifs in - warn "no DNS PIF found - choosing %s" rc.API.pIF_device; - (ref, rc) - in - let gateway_bridge = Db.Network.get_bridge ~__context ~self:gateway_rc.API.pIF_network in - let dns_bridge = Db.Network.get_bridge ~__context ~self:dns_rc.API.pIF_network in - Some (gateway_pif, gateway_bridge), Some (dns_pif, dns_bridge) + let localhost = get_localhost ~__context in + let ip_pifs = Db.PIF.get_records_where ~__context + ~expr:(And (Eq (Field "host", Literal (Ref.string_of localhost)), + Not (Eq (Field "ip_configuration_mode", Literal "None")))) in + if ip_pifs = [] then + None, None + else + let gateway_pif, gateway_rc = + let oc = List.filter (fun (_, r) -> + List.mem_assoc "defaultroute" r.API.pIF_other_config && + List.assoc "defaultroute" r.API.pIF_other_config = "true" + ) ip_pifs in + match oc with + | (ref, rc) :: tl -> + if tl <> [] then + warn "multiple PIFs with other_config:defaultroute=true - choosing %s" rc.API.pIF_device; + (ref, rc) + | [] -> + match management_interface with + | Some pif -> let rc = Db.PIF.get_record ~__context ~self:pif in (pif, rc) + | None -> + let mgmt = List.filter (fun (_, r) -> r.API.pIF_management) ip_pifs in + match mgmt with + | (ref, rc) :: _ -> (ref, rc) + | [] -> + let (ref, rc) = List.hd ip_pifs in + warn "no gateway PIF found - choosing %s" rc.API.pIF_device; + (ref, rc) + in + let dns_pif, dns_rc = + let oc = List.filter (fun (_, r) -> + List.mem_assoc "peerdns" r.API.pIF_other_config && + List.assoc "peerdns" r.API.pIF_other_config = "true" + ) ip_pifs in + match oc with + | (ref, rc) :: tl -> + if tl <> [] then + warn "multiple PIFs with other_config:peerdns=true - choosing %s" rc.API.pIF_device; + (ref, rc) + | [] -> + match management_interface with + | Some pif -> let pif_rc = Db.PIF.get_record ~__context ~self:pif in (pif, pif_rc) + | None -> + let mgmt = List.filter (fun (_, r) -> 
r.API.pIF_management) ip_pifs in + match mgmt with + | (ref, rc) :: _ -> (ref, rc) + | [] -> + let (ref, rc) = List.hd ip_pifs in + warn "no DNS PIF found - choosing %s" rc.API.pIF_device; + (ref, rc) + in + let gateway_bridge = Db.Network.get_bridge ~__context ~self:gateway_rc.API.pIF_network in + let dns_bridge = Db.Network.get_bridge ~__context ~self:dns_rc.API.pIF_network in + Some (gateway_pif, gateway_bridge), Some (dns_pif, dns_bridge) let update_pif_address ~__context ~self = - let network = Db.PIF.get_network ~__context ~self in - let bridge = Db.Network.get_bridge ~__context ~self:network in - let dbg = Context.string_of_task __context in - try - begin - match Net.Interface.get_ipv4_addr dbg bridge with - | (addr, plen) :: _ -> - let ip = Unix.string_of_inet_addr addr in - let netmask = Network_interface.prefixlen_to_netmask plen in - if ip <> Db.PIF.get_IP ~__context ~self || netmask <> Db.PIF.get_netmask ~__context ~self then begin - debug "PIF %s bridge %s IP address changed: %s/%s" (Db.PIF.get_uuid ~__context ~self) bridge ip netmask; - Db.PIF.set_IP ~__context ~self ~value:ip; - Db.PIF.set_netmask ~__context ~self ~value:netmask - end - | _ -> () - end; - let ipv6_addr = Net.Interface.get_ipv6_addr dbg ~name:bridge in - let ipv6_addr' = List.map (fun (addr, plen) -> Printf.sprintf "%s/%d" (Unix.string_of_inet_addr addr) plen) ipv6_addr in - if ipv6_addr' <> Db.PIF.get_IPv6 ~__context ~self then begin - debug "PIF %s bridge %s IPv6 address changed: %s" (Db.PIF.get_uuid ~__context ~self) - bridge (String.concat "; " ipv6_addr'); - Db.PIF.set_IPv6 ~__context ~self ~value:ipv6_addr' - end - with _ -> - debug "Bridge %s is not up; not updating IP" bridge + let network = Db.PIF.get_network ~__context ~self in + let bridge = Db.Network.get_bridge ~__context ~self:network in + let dbg = Context.string_of_task __context in + try + begin + match Net.Interface.get_ipv4_addr dbg bridge with + | (addr, plen) :: _ -> + let ip = Unix.string_of_inet_addr addr in + 
let netmask = Network_interface.prefixlen_to_netmask plen in + if ip <> Db.PIF.get_IP ~__context ~self || netmask <> Db.PIF.get_netmask ~__context ~self then begin + debug "PIF %s bridge %s IP address changed: %s/%s" (Db.PIF.get_uuid ~__context ~self) bridge ip netmask; + Db.PIF.set_IP ~__context ~self ~value:ip; + Db.PIF.set_netmask ~__context ~self ~value:netmask + end + | _ -> () + end; + let ipv6_addr = Net.Interface.get_ipv6_addr dbg ~name:bridge in + let ipv6_addr' = List.map (fun (addr, plen) -> Printf.sprintf "%s/%d" (Unix.string_of_inet_addr addr) plen) ipv6_addr in + if ipv6_addr' <> Db.PIF.get_IPv6 ~__context ~self then begin + debug "PIF %s bridge %s IPv6 address changed: %s" (Db.PIF.get_uuid ~__context ~self) + bridge (String.concat "; " ipv6_addr'); + Db.PIF.set_IPv6 ~__context ~self ~value:ipv6_addr' + end + with _ -> + debug "Bridge %s is not up; not updating IP" bridge let set_gateway ~__context ~pif ~bridge = - let dbg = Context.string_of_task __context in - try - match Net.Interface.get_ipv4_gateway dbg bridge with - | Some addr -> Db.PIF.set_gateway ~__context ~self:pif ~value:(Unix.string_of_inet_addr addr) - | None -> () - with _ -> - warn "Unable to get the gateway of PIF %s (%s)" (Ref.string_of pif) bridge + let dbg = Context.string_of_task __context in + try + match Net.Interface.get_ipv4_gateway dbg bridge with + | Some addr -> Db.PIF.set_gateway ~__context ~self:pif ~value:(Unix.string_of_inet_addr addr) + | None -> () + with _ -> + warn "Unable to get the gateway of PIF %s (%s)" (Ref.string_of pif) bridge let set_DNS ~__context ~pif ~bridge = - let dbg = Context.string_of_task __context in - try - match Net.Interface.get_dns dbg bridge with - | (nameservers, _) when nameservers != [] -> - let dns = String.concat "," (List.map (Unix.string_of_inet_addr) nameservers) in - Db.PIF.set_DNS ~__context ~self:pif ~value:dns; - | _ -> () - with _ -> - warn "Unable to get the dns of PIF %s (%s)" (Ref.string_of pif) bridge + let dbg = 
Context.string_of_task __context in + try + match Net.Interface.get_dns dbg bridge with + | (nameservers, _) when nameservers != [] -> + let dns = String.concat "," (List.map (Unix.string_of_inet_addr) nameservers) in + Db.PIF.set_DNS ~__context ~self:pif ~value:dns; + | _ -> () + with _ -> + warn "Unable to get the dns of PIF %s (%s)" (Ref.string_of pif) bridge let update_pif_addresses ~__context = - debug "Updating IP addresses in DB for DHCP and autoconf PIFs"; - let host = get_localhost ~__context in - let pifs = Db.PIF.get_refs_where ~__context ~expr:( - And ( - Eq (Field "host", Literal (Ref.string_of host)), - Or ( - Or ( - (Eq (Field "ip_configuration_mode", Literal "DHCP")), - (Eq (Field "ipv6_configuration_mode", Literal "DHCP")) - ), - (Eq (Field "ipv6_configuration_mode", Literal "Autoconf")) - ) - ) - ) in - let gateway_if, dns_if = determine_gateway_and_dns_ifs ~__context () in - Opt.iter (fun (pif, bridge) -> set_gateway ~__context ~pif ~bridge) gateway_if; - Opt.iter (fun (pif, bridge) -> set_DNS ~__context ~pif ~bridge) dns_if; - List.iter (fun self -> update_pif_address ~__context ~self) pifs + debug "Updating IP addresses in DB for DHCP and autoconf PIFs"; + let host = get_localhost ~__context in + let pifs = Db.PIF.get_refs_where ~__context ~expr:( + And ( + Eq (Field "host", Literal (Ref.string_of host)), + Or ( + Or ( + (Eq (Field "ip_configuration_mode", Literal "DHCP")), + (Eq (Field "ipv6_configuration_mode", Literal "DHCP")) + ), + (Eq (Field "ipv6_configuration_mode", Literal "Autoconf")) + ) + ) + ) in + let gateway_if, dns_if = determine_gateway_and_dns_ifs ~__context () in + Opt.iter (fun (pif, bridge) -> set_gateway ~__context ~pif ~bridge) gateway_if; + Opt.iter (fun (pif, bridge) -> set_DNS ~__context ~pif ~bridge) dns_if; + List.iter (fun self -> update_pif_address ~__context ~self) pifs let make_rpc ~__context rpc : Rpc.response = - let subtask_of = Ref.string_of (Context.get_task_id __context) in - let open Xmlrpc_client in - let 
http = xmlrpc ~subtask_of ~version:"1.1" "/" in - let transport = - if Pool_role.is_master () - then Unix(Xapi_globs.unix_domain_socket) - else SSL(SSL.make ~use_stunnel_cache:true (), Pool_role.get_master_address(), !Xapi_globs.https_port) in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http rpc + let subtask_of = Ref.string_of (Context.get_task_id __context) in + let open Xmlrpc_client in + let http = xmlrpc ~subtask_of ~version:"1.1" "/" in + let transport = + if Pool_role.is_master () + then Unix(Xapi_globs.unix_domain_socket) + else SSL(SSL.make ~use_stunnel_cache:true (), Pool_role.get_master_address(), !Xapi_globs.https_port) in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http rpc (* This one uses rpc-light *) let make_remote_rpc remote_address xml = - let open Xmlrpc_client in - let transport = SSL(SSL.make (), remote_address, !Xapi_globs.https_port) in - let http = xmlrpc ~version:"1.0" "/" in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"remote_xapi" ~transport ~http xml + let open Xmlrpc_client in + let transport = SSL(SSL.make (), remote_address, !Xapi_globs.https_port) in + let http = xmlrpc ~version:"1.0" "/" in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"remote_xapi" ~transport ~http xml (* Helper type for an object which may or may not be in the local database. *) type 'a api_object = - | LocalObject of 'a Ref.t - | RemoteObject of ((Rpc.call -> Rpc.response) * API.ref_session * ('a Ref.t)) + | LocalObject of 'a Ref.t + | RemoteObject of ((Rpc.call -> Rpc.response) * API.ref_session * ('a Ref.t)) (** Log into pool master using the client code, call a function passing it the rpc function and session id, logout when finished. 
*) @@ -255,24 +255,24 @@ let call_api_functions ~__context f = let require_explicit_logout = ref false in let do_master_login () = let session = Client.Client.Session.slave_login rpc (get_localhost ~__context) !Xapi_globs.pool_secret in - require_explicit_logout := true; - session + require_explicit_logout := true; + session in let session_id = - try - if Pool_role.is_master() then - begin - let session_id = Context.get_session_id __context in + try + if Pool_role.is_master() then + begin + let session_id = Context.get_session_id __context in if Db.Session.get_pool ~__context ~self:session_id then session_id else do_master_login () - end + end else - let session_id = Context.get_session_id __context in - (* read any attr to test if session is still valid *) - ignore (Db.Session.get_pool ~__context ~self:session_id) ; - session_id - with _ -> + let session_id = Context.get_session_id __context in + (* read any attr to test if session is still valid *) + ignore (Db.Session.get_pool ~__context ~self:session_id) ; + session_id + with _ -> do_master_login () in (* let () = debug "login done" in *) @@ -282,40 +282,40 @@ let call_api_functions ~__context f = (* debug "remote client call finished; logging out"; *) if !require_explicit_logout then - try Client.Client.Session.logout rpc session_id - with e -> - debug "Helpers.call_api_functions failed to logout: %s (ignoring)" (Printexc.to_string e)) + try Client.Client.Session.logout rpc session_id + with e -> + debug "Helpers.call_api_functions failed to logout: %s (ignoring)" (Printexc.to_string e)) let call_emergency_mode_functions hostname f = - let open Xmlrpc_client in - let transport = SSL(SSL.make (), hostname, !Xapi_globs.https_port) in - let http = xmlrpc ~version:"1.0" "/" in - let rpc = XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http in + let open Xmlrpc_client in + let transport = SSL(SSL.make (), hostname, !Xapi_globs.https_port) in + let http = xmlrpc ~version:"1.0" "/" in + let rpc = 
XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http in let session_id = Client.Client.Session.slave_local_login rpc !Xapi_globs.pool_secret in finally (fun () -> f rpc session_id) (fun () -> Client.Client.Session.local_logout rpc session_id) let progress ~__context t = - for i = 0 to int_of_float (t *. 100.) do - let v = (float_of_int i /. 100.) /. t in + for i = 0 to int_of_float (t *. 100.) do + let v = (float_of_int i /. 100.) /. t in - TaskHelper.set_progress ~__context v; - Thread.delay 1. - done; - TaskHelper.set_progress ~__context 1. + TaskHelper.set_progress ~__context v; + Thread.delay 1. + done; + TaskHelper.set_progress ~__context 1. let get_user ~__context username = - let uuids = Db.User.get_all ~__context in - if List.length uuids = 0 then - failwith "Failed to find any users"; - List.hd uuids (* FIXME! it assumes that there is only one element in the list (root), username is not used*) + let uuids = Db.User.get_all ~__context in + if List.length uuids = 0 then + failwith "Failed to find any users"; + List.hd uuids (* FIXME! 
it assumes that there is only one element in the list (root), username is not used*) let is_domain_zero ~__context vm_ref = let host_ref = Db.VM.get_resident_on ~__context ~self:vm_ref in (Db.VM.get_is_control_domain ~__context ~self:vm_ref) - && (Db.is_valid_ref __context host_ref) - && (Db.Host.get_control_domain ~__context ~self:host_ref = vm_ref) + && (Db.is_valid_ref __context host_ref) + && (Db.Host.get_control_domain ~__context ~self:host_ref = vm_ref) exception No_domain_zero of string let domain_zero_ref_cache = ref None @@ -324,21 +324,21 @@ let get_domain_zero ~__context : API.ref_VM = Threadext.Mutex.execute domain_zero_ref_cache_mutex (fun () -> match !domain_zero_ref_cache with - Some r -> r - | None -> - (* Read the control domain uuid from the inventory file *) - let uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in - try - let vm = Db.VM.get_by_uuid ~__context ~uuid in - if not (is_domain_zero ~__context vm) then begin - error "VM uuid %s is not domain zero but the uuid is in my inventory file" uuid; - raise (No_domain_zero uuid); - end; - domain_zero_ref_cache := Some vm; - vm - with _ -> - error "Failed to find domain zero (uuid = %s)" uuid; - raise (No_domain_zero uuid) + Some r -> r + | None -> + (* Read the control domain uuid from the inventory file *) + let uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in + try + let vm = Db.VM.get_by_uuid ~__context ~uuid in + if not (is_domain_zero ~__context vm) then begin + error "VM uuid %s is not domain zero but the uuid is in my inventory file" uuid; + raise (No_domain_zero uuid); + end; + domain_zero_ref_cache := Some vm; + vm + with _ -> + error "Failed to find domain zero (uuid = %s)" uuid; + raise (No_domain_zero uuid) ) let update_domain_zero_name ~__context host hostname = @@ -347,12 +347,12 @@ let update_domain_zero_name ~__context host hostname = let dom0 = get_domain_zero ~__context in (* Double check host *) let dom0_host = Db.VM.get_resident_on 
~__context ~self:dom0 in - if dom0_host <> host - then + if dom0_host <> host + then error "Unexpectedly incorrect dom0 record in update_domain_zero_name" else begin let current_name = Db.VM.get_name_label ~__context ~self:dom0 in - let is_default = + let is_default = try String.sub current_name 0 (String.length stem) = stem with _ -> false @@ -362,22 +362,22 @@ let update_domain_zero_name ~__context host hostname = end let get_size_with_suffix s = - let s, suffix = if String.length s > 0 then ( - let c = s.[String.length s - 1] in - if List.mem c [ 'G'; 'g'; 'M'; 'm'; 'K'; 'k'; 'B'; 'b' ] then ( - let suffix = match c with - | 'G' | 'g' -> 30 - | 'M' | 'm' -> 20 - | 'K' | 'k' -> 10 - | 'B' | 'b' -> 0 - | _ -> 10 in - String.sub s 0 (String.length s - 1), suffix - ) else - s, 10 + let s, suffix = if String.length s > 0 then ( + let c = s.[String.length s - 1] in + if List.mem c [ 'G'; 'g'; 'M'; 'm'; 'K'; 'k'; 'B'; 'b' ] then ( + let suffix = match c with + | 'G' | 'g' -> 30 + | 'M' | 'm' -> 20 + | 'K' | 'k' -> 10 + | 'B' | 'b' -> 0 + | _ -> 10 in + String.sub s 0 (String.length s - 1), suffix + ) else + s, 10 ) else - s, 10 in - Int64.shift_left (if String.contains s '.' then - (Int64.of_float (float_of_string s)) else Int64.of_string s) suffix + s, 10 in + Int64.shift_left (if String.contains s '.' 
then + (Int64.of_float (float_of_string s)) else Int64.of_string s) suffix (** An HVM boot has the following user-settable parameters: *) @@ -390,30 +390,30 @@ type direct_pv_boot_t = { kernel: string; kernel_args: string; ramdisk: string o (** An 'indirect' PV boot (one that defers to a bootloader) has the following options: *) type indirect_pv_boot_t = - { bootloader: string; (** bootloader to use (eg "pygrub") *) - extra_args: string; (** extra commandline arguments to pass bootloader for the kernel *) - legacy_args: string; (** "legacy" args to cope with Zurich/Geneva guests *) - pv_bootloader_args: string; (** misc arguments for the bootloader itself *) - vdis: API.ref_VDI list; (** list of bootable VDIs *) - } + { bootloader: string; (** bootloader to use (eg "pygrub") *) + extra_args: string; (** extra commandline arguments to pass bootloader for the kernel *) + legacy_args: string; (** "legacy" args to cope with Zurich/Geneva guests *) + pv_bootloader_args: string; (** misc arguments for the bootloader itself *) + vdis: API.ref_VDI list; (** list of bootable VDIs *) + } (** A type which represents the boot method a guest is configured to use *) type boot_method = - | HVM of hvm_boot_t - | DirectPV of direct_pv_boot_t - | IndirectPV of indirect_pv_boot_t + | HVM of hvm_boot_t + | DirectPV of direct_pv_boot_t + | IndirectPV of indirect_pv_boot_t let string_of_option opt = match opt with None -> "(none)" | Some s -> s let string_of_boot_method = function - | HVM _ -> "HVM" + | HVM _ -> "HVM" | DirectPV x -> - Printf.sprintf "Direct PV boot with kernel = %s; args = %s; ramdisk = %s" - x.kernel x.kernel_args (string_of_option x.ramdisk) + Printf.sprintf "Direct PV boot with kernel = %s; args = %s; ramdisk = %s" + x.kernel x.kernel_args (string_of_option x.ramdisk) | IndirectPV x -> - Printf.sprintf "Indirect PV boot via bootloader %s; extra_args = %s; legacy_args = %s; bootloader_args = %s; VDIs = [ %s ]" - x.bootloader x.extra_args x.legacy_args 
x.pv_bootloader_args - (String.concat "; " (List.map Ref.string_of x.vdis)) + Printf.sprintf "Indirect PV boot via bootloader %s; extra_args = %s; legacy_args = %s; bootloader_args = %s; VDIs = [ %s ]" + x.bootloader x.extra_args x.legacy_args x.pv_bootloader_args + (String.concat "; " (List.map Ref.string_of x.vdis)) (** Returns the current value of the pool configuration flag *) (** that indicates whether a rolling upgrade is in progress. *) @@ -421,23 +421,23 @@ let string_of_boot_method = function is not present in the database; that only happens on firstboot (when you're a master with no db and you're creating the db for the first time). In that context you cannot be in rolling upgrade mode *) let rolling_upgrade_in_progress ~__context = - try - let pool = get_pool ~__context in - List.mem_assoc Xapi_globs.rolling_upgrade_in_progress (Db.Pool.get_other_config ~__context ~self:pool) - with _ -> - false + try + let pool = get_pool ~__context in + List.mem_assoc Xapi_globs.rolling_upgrade_in_progress (Db.Pool.get_other_config ~__context ~self:pool) + with _ -> + false let parse_boot_record ~string:lbr = - match Xmlrpc_sexpr.sexpr_str_to_xmlrpc lbr with - | None -> API.Legacy.From.vM_t "ret_val" (Xml.parse_string lbr) - | Some xml -> API.Legacy.From.vM_t "ret_val" xml + match Xmlrpc_sexpr.sexpr_str_to_xmlrpc lbr with + | None -> API.Legacy.From.vM_t "ret_val" (Xml.parse_string lbr) + | Some xml -> API.Legacy.From.vM_t "ret_val" xml (** Fetch the configuration the VM was booted with *) let get_boot_record_of_record ~__context ~string:lbr ~uuid:current_vm_uuid = try parse_boot_record lbr with e -> - (* warn "Warning: exception '%s' parsing last booted record (%s) - returning current record instead" lbr (ExnHelper.string_of_exn e); *) + (* warn "Warning: exception '%s' parsing last booted record (%s) - returning current record instead" lbr (ExnHelper.string_of_exn e); *) Db.VM.get_record ~__context ~self:(Db.VM.get_by_uuid ~__context ~uuid:current_vm_uuid) let 
get_boot_record ~__context ~self = @@ -446,9 +446,9 @@ let get_boot_record ~__context ~self = (* CA-31903: we now use an unhealthy mix of fields from the boot_records and the live VM. In particular the VM is currently using dynamic_min and max from the live VM -- not the boot-time settings. *) { lbr with - API.vM_memory_target = 0L; - API.vM_memory_dynamic_min = r.Db_actions.vM_memory_dynamic_min; - API.vM_memory_dynamic_max = r.Db_actions.vM_memory_dynamic_max; + API.vM_memory_target = 0L; + API.vM_memory_dynamic_min = r.Db_actions.vM_memory_dynamic_min; + API.vM_memory_dynamic_max = r.Db_actions.vM_memory_dynamic_max; } @@ -458,59 +458,59 @@ let set_boot_record ~__context ~self newbootrec = let newbootrec = {newbootrec with API.vM_last_booted_record=""; API.vM_bios_strings=[]} in if rolling_upgrade_in_progress ~__context then begin - (* during a rolling upgrade, there might be slaves in the pool - who have not yet been upgraded to understand sexprs, so - let's still talk using the legacy xmlrpc format. - *) - let xml = Xml.to_string (API.Legacy.To.vM_t newbootrec) in - Db.VM.set_last_booted_record ~__context ~self ~value:xml - end + (* during a rolling upgrade, there might be slaves in the pool + who have not yet been upgraded to understand sexprs, so + let's still talk using the legacy xmlrpc format. + *) + let xml = Xml.to_string (API.Legacy.To.vM_t newbootrec) in + Db.VM.set_last_booted_record ~__context ~self ~value:xml + end else - begin - (* if it's not a rolling upgrade, then we know everyone - else in the pool will understand s-expressions. - *) - let sexpr = Xmlrpc_sexpr.xmlrpc_to_sexpr_str (API.Legacy.To.vM_t newbootrec) in - Db.VM.set_last_booted_record ~__context ~self ~value:sexpr - end; + begin + (* if it's not a rolling upgrade, then we know everyone + else in the pool will understand s-expressions. 
+ *) + let sexpr = Xmlrpc_sexpr.xmlrpc_to_sexpr_str (API.Legacy.To.vM_t newbootrec) in + Db.VM.set_last_booted_record ~__context ~self ~value:sexpr + end; () (** Inspect the current configuration of a VM and return a boot_method type *) let boot_method_of_vm ~__context ~vm = - if vm.API.vM_HVM_boot_policy <> "" then begin - (* hvm_boot describes the HVM boot order. How? as a qemu-dm -boot param? *) - let timeoffset = try List.assoc "timeoffset" vm.API.vM_platform with _ -> "0" in - HVM { timeoffset = timeoffset } + if vm.API.vM_HVM_boot_policy <> "" then begin + (* hvm_boot describes the HVM boot order. How? as a qemu-dm -boot param? *) + let timeoffset = try List.assoc "timeoffset" vm.API.vM_platform with _ -> "0" in + HVM { timeoffset = timeoffset } + end else begin + (* PV *) + if vm.API.vM_PV_bootloader = "" then begin + let kern = vm.API.vM_PV_kernel + and args = vm.API.vM_PV_args + and ramdisk = if vm.API.vM_PV_ramdisk <> "" then (Some vm.API.vM_PV_ramdisk) else None in + DirectPV { kernel = kern; kernel_args = args; ramdisk = ramdisk } end else begin - (* PV *) - if vm.API.vM_PV_bootloader = "" then begin - let kern = vm.API.vM_PV_kernel - and args = vm.API.vM_PV_args - and ramdisk = if vm.API.vM_PV_ramdisk <> "" then (Some vm.API.vM_PV_ramdisk) else None in - DirectPV { kernel = kern; kernel_args = args; ramdisk = ramdisk } - end else begin - (* Extract the default kernel from the boot disk via bootloader *) - (* NB We allow multiple bootable VDIs, in which case the - bootloader gets to choose. 
Note that a VM may have no - bootable VDIs; this might happen for example if the - bootloader intends to PXE boot *) - let bootable = List.filter - (fun self -> Db.VBD.get_bootable ~__context ~self) - vm.API.vM_VBDs in - let non_empty = List.filter - (fun self -> not (Db.VBD.get_empty ~__context ~self)) - bootable in - let boot_vdis = - List.map - (fun self -> Db.VBD.get_VDI ~__context ~self) non_empty in - IndirectPV - { bootloader = vm.API.vM_PV_bootloader; - extra_args = vm.API.vM_PV_args; - legacy_args = vm.API.vM_PV_legacy_args; - pv_bootloader_args = vm.API.vM_PV_bootloader_args; - vdis = boot_vdis } - end + (* Extract the default kernel from the boot disk via bootloader *) + (* NB We allow multiple bootable VDIs, in which case the + bootloader gets to choose. Note that a VM may have no + bootable VDIs; this might happen for example if the + bootloader intends to PXE boot *) + let bootable = List.filter + (fun self -> Db.VBD.get_bootable ~__context ~self) + vm.API.vM_VBDs in + let non_empty = List.filter + (fun self -> not (Db.VBD.get_empty ~__context ~self)) + bootable in + let boot_vdis = + List.map + (fun self -> Db.VBD.get_VDI ~__context ~self) non_empty in + IndirectPV + { bootloader = vm.API.vM_PV_bootloader; + extra_args = vm.API.vM_PV_args; + legacy_args = vm.API.vM_PV_legacy_args; + pv_bootloader_args = vm.API.vM_PV_bootloader_args; + vdis = boot_vdis } end + end (** Returns true if the supplied VM configuration is HVM. 
NB that just because a VM's current configuration looks like HVM doesn't imply it @@ -532,22 +532,22 @@ let has_booted_hvm_of_record ~__context r = let is_running ~__context ~self = Db.VM.get_domid ~__context ~self <> -1L let devid_of_vif ~__context ~self = - int_of_string (Db.VIF.get_device ~__context ~self) + int_of_string (Db.VIF.get_device ~__context ~self) exception Device_has_no_VIF let vif_of_devid ~__context ~vm devid = - let vifs = Db.VM.get_VIFs ~__context ~self:vm in - let devs = List.map (fun self -> devid_of_vif ~__context ~self) vifs in - let table = List.combine devs vifs in - let has_vif = List.mem_assoc devid table in - if not(has_vif) - then raise Device_has_no_VIF - else List.assoc devid table + let vifs = Db.VM.get_VIFs ~__context ~self:vm in + let devs = List.map (fun self -> devid_of_vif ~__context ~self) vifs in + let table = List.combine devs vifs in + let has_vif = List.mem_assoc devid table in + if not(has_vif) + then raise Device_has_no_VIF + else List.assoc devid table (** Return the domid on the *local host* associated with a specific VM. - Note that if this is called without the VM lock then the result is undefined: the - domid might immediately change after the call returns. Caller beware! *) + Note that if this is called without the VM lock then the result is undefined: the + domid might immediately change after the call returns. Caller beware! 
*) let domid_of_vm ~__context ~self = let uuid = Uuid.uuid_of_string (Db.VM.get_uuid ~__context ~self) in let all = Xenctrl.with_intf (fun xc -> Xenctrl.domain_getinfolist xc 0) in @@ -578,7 +578,7 @@ let get_host_internal_management_network = get_special_network is_host_internal_ let get_my_pbds __context = let localhost = get_localhost __context in let localhost = Ref.string_of localhost in - Db.PBD.get_records_where ~__context ~expr:(Eq(Field "host", Literal localhost)) + Db.PBD.get_records_where ~__context ~expr:(Eq(Field "host", Literal localhost)) (* Return the PBD for specified SR on a specific host *) (* Just say an SR is shared if it has more than one PBD *) @@ -592,212 +592,212 @@ let get_main_ip_address ~__context = try Pool_role.get_master_address () with _ -> "127.0.0.1" let is_pool_master ~__context ~host = - let host_id = Db.Host.get_uuid ~__context ~self:host in - let master = get_master ~__context in - let master_id = Db.Host.get_uuid ~__context ~self:master in - host_id = master_id + let host_id = Db.Host.get_uuid ~__context ~self:host in + let master = get_master ~__context in + let master_id = Db.Host.get_uuid ~__context ~self:master in + host_id = master_id (* Host version compare helpers *) let compare_int_lists : int list -> int list -> int = - fun a b -> - let first_non_zero is = List.fold_left (fun a b -> if (a<>0) then a else b) 0 is in - first_non_zero (List.map2 compare a b) + fun a b -> + let first_non_zero is = List.fold_left (fun a b -> if (a<>0) then a else b) 0 is in + first_non_zero (List.map2 compare a b) let group_by f list = - let evaluated_list = List.map (fun x -> (x, f x)) list in - let snd_equality (_, x) (_, y) = x = y in - let snd_compare (_, x) (_, y) = compare x y in - let sorted = List.sort snd_compare evaluated_list in - let rec take_while p ac = function - | [] -> (ac, []) - | x :: xs -> - if (p x) then take_while p (x :: ac) xs - else (ac, x :: xs) - in - let rec group ac = function - | [] -> ac - | x :: xs -> - let 
peers, rest = take_while (snd_equality x) [] (x :: xs) in - group (peers :: ac) rest - in - group [] sorted + let evaluated_list = List.map (fun x -> (x, f x)) list in + let snd_equality (_, x) (_, y) = x = y in + let snd_compare (_, x) (_, y) = compare x y in + let sorted = List.sort snd_compare evaluated_list in + let rec take_while p ac = function + | [] -> (ac, []) + | x :: xs -> + if (p x) then take_while p (x :: ac) xs + else (ac, x :: xs) + in + let rec group ac = function + | [] -> ac + | x :: xs -> + let peers, rest = take_while (snd_equality x) [] (x :: xs) in + group (peers :: ac) rest + in + group [] sorted (** Groups list elements by equality of result of function application sorted * in order of that result *) let group_by ~ordering f list = - match ordering with - | `descending -> group_by f list - | `ascending -> List.rev (group_by f list) + match ordering with + | `descending -> group_by f list + | `ascending -> List.rev (group_by f list) (** Schwarzian transform sort *) let sort_by_schwarzian ?(descending=false) f list = - let comp x y = if descending then compare y x else compare x y in - let (|>) a f = f a in - List.map (fun x -> (x, f x)) list |> - List.sort (fun (_, x') (_, y') -> comp x' y') |> - List.map (fun (x, _) -> x) + let comp x y = if descending then compare y x else compare x y in + let (|>) a f = f a in + List.map (fun x -> (x, f x)) list |> + List.sort (fun (_, x') (_, y') -> comp x' y') |> + List.map (fun (x, _) -> x) let version_string_of : __context:Context.t -> [`host] api_object -> string = - fun ~__context host -> - try - let software_version = match host with - | LocalObject host_ref -> (Db.Host.get_software_version ~__context ~self:host_ref) - | RemoteObject (rpc, session_id, host_ref) -> - Client.Client.Host.get_software_version ~rpc ~session_id ~self:host_ref - in - List.assoc Xapi_globs._platform_version software_version - with Not_found -> - Xapi_globs.default_platform_version + fun ~__context host -> + try + let 
software_version = match host with + | LocalObject host_ref -> (Db.Host.get_software_version ~__context ~self:host_ref) + | RemoteObject (rpc, session_id, host_ref) -> + Client.Client.Host.get_software_version ~rpc ~session_id ~self:host_ref + in + List.assoc Xapi_globs._platform_version software_version + with Not_found -> + Xapi_globs.default_platform_version let version_of : __context:Context.t -> [`host] api_object -> int list = - fun ~__context host -> - let vs = version_string_of ~__context host - in List.map int_of_string (String.split '.' vs) + fun ~__context host -> + let vs = version_string_of ~__context host + in List.map int_of_string (String.split '.' vs) (* Compares host versions, analogous to Pervasives.compare. *) let compare_host_platform_versions : __context:Context.t -> [`host] api_object -> [`host] api_object -> int = - fun ~__context host_a host_b -> - let version_of = version_of ~__context in - compare_int_lists (version_of host_a) (version_of host_b) + fun ~__context host_a host_b -> + let version_of = version_of ~__context in + compare_int_lists (version_of host_a) (version_of host_b) let max_version_in_pool : __context:Context.t -> int list = - fun ~__context -> - let max_version a b = if a = [] then b else if (compare_int_lists a b) > 0 then a else b - and versions = List.map (fun host_ref -> version_of ~__context (LocalObject host_ref)) (Db.Host.get_all ~__context) in - List.fold_left max_version [] versions + fun ~__context -> + let max_version a b = if a = [] then b else if (compare_int_lists a b) > 0 then a else b + and versions = List.map (fun host_ref -> version_of ~__context (LocalObject host_ref)) (Db.Host.get_all ~__context) in + List.fold_left max_version [] versions let rec string_of_int_list : int list -> string = function - [] -> "" - | (x::xs) -> - if xs == [] - then string_of_int x - else string_of_int x ^ "." ^ string_of_int_list xs + [] -> "" + | (x::xs) -> + if xs == [] + then string_of_int x + else string_of_int x ^ "." 
^ string_of_int_list xs let host_has_highest_version_in_pool : __context:Context.t -> host:[`host] api_object -> bool = - fun ~__context ~host -> - let host_version = version_of ~__context host - and max_version = max_version_in_pool ~__context in - (compare_int_lists host_version max_version) >= 0 + fun ~__context ~host -> + let host_version = version_of ~__context host + and max_version = max_version_in_pool ~__context in + (compare_int_lists host_version max_version) >= 0 let host_versions_not_decreasing ~__context ~host_from ~host_to = - compare_host_platform_versions ~__context host_from host_to <= 0 + compare_host_platform_versions ~__context host_from host_to <= 0 let is_platform_version_same_on_master ~__context ~host = - if is_pool_master ~__context ~host then true else - let master = get_master ~__context in - compare_host_platform_versions ~__context (LocalObject master) (LocalObject host) = 0 + if is_pool_master ~__context ~host then true else + let master = get_master ~__context in + compare_host_platform_versions ~__context (LocalObject master) (LocalObject host) = 0 let assert_platform_version_is_same_on_master ~__context ~host ~self = - if not (is_platform_version_same_on_master ~__context ~host) then - raise (Api_errors.Server_error (Api_errors.vm_host_incompatible_version, - [Ref.string_of host; Ref.string_of self])) + if not (is_platform_version_same_on_master ~__context ~host) then + raise (Api_errors.Server_error (Api_errors.vm_host_incompatible_version, + [Ref.string_of host; Ref.string_of self])) (** PR-1007 - block operations during rolling upgrade *) (* Assertion functions which raise an exception if certain invariants are broken during an upgrade. 
*) let assert_rolling_upgrade_not_in_progress : __context:Context.t -> unit = - fun ~__context -> - if rolling_upgrade_in_progress ~__context then - raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) + fun ~__context -> + if rolling_upgrade_in_progress ~__context then + raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) let assert_host_has_highest_version_in_pool : __context:Context.t -> host:API.ref_host -> unit = - fun ~__context ~host -> - if not (host_has_highest_version_in_pool ~__context ~host:(LocalObject host)) then - raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) + fun ~__context ~host -> + if not (host_has_highest_version_in_pool ~__context ~host:(LocalObject host)) then + raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) let assert_host_versions_not_decreasing : - __context:Context.t -> host_from:[`host] api_object -> host_to:[`host] api_object -> unit = - fun ~__context ~host_from ~host_to -> - if not (host_versions_not_decreasing ~__context ~host_from ~host_to) then - raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) + __context:Context.t -> host_from:[`host] api_object -> host_to:[`host] api_object -> unit = + fun ~__context ~host_from ~host_to -> + if not (host_versions_not_decreasing ~__context ~host_from ~host_to) then + raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) let pool_has_different_host_platform_versions ~__context = - let all_hosts = Db.Host.get_all ~__context in - let platform_versions = List.map (fun host -> version_string_of ~__context (LocalObject host)) all_hosts in - let is_different_to_me platform_version = platform_version <> Version.platform_version () in - List.fold_left (||) false (List.map is_different_to_me platform_versions) + let all_hosts = Db.Host.get_all ~__context in + let platform_versions = List.map (fun host -> version_string_of ~__context 
(LocalObject host)) all_hosts in + let is_different_to_me platform_version = platform_version <> Version.platform_version () in + List.fold_left (||) false (List.map is_different_to_me platform_versions) let get_vm_metrics ~__context ~self = - let metrics = Db.VM.get_metrics ~__context ~self in - if metrics = Ref.null - then failwith "Could not locate VM_metrics object for VM: internal error" - else metrics + let metrics = Db.VM.get_metrics ~__context ~self in + if metrics = Ref.null + then failwith "Could not locate VM_metrics object for VM: internal error" + else metrics let get_vbd_metrics ~__context ~self = - let metrics = Db.VBD.get_metrics ~__context ~self in - if metrics = Ref.null - then failwith "Could not locate VBD_metrics object for VBD: internal error" - else metrics + let metrics = Db.VBD.get_metrics ~__context ~self in + if metrics = Ref.null + then failwith "Could not locate VBD_metrics object for VBD: internal error" + else metrics let get_vif_metrics ~__context ~self = - let metrics = Db.VIF.get_metrics ~__context ~self in - if metrics = Ref.null - then failwith "Could not locate VIF_metrics object for VIF: internal error" - else metrics + let metrics = Db.VIF.get_metrics ~__context ~self in + if metrics = Ref.null + then failwith "Could not locate VIF_metrics object for VIF: internal error" + else metrics (* Read pool secret if it exists; otherwise, create a new one. *) let get_pool_secret () = - try - Unix.access !Xapi_globs.pool_secret_path [Unix.F_OK]; - pool_secret := Unixext.string_of_file !Xapi_globs.pool_secret_path - with _ -> (* No pool secret exists. 
*) - let mk_rand_string () = Uuid.to_string (Uuid.make_uuid()) in - pool_secret := (mk_rand_string()) ^ "/" ^ (mk_rand_string()) ^ "/" ^ (mk_rand_string()); - Unixext.write_string_to_file !Xapi_globs.pool_secret_path !pool_secret + try + Unix.access !Xapi_globs.pool_secret_path [Unix.F_OK]; + pool_secret := Unixext.string_of_file !Xapi_globs.pool_secret_path + with _ -> (* No pool secret exists. *) + let mk_rand_string () = Uuid.to_string (Uuid.make_uuid()) in + pool_secret := (mk_rand_string()) ^ "/" ^ (mk_rand_string()) ^ "/" ^ (mk_rand_string()); + Unixext.write_string_to_file !Xapi_globs.pool_secret_path !pool_secret (* Checks that a host has a PBD for a particular SR (meaning that the SR is visible to the host) *) let host_has_pbd_for_sr ~__context ~host ~sr = - try - let sr_pbds = Db.SR.get_PBDs ~__context ~self:sr in - let sr_host_pbd = List.filter - (fun pbd -> host = Db.PBD.get_host ~__context ~self:pbd) - sr_pbds - in sr_host_pbd <> [] (* empty list means no PBDs *) - with _ -> false + try + let sr_pbds = Db.SR.get_PBDs ~__context ~self:sr in + let sr_host_pbd = List.filter + (fun pbd -> host = Db.PBD.get_host ~__context ~self:pbd) + sr_pbds + in sr_host_pbd <> [] (* empty list means no PBDs *) + with _ -> false (* Checks if an SR exists, returning an SR ref option (None if it is missing) *) let check_sr_exists ~__context ~self = - try ignore(Db.SR.get_uuid ~__context ~self); Some self with _ -> None + try ignore(Db.SR.get_uuid ~__context ~self); Some self with _ -> None (* Checks that an SR exists, and is visible to a host *) let check_sr_exists_for_host ~__context ~self ~host = - if host_has_pbd_for_sr ~__context ~host ~sr:self - then Some self - else None + if host_has_pbd_for_sr ~__context ~host ~sr:self + then Some self + else None (* Returns an SR suitable for suspending this VM *) let choose_suspend_sr ~__context ~vm = - (* If the VM.suspend_SR exists, use that. If it fails, try the Pool.suspend_image_SR. 
*) - (* If that fails, try the Host.suspend_image_SR. *) - let vm_sr = Db.VM.get_suspend_SR ~__context ~self:vm in - let pool = get_pool ~__context in - let pool_sr = Db.Pool.get_suspend_image_SR ~__context ~self:pool in - let resident_on = Db.VM.get_resident_on ~__context ~self:vm in - let host_sr = Db.Host.get_suspend_image_sr ~__context ~self:resident_on in - - match - check_sr_exists_for_host ~__context ~self:vm_sr ~host:resident_on, - check_sr_exists_for_host ~__context ~self:pool_sr ~host:resident_on, - check_sr_exists_for_host ~__context ~self:host_sr ~host:resident_on - with - | Some x, _, _ -> x - | _, Some x, _ -> x - | _, _, Some x -> x - | None, None, None -> - raise (Api_errors.Server_error (Api_errors.vm_no_suspend_sr, [Ref.string_of vm])) + (* If the VM.suspend_SR exists, use that. If it fails, try the Pool.suspend_image_SR. *) + (* If that fails, try the Host.suspend_image_SR. *) + let vm_sr = Db.VM.get_suspend_SR ~__context ~self:vm in + let pool = get_pool ~__context in + let pool_sr = Db.Pool.get_suspend_image_SR ~__context ~self:pool in + let resident_on = Db.VM.get_resident_on ~__context ~self:vm in + let host_sr = Db.Host.get_suspend_image_sr ~__context ~self:resident_on in + + match + check_sr_exists_for_host ~__context ~self:vm_sr ~host:resident_on, + check_sr_exists_for_host ~__context ~self:pool_sr ~host:resident_on, + check_sr_exists_for_host ~__context ~self:host_sr ~host:resident_on + with + | Some x, _, _ -> x + | _, Some x, _ -> x + | _, _, Some x -> x + | None, None, None -> + raise (Api_errors.Server_error (Api_errors.vm_no_suspend_sr, [Ref.string_of vm])) (* Returns an SR suitable for receiving crashdumps of this VM *) let choose_crashdump_sr ~__context ~vm = - (* If the Pool.crashdump_SR exists, use that. 
Otherwise try the Host.crashdump_SR *) - let pool = get_pool ~__context in - let pool_sr = Db.Pool.get_crash_dump_SR ~__context ~self:pool in - let resident_on = Db.VM.get_resident_on ~__context ~self:vm in - let host_sr = Db.Host.get_crash_dump_sr ~__context ~self:resident_on in - match check_sr_exists ~__context ~self:pool_sr, check_sr_exists ~__context ~self:host_sr with - | Some x, _ -> x - | _, Some x -> x - | None, None -> - raise (Api_errors.Server_error (Api_errors.vm_no_crashdump_sr, [Ref.string_of vm])) + (* If the Pool.crashdump_SR exists, use that. Otherwise try the Host.crashdump_SR *) + let pool = get_pool ~__context in + let pool_sr = Db.Pool.get_crash_dump_SR ~__context ~self:pool in + let resident_on = Db.VM.get_resident_on ~__context ~self:vm in + let host_sr = Db.Host.get_crash_dump_sr ~__context ~self:resident_on in + match check_sr_exists ~__context ~self:pool_sr, check_sr_exists ~__context ~self:host_sr with + | Some x, _ -> x + | _, Some x -> x + | None, None -> + raise (Api_errors.Server_error (Api_errors.vm_no_crashdump_sr, [Ref.string_of vm])) (* return the operations filtered for cancels functions *) let cancel_tasks ~__context ~ops ~all_tasks_in_db (* all tasks in database *) ~task_ids (* all tasks to explicitly cancel *) ~set = @@ -808,7 +808,7 @@ let cancel_tasks ~__context ~ops ~all_tasks_in_db (* all tasks in database *) ~t database at all then we should cancel it. 
*) List.iter (fun s1 -> if into s1 taskids || not(List.mem (Ref.of_string (fst s1)) all_tasks_in_db) then c := true else su1 := s1 :: !su1) set1; !su1, !c - in + in let unique_ops, got_common = cancel_splitset_taskid ops task_ids in if got_common then set unique_ops @@ -820,36 +820,36 @@ let is_removable ~__context ~vbd = Db.VBD.get_type ~__context ~self:vbd = `CD (* IP address and CIDR checks *) let is_valid_ip kind address = - match Unixext.domain_of_addr address, kind with - | Some x, `ipv4 when x = Unix.PF_INET -> true - | Some x, `ipv6 when x = Unix.PF_INET6 -> true - | _ -> false + match Unixext.domain_of_addr address, kind with + | Some x, `ipv4 when x = Unix.PF_INET -> true + | Some x, `ipv6 when x = Unix.PF_INET6 -> true + | _ -> false let assert_is_valid_ip kind field address = - if not (is_valid_ip kind address) then - raise Api_errors.(Server_error (invalid_ip_address_specified, [field])) + if not (is_valid_ip kind address) then + raise Api_errors.(Server_error (invalid_ip_address_specified, [field])) let parse_cidr kind cidr = - try - let address, prefixlen = Scanf.sscanf cidr "%s@/%d" (fun a p -> a, p) in - if not (is_valid_ip kind address) then - (error "Invalid address in CIDR (%s)" address; None) - else if prefixlen < 0 || (kind = `ipv4 && prefixlen > 32) || (kind = `ipv6 && prefixlen > 128) then - (error "Invalid prefix length in CIDR (%d)" prefixlen; None) - else - Some (address, prefixlen) - with _ -> - (error "Invalid CIDR format (%s)" cidr; None) + try + let address, prefixlen = Scanf.sscanf cidr "%s@/%d" (fun a p -> a, p) in + if not (is_valid_ip kind address) then + (error "Invalid address in CIDR (%s)" address; None) + else if prefixlen < 0 || (kind = `ipv4 && prefixlen > 32) || (kind = `ipv6 && prefixlen > 128) then + (error "Invalid prefix length in CIDR (%d)" prefixlen; None) + else + Some (address, prefixlen) + with _ -> + (error "Invalid CIDR format (%s)" cidr; None) let assert_is_valid_cidr kind field cidr = - if parse_cidr kind cidr 
= None then - raise Api_errors.(Server_error (invalid_cidr_address_specified, [field])) + if parse_cidr kind cidr = None then + raise Api_errors.(Server_error (invalid_cidr_address_specified, [field])) (** Return true if the MAC is in the right format XX:XX:XX:XX:XX:XX *) let is_valid_MAC mac = - let l = String.split ':' mac in - let validchar c = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') in - List.length l = 6 && (List.fold_left (fun acc s -> acc && String.length s = 2 && validchar s.[0] && validchar s.[1]) true l) + let l = String.split ':' mac in + let validchar c = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') in + List.length l = 6 && (List.fold_left (fun acc s -> acc && String.length s = 2 && validchar s.[0] && validchar s.[1]) true l) (** Returns true if the supplied IP address looks like one of mine *) let this_is_my_address ~__context address = @@ -862,8 +862,8 @@ let this_is_my_address ~__context address = let get_live_hosts ~__context = let hosts = Db.Host.get_all ~__context in List.filter (fun self -> - let metrics = Db.Host.get_metrics ~__context ~self in - try Db.Host_metrics.get_live ~__context ~self:metrics with _ -> false) hosts + let metrics = Db.Host.get_metrics ~__context ~self in + try Db.Host_metrics.get_live ~__context ~self:metrics with _ -> false) hosts let gethostbyname_family host family = let throw_resolve_error() = failwith (Printf.sprintf "Couldn't resolve hostname: %s" host) in @@ -882,7 +882,7 @@ let gethostbyname host = gethostbyname_family host (if (pref == `IPv4) then Unix.PF_INET else Unix.PF_INET6) with _ -> (try - gethostbyname_family host (if (pref = `IPv4) then Unix.PF_INET6 else Unix.PF_INET) + gethostbyname_family host (if (pref = `IPv4) then Unix.PF_INET6 else Unix.PF_INET) with _ -> throw_resolve_error()) (** Indicate whether VM.clone should be allowed on suspended VMs *) @@ -891,17 +891,17 @@ let clone_suspended_vm_enabled ~__context = let pool = get_pool 
~__context in let other_config = Db.Pool.get_other_config ~__context ~self:pool in List.mem_assoc Xapi_globs.pool_allow_clone_suspended_vm other_config - && (List.assoc Xapi_globs.pool_allow_clone_suspended_vm other_config = "true") + && (List.assoc Xapi_globs.pool_allow_clone_suspended_vm other_config = "true") with _ -> false (** Indicate whether run-script should be allowed on VM plugin guest-agent-operation *) let guest_agent_run_script_enabled ~__context = - try - let pool = get_pool ~__context in - let other_config = Db.Pool.get_other_config ~__context ~self:pool in - List.mem_assoc Xapi_globs.pool_allow_guest_agent_run_script other_config - && (List.assoc Xapi_globs.pool_allow_guest_agent_run_script other_config = "true") - with _ -> false + try + let pool = get_pool ~__context in + let other_config = Db.Pool.get_other_config ~__context ~self:pool in + List.mem_assoc Xapi_globs.pool_allow_guest_agent_run_script other_config + && (List.assoc Xapi_globs.pool_allow_guest_agent_run_script other_config = "true") + with _ -> false (* OEM Related helper functions *) let is_oem ~__context ~host = @@ -910,28 +910,28 @@ let is_oem ~__context ~host = let on_oem ~__context = let this_host = !Xapi_globs.localhost_ref in - is_oem ~__context ~host:this_host + is_oem ~__context ~host:this_host exception File_doesnt_exist of string let call_script ?(log_successful_output=true) ?env script args = try Unix.access script [ Unix.X_OK ]; - (* Use the same $PATH as xapi *) - let env = match env with - | None -> [| "PATH=" ^ (Sys.getenv "PATH") |] - | Some env -> env - in + (* Use the same $PATH as xapi *) + let env = match env with + | None -> [| "PATH=" ^ (Sys.getenv "PATH") |] + | Some env -> env + in let output, _ = Forkhelpers.execute_command_get_output ~env script args in if log_successful_output then debug "%s %s succeeded [ output = '%s' ]" script (String.concat " " args) output; output with | Unix.Unix_error _ as e -> - debug "Assuming script %s doesn't exist: caught %s" 
script (ExnHelper.string_of_exn e); - raise e + debug "Assuming script %s doesn't exist: caught %s" script (ExnHelper.string_of_exn e); + raise e | Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n) as e-> - debug "%s %s exited with code %d [stdout = '%s'; stderr = '%s']" script (String.concat " " args) n stdout stderr; - raise e + debug "%s %s exited with code %d [stdout = '%s'; stderr = '%s']" script (String.concat " " args) n stdout stderr; + raise e (* Repeatedly bisect a range to find the maximum value for which the monotonic function returns true *) let rec bisect f lower upper = @@ -960,37 +960,37 @@ open Listext let local_storage_exists () = (try ignore(Unix.stat (Xapi_globs.xapi_blob_location)); true - with _ -> false) + with _ -> false) let touch_file fname = try if fname <> "" then match Unixext.spawnvp "touch" [| "touch"; fname |] with - Unix.WEXITED 0 -> () + Unix.WEXITED 0 -> () | _ -> warn "Unable to touch ready file '%s': touch exited abnormally" fname with | e -> (warn "Unable to touch ready file '%s': %s" fname (Printexc.to_string e)) let vm_to_string __context vm = - let str = Ref.string_of vm in + let str = Ref.string_of vm in - if not (Db.is_valid_ref __context vm) - then raise (Api_errors.Server_error(Api_errors.invalid_value ,[str])); - let t = Context.database_of __context in - let module DB = (val (Db_cache.get t) : Db_interface.DB_ACCESS) in - let fields = fst (DB.read_record t Db_names.vm str) in - let sexpr = SExpr.Node (List.map (fun (key,value) -> SExpr.Node [SExpr.String key; SExpr.String value]) fields) in - SExpr.string_of sexpr + if not (Db.is_valid_ref __context vm) + then raise (Api_errors.Server_error(Api_errors.invalid_value ,[str])); + let t = Context.database_of __context in + let module DB = (val (Db_cache.get t) : Db_interface.DB_ACCESS) in + let fields = fst (DB.read_record t Db_names.vm str) in + let sexpr = SExpr.Node (List.map (fun (key,value) -> SExpr.Node [SExpr.String key; SExpr.String value]) fields) 
in + SExpr.string_of sexpr let vm_string_to_assoc vm_string = - let assoc_of_node = function - | SExpr.Node [SExpr.String s; SExpr.String t] -> (s,t) - | _ -> raise (Api_errors.Server_error(Api_errors.invalid_value ,["Invalid vm_string"])) in + let assoc_of_node = function + | SExpr.Node [SExpr.String s; SExpr.String t] -> (s,t) + | _ -> raise (Api_errors.Server_error(Api_errors.invalid_value ,["Invalid vm_string"])) in - match SExpr_TS.of_string vm_string with - | SExpr.Node l -> List.map assoc_of_node l - | _ -> raise (Api_errors.Server_error(Api_errors.invalid_value ,["Invalid vm_string"])) + match SExpr_TS.of_string vm_string with + | SExpr.Node l -> List.map assoc_of_node l + | _ -> raise (Api_errors.Server_error(Api_errors.invalid_value ,["Invalid vm_string"])) let get_srmaster ~__context ~sr = let shared = Db.SR.get_shared ~__context ~self:sr in @@ -1001,60 +1001,60 @@ let get_srmaster ~__context ~sr = match List.length pbds with | 0 -> raise (Api_errors.Server_error - (Api_errors.sr_no_pbds, [])) + (Api_errors.sr_no_pbds, [])) | 1 -> Db.PBD.get_host ~__context ~self:(List.hd pbds) | n -> raise (Api_errors.Server_error - (Api_errors.sr_has_multiple_pbds, - List.map (fun pbd -> Db.PBD.get_uuid ~__context ~self:pbd) pbds)) + (Api_errors.sr_has_multiple_pbds, + List.map (fun pbd -> Db.PBD.get_uuid ~__context ~self:pbd) pbds)) end let i_am_srmaster ~__context ~sr = - get_srmaster ~__context ~sr = get_localhost ~__context + get_srmaster ~__context ~sr = get_localhost ~__context let get_all_plugged_srs ~__context = - let pbds = Db.PBD.get_all ~__context in - let pbds_plugged_in = List.filter (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds in - List.setify (List.map (fun self -> Db.PBD.get_SR ~__context ~self) pbds_plugged_in) + let pbds = Db.PBD.get_all ~__context in + let pbds_plugged_in = List.filter (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds in + List.setify (List.map (fun self -> Db.PBD.get_SR ~__context ~self) 
pbds_plugged_in) let find_health_check_task ~__context ~sr = - Db.Task.get_refs_where ~__context ~expr:(And ( - Eq (Field "name__label", Literal Xapi_globs.sr_health_check_task_label), - Eq (Field "name__description", Literal (Ref.string_of sr)) - )) + Db.Task.get_refs_where ~__context ~expr:(And ( + Eq (Field "name__label", Literal Xapi_globs.sr_health_check_task_label), + Eq (Field "name__description", Literal (Ref.string_of sr)) + )) (* Copy the snapshot metadata from [src_record] to the VM whose reference is [dst_ref]. *) (* If a lookup table is provided, then the field 'snapshot_of' is translated using this *) (* lookup table. *) let copy_snapshot_metadata rpc session_id ?lookup_table ~src_record ~dst_ref = - let f = match lookup_table with - | None -> (fun x -> x) - | Some t -> (fun x -> t x) - in - Client.Client.VM.update_snapshot_metadata ~rpc ~session_id ~vm:dst_ref - ~snapshot_of:(f src_record.API.vM_snapshot_of) - ~snapshot_time:src_record.API.vM_snapshot_time - ~transportable_snapshot_id:src_record.API.vM_transportable_snapshot_id + let f = match lookup_table with + | None -> (fun x -> x) + | Some t -> (fun x -> t x) + in + Client.Client.VM.update_snapshot_metadata ~rpc ~session_id ~vm:dst_ref + ~snapshot_of:(f src_record.API.vM_snapshot_of) + ~snapshot_time:src_record.API.vM_snapshot_time + ~transportable_snapshot_id:src_record.API.vM_transportable_snapshot_id let update_vswitch_controller ~__context ~host = - try call_api_functions ~__context (fun rpc session_id -> - let result = Client.Client.Host.call_plugin ~rpc ~session_id ~host ~plugin:"openvswitch-cfg-update" ~fn:"update" ~args:[] in - debug "openvswitch-cfg-update(on %s): %s" - (Db.Host.get_name_label ~__context ~self:host) - result) - with e -> - debug "Got '%s' while trying to update the vswitch configuration on host %s" - (Printexc.to_string e) - (Db.Host.get_name_label ~__context ~self:host) + try call_api_functions ~__context (fun rpc session_id -> + let result = 
Client.Client.Host.call_plugin ~rpc ~session_id ~host ~plugin:"openvswitch-cfg-update" ~fn:"update" ~args:[] in + debug "openvswitch-cfg-update(on %s): %s" + (Db.Host.get_name_label ~__context ~self:host) + result) + with e -> + debug "Got '%s' while trying to update the vswitch configuration on host %s" + (Printexc.to_string e) + (Db.Host.get_name_label ~__context ~self:host) let assert_vswitch_controller_not_active ~__context = - let pool = get_pool ~__context in - let controller = Db.Pool.get_vswitch_controller ~__context ~self:pool in - let dbg = Context.string_of_task __context in - let backend = Net.Bridge.get_kind dbg () in - if (controller <> "") && (backend = Network_interface.Openvswitch) then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, ["A vswitch controller is active"])) + let pool = get_pool ~__context in + let controller = Db.Pool.get_vswitch_controller ~__context ~self:pool in + let dbg = Context.string_of_task __context in + let backend = Net.Bridge.get_kind dbg () in + if (controller <> "") && (backend = Network_interface.Openvswitch) then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, ["A vswitch controller is active"])) (* Useful for making readable(ish) logs: *) let short_string_of_ref x = @@ -1062,11 +1062,11 @@ let short_string_of_ref x = String.sub x' (String.length "OpaqueRef:") 8 let force_loopback_vbd ~__context = - (* Workaround assumption in SMRT: if a global flag is set, force use - of loopback VBDs. *) - let pool = get_pool ~__context in - let other_config = Db.Pool.get_other_config ~__context ~self:pool in - List.mem_assoc "force_loopback_vbd" other_config + (* Workaround assumption in SMRT: if a global flag is set, force use + of loopback VBDs. 
*) + let pool = get_pool ~__context in + let other_config = Db.Pool.get_other_config ~__context ~self:pool in + List.mem_assoc "force_loopback_vbd" other_config (* We no longer care about the hash, but it's part of the API and we can't get rid of it. Put this here so clients don't need to know @@ -1091,19 +1091,19 @@ let compute_hash () = "" ... or a combination of the above. *) let timebox ~timeout ~otherwise f = - let fd_in, fd_out = Unix.pipe () in - let result = ref otherwise in - let _ = Thread.create - (fun () -> - (try - let r = f () in - result := fun () -> r - with e -> - result := fun () -> raise e); - Unix.close fd_out) () in - let _ = Thread.wait_timed_read fd_in timeout in - Unix.close fd_in; - !result () + let fd_in, fd_out = Unix.pipe () in + let result = ref otherwise in + let _ = Thread.create + (fun () -> + (try + let r = f () in + result := fun () -> r + with e -> + result := fun () -> raise e); + Unix.close fd_out) () in + let _ = Thread.wait_timed_read fd_in timeout in + Unix.close fd_in; + !result () (**************************************************************************************) (* The master uses a global mutex to mark database records before forwarding messages *) @@ -1118,21 +1118,21 @@ let with_global_lock x = Mutex.execute __internal_mutex x (** Call the function f having incremented the number of queueing threads counter. 
If we exceed a built-in threshold, throw TOO_MANY_PENDING_TASKS *) let queue_thread f = - with_global_lock - (fun () -> - if !__number_of_queueing_threads > max_number_of_queueing_threads - then raise (Api_errors.Server_error(Api_errors.too_many_pending_tasks, [])) - else incr __number_of_queueing_threads); - finally f (fun () -> with_global_lock (fun () -> decr __number_of_queueing_threads)) + with_global_lock + (fun () -> + if !__number_of_queueing_threads > max_number_of_queueing_threads + then raise (Api_errors.Server_error(Api_errors.too_many_pending_tasks, [])) + else incr __number_of_queueing_threads); + finally f (fun () -> with_global_lock (fun () -> decr __number_of_queueing_threads)) module type POLICY = sig - type t - val standard : t - (** Used by operations like VM.start which want to paper over transient glitches but want to fail - quickly if the objects are persistently locked (eg by a VDI.clone) *) - val fail_quickly : t - val fail_immediately: t - val wait : __context:Context.t -> t -> exn -> t + type t + val standard : t + (** Used by operations like VM.start which want to paper over transient glitches but want to fail + quickly if the objects are persistently locked (eg by a VDI.clone) *) + val fail_quickly : t + val fail_immediately: t + val wait : __context:Context.t -> t -> exn -> t end (* Mechanism for early wakeup of blocked threads. When a thread goes to sleep having got an @@ -1140,73 +1140,73 @@ end 'Thread.delay' and provide a mechanism for the other of the conflicting task to wake us up on the way out. 
*) module Early_wakeup = struct - let table : ((string*string), Delay.t) Hashtbl.t = Hashtbl.create 10 - let table_m = Mutex.create () - - let wait ((a, b) as key) time = - (* debug "Early_wakeup wait key = (%s, %s) time = %.2f" a b time; *) - let d = Delay.make () in - Mutex.execute table_m (fun () -> Hashtbl.add table key d); - finally - (fun () -> - let (_: bool) = Delay.wait d time in - () - )(fun () -> Mutex.execute table_m (fun () -> Hashtbl.remove table key)) - - let broadcast (a, b) = - (*debug "Early_wakeup broadcast key = (%s, %s)" a b;*) - Mutex.execute table_m - (fun () -> - Hashtbl.iter (fun (a, b) d -> (*debug "Signalling thread blocked on (%s, %s)" a b;*) Delay.signal d) table - ) - - let signal ((a, b) as key) = - (*debug "Early_wakeup signal key = (%s, %s)" a b;*) - Mutex.execute table_m - (fun () -> - if Hashtbl.mem table key then ((*debug "Signalling thread blocked on (%s,%s)" a b;*) Delay.signal (Hashtbl.find table key)) - ) + let table : ((string*string), Delay.t) Hashtbl.t = Hashtbl.create 10 + let table_m = Mutex.create () + + let wait ((a, b) as key) time = + (* debug "Early_wakeup wait key = (%s, %s) time = %.2f" a b time; *) + let d = Delay.make () in + Mutex.execute table_m (fun () -> Hashtbl.add table key d); + finally + (fun () -> + let (_: bool) = Delay.wait d time in + () + )(fun () -> Mutex.execute table_m (fun () -> Hashtbl.remove table key)) + + let broadcast (a, b) = + (*debug "Early_wakeup broadcast key = (%s, %s)" a b;*) + Mutex.execute table_m + (fun () -> + Hashtbl.iter (fun (a, b) d -> (*debug "Signalling thread blocked on (%s, %s)" a b;*) Delay.signal d) table + ) + + let signal ((a, b) as key) = + (*debug "Early_wakeup signal key = (%s, %s)" a b;*) + Mutex.execute table_m + (fun () -> + if Hashtbl.mem table key then ((*debug "Signalling thread blocked on (%s,%s)" a b;*) Delay.signal (Hashtbl.find table key)) + ) end module Repeat_with_uniform_backoff : POLICY = struct - type t = { - minimum_delay: float; (* seconds *) - 
maximum_delay: float; (* maximum backoff time *) - max_total_wait: float; (* max time to wait before failing *) - wait_so_far: float; (* time waited so far *) - } - let standard = { - minimum_delay = 1.0; - maximum_delay = 20.0; - max_total_wait = 3600.0 *. 2.0; (* 2 hours *) - wait_so_far = 0.0; - } - let fail_quickly = { - minimum_delay = 2.; - maximum_delay = 2.; - max_total_wait = 120.; - wait_so_far = 0. - } - let fail_immediately = { - minimum_delay = 0.; - maximum_delay = 3.; - max_total_wait = min_float; - wait_so_far = 0.; - } - let wait ~__context (state: t) (e: exn) = - if state.wait_so_far >= state.max_total_wait then raise e; - let this_timeout = state.minimum_delay +. (state.maximum_delay -. state.minimum_delay) *. (Random.float 1.0) in - - debug "Waiting for up to %f seconds before retrying..." this_timeout; - let start = Unix.gettimeofday () in - begin - match e with - | Api_errors.Server_error(code, [ cls; objref ]) when code = Api_errors.other_operation_in_progress -> - Early_wakeup.wait (cls, objref) this_timeout; - | _ -> - Thread.delay this_timeout; - end; - { state with wait_so_far = state.wait_so_far +. (Unix.gettimeofday () -. start) } + type t = { + minimum_delay: float; (* seconds *) + maximum_delay: float; (* maximum backoff time *) + max_total_wait: float; (* max time to wait before failing *) + wait_so_far: float; (* time waited so far *) + } + let standard = { + minimum_delay = 1.0; + maximum_delay = 20.0; + max_total_wait = 3600.0 *. 2.0; (* 2 hours *) + wait_so_far = 0.0; + } + let fail_quickly = { + minimum_delay = 2.; + maximum_delay = 2.; + max_total_wait = 120.; + wait_so_far = 0. + } + let fail_immediately = { + minimum_delay = 0.; + maximum_delay = 3.; + max_total_wait = min_float; + wait_so_far = 0.; + } + let wait ~__context (state: t) (e: exn) = + if state.wait_so_far >= state.max_total_wait then raise e; + let this_timeout = state.minimum_delay +. (state.maximum_delay -. state.minimum_delay) *. 
(Random.float 1.0) in + + debug "Waiting for up to %f seconds before retrying..." this_timeout; + let start = Unix.gettimeofday () in + begin + match e with + | Api_errors.Server_error(code, [ cls; objref ]) when code = Api_errors.other_operation_in_progress -> + Early_wakeup.wait (cls, objref) this_timeout; + | _ -> + Thread.delay this_timeout; + end; + { state with wait_so_far = state.wait_so_far +. (Unix.gettimeofday () -. start) } end (** Could replace this with something fancier which waits for objects to change at the @@ -1216,29 +1216,29 @@ module Policy = Repeat_with_uniform_backoff (** Attempts to retry a lock-acquiring function multiple times. If it catches another operation in progress error then it blocks before retrying. *) let retry ~__context ~doc ?(policy = Policy.standard) f = - (* This is a cancellable operation, so mark the allowed operations on the task *) - TaskHelper.set_cancellable ~__context; - - let rec loop state = - let result = ref None in - let state = ref state in - while !result = None do - try - if TaskHelper.is_cancelling ~__context then begin - error "%s locking failed: task has been cancelled" doc; - TaskHelper.cancel ~__context; - raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of (Context.get_task_id __context) ])) - end; - result := Some (f ()) - with - | Api_errors.Server_error(code, objref :: _ ) as e when code = Api_errors.other_operation_in_progress -> - debug "%s locking failed: caught transient failure %s" doc (ExnHelper.string_of_exn e); - state := queue_thread (fun () -> Policy.wait ~__context !state e) - done; - match !result with - | Some x -> x - | None -> failwith "this should never happen" in - loop policy + (* This is a cancellable operation, so mark the allowed operations on the task *) + TaskHelper.set_cancellable ~__context; + + let rec loop state = + let result = ref None in + let state = ref state in + while !result = None do + try + if TaskHelper.is_cancelling ~__context then begin + 
error "%s locking failed: task has been cancelled" doc; + TaskHelper.cancel ~__context; + raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of (Context.get_task_id __context) ])) + end; + result := Some (f ()) + with + | Api_errors.Server_error(code, objref :: _ ) as e when code = Api_errors.other_operation_in_progress -> + debug "%s locking failed: caught transient failure %s" doc (ExnHelper.string_of_exn e); + state := queue_thread (fun () -> Policy.wait ~__context !state e) + done; + match !result with + | Some x -> x + | None -> failwith "this should never happen" in + loop policy let retry_with_global_lock ~__context ~doc ?policy f = - retry ~__context ~doc ?policy (fun () -> with_global_lock f) + retry ~__context ~doc ?policy (fun () -> with_global_lock f) diff --git a/ocaml/xapi/http_test.ml b/ocaml/xapi/http_test.ml index a52c6c670b6..ae1b856837b 100644 --- a/ocaml/xapi/http_test.ml +++ b/ocaml/xapi/http_test.ml @@ -47,8 +47,8 @@ let _ = Thread.create (fun ()->Http_daemon.main http_spec) () let rec allocator() = let x = String.create 100000 in - Thread.delay 0.01; - allocator() + Thread.delay 0.01; + allocator() let rec minor() = Gc.minor(); @@ -65,7 +65,7 @@ let headers host path content_length = [ Printf.sprintf "Host: %s" host; "Content-Type: text/xml"; Printf.sprintf "Content-length: %d" content_length; -] +] let do_http_rpc host port path body f = @@ -87,14 +87,14 @@ let do_http_rpc host port path body f = let ic = Unix.in_channel_of_descr s in (try - while true do - let line = input_line ic in - (* NB input_line removes the final '\n'. - RFC1945 says to expect a '\r\n' (- '\n' = '\r') *) - match line with - | "" | "\r" -> raise Not_found - | _ -> () - done + while true do + let line = input_line ic in + (* NB input_line removes the final '\n'. 
+ RFC1945 says to expect a '\r\n' (- '\n' = '\r') *) + match line with + | "" | "\r" -> raise Not_found + | _ -> () + done with Not_found -> () | End_of_file -> ()); let result = f ic in Printf.printf "."; diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index 00757191a63..afa598d7475 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -13,7 +13,7 @@ *) (** HTTP handler for importing a VM from a stream. * @group Import and Export - *) +*) module D=Debug.Make(struct let name="import" end) open D @@ -29,11 +29,11 @@ open Fun open Client type import_failure = -| Some_checksums_failed -| Cannot_handle_chunked -| Failed_to_find_object of string -| Attached_disks_not_found -| Unexpected_file of string (* expected *) * string (* actual *) + | Some_checksums_failed + | Cannot_handle_chunked + | Failed_to_find_object of string + | Attached_disks_not_found + | Unexpected_file of string (* expected *) * string (* actual *) exception IFailure of import_failure @@ -41,42 +41,42 @@ open Xapi_vm_memory_constraints open Vm_memory_constraints type metadata_options = { - (* If true, don't create any database objects. *) - dry_run: bool; - (* If true, treat the import as if it is preparation for a live migration. - * This has the following consequences: - * - We must perform extra checks on the VM object - do we have enough memory? Are the CPU flags compatible? Is there an HA plan for it? - * - If the migration is a dry run we don't need to check for VDIs, since VDI.mirror will have created them during a real migration. - * - If the migration is for real, we will expect the VM export code on the source host to have mapped the VDI locations onto their - * mirrored counterparts which are present on this host. *) - live: bool; - (* An optional src VDI -> destination VDI rewrite list *) - vdi_map: (string * string) list; + (* If true, don't create any database objects. *) + dry_run: bool; + (* If true, treat the import as if it is preparation for a live migration. 
+ * This has the following consequences: + * - We must perform extra checks on the VM object - do we have enough memory? Are the CPU flags compatible? Is there an HA plan for it? + * - If the migration is a dry run we don't need to check for VDIs, since VDI.mirror will have created them during a real migration. + * - If the migration is for real, we will expect the VM export code on the source host to have mapped the VDI locations onto their + * mirrored counterparts which are present on this host. *) + live: bool; + (* An optional src VDI -> destination VDI rewrite list *) + vdi_map: (string * string) list; } type import_type = - (* Import the metadata of a VM whose disks already exist. *) - | Metadata_import of metadata_options - (* Import a VM and stream its disks into the specified SR. *) - | Full_import of API.ref_SR + (* Import the metadata of a VM whose disks already exist. *) + | Metadata_import of metadata_options + (* Import a VM and stream its disks into the specified SR. *) + | Full_import of API.ref_SR (** Allows the import to be customised *) type config = - { - (* Determines how to handle the import - see above. *) - import_type: import_type; - (* true if we want to restore as a perfect backup. Currently we preserve the - interface MAC addresses but we still regenerate UUIDs (because we lack the - internal APIs to keep them *) - full_restore: bool; - (* true if the user has provided '--force' *) - force: bool; - } + { + (* Determines how to handle the import - see above. *) + import_type: import_type; + (* true if we want to restore as a perfect backup. 
Currently we preserve the + interface MAC addresses but we still regenerate UUIDs (because we lack the + internal APIs to keep them *) + full_restore: bool; + (* true if the user has provided '--force' *) + force: bool; + } let is_live config = - match config.import_type with - | Metadata_import {live=live} -> live - | _ -> false + match config.import_type with + | Metadata_import {live=live} -> live + | _ -> false (** List of (datamodel classname * Reference in export * Reference in database) *) type table = (string * string * string) list @@ -84,522 +84,522 @@ type table = (string * string * string) list (** Track the table of external reference -> internal reference and a list of cleanup functions to delete all the objects we've created, in the event of error. *) type state = { - mutable table: table; - mutable created_vms: table; - mutable cleanup: (Context.t -> (Rpc.call -> Rpc.response) -> API.ref_session -> unit) list; - export: obj list; + mutable table: table; + mutable created_vms: table; + mutable cleanup: (Context.t -> (Rpc.call -> Rpc.response) -> API.ref_session -> unit) list; + export: obj list; } let initial_state export = { table = []; created_vms = []; cleanup = []; export = export } let log_reraise msg f x = - try f x - with e -> - Backtrace.is_important e; - error "Import failed: %s" msg; - raise e + try f x + with e -> + Backtrace.is_important e; + error "Import failed: %s" msg; + raise e let lookup x (table: table) = - let id = Ref.string_of x in - try - let (_,_,r) = List.find (fun (_,i,_) -> i=id) table in - Ref.of_string r - with Not_found as e -> - Backtrace.reraise e (IFailure (Failed_to_find_object id)) + let id = Ref.string_of x in + try + let (_,_,r) = List.find (fun (_,i,_) -> i=id) table in + Ref.of_string r + with Not_found as e -> + Backtrace.reraise e (IFailure (Failed_to_find_object id)) let exists x (table: table) = - let id = Ref.string_of x in - List.filter (fun (_,i,_) -> i=id) table <> [] + let id = Ref.string_of x in + 
List.filter (fun (_,i,_) -> i=id) table <> [] (* Using a reference string from the original export, find the XMLRPC snapshot of the appropriate object. *) let find_in_export x export = - try - let obj = List.find (fun obj -> obj.id = x) export in - obj.snapshot - with Not_found as e-> - Backtrace.reraise e (IFailure (Failed_to_find_object x)) + try + let obj = List.find (fun obj -> obj.id = x) export in + obj.snapshot + with Not_found as e-> + Backtrace.reraise e (IFailure (Failed_to_find_object x)) let choose_one = function - | x :: [] -> Some x - | x :: _ -> Some x - | [] -> None + | x :: [] -> Some x + | x :: _ -> Some x + | [] -> None (* Return the list of non-CDROM VDIs ie those which will be streamed-in *) let non_cdrom_vdis (x: header) = - let all_vbds = List.filter (fun x -> x.cls = Datamodel._vbd) x.objects in - let all_vbds = List.map (fun x -> API.Legacy.From.vBD_t "" x.snapshot) all_vbds in - let all_disk_vbds = List.filter (fun x -> x.API.vBD_type <> `CD) all_vbds in - let all_disk_vdis = List.map (fun x -> Ref.string_of x.API.vBD_VDI) all_disk_vbds in + let all_vbds = List.filter (fun x -> x.cls = Datamodel._vbd) x.objects in + let all_vbds = List.map (fun x -> API.Legacy.From.vBD_t "" x.snapshot) all_vbds in + let all_disk_vbds = List.filter (fun x -> x.API.vBD_type <> `CD) all_vbds in + let all_disk_vdis = List.map (fun x -> Ref.string_of x.API.vBD_VDI) all_disk_vbds in (* Remove all those whose SR has content-type = "iso" *) - let all_disk_vdis = List.filter (fun vdi -> - let vdir = API.Legacy.From.vDI_t "" (find_in_export vdi x.objects) in - let sr = API.Legacy.From.sR_t "" (find_in_export (Ref.string_of vdir.API.vDI_SR) x.objects) in - sr.API.sR_content_type <> "iso") all_disk_vdis in + let all_disk_vdis = List.filter (fun vdi -> + let vdir = API.Legacy.From.vDI_t "" (find_in_export vdi x.objects) in + let sr = API.Legacy.From.sR_t "" (find_in_export (Ref.string_of vdir.API.vDI_SR) x.objects) in + sr.API.sR_content_type <> "iso") all_disk_vdis in 
- let all_vdis = List.filter (fun x -> x.cls = Datamodel._vdi) x.objects in - List.filter (fun x -> false - || (List.mem x.id all_disk_vdis) - || (API.Legacy.From.vDI_t "" x.snapshot).API.vDI_type = `suspend) all_vdis + let all_vdis = List.filter (fun x -> x.cls = Datamodel._vdi) x.objects in + List.filter (fun x -> false + || (List.mem x.id all_disk_vdis) + || (API.Legacy.From.vDI_t "" x.snapshot).API.vDI_type = `suspend) all_vdis (* Check to see if another VM exists with the same MAC seed. *) (* Check VM uuids don't already exist. Check that if a VDI exists then it is a CDROM. *) let assert_can_restore_backup ~__context rpc session_id (x: header) = - let get_mac_seed vm = - if List.mem_assoc Xapi_globs.mac_seed vm.API.vM_other_config - then Some(List.assoc Xapi_globs.mac_seed vm.API.vM_other_config, vm) - else None in - - let get_vm_uuid_of_snap s = - let snapshot_of = Ref.string_of s.API.vM_snapshot_of in - try - if Xstringext.String.startswith "Ref:" snapshot_of then - (* This should be a snapshot in the archive *) - let v = Listext.List.find (fun v -> v.cls = Datamodel._vm && v.id = snapshot_of) x.objects in - let v = API.Legacy.From.vM_t "" v.snapshot in - Some v.API.vM_uuid - else if Xstringext.String.startswith Ref.ref_prefix snapshot_of then - (* This should be a snapshot in a live system *) - if Db.is_valid_ref __context s.API.vM_snapshot_of then - Some (Db.VM.get_uuid ~__context ~self:s.API.vM_snapshot_of) - else - Some (List.assoc Db_names.uuid (Helpers.vm_string_to_assoc s.API.vM_snapshot_metadata)) - else None - with _ -> None in - - (* This function should be called when a VM/snapshot to import has the same - mac seed as an existing VM. 
They are considered compatible only in the - following cases: - - - Both are VMs, and having the same uuid - - Both are snapshots, and the VMs they were derived from are the same one - - One is snapshot, one is VM, and the snapshot was derived from the VM - *) - let is_compatible v1 v2 = - match v1.API.vM_is_a_snapshot, v2.API.vM_is_a_snapshot with - | false, false -> - v1.API.vM_uuid = v2.API.vM_uuid - | true, true -> - let v1' = get_vm_uuid_of_snap v1 in - let v2' = get_vm_uuid_of_snap v2 in - v1' <> None && v2' <> None && v1' = v2' - | true, false -> - let v1' = get_vm_uuid_of_snap v1 in - v1' = Some v2.API.vM_uuid - | false, true -> - let v2' = get_vm_uuid_of_snap v2 in - v2' = Some v1.API.vM_uuid in - - let import_vms = - Listext.List.filter_map - (fun x -> - if x.cls <> Datamodel._vm then None else - let x = API.Legacy.From.vM_t "" x.snapshot in - get_mac_seed x - ) x.objects in - - let existing_vms = - Listext.List.filter_map - (fun (_, v) -> get_mac_seed v) - (Client.VM.get_all_records rpc session_id) in - - List.iter - (fun (mac, vm) -> - List.iter - (fun (mac', vm') -> - if mac = mac' && not (is_compatible vm vm') then - raise (Api_errors.Server_error(Api_errors.duplicate_vm, [ vm'.API.vM_uuid ]))) - existing_vms) - import_vms + let get_mac_seed vm = + if List.mem_assoc Xapi_globs.mac_seed vm.API.vM_other_config + then Some(List.assoc Xapi_globs.mac_seed vm.API.vM_other_config, vm) + else None in + + let get_vm_uuid_of_snap s = + let snapshot_of = Ref.string_of s.API.vM_snapshot_of in + try + if Xstringext.String.startswith "Ref:" snapshot_of then + (* This should be a snapshot in the archive *) + let v = Listext.List.find (fun v -> v.cls = Datamodel._vm && v.id = snapshot_of) x.objects in + let v = API.Legacy.From.vM_t "" v.snapshot in + Some v.API.vM_uuid + else if Xstringext.String.startswith Ref.ref_prefix snapshot_of then + (* This should be a snapshot in a live system *) + if Db.is_valid_ref __context s.API.vM_snapshot_of then + Some (Db.VM.get_uuid 
~__context ~self:s.API.vM_snapshot_of) + else + Some (List.assoc Db_names.uuid (Helpers.vm_string_to_assoc s.API.vM_snapshot_metadata)) + else None + with _ -> None in + + (* This function should be called when a VM/snapshot to import has the same + mac seed as an existing VM. They are considered compatible only in the + following cases: + + - Both are VMs, and having the same uuid + - Both are snapshots, and the VMs they were derived from are the same one + - One is snapshot, one is VM, and the snapshot was derived from the VM + *) + let is_compatible v1 v2 = + match v1.API.vM_is_a_snapshot, v2.API.vM_is_a_snapshot with + | false, false -> + v1.API.vM_uuid = v2.API.vM_uuid + | true, true -> + let v1' = get_vm_uuid_of_snap v1 in + let v2' = get_vm_uuid_of_snap v2 in + v1' <> None && v2' <> None && v1' = v2' + | true, false -> + let v1' = get_vm_uuid_of_snap v1 in + v1' = Some v2.API.vM_uuid + | false, true -> + let v2' = get_vm_uuid_of_snap v2 in + v2' = Some v1.API.vM_uuid in + + let import_vms = + Listext.List.filter_map + (fun x -> + if x.cls <> Datamodel._vm then None else + let x = API.Legacy.From.vM_t "" x.snapshot in + get_mac_seed x + ) x.objects in + + let existing_vms = + Listext.List.filter_map + (fun (_, v) -> get_mac_seed v) + (Client.VM.get_all_records rpc session_id) in + + List.iter + (fun (mac, vm) -> + List.iter + (fun (mac', vm') -> + if mac = mac' && not (is_compatible vm vm') then + raise (Api_errors.Server_error(Api_errors.duplicate_vm, [ vm'.API.vM_uuid ]))) + existing_vms) + import_vms let assert_can_live_import __context rpc session_id vm_record = - let assert_memory_available () = - let host = Helpers.get_localhost ~__context in - let host_mem_available = - Memory_check.host_compute_free_memory_with_maximum_compression - ~__context ~host None in - let main, shadow = - Memory_check.vm_compute_start_memory ~__context vm_record in - let mem_reqd_for_vm = Int64.add main shadow in - if host_mem_available < mem_reqd_for_vm then - raise 
(Api_errors.Server_error ( - Api_errors.host_not_enough_free_memory, - [ - Int64.to_string mem_reqd_for_vm; - Int64.to_string host_mem_available; - ])) - in - if vm_record.API.vM_power_state = `Running || vm_record.API.vM_power_state = `Paused - then assert_memory_available () + let assert_memory_available () = + let host = Helpers.get_localhost ~__context in + let host_mem_available = + Memory_check.host_compute_free_memory_with_maximum_compression + ~__context ~host None in + let main, shadow = + Memory_check.vm_compute_start_memory ~__context vm_record in + let mem_reqd_for_vm = Int64.add main shadow in + if host_mem_available < mem_reqd_for_vm then + raise (Api_errors.Server_error ( + Api_errors.host_not_enough_free_memory, + [ + Int64.to_string mem_reqd_for_vm; + Int64.to_string host_mem_available; + ])) + in + if vm_record.API.vM_power_state = `Running || vm_record.API.vM_power_state = `Paused + then assert_memory_available () (* The signature for a set of functions which we must provide to be able to import an object type. *) module type HandlerTools = sig - (* A type which represents how we should deal with the import of an object. *) - type precheck_t - (* Compare the state of the database with the metadata to be imported. *) - (* Returns a result which signals what we should do to import the metadata. *) - val precheck: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t - (* Handle the result of the precheck function, but don't create any database objects. *) - (* Add objects to the state table if necessary, to keep track of what would have been imported.*) - val handle_dry_run: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t -> unit - (* Handle the result of the check function, creating database objects if necessary. *) - (* For certain combinations of result and object type, this can be aliased to handle_dry_run. 
*) - val handle: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t -> unit + (* A type which represents how we should deal with the import of an object. *) + type precheck_t + (* Compare the state of the database with the metadata to be imported. *) + (* Returns a result which signals what we should do to import the metadata. *) + val precheck: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t + (* Handle the result of the precheck function, but don't create any database objects. *) + (* Add objects to the state table if necessary, to keep track of what would have been imported.*) + val handle_dry_run: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t -> unit + (* Handle the result of the check function, creating database objects if necessary. *) + (* For certain combinations of result and object type, this can be aliased to handle_dry_run. *) + val handle: Context.t -> config -> (Rpc.call -> Rpc.response) -> API.ref_session -> state -> obj -> precheck_t -> unit end (* Make a handler for a set of handler functions. 
*) module MakeHandler = functor (M: HandlerTools) -> struct - let handle __context config rpc session_id state obj = - let dry_run = match config.import_type with - | Metadata_import {dry_run = true; _} -> true - | _ -> false - in - let precheck_result = M.precheck __context config rpc session_id state obj in - if dry_run then - M.handle_dry_run __context config rpc session_id state obj precheck_result - else - M.handle __context config rpc session_id state obj precheck_result + let handle __context config rpc session_id state obj = + let dry_run = match config.import_type with + | Metadata_import {dry_run = true; _} -> true + | _ -> false + in + let precheck_result = M.precheck __context config rpc session_id state obj in + if dry_run then + M.handle_dry_run __context config rpc session_id state obj precheck_result + else + M.handle __context config rpc session_id state obj precheck_result end module Host : HandlerTools = struct - type precheck_t = - | Found_host of API.ref_host - | Found_no_host - - let precheck __context config rpc session_id state x = - let host_record = API.Legacy.From.host_t "" x.snapshot in - try Found_host (Db.Host.get_by_uuid __context host_record.API.host_uuid) - with _ -> Found_no_host - - let handle_dry_run __context config rpc session_id state x precheck_result = - let host = match precheck_result with - | Found_host host' -> host' - | Found_no_host -> Ref.null - in - state.table <- (x.cls, x.id, Ref.string_of host) :: state.table - - let handle = handle_dry_run + type precheck_t = + | Found_host of API.ref_host + | Found_no_host + + let precheck __context config rpc session_id state x = + let host_record = API.Legacy.From.host_t "" x.snapshot in + try Found_host (Db.Host.get_by_uuid __context host_record.API.host_uuid) + with _ -> Found_no_host + + let handle_dry_run __context config rpc session_id state x precheck_result = + let host = match precheck_result with + | Found_host host' -> host' + | Found_no_host -> Ref.null + in + 
state.table <- (x.cls, x.id, Ref.string_of host) :: state.table + + let handle = handle_dry_run end module VM : HandlerTools = struct - type precheck_t = - | Default_template of API.ref_VM - | Replace of API.ref_VM * API.vM_t - | Fail of exn - | Skip - | Clean_import of API.vM_t - - let precheck __context config rpc session_id state x = - let vm_record = API.Legacy.From.vM_t "" x.snapshot in - if vm_record.API.vM_is_a_template - && (List.mem_assoc Xapi_globs.default_template_key vm_record.API.vM_other_config) - && ((List.assoc Xapi_globs.default_template_key vm_record.API.vM_other_config) = "true") - then begin - (* If the VM is a default template, then pick up the one with the same name. *) - let template = - try List.hd (Db.VM.get_by_name_label __context vm_record.API.vM_name_label) - with _ -> Ref.null - in - Default_template template - end else begin - let import_action = - (* Check for an existing VM with the same UUID - if one exists, what we do next *) - (* will depend on the state of the VM and whether the import is forced. *) - let get_vm_by_uuid () = Db.VM.get_by_uuid __context vm_record.API.vM_uuid in - let vm_uuid_exists () = try ignore (get_vm_by_uuid ()); true with _ -> false in - (* If full_restore is true then we want to keep the VM uuid - this may involve replacing an existing VM. *) - if config.full_restore && vm_uuid_exists () then begin - let vm = get_vm_by_uuid () in - (* The existing VM cannot be replaced if it is running. *) - (* If import is forced then skip the VM, else throw an error. *) - let power_state = Db.VM.get_power_state ~__context ~self:vm in - if power_state <> `Halted then begin - if config.force then - (debug "Forced import skipping VM %s as VM to replace was not halted." 
vm_record.API.vM_uuid; Skip) - else Fail (Api_errors.Server_error(Api_errors.vm_bad_power_state, - [ - Ref.string_of vm; - Record_util.power_state_to_string `Halted; - Record_util.power_state_to_string power_state - ])) - end else begin - (* The existing VM should not be replaced if the version to be imported is no newer, *) - (* unless the import is forced. *) - let existing_version = Db.VM.get_version ~__context ~self:vm in - let version_to_import = vm_record.API.vM_version in - if (existing_version >= version_to_import) && (config.force = false) then - Fail (Api_errors.Server_error(Api_errors.vm_to_import_is_not_newer_version, - [ - Ref.string_of vm; - Int64.to_string existing_version; - Int64.to_string version_to_import; - ])) - else - Replace (vm, vm_record) - end - end else - Clean_import vm_record - in - match import_action with - | Replace (_, vm_record) | Clean_import vm_record -> - if is_live config - then assert_can_live_import __context rpc session_id vm_record; - import_action - | _ -> import_action - end - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Skip -> () - | Fail e -> raise e - | Default_template template -> - state.table <- (x.cls, x.id, Ref.string_of template) :: state.table; - state.created_vms <- (x.cls, x.id, Ref.string_of template) :: state.created_vms - | Clean_import _ | Replace _ -> - let dummy_vm = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vm) :: state.table - - let handle __context config rpc session_id state x precheck_result = - (* This function assumes we've already checked for and dealt with any existing VM with the same UUID. 
*) - let do_import vm_record = - let task_id = Ref.string_of (Context.get_task_id __context) in - (* Remove the grant guest API access key unconditionally (it's only for our RHEL4 templates atm) *) - let other_config = List.filter - (fun (key, _) -> key <> Xapi_globs.grant_api_access) vm_record.API.vM_other_config in - (* If not performing a full restore then generate a fresh MAC seed *) - let other_config = - if config.full_restore - then other_config - else - (Xapi_globs.mac_seed, Uuid.string_of_uuid (Uuid.make_uuid ())) :: - (List.filter (fun (x, _) -> x <> Xapi_globs.mac_seed) other_config) in - let vm_record = { vm_record with API.vM_other_config = other_config } in - - (* Preserve genid for cross-pool migrates, because to the guest the - * disk looks like it hasn't changed. - * Preserve genid for templates, since they're not going to be started. - * Generate a fresh genid for normal VM imports. *) - let vm_record = - if (is_live config) || vm_record.API.vM_is_a_template - then vm_record - else { - vm_record with API.vM_generation_id = Xapi_vm_helpers.fresh_genid - ~current_genid:vm_record.API.vM_generation_id () - } - in - - let vm_record = - if vm_exported_pre_dmc x - then begin - let safe_constraints = Vm_memory_constraints.reset_to_safe_defaults - ~constraints:(Vm_memory_constraints.extract ~vm_record) in - debug "VM %s was exported pre-DMC; dynamic_{min,max},target <- %Ld" - vm_record.API.vM_name_label safe_constraints.static_max; - {vm_record with API. - vM_memory_static_min = safe_constraints.static_min; - vM_memory_dynamic_min = safe_constraints.dynamic_min; - vM_memory_target = safe_constraints.target; - vM_memory_dynamic_max = safe_constraints.dynamic_max; - vM_memory_static_max = safe_constraints.static_max; - } - end else vm_record - in - let vm_record = - if vm_has_field ~x ~name:"has_vendor_device" then vm_record else ( - {vm_record with API.vM_has_vendor_device = false;} - ) in - let vm_record = {vm_record with API. 
- vM_memory_overhead = Memory_check.vm_compute_memory_overhead vm_record - } in - let vm_record = {vm_record with API.vM_protection_policy = Ref.null} in - (* Full restore preserves UUIDs, so if we are replacing an existing VM the version number should be incremented *) - (* to keep track of how many times this VM has been restored. If not a full restore, then we don't need to keep track. *) - let vm_record = - if config.full_restore then - {vm_record with API.vM_version = Int64.add vm_record.API.vM_version 1L} - else - {vm_record with API.vM_version = 0L} - in - (* Clear the appliance field - in the case of DR we will reconstruct the appliance separately. *) - let vm_record = {vm_record with API.vM_appliance = Ref.null} in - (* Correct ha-restart-priority for pre boston imports*) - let vm_record = match vm_record.API.vM_ha_restart_priority with - "0"|"1"|"2"|"3" as order -> { vm_record with API.vM_ha_restart_priority = "restart"; API.vM_order = Int64.of_string (order) } - | _ -> vm_record; - in - - let vm = log_reraise - ("failed to create VM with name-label " ^ vm_record.API.vM_name_label) - (fun value -> - let vm = Xapi_vm_helpers.create_from_record_without_checking_licence_feature_for_vendor_device - ~__context rpc session_id value - in - if config.full_restore then Db.VM.set_uuid ~__context ~self:vm ~value:value.API.vM_uuid; - vm) - vm_record in - state.cleanup <- (fun __context rpc session_id -> - (* Need to get rid of the import task or we cannot destroy the VM *) - Helpers.log_exn_continue - (Printf.sprintf "Attempting to remove import from current_operations of VM: %s" (Ref.string_of vm)) - (fun () -> Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id) (); - Db.VM.set_power_state ~__context ~self:vm ~value:`Halted; - Client.VM.destroy rpc session_id vm) :: state.cleanup; - (* Restore the last_booted_record too (critical if suspended but might as well do it all the time) *) - Db.VM.set_last_booted_record ~__context ~self:vm 
~value:(vm_record.API.vM_last_booted_record); - Db.VM.set_last_boot_CPU_flags ~__context ~self:vm ~value:(vm_record.API.vM_last_boot_CPU_flags); - - TaskHelper.operate_on_db_task ~__context (fun t -> - (try Db.VM.remove_from_other_config ~__context ~self:vm ~key:Xapi_globs.import_task with _ -> ()); - Db.VM.add_to_other_config ~__context ~self:vm ~key:Xapi_globs.import_task ~value:(Ref.string_of t)); - (* Set the power_state and suspend_VDI if the VM is suspended. - * If anything goes wrong, still continue if forced. *) - if vm_record.API.vM_power_state = `Suspended then begin - try - let vdi = (lookup vm_record.API.vM_suspend_VDI) state.table in - Db.VM.set_power_state ~__context ~self:vm ~value:`Suspended; - Db.VM.set_suspend_VDI ~__context ~self:vm ~value:vdi - with e -> if not config.force then begin - Backtrace.is_important e; - let msg = "Failed to find VM's suspend_VDI: " ^ (Ref.string_of vm_record.API.vM_suspend_VDI) in - error "Import failed: %s" msg; - raise e - end - end else - Db.VM.set_power_state ~__context ~self:vm ~value:`Halted; - - (* We might want to import a control domain *) - Db.VM.set_is_control_domain~__context ~self:vm ~value:vm_record.API.vM_is_control_domain; - Db.VM.set_resident_on ~__context ~self:vm ~value:(try lookup vm_record.API.vM_resident_on state.table with _ -> Ref.null); - Db.VM.set_affinity ~__context ~self:vm ~value:(try lookup vm_record.API.vM_affinity state.table with _ -> Ref.null); - - (* Update the snapshot metadata. At this points, the snapshot_of field is not relevant as - it use the export ref. However, as the corresponding VM object may have not been created - yet, this fiels contains some useful information to update it later. 
*) - Db.VM.set_is_a_snapshot ~__context ~self:vm ~value:vm_record.API.vM_is_a_snapshot; - Db.VM.set_snapshot_info ~__context ~self:vm ~value:vm_record.API.vM_snapshot_info; - Db.VM.set_snapshot_of ~__context ~self:vm ~value:vm_record.API.vM_snapshot_of; - Db.VM.set_snapshot_time ~__context ~self:vm ~value:vm_record.API.vM_snapshot_time; - Db.VM.set_transportable_snapshot_id ~__context ~self:vm ~value:vm_record.API.vM_transportable_snapshot_id; - - (* VM might have suspend_SR that does not exist on this pool *) - if None <> (Helpers.check_sr_exists ~__context - ~self:vm_record.API.vM_suspend_SR) - then Db.VM.set_suspend_SR ~__context ~self:vm ~value:Ref.null ; - - Db.VM.set_parent ~__context ~self:vm ~value:vm_record.API.vM_parent; - - begin try - let gm = lookup vm_record.API.vM_guest_metrics state.table in - Db.VM.set_guest_metrics ~__context ~self:vm ~value:gm - with _ -> () end; - - Db.VM.set_bios_strings ~__context ~self:vm ~value:vm_record.API.vM_bios_strings; - - debug "Created VM: %s (was %s)" (Ref.string_of vm) x.id; - - (* Although someone could sneak in here and attempt to power on the VM, it - doesn't really matter since no VBDs have been created yet. - We don't bother doing this if --force is set otherwise on error the VM - remains locked. *) - if not config.force then - Db.VM.add_to_current_operations ~__context ~self:vm ~key:task_id ~value:`import; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm; - - state.table <- (x.cls, x.id, Ref.string_of vm) :: state.table; - state.created_vms <- (x.cls, x.id, Ref.string_of vm) :: state.created_vms - in - - match precheck_result with - | Skip | Fail _ | Default_template _ -> - handle_dry_run __context config rpc session_id state x precheck_result - | Clean_import (vm_record) -> do_import vm_record - | Replace (vm, vm_record) -> - (* Destroy the existing VM, along with its VIFs and VBDs. 
*) - debug "Replacing VM %s" vm_record.API.vM_uuid; - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let vifs = Db.VM.get_VIFs ~__context ~self:vm in - List.iter (fun vif -> Client.VIF.destroy ~rpc ~session_id ~self:vif) vifs; - let vbds = Db.VM.get_VBDs ~__context ~self:vm in - List.iter (fun vbd -> Client.VBD.destroy ~rpc ~session_id ~self:vbd) vbds; - Client.VM.destroy ~rpc ~session_id ~self:vm); - do_import vm_record + type precheck_t = + | Default_template of API.ref_VM + | Replace of API.ref_VM * API.vM_t + | Fail of exn + | Skip + | Clean_import of API.vM_t + + let precheck __context config rpc session_id state x = + let vm_record = API.Legacy.From.vM_t "" x.snapshot in + if vm_record.API.vM_is_a_template + && (List.mem_assoc Xapi_globs.default_template_key vm_record.API.vM_other_config) + && ((List.assoc Xapi_globs.default_template_key vm_record.API.vM_other_config) = "true") + then begin + (* If the VM is a default template, then pick up the one with the same name. *) + let template = + try List.hd (Db.VM.get_by_name_label __context vm_record.API.vM_name_label) + with _ -> Ref.null + in + Default_template template + end else begin + let import_action = + (* Check for an existing VM with the same UUID - if one exists, what we do next *) + (* will depend on the state of the VM and whether the import is forced. *) + let get_vm_by_uuid () = Db.VM.get_by_uuid __context vm_record.API.vM_uuid in + let vm_uuid_exists () = try ignore (get_vm_by_uuid ()); true with _ -> false in + (* If full_restore is true then we want to keep the VM uuid - this may involve replacing an existing VM. *) + if config.full_restore && vm_uuid_exists () then begin + let vm = get_vm_by_uuid () in + (* The existing VM cannot be replaced if it is running. *) + (* If import is forced then skip the VM, else throw an error. 
*) + let power_state = Db.VM.get_power_state ~__context ~self:vm in + if power_state <> `Halted then begin + if config.force then + (debug "Forced import skipping VM %s as VM to replace was not halted." vm_record.API.vM_uuid; Skip) + else Fail (Api_errors.Server_error(Api_errors.vm_bad_power_state, + [ + Ref.string_of vm; + Record_util.power_state_to_string `Halted; + Record_util.power_state_to_string power_state + ])) + end else begin + (* The existing VM should not be replaced if the version to be imported is no newer, *) + (* unless the import is forced. *) + let existing_version = Db.VM.get_version ~__context ~self:vm in + let version_to_import = vm_record.API.vM_version in + if (existing_version >= version_to_import) && (config.force = false) then + Fail (Api_errors.Server_error(Api_errors.vm_to_import_is_not_newer_version, + [ + Ref.string_of vm; + Int64.to_string existing_version; + Int64.to_string version_to_import; + ])) + else + Replace (vm, vm_record) + end + end else + Clean_import vm_record + in + match import_action with + | Replace (_, vm_record) | Clean_import vm_record -> + if is_live config + then assert_can_live_import __context rpc session_id vm_record; + import_action + | _ -> import_action + end + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Skip -> () + | Fail e -> raise e + | Default_template template -> + state.table <- (x.cls, x.id, Ref.string_of template) :: state.table; + state.created_vms <- (x.cls, x.id, Ref.string_of template) :: state.created_vms + | Clean_import _ | Replace _ -> + let dummy_vm = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vm) :: state.table + + let handle __context config rpc session_id state x precheck_result = + (* This function assumes we've already checked for and dealt with any existing VM with the same UUID. 
*) + let do_import vm_record = + let task_id = Ref.string_of (Context.get_task_id __context) in + (* Remove the grant guest API access key unconditionally (it's only for our RHEL4 templates atm) *) + let other_config = List.filter + (fun (key, _) -> key <> Xapi_globs.grant_api_access) vm_record.API.vM_other_config in + (* If not performing a full restore then generate a fresh MAC seed *) + let other_config = + if config.full_restore + then other_config + else + (Xapi_globs.mac_seed, Uuid.string_of_uuid (Uuid.make_uuid ())) :: + (List.filter (fun (x, _) -> x <> Xapi_globs.mac_seed) other_config) in + let vm_record = { vm_record with API.vM_other_config = other_config } in + + (* Preserve genid for cross-pool migrates, because to the guest the + * disk looks like it hasn't changed. + * Preserve genid for templates, since they're not going to be started. + * Generate a fresh genid for normal VM imports. *) + let vm_record = + if (is_live config) || vm_record.API.vM_is_a_template + then vm_record + else { + vm_record with API.vM_generation_id = Xapi_vm_helpers.fresh_genid + ~current_genid:vm_record.API.vM_generation_id () + } + in + + let vm_record = + if vm_exported_pre_dmc x + then begin + let safe_constraints = Vm_memory_constraints.reset_to_safe_defaults + ~constraints:(Vm_memory_constraints.extract ~vm_record) in + debug "VM %s was exported pre-DMC; dynamic_{min,max},target <- %Ld" + vm_record.API.vM_name_label safe_constraints.static_max; + {vm_record with API. + vM_memory_static_min = safe_constraints.static_min; + vM_memory_dynamic_min = safe_constraints.dynamic_min; + vM_memory_target = safe_constraints.target; + vM_memory_dynamic_max = safe_constraints.dynamic_max; + vM_memory_static_max = safe_constraints.static_max; + } + end else vm_record + in + let vm_record = + if vm_has_field ~x ~name:"has_vendor_device" then vm_record else ( + {vm_record with API.vM_has_vendor_device = false;} + ) in + let vm_record = {vm_record with API. 
+ vM_memory_overhead = Memory_check.vm_compute_memory_overhead vm_record + } in + let vm_record = {vm_record with API.vM_protection_policy = Ref.null} in + (* Full restore preserves UUIDs, so if we are replacing an existing VM the version number should be incremented *) + (* to keep track of how many times this VM has been restored. If not a full restore, then we don't need to keep track. *) + let vm_record = + if config.full_restore then + {vm_record with API.vM_version = Int64.add vm_record.API.vM_version 1L} + else + {vm_record with API.vM_version = 0L} + in + (* Clear the appliance field - in the case of DR we will reconstruct the appliance separately. *) + let vm_record = {vm_record with API.vM_appliance = Ref.null} in + (* Correct ha-restart-priority for pre boston imports*) + let vm_record = match vm_record.API.vM_ha_restart_priority with + "0"|"1"|"2"|"3" as order -> { vm_record with API.vM_ha_restart_priority = "restart"; API.vM_order = Int64.of_string (order) } + | _ -> vm_record; + in + + let vm = log_reraise + ("failed to create VM with name-label " ^ vm_record.API.vM_name_label) + (fun value -> + let vm = Xapi_vm_helpers.create_from_record_without_checking_licence_feature_for_vendor_device + ~__context rpc session_id value + in + if config.full_restore then Db.VM.set_uuid ~__context ~self:vm ~value:value.API.vM_uuid; + vm) + vm_record in + state.cleanup <- (fun __context rpc session_id -> + (* Need to get rid of the import task or we cannot destroy the VM *) + Helpers.log_exn_continue + (Printf.sprintf "Attempting to remove import from current_operations of VM: %s" (Ref.string_of vm)) + (fun () -> Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id) (); + Db.VM.set_power_state ~__context ~self:vm ~value:`Halted; + Client.VM.destroy rpc session_id vm) :: state.cleanup; + (* Restore the last_booted_record too (critical if suspended but might as well do it all the time) *) + Db.VM.set_last_booted_record ~__context ~self:vm 
~value:(vm_record.API.vM_last_booted_record); + Db.VM.set_last_boot_CPU_flags ~__context ~self:vm ~value:(vm_record.API.vM_last_boot_CPU_flags); + + TaskHelper.operate_on_db_task ~__context (fun t -> + (try Db.VM.remove_from_other_config ~__context ~self:vm ~key:Xapi_globs.import_task with _ -> ()); + Db.VM.add_to_other_config ~__context ~self:vm ~key:Xapi_globs.import_task ~value:(Ref.string_of t)); + (* Set the power_state and suspend_VDI if the VM is suspended. + * If anything goes wrong, still continue if forced. *) + if vm_record.API.vM_power_state = `Suspended then begin + try + let vdi = (lookup vm_record.API.vM_suspend_VDI) state.table in + Db.VM.set_power_state ~__context ~self:vm ~value:`Suspended; + Db.VM.set_suspend_VDI ~__context ~self:vm ~value:vdi + with e -> if not config.force then begin + Backtrace.is_important e; + let msg = "Failed to find VM's suspend_VDI: " ^ (Ref.string_of vm_record.API.vM_suspend_VDI) in + error "Import failed: %s" msg; + raise e + end + end else + Db.VM.set_power_state ~__context ~self:vm ~value:`Halted; + + (* We might want to import a control domain *) + Db.VM.set_is_control_domain~__context ~self:vm ~value:vm_record.API.vM_is_control_domain; + Db.VM.set_resident_on ~__context ~self:vm ~value:(try lookup vm_record.API.vM_resident_on state.table with _ -> Ref.null); + Db.VM.set_affinity ~__context ~self:vm ~value:(try lookup vm_record.API.vM_affinity state.table with _ -> Ref.null); + + (* Update the snapshot metadata. At this points, the snapshot_of field is not relevant as + it use the export ref. However, as the corresponding VM object may have not been created + yet, this fiels contains some useful information to update it later. 
*) + Db.VM.set_is_a_snapshot ~__context ~self:vm ~value:vm_record.API.vM_is_a_snapshot; + Db.VM.set_snapshot_info ~__context ~self:vm ~value:vm_record.API.vM_snapshot_info; + Db.VM.set_snapshot_of ~__context ~self:vm ~value:vm_record.API.vM_snapshot_of; + Db.VM.set_snapshot_time ~__context ~self:vm ~value:vm_record.API.vM_snapshot_time; + Db.VM.set_transportable_snapshot_id ~__context ~self:vm ~value:vm_record.API.vM_transportable_snapshot_id; + + (* VM might have suspend_SR that does not exist on this pool *) + if None <> (Helpers.check_sr_exists ~__context + ~self:vm_record.API.vM_suspend_SR) + then Db.VM.set_suspend_SR ~__context ~self:vm ~value:Ref.null ; + + Db.VM.set_parent ~__context ~self:vm ~value:vm_record.API.vM_parent; + + begin try + let gm = lookup vm_record.API.vM_guest_metrics state.table in + Db.VM.set_guest_metrics ~__context ~self:vm ~value:gm + with _ -> () end; + + Db.VM.set_bios_strings ~__context ~self:vm ~value:vm_record.API.vM_bios_strings; + + debug "Created VM: %s (was %s)" (Ref.string_of vm) x.id; + + (* Although someone could sneak in here and attempt to power on the VM, it + doesn't really matter since no VBDs have been created yet. + We don't bother doing this if --force is set otherwise on error the VM + remains locked. *) + if not config.force then + Db.VM.add_to_current_operations ~__context ~self:vm ~key:task_id ~value:`import; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm; + + state.table <- (x.cls, x.id, Ref.string_of vm) :: state.table; + state.created_vms <- (x.cls, x.id, Ref.string_of vm) :: state.created_vms + in + + match precheck_result with + | Skip | Fail _ | Default_template _ -> + handle_dry_run __context config rpc session_id state x precheck_result + | Clean_import (vm_record) -> do_import vm_record + | Replace (vm, vm_record) -> + (* Destroy the existing VM, along with its VIFs and VBDs. 
*) + debug "Replacing VM %s" vm_record.API.vM_uuid; + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let vifs = Db.VM.get_VIFs ~__context ~self:vm in + List.iter (fun vif -> Client.VIF.destroy ~rpc ~session_id ~self:vif) vifs; + let vbds = Db.VM.get_VBDs ~__context ~self:vm in + List.iter (fun vbd -> Client.VBD.destroy ~rpc ~session_id ~self:vbd) vbds; + Client.VM.destroy ~rpc ~session_id ~self:vm); + do_import vm_record end (** Create the guest metrics *) module GuestMetrics : HandlerTools = struct - type precheck_t = OK - - let precheck __context config rpc session_id state x = OK - - let handle_dry_run __context config rpc session_id state x precheck_result = - let dummy_gm = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_gm) :: state.table - - let handle __context config rpc session_id state x precheck_result = - let gm_record = API.Legacy.From.vM_guest_metrics_t "" x.snapshot in - let gm = Ref.make () in - Db.VM_guest_metrics.create ~__context - ~ref:gm - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~os_version:gm_record.API.vM_guest_metrics_os_version - ~pV_drivers_version:gm_record.API.vM_guest_metrics_PV_drivers_version - ~pV_drivers_up_to_date:gm_record.API.vM_guest_metrics_PV_drivers_up_to_date - ~memory:gm_record.API.vM_guest_metrics_memory - ~disks:gm_record.API.vM_guest_metrics_disks - ~networks:gm_record.API.vM_guest_metrics_networks - ~pV_drivers_detected:gm_record.API.vM_guest_metrics_PV_drivers_detected - ~other:gm_record.API.vM_guest_metrics_other - ~last_updated:gm_record.API.vM_guest_metrics_last_updated - ~other_config:gm_record.API.vM_guest_metrics_other_config - ~live:gm_record.API.vM_guest_metrics_live - ~can_use_hotplug_vbd:gm_record.API.vM_guest_metrics_can_use_hotplug_vbd - ~can_use_hotplug_vif:gm_record.API.vM_guest_metrics_can_use_hotplug_vif - ; - state.table <- (x.cls, x.id, Ref.string_of gm) :: state.table + type precheck_t = OK + + let precheck __context config rpc session_id state x = OK + + let 
handle_dry_run __context config rpc session_id state x precheck_result = + let dummy_gm = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_gm) :: state.table + + let handle __context config rpc session_id state x precheck_result = + let gm_record = API.Legacy.From.vM_guest_metrics_t "" x.snapshot in + let gm = Ref.make () in + Db.VM_guest_metrics.create ~__context + ~ref:gm + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~os_version:gm_record.API.vM_guest_metrics_os_version + ~pV_drivers_version:gm_record.API.vM_guest_metrics_PV_drivers_version + ~pV_drivers_up_to_date:gm_record.API.vM_guest_metrics_PV_drivers_up_to_date + ~memory:gm_record.API.vM_guest_metrics_memory + ~disks:gm_record.API.vM_guest_metrics_disks + ~networks:gm_record.API.vM_guest_metrics_networks + ~pV_drivers_detected:gm_record.API.vM_guest_metrics_PV_drivers_detected + ~other:gm_record.API.vM_guest_metrics_other + ~last_updated:gm_record.API.vM_guest_metrics_last_updated + ~other_config:gm_record.API.vM_guest_metrics_other_config + ~live:gm_record.API.vM_guest_metrics_live + ~can_use_hotplug_vbd:gm_record.API.vM_guest_metrics_can_use_hotplug_vbd + ~can_use_hotplug_vif:gm_record.API.vM_guest_metrics_can_use_hotplug_vif + ; + state.table <- (x.cls, x.id, Ref.string_of gm) :: state.table end (** If we're restoring VM metadata only then lookup the SR by uuid. 
If we can't find the SR then we will still try to match VDIs later (except CDROMs) *) module SR : HandlerTools = struct - type precheck_t = - | Found_SR of API.ref_SR - | Found_no_SR - | Will_use_SR of API.ref_SR - | SR_not_needed - - let precheck __context config rpc session_id state x = - let sr_record = API.Legacy.From.sR_t "" x.snapshot in - match config.import_type with - | Metadata_import _ -> begin - (* Look up the existing SR record *) - try - let sr = Client.SR.get_by_uuid rpc session_id sr_record.API.sR_uuid in - Found_SR sr - with e -> - let msg = match sr_record.API.sR_content_type with - | "iso" -> "- will eject disk" (* Will be handled specially in handle_vdi *) - | _ -> "- will still try to find individual VDIs" - in - warn "Failed to find SR with UUID: %s content-type: %s %s" - sr_record.API.sR_uuid sr_record.API.sR_content_type msg; - Found_no_SR - end - | Full_import sr -> begin - if sr_record.API.sR_content_type = "iso" - then SR_not_needed (* this one will be ejected *) - else Will_use_SR sr - end - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_SR sr | Will_use_SR sr -> - state.table <- (x.cls, x.id, Ref.string_of sr) :: state.table - | Found_no_SR | SR_not_needed -> () - - let handle = handle_dry_run + type precheck_t = + | Found_SR of API.ref_SR + | Found_no_SR + | Will_use_SR of API.ref_SR + | SR_not_needed + + let precheck __context config rpc session_id state x = + let sr_record = API.Legacy.From.sR_t "" x.snapshot in + match config.import_type with + | Metadata_import _ -> begin + (* Look up the existing SR record *) + try + let sr = Client.SR.get_by_uuid rpc session_id sr_record.API.sR_uuid in + Found_SR sr + with e -> + let msg = match sr_record.API.sR_content_type with + | "iso" -> "- will eject disk" (* Will be handled specially in handle_vdi *) + | _ -> "- will still try to find individual VDIs" + in + warn "Failed to find SR with UUID: %s content-type: %s %s" + 
sr_record.API.sR_uuid sr_record.API.sR_content_type msg; + Found_no_SR + end + | Full_import sr -> begin + if sr_record.API.sR_content_type = "iso" + then SR_not_needed (* this one will be ejected *) + else Will_use_SR sr + end + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_SR sr | Will_use_SR sr -> + state.table <- (x.cls, x.id, Ref.string_of sr) :: state.table + | Found_no_SR | SR_not_needed -> () + + let handle = handle_dry_run end (** If we're restoring VM metadata only then lookup the VDI by uuid. @@ -611,166 +611,166 @@ end If the SR can be found AND is an iso SR then we attempt to lookup the VDI by name_label If the SR can be found AND is not an iso SR then we attempt to create the VDI in it *) module VDI : HandlerTools = struct - type precheck_t = - | Found_iso of API.ref_VDI - | Found_no_iso - | Found_disk of API.ref_VDI - | Found_no_disk of exn - | Skip - | Create of API.vDI_t - - let precheck __context config rpc session_id state x = - let vdi_record = API.Legacy.From.vDI_t "" x.snapshot in - - let original_sr = API.Legacy.From.sR_t "" (find_in_export (Ref.string_of vdi_record.API.vDI_SR) state.export) in - if original_sr.API.sR_content_type = "iso" then begin - (* Best effort: locate a VDI in any shared ISO SR with a matching VDI.location *) - let iso_srs = List.filter (fun self -> Client.SR.get_content_type rpc session_id self = "iso" - && Client.SR.get_type rpc session_id self <> "udev") - (Client.SR.get_all rpc session_id) in - match List.filter (fun (_, vdir) -> - vdir.API.vDI_location = vdi_record.API.vDI_location && (List.mem vdir.API.vDI_SR iso_srs)) - (Client.VDI.get_all_records rpc session_id) |> choose_one with - | Some (vdi, _) -> - Found_iso vdi - | None -> - warn "Found no ISO VDI with location = %s; attempting to eject" vdi_record.API.vDI_location; - Found_no_iso - end else begin - match config.import_type with - | Metadata_import { vdi_map } -> begin - let mapto = - 
if List.mem_assoc Constants.storage_migrate_vdi_map_key vdi_record.API.vDI_other_config - then Some (Ref.of_string (List.assoc Constants.storage_migrate_vdi_map_key vdi_record.API.vDI_other_config)) - else None in - let vdi_records = Client.VDI.get_all_records rpc session_id in - let find_by_sr_and_location sr location = - vdi_records - |> List.filter (fun (_, vdir) -> vdir.API.vDI_location = location && vdir.API.vDI_SR = sr) - |> choose_one - |> Opt.map fst in - let find_by_uuid uuid = - vdi_records - |> List.filter (fun (_, vdir) -> vdir.API.vDI_uuid = uuid) - |> choose_one - |> Opt.map fst in - let _scsiid = "SCSIid" in - let scsiid_of vdi_record = - if List.mem_assoc _scsiid vdi_record.API.vDI_sm_config - then Some (List.assoc _scsiid vdi_record.API.vDI_sm_config) - else None in - let find_by_scsiid x = - vdi_records - |> Listext.List.filter_map (fun (rf, vdir) -> - if scsiid_of vdir = Some x then Some (rf, vdir) else None) - |> choose_one in - let by_vdi_map = - (* Look up the mapping by both uuid and SCSIid *) - match ( - if List.mem_assoc vdi_record.API.vDI_uuid vdi_map - then Some (List.assoc vdi_record.API.vDI_uuid vdi_map) - else match scsiid_of vdi_record with - | None -> None - | Some x -> - if List.mem_assoc x vdi_map - then Some (List.assoc x vdi_map) - else None - ) with - | Some destination -> - begin match find_by_uuid destination with - | Some x -> Some x - | None -> - begin match find_by_scsiid destination with - | Some (rf, rc) -> - info "VDI %s (SCSIid %s) mapped to %s (SCSIid %s) by user" vdi_record.API.vDI_uuid (Opt.default "None" (scsiid_of vdi_record)) rc.API.vDI_uuid (Opt.default "None" (scsiid_of rc)); - Some rf - | None -> None - end - end - | None -> - (match scsiid_of vdi_record with - | None -> None - | Some x -> - begin match find_by_scsiid x with - | Some (rf, rc) -> - info "VDI %s (SCSIid %s) mapped to %s (SCSIid %s) by user" vdi_record.API.vDI_uuid (Opt.default "None" (scsiid_of vdi_record)) rc.API.vDI_uuid (Opt.default "None" 
(scsiid_of rc)); - Some rf - | None -> None - end - ) in - match by_vdi_map with - | Some vdi -> - Found_disk vdi - | None -> - begin match ( - if exists vdi_record.API.vDI_SR state.table then begin - let sr = lookup vdi_record.API.vDI_SR state.table in - match find_by_sr_and_location sr vdi_record.API.vDI_location with - | Some x -> Some x - | None -> mapto - end else mapto - ) with - | Some vdi -> Found_disk vdi - | None -> begin - error "Found no VDI with location = %s: %s" vdi_record.API.vDI_location - (if config.force - then "ignoring error because '--force' is set" - else "treating as fatal and abandoning import"); - if config.force then Skip - else begin - if exists vdi_record.API.vDI_SR state.table - then - let sr = lookup vdi_record.API.vDI_SR state.table in - Found_no_disk (Api_errors.Server_error(Api_errors.vdi_location_missing, [ Ref.string_of sr; vdi_record.API.vDI_location ])) - else Found_no_disk (Api_errors.Server_error(Api_errors.vdi_content_id_missing, [ ])) - end - end - end - end - | Full_import _ -> Create vdi_record - end - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_iso vdi | Found_disk vdi -> state.table <- (x.cls, x.id, Ref.string_of vdi) :: state.table - | Found_no_iso -> () (* VDI will be ejected. *) - | Found_no_disk e -> begin - match config.import_type with - | Metadata_import {live=true} -> - (* We expect the disk to be missing during a live migration dry run. *) - debug "Ignoring missing disk %s - this will be mirrored during a real live migration." x.id; - (* Create a dummy disk in the state table so the VBD import has a disk to look up. 
*) - let dummy_vdi = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vdi) :: state.table - | _ -> raise e - end - | Skip -> () - | Create _ -> - let dummy_vdi = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vdi) :: state.table - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_iso _ | Found_no_iso | Skip -> - handle_dry_run __context config rpc session_id state x precheck_result - | Found_disk vdi -> - handle_dry_run __context config rpc session_id state x precheck_result; - let other_config_record = (API.Legacy.From.vDI_t "" x.snapshot).API.vDI_other_config in - List.iter (fun key -> - Db.VDI.remove_from_other_config ~__context ~self:vdi ~key; - try Db.VDI.add_to_other_config ~__context ~self:vdi ~key ~value:(List.assoc key other_config_record) with Not_found -> () - ) Xapi_globs.vdi_other_config_sync_keys - | Found_no_disk e -> raise e - | Create vdi_record -> begin - (* Make a new VDI for streaming data into; adding task-id to sm-config on VDI.create so SM backend can see this is an import *) - let sr = lookup vdi_record.API.vDI_SR state.table in - let task_id = Ref.string_of (Context.get_task_id __context) in - let sm_config = List.filter (fun (k,_)->k<>Xapi_globs.import_task) vdi_record.API.vDI_sm_config in - let sm_config = (Xapi_globs.import_task, task_id)::sm_config in - let vdi = Client.VDI.create_from_record rpc session_id { vdi_record with API.vDI_SR = sr; API.vDI_sm_config = sm_config } in - state.cleanup <- (fun __context rpc session_id -> Client.VDI.destroy rpc session_id vdi) :: state.cleanup; - state.table <- (x.cls, x.id, Ref.string_of vdi) :: state.table - end + type precheck_t = + | Found_iso of API.ref_VDI + | Found_no_iso + | Found_disk of API.ref_VDI + | Found_no_disk of exn + | Skip + | Create of API.vDI_t + + let precheck __context config rpc session_id state x = + let vdi_record = API.Legacy.From.vDI_t "" x.snapshot in + + let original_sr = 
API.Legacy.From.sR_t "" (find_in_export (Ref.string_of vdi_record.API.vDI_SR) state.export) in + if original_sr.API.sR_content_type = "iso" then begin + (* Best effort: locate a VDI in any shared ISO SR with a matching VDI.location *) + let iso_srs = List.filter (fun self -> Client.SR.get_content_type rpc session_id self = "iso" + && Client.SR.get_type rpc session_id self <> "udev") + (Client.SR.get_all rpc session_id) in + match List.filter (fun (_, vdir) -> + vdir.API.vDI_location = vdi_record.API.vDI_location && (List.mem vdir.API.vDI_SR iso_srs)) + (Client.VDI.get_all_records rpc session_id) |> choose_one with + | Some (vdi, _) -> + Found_iso vdi + | None -> + warn "Found no ISO VDI with location = %s; attempting to eject" vdi_record.API.vDI_location; + Found_no_iso + end else begin + match config.import_type with + | Metadata_import { vdi_map } -> begin + let mapto = + if List.mem_assoc Constants.storage_migrate_vdi_map_key vdi_record.API.vDI_other_config + then Some (Ref.of_string (List.assoc Constants.storage_migrate_vdi_map_key vdi_record.API.vDI_other_config)) + else None in + let vdi_records = Client.VDI.get_all_records rpc session_id in + let find_by_sr_and_location sr location = + vdi_records + |> List.filter (fun (_, vdir) -> vdir.API.vDI_location = location && vdir.API.vDI_SR = sr) + |> choose_one + |> Opt.map fst in + let find_by_uuid uuid = + vdi_records + |> List.filter (fun (_, vdir) -> vdir.API.vDI_uuid = uuid) + |> choose_one + |> Opt.map fst in + let _scsiid = "SCSIid" in + let scsiid_of vdi_record = + if List.mem_assoc _scsiid vdi_record.API.vDI_sm_config + then Some (List.assoc _scsiid vdi_record.API.vDI_sm_config) + else None in + let find_by_scsiid x = + vdi_records + |> Listext.List.filter_map (fun (rf, vdir) -> + if scsiid_of vdir = Some x then Some (rf, vdir) else None) + |> choose_one in + let by_vdi_map = + (* Look up the mapping by both uuid and SCSIid *) + match ( + if List.mem_assoc vdi_record.API.vDI_uuid vdi_map + then Some 
(List.assoc vdi_record.API.vDI_uuid vdi_map) + else match scsiid_of vdi_record with + | None -> None + | Some x -> + if List.mem_assoc x vdi_map + then Some (List.assoc x vdi_map) + else None + ) with + | Some destination -> + begin match find_by_uuid destination with + | Some x -> Some x + | None -> + begin match find_by_scsiid destination with + | Some (rf, rc) -> + info "VDI %s (SCSIid %s) mapped to %s (SCSIid %s) by user" vdi_record.API.vDI_uuid (Opt.default "None" (scsiid_of vdi_record)) rc.API.vDI_uuid (Opt.default "None" (scsiid_of rc)); + Some rf + | None -> None + end + end + | None -> + (match scsiid_of vdi_record with + | None -> None + | Some x -> + begin match find_by_scsiid x with + | Some (rf, rc) -> + info "VDI %s (SCSIid %s) mapped to %s (SCSIid %s) by user" vdi_record.API.vDI_uuid (Opt.default "None" (scsiid_of vdi_record)) rc.API.vDI_uuid (Opt.default "None" (scsiid_of rc)); + Some rf + | None -> None + end + ) in + match by_vdi_map with + | Some vdi -> + Found_disk vdi + | None -> + begin match ( + if exists vdi_record.API.vDI_SR state.table then begin + let sr = lookup vdi_record.API.vDI_SR state.table in + match find_by_sr_and_location sr vdi_record.API.vDI_location with + | Some x -> Some x + | None -> mapto + end else mapto + ) with + | Some vdi -> Found_disk vdi + | None -> begin + error "Found no VDI with location = %s: %s" vdi_record.API.vDI_location + (if config.force + then "ignoring error because '--force' is set" + else "treating as fatal and abandoning import"); + if config.force then Skip + else begin + if exists vdi_record.API.vDI_SR state.table + then + let sr = lookup vdi_record.API.vDI_SR state.table in + Found_no_disk (Api_errors.Server_error(Api_errors.vdi_location_missing, [ Ref.string_of sr; vdi_record.API.vDI_location ])) + else Found_no_disk (Api_errors.Server_error(Api_errors.vdi_content_id_missing, [ ])) + end + end + end + end + | Full_import _ -> Create vdi_record + end + + let handle_dry_run __context config rpc 
session_id state x precheck_result = + match precheck_result with + | Found_iso vdi | Found_disk vdi -> state.table <- (x.cls, x.id, Ref.string_of vdi) :: state.table + | Found_no_iso -> () (* VDI will be ejected. *) + | Found_no_disk e -> begin + match config.import_type with + | Metadata_import {live=true} -> + (* We expect the disk to be missing during a live migration dry run. *) + debug "Ignoring missing disk %s - this will be mirrored during a real live migration." x.id; + (* Create a dummy disk in the state table so the VBD import has a disk to look up. *) + let dummy_vdi = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vdi) :: state.table + | _ -> raise e + end + | Skip -> () + | Create _ -> + let dummy_vdi = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vdi) :: state.table + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_iso _ | Found_no_iso | Skip -> + handle_dry_run __context config rpc session_id state x precheck_result + | Found_disk vdi -> + handle_dry_run __context config rpc session_id state x precheck_result; + let other_config_record = (API.Legacy.From.vDI_t "" x.snapshot).API.vDI_other_config in + List.iter (fun key -> + Db.VDI.remove_from_other_config ~__context ~self:vdi ~key; + try Db.VDI.add_to_other_config ~__context ~self:vdi ~key ~value:(List.assoc key other_config_record) with Not_found -> () + ) Xapi_globs.vdi_other_config_sync_keys + | Found_no_disk e -> raise e + | Create vdi_record -> begin + (* Make a new VDI for streaming data into; adding task-id to sm-config on VDI.create so SM backend can see this is an import *) + let sr = lookup vdi_record.API.vDI_SR state.table in + let task_id = Ref.string_of (Context.get_task_id __context) in + let sm_config = List.filter (fun (k,_)->k<>Xapi_globs.import_task) vdi_record.API.vDI_sm_config in + let sm_config = (Xapi_globs.import_task, task_id)::sm_config in + let vdi = 
Client.VDI.create_from_record rpc session_id { vdi_record with API.vDI_SR = sr; API.vDI_sm_config = sm_config } in + state.cleanup <- (fun __context rpc session_id -> Client.VDI.destroy rpc session_id vdi) :: state.cleanup; + state.table <- (x.cls, x.id, Ref.string_of vdi) :: state.table + end end (** Lookup the network by name_label only. Previously we used UUID which worked if importing @@ -779,117 +779,117 @@ end it seems less confusing to match on names: whether networks are the same or different is then under the control of the user. *) module Net : HandlerTools = struct - type precheck_t = - | Found_net of API.ref_network - | Create of API.network_t - - let precheck __context config rpc session_id state x = - let net_record = API.Legacy.From.network_t "" x.snapshot in - let possibilities = Client.Network.get_by_name_label rpc session_id net_record.API.network_name_label in - match possibilities with - | [] -> - begin - (* Lookup by bridge name as fallback *) - let expr = "field \"bridge\"=\"" ^ net_record.API.network_bridge ^ "\"" in - let nets = Client.Network.get_all_records_where rpc session_id expr in - match nets with - | [] -> Create net_record - | (net, _) :: _ -> Found_net net - end - | (n::ns) -> Found_net n - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_net net -> state.table <- (x.cls, x.id, Ref.string_of net) :: state.table - | Create _ -> - let dummy_net = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_net) :: state.table - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_net _ -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create net_record -> - let net = - log_reraise ("failed to create Network with name_label " ^ net_record.API.network_name_label) - (fun value -> Client.Network.create_from_record rpc session_id value) net_record - in - (* Only add task flag to 
networks which get created in this import *) - TaskHelper.operate_on_db_task ~__context - (fun t -> - (try Db.Network.remove_from_other_config ~__context ~self:net ~key:Xapi_globs.import_task - with _ -> ()); - Db.Network.add_to_other_config ~__context ~self:net ~key:Xapi_globs.import_task - ~value:(Ref.string_of t)); - state.cleanup <- (fun __context rpc session_id -> - Client.Network.destroy rpc session_id net) :: state.cleanup; - state.table <- (x.cls, x.id, Ref.string_of net) :: state.table + type precheck_t = + | Found_net of API.ref_network + | Create of API.network_t + + let precheck __context config rpc session_id state x = + let net_record = API.Legacy.From.network_t "" x.snapshot in + let possibilities = Client.Network.get_by_name_label rpc session_id net_record.API.network_name_label in + match possibilities with + | [] -> + begin + (* Lookup by bridge name as fallback *) + let expr = "field \"bridge\"=\"" ^ net_record.API.network_bridge ^ "\"" in + let nets = Client.Network.get_all_records_where rpc session_id expr in + match nets with + | [] -> Create net_record + | (net, _) :: _ -> Found_net net + end + | (n::ns) -> Found_net n + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_net net -> state.table <- (x.cls, x.id, Ref.string_of net) :: state.table + | Create _ -> + let dummy_net = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_net) :: state.table + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_net _ -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create net_record -> + let net = + log_reraise ("failed to create Network with name_label " ^ net_record.API.network_name_label) + (fun value -> Client.Network.create_from_record rpc session_id value) net_record + in + (* Only add task flag to networks which get created in this import *) + TaskHelper.operate_on_db_task 
~__context + (fun t -> + (try Db.Network.remove_from_other_config ~__context ~self:net ~key:Xapi_globs.import_task + with _ -> ()); + Db.Network.add_to_other_config ~__context ~self:net ~key:Xapi_globs.import_task + ~value:(Ref.string_of t)); + state.cleanup <- (fun __context rpc session_id -> + Client.Network.destroy rpc session_id net) :: state.cleanup; + state.table <- (x.cls, x.id, Ref.string_of net) :: state.table end (** Lookup the GPU group by GPU_types only. Currently, the GPU_types field contains the prototype * of just a single pGPU. We would probably have to extend this function once we support GPU groups * for multiple compatible GPU types. *) module GPUGroup : HandlerTools = struct - type precheck_t = - | Found_GPU_group of API.ref_GPU_group - | Found_no_GPU_group of exn - | Create of API.gPU_group_t - - let precheck __context config rpc session_id state x = - let gpu_group_record = API.Legacy.From.gPU_group_t "" x.snapshot in - let groups = Client.GPU_group.get_all_records rpc session_id in - try - let group, _ = - List.find (fun (_, groupr) -> - groupr.API.gPU_group_GPU_types = gpu_group_record.API.gPU_group_GPU_types) groups - in - Found_GPU_group group - with Not_found -> - match config.import_type with - | Metadata_import _ -> - (* In vm_metadata_only mode the GPU group must exist *) - let msg = - Printf.sprintf "Unable to find GPU group with matching GPU_types = '[%s]'" - (String.concat "," gpu_group_record.API.gPU_group_GPU_types) - in - error "%s" msg; - Found_no_GPU_group (Failure msg) - | Full_import _ -> - (* In normal mode we attempt to create any missing GPU groups *) - Create gpu_group_record - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_GPU_group group -> - state.table <- (x.cls, x.id, Ref.string_of group) :: state.table - | Found_no_GPU_group e -> raise e - | Create _ -> - let dummy_gpu_group = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of 
dummy_gpu_group) :: state.table - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_GPU_group _ | Found_no_GPU_group _ -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create gpu_group_record -> - let group = log_reraise ("Unable to create GPU group with GPU_types = '[%s]'" ^ - (String.concat "," gpu_group_record.API.gPU_group_GPU_types)) (fun value -> - let group = Client.GPU_group.create ~rpc ~session_id - ~name_label:value.API.gPU_group_name_label - ~name_description:value.API.gPU_group_name_description - ~other_config:value.API.gPU_group_other_config in - Db.GPU_group.set_GPU_types ~__context ~self:group ~value:value.API.gPU_group_GPU_types; - group - ) gpu_group_record - in - (* Only add task flag to GPU groups which get created in this import *) - TaskHelper.operate_on_db_task ~__context (fun t -> - (try Db.GPU_group.remove_from_other_config ~__context ~self:group ~key:Xapi_globs.import_task - with _ -> ()); - Db.GPU_group.add_to_other_config ~__context ~self:group ~key:Xapi_globs.import_task - ~value:(Ref.string_of t)); - state.cleanup <- (fun __context rpc session_id -> Client.GPU_group.destroy rpc session_id group) :: state.cleanup; - state.table <- (x.cls, x.id, Ref.string_of group) :: state.table + type precheck_t = + | Found_GPU_group of API.ref_GPU_group + | Found_no_GPU_group of exn + | Create of API.gPU_group_t + + let precheck __context config rpc session_id state x = + let gpu_group_record = API.Legacy.From.gPU_group_t "" x.snapshot in + let groups = Client.GPU_group.get_all_records rpc session_id in + try + let group, _ = + List.find (fun (_, groupr) -> + groupr.API.gPU_group_GPU_types = gpu_group_record.API.gPU_group_GPU_types) groups + in + Found_GPU_group group + with Not_found -> + match config.import_type with + | Metadata_import _ -> + (* In vm_metadata_only mode the GPU group must exist *) + let msg = + Printf.sprintf "Unable to find GPU group with 
matching GPU_types = '[%s]'" + (String.concat "," gpu_group_record.API.gPU_group_GPU_types) + in + error "%s" msg; + Found_no_GPU_group (Failure msg) + | Full_import _ -> + (* In normal mode we attempt to create any missing GPU groups *) + Create gpu_group_record + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_GPU_group group -> + state.table <- (x.cls, x.id, Ref.string_of group) :: state.table + | Found_no_GPU_group e -> raise e + | Create _ -> + let dummy_gpu_group = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_gpu_group) :: state.table + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_GPU_group _ | Found_no_GPU_group _ -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create gpu_group_record -> + let group = log_reraise ("Unable to create GPU group with GPU_types = '[%s]'" ^ + (String.concat "," gpu_group_record.API.gPU_group_GPU_types)) (fun value -> + let group = Client.GPU_group.create ~rpc ~session_id + ~name_label:value.API.gPU_group_name_label + ~name_description:value.API.gPU_group_name_description + ~other_config:value.API.gPU_group_other_config in + Db.GPU_group.set_GPU_types ~__context ~self:group ~value:value.API.gPU_group_GPU_types; + group + ) gpu_group_record + in + (* Only add task flag to GPU groups which get created in this import *) + TaskHelper.operate_on_db_task ~__context (fun t -> + (try Db.GPU_group.remove_from_other_config ~__context ~self:group ~key:Xapi_globs.import_task + with _ -> ()); + Db.GPU_group.add_to_other_config ~__context ~self:group ~key:Xapi_globs.import_task + ~value:(Ref.string_of t)); + state.cleanup <- (fun __context rpc session_id -> Client.GPU_group.destroy rpc session_id group) :: state.cleanup; + state.table <- (x.cls, x.id, Ref.string_of group) :: state.table end (** Create a new VBD record, add the reference to the table. 
@@ -897,330 +897,330 @@ end If the VDI doesn't exist and the VBD is a CDROM then eject it. Note that any currently attached disk MUST be present, unless it's an HVM guest and a CDROM in which case we eject it anyway. - *) +*) module VBD : HandlerTools = struct - type precheck_t = - | Found_VBD of API.ref_VBD - | Fail of exn - | Skip - | Create of API.vBD_t - - let precheck __context config rpc session_id state x = - let vbd_record = API.Legacy.From.vBD_t "" x.snapshot in - - let get_vbd () = Client.VBD.get_by_uuid rpc session_id vbd_record.API.vBD_uuid in - let vbd_exists () = try ignore (get_vbd ()); true with _ -> false in - - if config.full_restore && vbd_exists () then begin - let vbd = get_vbd () in - Found_VBD vbd - end else begin - let vm = log_reraise - ("Failed to find VBD's VM: " ^ (Ref.string_of vbd_record.API.vBD_VM)) - (lookup vbd_record.API.vBD_VM) state.table in - (* If the VBD is supposed to be attached to a PV guest (which doesn't support - currently_attached empty drives) then throw a fatal error. *) - let original_vm = API.Legacy.From.vM_t "" (find_in_export (Ref.string_of vbd_record.API.vBD_VM) state.export) in - - let has_booted_hvm = - let lbr = try Helpers.parse_boot_record original_vm.API.vM_last_booted_record with _ -> original_vm in - lbr.API.vM_HVM_boot_policy <> "" in - - (* In the case of dry_run live migration, don't check for - missing disks as CDs will be ejected before the real migration. *) - let dry_run, live = match config.import_type with - | Metadata_import {dry_run = dry_run; live = live} -> dry_run, live - | _ -> false, false - in - if vbd_record.API.vBD_currently_attached && not(exists vbd_record.API.vBD_VDI state.table) then begin - (* It's only ok if it's a CDROM attached to an HVM guest, or it's part of SXM and we know the sender would eject it. 
*) - let will_eject = dry_run && live && original_vm.API.vM_power_state <> `Suspended in - if not (vbd_record.API.vBD_type = `CD && (has_booted_hvm || will_eject)) - then raise (IFailure Attached_disks_not_found) - end; - - let vbd_record = { vbd_record with API.vBD_VM = vm } in - match vbd_record.API.vBD_type, exists vbd_record.API.vBD_VDI state.table with - | `CD, false | `Floppy, false -> - if has_booted_hvm || original_vm.API.vM_power_state <> `Suspended then - Create { vbd_record with API.vBD_VDI = Ref.null; API.vBD_empty = true } (* eject *) - else - Create vbd_record - | `Disk, false -> begin - (* omit: cannot have empty disks *) - warn "Cannot import VM's disk: was it an .iso attached as a disk rather than CD?"; - Skip - end - | _, true -> Create { vbd_record with API.vBD_VDI = lookup vbd_record.API.vBD_VDI state.table } - end - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VBD vbd -> begin - state.table <- (x.cls, x.id, Ref.string_of vbd) :: state.table; - state.table <- (x.cls, Ref.string_of vbd, Ref.string_of vbd) :: state.table - end - | Fail e -> raise e - | Skip -> () - | Create _ -> begin - let dummy_vbd = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vbd) :: state.table - end - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VBD _ | Fail _ | Skip -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create vbd_record -> begin - let vbd = log_reraise - "failed to create VBD" - (fun value -> - let vbd = Client.VBD.create_from_record rpc session_id value in - if config.full_restore then Db.VBD.set_uuid ~__context ~self:vbd ~value:value.API.vBD_uuid; - vbd) - vbd_record in - state.cleanup <- (fun __context rpc session_id -> Client.VBD.destroy rpc session_id vbd) :: state.cleanup; - (* Now that we can import/export suspended VMs we need to preserve the - currently_attached flag 
*) - Db.VBD.set_currently_attached ~__context ~self:vbd ~value:vbd_record.API.vBD_currently_attached; - state.table <- (x.cls, x.id, Ref.string_of vbd) :: state.table - end + type precheck_t = + | Found_VBD of API.ref_VBD + | Fail of exn + | Skip + | Create of API.vBD_t + + let precheck __context config rpc session_id state x = + let vbd_record = API.Legacy.From.vBD_t "" x.snapshot in + + let get_vbd () = Client.VBD.get_by_uuid rpc session_id vbd_record.API.vBD_uuid in + let vbd_exists () = try ignore (get_vbd ()); true with _ -> false in + + if config.full_restore && vbd_exists () then begin + let vbd = get_vbd () in + Found_VBD vbd + end else begin + let vm = log_reraise + ("Failed to find VBD's VM: " ^ (Ref.string_of vbd_record.API.vBD_VM)) + (lookup vbd_record.API.vBD_VM) state.table in + (* If the VBD is supposed to be attached to a PV guest (which doesn't support + currently_attached empty drives) then throw a fatal error. *) + let original_vm = API.Legacy.From.vM_t "" (find_in_export (Ref.string_of vbd_record.API.vBD_VM) state.export) in + + let has_booted_hvm = + let lbr = try Helpers.parse_boot_record original_vm.API.vM_last_booted_record with _ -> original_vm in + lbr.API.vM_HVM_boot_policy <> "" in + + (* In the case of dry_run live migration, don't check for + missing disks as CDs will be ejected before the real migration. *) + let dry_run, live = match config.import_type with + | Metadata_import {dry_run = dry_run; live = live} -> dry_run, live + | _ -> false, false + in + if vbd_record.API.vBD_currently_attached && not(exists vbd_record.API.vBD_VDI state.table) then begin + (* It's only ok if it's a CDROM attached to an HVM guest, or it's part of SXM and we know the sender would eject it. 
*) + let will_eject = dry_run && live && original_vm.API.vM_power_state <> `Suspended in + if not (vbd_record.API.vBD_type = `CD && (has_booted_hvm || will_eject)) + then raise (IFailure Attached_disks_not_found) + end; + + let vbd_record = { vbd_record with API.vBD_VM = vm } in + match vbd_record.API.vBD_type, exists vbd_record.API.vBD_VDI state.table with + | `CD, false | `Floppy, false -> + if has_booted_hvm || original_vm.API.vM_power_state <> `Suspended then + Create { vbd_record with API.vBD_VDI = Ref.null; API.vBD_empty = true } (* eject *) + else + Create vbd_record + | `Disk, false -> begin + (* omit: cannot have empty disks *) + warn "Cannot import VM's disk: was it an .iso attached as a disk rather than CD?"; + Skip + end + | _, true -> Create { vbd_record with API.vBD_VDI = lookup vbd_record.API.vBD_VDI state.table } + end + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VBD vbd -> begin + state.table <- (x.cls, x.id, Ref.string_of vbd) :: state.table; + state.table <- (x.cls, Ref.string_of vbd, Ref.string_of vbd) :: state.table + end + | Fail e -> raise e + | Skip -> () + | Create _ -> begin + let dummy_vbd = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vbd) :: state.table + end + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VBD _ | Fail _ | Skip -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create vbd_record -> begin + let vbd = log_reraise + "failed to create VBD" + (fun value -> + let vbd = Client.VBD.create_from_record rpc session_id value in + if config.full_restore then Db.VBD.set_uuid ~__context ~self:vbd ~value:value.API.vBD_uuid; + vbd) + vbd_record in + state.cleanup <- (fun __context rpc session_id -> Client.VBD.destroy rpc session_id vbd) :: state.cleanup; + (* Now that we can import/export suspended VMs we need to preserve the + currently_attached flag 
*) + Db.VBD.set_currently_attached ~__context ~self:vbd ~value:vbd_record.API.vBD_currently_attached; + state.table <- (x.cls, x.id, Ref.string_of vbd) :: state.table + end end (** Create a new VIF record, add the reference to the table. The VM and Network must have already been handled first. *) module VIF : HandlerTools = struct - type precheck_t = - | Found_VIF of API.ref_VIF - | Create of API.vIF_t - - let precheck __context config rpc session_id state x = - let vif_record = API.Legacy.From.vIF_t "" x.snapshot in - - let get_vif () = Client.VIF.get_by_uuid rpc session_id vif_record.API.vIF_uuid in - let vif_exists () = try ignore (get_vif ()); true with _ -> false in - - if config.full_restore && vif_exists () then begin - (* If there's already a VIF with the same UUID and we're preserving UUIDs, use that one. *) - let vif = get_vif () in - Found_VIF vif - end else - (* If not restoring a full backup then blank the MAC so it is regenerated *) - let vif_record = { vif_record with API.vIF_MAC = - if config.full_restore then vif_record.API.vIF_MAC else "" } in - (* Determine the VM to which we're going to attach this VIF. *) - let vm = log_reraise - ("Failed to find VIF's VM: " ^ (Ref.string_of vif_record.API.vIF_VM)) - (lookup vif_record.API.vIF_VM) state.table in - (* Determine the network to which we're going to attach this VIF. *) - let net = - (* If we find the cross-pool migration key, attach the VIF to that network... *) - if List.mem_assoc Constants.storage_migrate_vif_map_key vif_record.API.vIF_other_config - then Ref.of_string (List.assoc Constants.storage_migrate_vif_map_key vif_record.API.vIF_other_config) - else - (* ...otherwise fall back to looking up the network from the state table. 
*) - log_reraise - ("Failed to find VIF's Network: " ^ (Ref.string_of vif_record.API.vIF_network)) - (lookup vif_record.API.vIF_network) state.table in - (* Make sure we remove the cross-pool migration VIF mapping key from the other_config - * before creating a VIF - otherwise we'll risk sending this key on to another pool - * during a future cross-pool migration and it won't make sense. *) - let other_config = - List.filter - (fun (k, _) -> k <> Constants.storage_migrate_vif_map_key) - vif_record.API.vIF_other_config - in - (* Construct the VIF record we're going to try to create locally. *) - let vif_record = if (Pool_features.is_enabled ~__context Features.VIF_locking) - then vif_record - else begin - if vif_record.API.vIF_locking_mode = `locked - then { - vif_record with API.vIF_locking_mode = `network_default; - API.vIF_ipv4_allowed = []; - API.vIF_ipv6_allowed = []; - } - else { - vif_record with API.vIF_ipv4_allowed = []; - API.vIF_ipv6_allowed = []; - } - end in - let vif_record = { vif_record with - API.vIF_VM = vm; - API.vIF_network = net; - API.vIF_other_config = other_config } in - Create vif_record - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VIF vif -> begin - state.table <- (x.cls, x.id, Ref.string_of vif) :: state.table; - state.table <- (x.cls, Ref.string_of vif, Ref.string_of vif) :: state.table - end - | Create _ -> begin - let dummy_vif = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vif) :: state.table - end - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VIF vif -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create vif_record -> begin - let vif = log_reraise - "failed to create VIF" - (fun value -> - let vif = Client.VIF.create_from_record rpc session_id value in - if config.full_restore then Db.VIF.set_uuid ~__context ~self:vif ~value:value.API.vIF_uuid; 
- vif) - vif_record in - state.cleanup <- (fun __context rpc session_id -> Client.VIF.destroy rpc session_id vif) :: state.cleanup; - (* Now that we can import/export suspended VMs we need to preserve the - currently_attached flag *) - if Db.VM.get_power_state ~__context ~self:vif_record.API.vIF_VM <> `Halted - then Db.VIF.set_currently_attached ~__context ~self:vif ~value:vif_record.API.vIF_currently_attached; - - state.table <- (x.cls, x.id, Ref.string_of vif) :: state.table - end + type precheck_t = + | Found_VIF of API.ref_VIF + | Create of API.vIF_t + + let precheck __context config rpc session_id state x = + let vif_record = API.Legacy.From.vIF_t "" x.snapshot in + + let get_vif () = Client.VIF.get_by_uuid rpc session_id vif_record.API.vIF_uuid in + let vif_exists () = try ignore (get_vif ()); true with _ -> false in + + if config.full_restore && vif_exists () then begin + (* If there's already a VIF with the same UUID and we're preserving UUIDs, use that one. *) + let vif = get_vif () in + Found_VIF vif + end else + (* If not restoring a full backup then blank the MAC so it is regenerated *) + let vif_record = { vif_record with API.vIF_MAC = + if config.full_restore then vif_record.API.vIF_MAC else "" } in + (* Determine the VM to which we're going to attach this VIF. *) + let vm = log_reraise + ("Failed to find VIF's VM: " ^ (Ref.string_of vif_record.API.vIF_VM)) + (lookup vif_record.API.vIF_VM) state.table in + (* Determine the network to which we're going to attach this VIF. *) + let net = + (* If we find the cross-pool migration key, attach the VIF to that network... *) + if List.mem_assoc Constants.storage_migrate_vif_map_key vif_record.API.vIF_other_config + then Ref.of_string (List.assoc Constants.storage_migrate_vif_map_key vif_record.API.vIF_other_config) + else + (* ...otherwise fall back to looking up the network from the state table. 
*) + log_reraise + ("Failed to find VIF's Network: " ^ (Ref.string_of vif_record.API.vIF_network)) + (lookup vif_record.API.vIF_network) state.table in + (* Make sure we remove the cross-pool migration VIF mapping key from the other_config + * before creating a VIF - otherwise we'll risk sending this key on to another pool + * during a future cross-pool migration and it won't make sense. *) + let other_config = + List.filter + (fun (k, _) -> k <> Constants.storage_migrate_vif_map_key) + vif_record.API.vIF_other_config + in + (* Construct the VIF record we're going to try to create locally. *) + let vif_record = if (Pool_features.is_enabled ~__context Features.VIF_locking) + then vif_record + else begin + if vif_record.API.vIF_locking_mode = `locked + then { + vif_record with API.vIF_locking_mode = `network_default; + API.vIF_ipv4_allowed = []; + API.vIF_ipv6_allowed = []; + } + else { + vif_record with API.vIF_ipv4_allowed = []; + API.vIF_ipv6_allowed = []; + } + end in + let vif_record = { vif_record with + API.vIF_VM = vm; + API.vIF_network = net; + API.vIF_other_config = other_config } in + Create vif_record + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VIF vif -> begin + state.table <- (x.cls, x.id, Ref.string_of vif) :: state.table; + state.table <- (x.cls, Ref.string_of vif, Ref.string_of vif) :: state.table + end + | Create _ -> begin + let dummy_vif = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vif) :: state.table + end + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VIF vif -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create vif_record -> begin + let vif = log_reraise + "failed to create VIF" + (fun value -> + let vif = Client.VIF.create_from_record rpc session_id value in + if config.full_restore then Db.VIF.set_uuid ~__context ~self:vif ~value:value.API.vIF_uuid; 
+ vif) + vif_record in + state.cleanup <- (fun __context rpc session_id -> Client.VIF.destroy rpc session_id vif) :: state.cleanup; + (* Now that we can import/export suspended VMs we need to preserve the + currently_attached flag *) + if Db.VM.get_power_state ~__context ~self:vif_record.API.vIF_VM <> `Halted + then Db.VIF.set_currently_attached ~__context ~self:vif ~value:vif_record.API.vIF_currently_attached; + + state.table <- (x.cls, x.id, Ref.string_of vif) :: state.table + end end module VGPUType : HandlerTools = struct - type precheck_t = - | Found_VGPU_type of API.ref_VGPU_type - | Create of API.vGPU_type_t - - let precheck __context config rpc session_id state x = - let vgpu_type_record = API.Legacy.From.vGPU_type_t "" x.snapshot in - - (* First look up VGPU types using the identifier string. *) - let compatible_types = - match Client.VGPU_type.get_all_records_where rpc session_id - (Printf.sprintf - "field \"identifier\"=\"%s\"" - vgpu_type_record.API.vGPU_type_identifier) - with - | [] -> begin - (* If that fails, look up using the vendor name and model name. 
*) - Client.VGPU_type.get_all_records_where rpc session_id - (Printf.sprintf - "field \"vendor_name\"=\"%s\" and field \"model_name\"=\"%s\"" - vgpu_type_record.API.vGPU_type_vendor_name - vgpu_type_record.API.vGPU_type_model_name) - end - | types -> types - in - - match choose_one compatible_types with - | Some (vgpu_type, _) -> Found_VGPU_type vgpu_type - | None -> - warn - "Unable to find VGPU_type (%s,%s,%s) - creating a new record" - vgpu_type_record.API.vGPU_type_identifier - vgpu_type_record.API.vGPU_type_vendor_name - vgpu_type_record.API.vGPU_type_model_name; - Create vgpu_type_record - - let handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VGPU_type vgpu_type -> begin - state.table <- (x.cls, x.id, Ref.string_of vgpu_type) :: state.table; - state.table <- (x.cls, Ref.string_of vgpu_type, Ref.string_of vgpu_type) :: state.table - end - | Create _ -> - let dummy_vgpu_type = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vgpu_type) :: state.table - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VGPU_type vgpu_type -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create vgpu_type_record -> begin - let vgpu_type = - log_reraise - "failed to create VGPU_type" - (fun value -> - (* size and internal_config are left as defaults for now. They'll - * be updated if and when xapi comes across the real config file. 
*) - Xapi_vgpu_type.create ~__context - ~vendor_name:value.API.vGPU_type_vendor_name - ~model_name:value.API.vGPU_type_model_name - ~framebuffer_size:value.API.vGPU_type_framebuffer_size - ~max_heads:value.API.vGPU_type_max_heads - ~max_resolution_x:value.API.vGPU_type_max_resolution_x - ~max_resolution_y:value.API.vGPU_type_max_resolution_y - ~size:0L - ~internal_config:[] - ~implementation:value.API.vGPU_type_implementation - ~identifier:value.API.vGPU_type_identifier - ~experimental:value.API.vGPU_type_experimental) - vgpu_type_record - in - state.cleanup <- (fun __context rpc session_id -> Db.VGPU_type.destroy __context vgpu_type) :: state.cleanup; - state.table <- (x.cls, x.id, Ref.string_of vgpu_type) :: state.table - end + type precheck_t = + | Found_VGPU_type of API.ref_VGPU_type + | Create of API.vGPU_type_t + + let precheck __context config rpc session_id state x = + let vgpu_type_record = API.Legacy.From.vGPU_type_t "" x.snapshot in + + (* First look up VGPU types using the identifier string. *) + let compatible_types = + match Client.VGPU_type.get_all_records_where rpc session_id + (Printf.sprintf + "field \"identifier\"=\"%s\"" + vgpu_type_record.API.vGPU_type_identifier) + with + | [] -> begin + (* If that fails, look up using the vendor name and model name. 
*) + Client.VGPU_type.get_all_records_where rpc session_id + (Printf.sprintf + "field \"vendor_name\"=\"%s\" and field \"model_name\"=\"%s\"" + vgpu_type_record.API.vGPU_type_vendor_name + vgpu_type_record.API.vGPU_type_model_name) + end + | types -> types + in + + match choose_one compatible_types with + | Some (vgpu_type, _) -> Found_VGPU_type vgpu_type + | None -> + warn + "Unable to find VGPU_type (%s,%s,%s) - creating a new record" + vgpu_type_record.API.vGPU_type_identifier + vgpu_type_record.API.vGPU_type_vendor_name + vgpu_type_record.API.vGPU_type_model_name; + Create vgpu_type_record + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VGPU_type vgpu_type -> begin + state.table <- (x.cls, x.id, Ref.string_of vgpu_type) :: state.table; + state.table <- (x.cls, Ref.string_of vgpu_type, Ref.string_of vgpu_type) :: state.table + end + | Create _ -> + let dummy_vgpu_type = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vgpu_type) :: state.table + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VGPU_type vgpu_type -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create vgpu_type_record -> begin + let vgpu_type = + log_reraise + "failed to create VGPU_type" + (fun value -> + (* size and internal_config are left as defaults for now. They'll + * be updated if and when xapi comes across the real config file. 
*) + Xapi_vgpu_type.create ~__context + ~vendor_name:value.API.vGPU_type_vendor_name + ~model_name:value.API.vGPU_type_model_name + ~framebuffer_size:value.API.vGPU_type_framebuffer_size + ~max_heads:value.API.vGPU_type_max_heads + ~max_resolution_x:value.API.vGPU_type_max_resolution_x + ~max_resolution_y:value.API.vGPU_type_max_resolution_y + ~size:0L + ~internal_config:[] + ~implementation:value.API.vGPU_type_implementation + ~identifier:value.API.vGPU_type_identifier + ~experimental:value.API.vGPU_type_experimental) + vgpu_type_record + in + state.cleanup <- (fun __context rpc session_id -> Db.VGPU_type.destroy __context vgpu_type) :: state.cleanup; + state.table <- (x.cls, x.id, Ref.string_of vgpu_type) :: state.table + end end (** Create a new VGPU record, add the reference to the table. The VM and GPU_group must have already been handled first. *) module VGPU : HandlerTools = struct - type precheck_t = - | Found_VGPU of API.ref_VGPU - | Create of API.vGPU_t - - let precheck __context config rpc session_id state x = - let vgpu_record = API.Legacy.From.vGPU_t "" x.snapshot in - - let get_vgpu () = Client.VGPU.get_by_uuid rpc session_id vgpu_record.API.vGPU_uuid in - let vgpu_exists () = try ignore (get_vgpu ()); true with _ -> false in - - if config.full_restore && vgpu_exists () then begin - let vgpu = get_vgpu () in - Found_VGPU vgpu - end else - let vm = log_reraise - ("Failed to find VGPU's VM: " ^ (Ref.string_of vgpu_record.API.vGPU_VM)) - (lookup vgpu_record.API.vGPU_VM) state.table in - let group = log_reraise - ("Failed to find VGPU's GPU group: " ^ (Ref.string_of vgpu_record.API.vGPU_GPU_group)) - (lookup vgpu_record.API.vGPU_GPU_group) state.table in - let _type = log_reraise - ("Failed to find VGPU's type: " ^ (Ref.string_of vgpu_record.API.vGPU_type)) - (lookup vgpu_record.API.vGPU_type) state.table in - let vgpu_record = { vgpu_record with - API.vGPU_VM = vm; - API.vGPU_GPU_group = group; - API.vGPU_type = _type; - } in - Create vgpu_record - - let 
handle_dry_run __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VGPU vgpu -> begin - state.table <- (x.cls, x.id, Ref.string_of vgpu) :: state.table; - state.table <- (x.cls, Ref.string_of vgpu, Ref.string_of vgpu) :: state.table - end - | Create _ -> begin - let dummy_vgpu = Ref.make () in - state.table <- (x.cls, x.id, Ref.string_of dummy_vgpu) :: state.table - end - - let handle __context config rpc session_id state x precheck_result = - match precheck_result with - | Found_VGPU _ -> - handle_dry_run __context config rpc session_id state x precheck_result - | Create vgpu_record -> begin - let vgpu = log_reraise "failed to create VGPU" (fun value -> - let vgpu = Client.VGPU.create ~rpc ~session_id ~vM:value.API.vGPU_VM ~gPU_group:value.API.vGPU_GPU_group - ~device:value.API.vGPU_device ~other_config:value.API.vGPU_other_config ~_type:value.API.vGPU_type in - if config.full_restore then Db.VGPU.set_uuid ~__context ~self:vgpu ~value:value.API.vGPU_uuid; - vgpu) vgpu_record - in - state.cleanup <- (fun __context rpc session_id -> Client.VGPU.destroy rpc session_id vgpu) :: state.cleanup; - (* Now that we can import/export suspended VMs we need to preserve the currently_attached flag *) - if Db.VM.get_power_state ~__context ~self:vgpu_record.API.vGPU_VM <> `Halted then - Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:vgpu_record.API.vGPU_currently_attached; - state.table <- (x.cls, x.id, Ref.string_of vgpu) :: state.table - end + type precheck_t = + | Found_VGPU of API.ref_VGPU + | Create of API.vGPU_t + + let precheck __context config rpc session_id state x = + let vgpu_record = API.Legacy.From.vGPU_t "" x.snapshot in + + let get_vgpu () = Client.VGPU.get_by_uuid rpc session_id vgpu_record.API.vGPU_uuid in + let vgpu_exists () = try ignore (get_vgpu ()); true with _ -> false in + + if config.full_restore && vgpu_exists () then begin + let vgpu = get_vgpu () in + Found_VGPU vgpu + end else + let vm = 
log_reraise + ("Failed to find VGPU's VM: " ^ (Ref.string_of vgpu_record.API.vGPU_VM)) + (lookup vgpu_record.API.vGPU_VM) state.table in + let group = log_reraise + ("Failed to find VGPU's GPU group: " ^ (Ref.string_of vgpu_record.API.vGPU_GPU_group)) + (lookup vgpu_record.API.vGPU_GPU_group) state.table in + let _type = log_reraise + ("Failed to find VGPU's type: " ^ (Ref.string_of vgpu_record.API.vGPU_type)) + (lookup vgpu_record.API.vGPU_type) state.table in + let vgpu_record = { vgpu_record with + API.vGPU_VM = vm; + API.vGPU_GPU_group = group; + API.vGPU_type = _type; + } in + Create vgpu_record + + let handle_dry_run __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VGPU vgpu -> begin + state.table <- (x.cls, x.id, Ref.string_of vgpu) :: state.table; + state.table <- (x.cls, Ref.string_of vgpu, Ref.string_of vgpu) :: state.table + end + | Create _ -> begin + let dummy_vgpu = Ref.make () in + state.table <- (x.cls, x.id, Ref.string_of dummy_vgpu) :: state.table + end + + let handle __context config rpc session_id state x precheck_result = + match precheck_result with + | Found_VGPU _ -> + handle_dry_run __context config rpc session_id state x precheck_result + | Create vgpu_record -> begin + let vgpu = log_reraise "failed to create VGPU" (fun value -> + let vgpu = Client.VGPU.create ~rpc ~session_id ~vM:value.API.vGPU_VM ~gPU_group:value.API.vGPU_GPU_group + ~device:value.API.vGPU_device ~other_config:value.API.vGPU_other_config ~_type:value.API.vGPU_type in + if config.full_restore then Db.VGPU.set_uuid ~__context ~self:vgpu ~value:value.API.vGPU_uuid; + vgpu) vgpu_record + in + state.cleanup <- (fun __context rpc session_id -> Client.VGPU.destroy rpc session_id vgpu) :: state.cleanup; + (* Now that we can import/export suspended VMs we need to preserve the currently_attached flag *) + if Db.VM.get_power_state ~__context ~self:vgpu_record.API.vGPU_VM <> `Halted then + Db.VGPU.set_currently_attached ~__context 
~self:vgpu ~value:vgpu_record.API.vGPU_currently_attached; + state.table <- (x.cls, x.id, Ref.string_of vgpu) :: state.table + end end (** Create a handler for each object type. *) @@ -1238,444 +1238,444 @@ module VGPUHandler = MakeHandler(VGPU) (** Table mapping datamodel class names to handlers, in order we have to run them *) let handlers = - [ - Datamodel._host, HostHandler.handle; - Datamodel._sr, SRHandler.handle; - Datamodel._vdi, VDIHandler.handle; - Datamodel._vm_guest_metrics, GuestMetricsHandler.handle; - Datamodel._vm, VMHandler.handle; - Datamodel._network, NetworkHandler.handle; - Datamodel._gpu_group, GPUGroupHandler.handle; - Datamodel._vbd, VBDHandler.handle; - Datamodel._vif, VIFHandler.handle; - Datamodel._vgpu_type, VGPUTypeHandler.handle; - Datamodel._vgpu, VGPUHandler.handle; - ] + [ + Datamodel._host, HostHandler.handle; + Datamodel._sr, SRHandler.handle; + Datamodel._vdi, VDIHandler.handle; + Datamodel._vm_guest_metrics, GuestMetricsHandler.handle; + Datamodel._vm, VMHandler.handle; + Datamodel._network, NetworkHandler.handle; + Datamodel._gpu_group, GPUGroupHandler.handle; + Datamodel._vbd, VBDHandler.handle; + Datamodel._vif, VIFHandler.handle; + Datamodel._vgpu_type, VGPUTypeHandler.handle; + Datamodel._vgpu, VGPUHandler.handle; + ] let update_snapshot_and_parent_links ~__context state = - let aux (cls, id, ref) = - let ref = Ref.of_string ref in - - if cls = Datamodel._vm && Db.VM.get_is_a_snapshot ~__context ~self:ref then begin - let snapshot_of = Db.VM.get_snapshot_of ~__context ~self:ref in - if snapshot_of <> Ref.null - then begin - debug "lookup for snapshot_of = '%s'" (Ref.string_of snapshot_of); - log_reraise - ("Failed to find the VM which is snapshot of " ^ (Db.VM.get_name_label ~__context ~self:ref)) - (fun table -> - let snapshot_of = (lookup snapshot_of) table in - Db.VM.set_snapshot_of ~__context ~self:ref ~value:snapshot_of) - state.table - end - end; - - if cls = Datamodel._vm then begin - let parent = Db.VM.get_parent 
~__context ~self:ref in - debug "lookup for parent = '%s'" (Ref.string_of parent); - try - let parent = lookup parent state.table in - Db.VM.set_parent ~__context ~self:ref ~value:parent - with _ -> debug "no parent found" - end in - - List.iter aux state.table + let aux (cls, id, ref) = + let ref = Ref.of_string ref in + + if cls = Datamodel._vm && Db.VM.get_is_a_snapshot ~__context ~self:ref then begin + let snapshot_of = Db.VM.get_snapshot_of ~__context ~self:ref in + if snapshot_of <> Ref.null + then begin + debug "lookup for snapshot_of = '%s'" (Ref.string_of snapshot_of); + log_reraise + ("Failed to find the VM which is snapshot of " ^ (Db.VM.get_name_label ~__context ~self:ref)) + (fun table -> + let snapshot_of = (lookup snapshot_of) table in + Db.VM.set_snapshot_of ~__context ~self:ref ~value:snapshot_of) + state.table + end + end; + + if cls = Datamodel._vm then begin + let parent = Db.VM.get_parent ~__context ~self:ref in + debug "lookup for parent = '%s'" (Ref.string_of parent); + try + let parent = lookup parent state.table in + Db.VM.set_parent ~__context ~self:ref ~value:parent + with _ -> debug "no parent found" + end in + + List.iter aux state.table (** Take a list of objects, lookup the handlers by class name and 'handle' them *) let handle_all __context config rpc session_id (xs: obj list) = - let state = initial_state xs in - try - let one_type (cls, handler) = - let instances = List.filter (fun x -> x.cls = cls) xs in - debug "Importing %i %s(s)" (List.length instances) cls; - List.iter (fun x -> handler __context config rpc session_id state x) instances in - List.iter one_type handlers; - let dry_run = match config.import_type with - | Metadata_import {dry_run=true} -> true - | _ -> false - in - if not dry_run then - update_snapshot_and_parent_links ~__context state; - state - with e -> - Backtrace.is_important e; - error "Caught exception in import: %s" (ExnHelper.string_of_exn e); - (* execute all the cleanup actions now *) - if config.force 
- then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) - else begin - cleanup state.cleanup; - end; - raise e + let state = initial_state xs in + try + let one_type (cls, handler) = + let instances = List.filter (fun x -> x.cls = cls) xs in + debug "Importing %i %s(s)" (List.length instances) cls; + List.iter (fun x -> handler __context config rpc session_id state x) instances in + List.iter one_type handlers; + let dry_run = match config.import_type with + | Metadata_import {dry_run=true} -> true + | _ -> false + in + if not dry_run then + update_snapshot_and_parent_links ~__context state; + state + with e -> + Backtrace.is_important e; + error "Caught exception in import: %s" (ExnHelper.string_of_exn e); + (* execute all the cleanup actions now *) + if config.force + then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) + else begin + cleanup state.cleanup; + end; + raise e (** Read the next file in the archive as xml *) let read_xml hdr fd = - let xml_string = Bigbuffer.make () in - really_read_bigbuffer fd xml_string hdr.Tar_unix.Header.file_size; - Xml.parse_bigbuffer xml_string + let xml_string = Bigbuffer.make () in + really_read_bigbuffer fd xml_string hdr.Tar_unix.Header.file_size; + Xml.parse_bigbuffer xml_string let assert_filename_is hdr = - let expected = Xva.xml_filename in - let actual = hdr.Tar_unix.Header.file_name in - if expected <> actual then begin - let hex = Tar_unix.Header.to_hex in - error "import expects the next file in the stream to be [%s]; got [%s]" - (hex expected) (hex actual); - raise (IFailure (Unexpected_file(expected, actual))) - end + let expected = Xva.xml_filename in + let actual = hdr.Tar_unix.Header.file_name in + if expected <> actual then begin + let hex = Tar_unix.Header.to_hex in + error "import expects the next file in the stream to be [%s]; got [%s]" + (hex expected) (hex actual); + raise (IFailure 
(Unexpected_file(expected, actual))) + end (** Takes an fd and a function, tries first to read the first tar block and checks for the existence of 'ova.xml'. If that fails then pipe the lot through gzip and try again *) let with_open_archive fd ?length f = - (* Read the first header's worth into a buffer *) - let buffer = Cstruct.create Tar_unix.Header.length in - let retry_with_gzip = ref true in - try - Tar_unix.really_read fd buffer; - - (* we assume the first block is not all zeroes *) - let hdr = Opt.unbox (Tar_unix.Header.unmarshal buffer) in - assert_filename_is hdr; - - (* successfully opened uncompressed stream *) - retry_with_gzip := false; - let xml = read_xml hdr fd in - Tar_unix.Archive.skip fd (Tar_unix.Header.compute_zero_padding_length hdr); - f xml fd - with e -> - if not(!retry_with_gzip) then raise e; - debug "Failed to directly open the archive; trying gzip"; - let pipe_out, pipe_in = Unix.pipe () in - let t = Thread.create - (Gzip.decompress pipe_in) - (fun compressed_in -> - (* Write the initial buffer *) - Unix.set_close_on_exec compressed_in; - debug "Writing initial buffer"; - Tar_unix.really_write compressed_in buffer; - let limit = (Opt.map - (fun x -> Int64.sub x (Int64.of_int Tar_unix.Header.length)) length) in - let n = Unixext.copy_file ?limit fd compressed_in in - debug "Written a total of %d + %Ld bytes" Tar_unix.Header.length n; - ) in - finally - (fun () -> - let hdr = Tar_unix.Header.get_next_header pipe_out in - assert_filename_is hdr; - - let xml = read_xml hdr pipe_out in - Tar_unix.Archive.skip pipe_out (Tar_unix.Header.compute_zero_padding_length hdr); - f xml pipe_out) - (fun () -> - debug "Closing pipes"; - Unix.close pipe_in; - Unix.close pipe_out; - Thread.join t) + (* Read the first header's worth into a buffer *) + let buffer = Cstruct.create Tar_unix.Header.length in + let retry_with_gzip = ref true in + try + Tar_unix.really_read fd buffer; + + (* we assume the first block is not all zeroes *) + let hdr = Opt.unbox 
(Tar_unix.Header.unmarshal buffer) in + assert_filename_is hdr; + + (* successfully opened uncompressed stream *) + retry_with_gzip := false; + let xml = read_xml hdr fd in + Tar_unix.Archive.skip fd (Tar_unix.Header.compute_zero_padding_length hdr); + f xml fd + with e -> + if not(!retry_with_gzip) then raise e; + debug "Failed to directly open the archive; trying gzip"; + let pipe_out, pipe_in = Unix.pipe () in + let t = Thread.create + (Gzip.decompress pipe_in) + (fun compressed_in -> + (* Write the initial buffer *) + Unix.set_close_on_exec compressed_in; + debug "Writing initial buffer"; + Tar_unix.really_write compressed_in buffer; + let limit = (Opt.map + (fun x -> Int64.sub x (Int64.of_int Tar_unix.Header.length)) length) in + let n = Unixext.copy_file ?limit fd compressed_in in + debug "Written a total of %d + %Ld bytes" Tar_unix.Header.length n; + ) in + finally + (fun () -> + let hdr = Tar_unix.Header.get_next_header pipe_out in + assert_filename_is hdr; + + let xml = read_xml hdr pipe_out in + Tar_unix.Archive.skip pipe_out (Tar_unix.Header.compute_zero_padding_length hdr); + f xml pipe_out) + (fun () -> + debug "Closing pipes"; + Unix.close pipe_in; + Unix.close pipe_out; + Thread.join t) (** Remove "import" from the current operations of all created VMs, complete the task including the VM references *) let complete_import ~__context vmrefs = - debug "length of vmrefs: %d" (List.length vmrefs); - debug "content: %s" (String.concat "," (List.map Ref.string_of vmrefs)); - try - (* Remove the "import" current operation, recompute allowed operations *) - let task_id = Ref.string_of (Context.get_task_id __context) in - List.iter (fun vm -> - Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm) vmrefs; - - (* We only keep VMs which are not snapshot *) - let vmrefs = List.filter (fun vmref -> not (Db.VM.get_is_a_snapshot ~__context ~self:vmref)) vmrefs in - - (* We only set 
the result on the task since it is officially completed later. *) - TaskHelper.set_result ~__context (Some (API.rpc_of_ref_VM_set vmrefs)) - with e -> - Backtrace.is_important e; - error "Caught exception completing import: %s" (ExnHelper.string_of_exn e); - raise e + debug "length of vmrefs: %d" (List.length vmrefs); + debug "content: %s" (String.concat "," (List.map Ref.string_of vmrefs)); + try + (* Remove the "import" current operation, recompute allowed operations *) + let task_id = Ref.string_of (Context.get_task_id __context) in + List.iter (fun vm -> + Db.VM.remove_from_current_operations ~__context ~self:vm ~key:task_id; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm) vmrefs; + + (* We only keep VMs which are not snapshot *) + let vmrefs = List.filter (fun vmref -> not (Db.VM.get_is_a_snapshot ~__context ~self:vmref)) vmrefs in + + (* We only set the result on the task since it is officially completed later. *) + TaskHelper.set_result ~__context (Some (API.rpc_of_ref_VM_set vmrefs)) + with e -> + Backtrace.is_important e; + error "Caught exception completing import: %s" (ExnHelper.string_of_exn e); + raise e let find_query_flag query key = - List.mem_assoc key query && (List.assoc key query = "true") + List.mem_assoc key query && (List.assoc key query = "true") let read_map_params name params = - let len = String.length name + 1 in (* include ':' *) - let filter_params = List.filter (fun (p,_) -> (Xstringext.String.startswith name p) && (String.length p > len)) params in - List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params + let len = String.length name + 1 in (* include ':' *) + let filter_params = List.filter (fun (p,_) -> (Xstringext.String.startswith name p) && (String.length p > len)) params in + List.map (fun (k,v) -> String.sub k len (String.length k - len),v) filter_params let with_error_handling f = - match Backtrace.with_backtraces f - with - | `Ok result -> result - | `Error (e, backtrace) -> 
begin - Debug.log_backtrace e backtrace; - let reraise = Backtrace.reraise e in - match e with - | IFailure failure -> - begin - match failure with - | Cannot_handle_chunked -> - error "import code cannot handle chunked encoding"; - reraise (Api_errors.Server_error (Api_errors.import_error_cannot_handle_chunked, [])) - | Some_checksums_failed -> - error "some checksums failed"; - reraise (Api_errors.Server_error (Api_errors.import_error_some_checksums_failed, [])) - | Failed_to_find_object id -> - error "Failed to find object with ID: %s" id; - reraise (Api_errors.Server_error (Api_errors.import_error_failed_to_find_object, [id])) - | Attached_disks_not_found -> - error "Cannot import guest with currently attached disks which cannot be found"; - reraise (Api_errors.Server_error (Api_errors.import_error_attached_disks_not_found, [])) - | Unexpected_file (expected, actual) -> - let hex = Tar_unix.Header.to_hex in - error "Invalid XVA file: import expects the next file in the stream to be \"%s\" [%s]; got \"%s\" [%s]" - expected (hex expected) actual (hex actual); - reraise (Api_errors.Server_error (Api_errors.import_error_unexpected_file, [expected; actual])) - end - | Api_errors.Server_error(code, params) as e -> - Backtrace.is_important e; - raise e - | End_of_file -> - error "Prematurely reached end-of-file during import"; - reraise (Api_errors.Server_error (Api_errors.import_error_premature_eof, [])) - | e -> - error "Import caught exception: %s" (ExnHelper.string_of_exn e); - reraise (Api_errors.Server_error (Api_errors.import_error_generic, [ (ExnHelper.string_of_exn e) ])) - end + match Backtrace.with_backtraces f + with + | `Ok result -> result + | `Error (e, backtrace) -> begin + Debug.log_backtrace e backtrace; + let reraise = Backtrace.reraise e in + match e with + | IFailure failure -> + begin + match failure with + | Cannot_handle_chunked -> + error "import code cannot handle chunked encoding"; + reraise (Api_errors.Server_error 
(Api_errors.import_error_cannot_handle_chunked, [])) + | Some_checksums_failed -> + error "some checksums failed"; + reraise (Api_errors.Server_error (Api_errors.import_error_some_checksums_failed, [])) + | Failed_to_find_object id -> + error "Failed to find object with ID: %s" id; + reraise (Api_errors.Server_error (Api_errors.import_error_failed_to_find_object, [id])) + | Attached_disks_not_found -> + error "Cannot import guest with currently attached disks which cannot be found"; + reraise (Api_errors.Server_error (Api_errors.import_error_attached_disks_not_found, [])) + | Unexpected_file (expected, actual) -> + let hex = Tar_unix.Header.to_hex in + error "Invalid XVA file: import expects the next file in the stream to be \"%s\" [%s]; got \"%s\" [%s]" + expected (hex expected) actual (hex actual); + reraise (Api_errors.Server_error (Api_errors.import_error_unexpected_file, [expected; actual])) + end + | Api_errors.Server_error(code, params) as e -> + Backtrace.is_important e; + raise e + | End_of_file -> + error "Prematurely reached end-of-file during import"; + reraise (Api_errors.Server_error (Api_errors.import_error_premature_eof, [])) + | e -> + error "Import caught exception: %s" (ExnHelper.string_of_exn e); + reraise (Api_errors.Server_error (Api_errors.import_error_generic, [ (ExnHelper.string_of_exn e) ])) + end (** Import metadata only *) let metadata_handler (req: Request.t) s _ = - debug "metadata_handler called"; - req.Request.close <- true; - Xapi_http.with_context "VM.metadata_import" req s - (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> - let full_restore = find_query_flag req.Request.query "restore" in - let force = find_query_flag req.Request.query "force" in - let dry_run = find_query_flag req.Request.query "dry_run" in - let live = find_query_flag req.Request.query "live" in - let vdi_map = read_map_params "vdi" req.Request.query in - info "VM.import_metadata: force = %b; full_restore = %b dry_run = %b; live = 
%b; vdi_map = [ %s ]" - force full_restore dry_run live - (String.concat "; " (List.map (fun (a, b) -> a ^ "=" ^ b) vdi_map)); - let metadata_options = {dry_run = dry_run; live = live; vdi_map} in - let config = { - import_type = Metadata_import metadata_options; - full_restore = full_restore; - force = force - } in - let headers = Http.http_200_ok ~keep_alive:false () @ - [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of (Context.get_task_id __context)); - content_type ] in - Http_svr.headers s headers; - with_open_archive s ?length:req.Request.content_length - (fun metadata s -> - debug "Got XML"; - (* Skip trailing two zero blocks *) - Tar_unix.Archive.skip s (Tar_unix.Header.length * 2); - - let header = header_of_xmlrpc metadata in - assert_compatable ~__context header.version; - if full_restore then assert_can_restore_backup ~__context rpc session_id header; - - with_error_handling (fun () -> - let state = handle_all __context config rpc session_id header.objects in - let table = state.table in - let on_cleanup_stack = state.cleanup in - try - List.iter (fun (cls, id, r) -> - debug "Imported object type %s: external ref: %s internal ref: %s" - cls id r) table; - - let vmrefs = List.map (fun (cls,id,r) -> Ref.of_string r) state.created_vms in - let vmrefs = Listext.List.setify vmrefs in - complete_import ~__context vmrefs; - info "import_metadata successful"; - with e -> - Backtrace.is_important e; - error "Caught exception during import: %s" (ExnHelper.string_of_exn e); - if force - then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) - else begin - debug "Cleaning up after import failure: %s" (ExnHelper.string_of_exn e); - cleanup on_cleanup_stack; - end; - raise e - ) - ))) + debug "metadata_handler called"; + req.Request.close <- true; + Xapi_http.with_context "VM.metadata_import" req s + (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> + let full_restore = find_query_flag 
req.Request.query "restore" in + let force = find_query_flag req.Request.query "force" in + let dry_run = find_query_flag req.Request.query "dry_run" in + let live = find_query_flag req.Request.query "live" in + let vdi_map = read_map_params "vdi" req.Request.query in + info "VM.import_metadata: force = %b; full_restore = %b dry_run = %b; live = %b; vdi_map = [ %s ]" + force full_restore dry_run live + (String.concat "; " (List.map (fun (a, b) -> a ^ "=" ^ b) vdi_map)); + let metadata_options = {dry_run = dry_run; live = live; vdi_map} in + let config = { + import_type = Metadata_import metadata_options; + full_restore = full_restore; + force = force + } in + let headers = Http.http_200_ok ~keep_alive:false () @ + [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of (Context.get_task_id __context)); + content_type ] in + Http_svr.headers s headers; + with_open_archive s ?length:req.Request.content_length + (fun metadata s -> + debug "Got XML"; + (* Skip trailing two zero blocks *) + Tar_unix.Archive.skip s (Tar_unix.Header.length * 2); + + let header = header_of_xmlrpc metadata in + assert_compatable ~__context header.version; + if full_restore then assert_can_restore_backup ~__context rpc session_id header; + + with_error_handling (fun () -> + let state = handle_all __context config rpc session_id header.objects in + let table = state.table in + let on_cleanup_stack = state.cleanup in + try + List.iter (fun (cls, id, r) -> + debug "Imported object type %s: external ref: %s internal ref: %s" + cls id r) table; + + let vmrefs = List.map (fun (cls,id,r) -> Ref.of_string r) state.created_vms in + let vmrefs = Listext.List.setify vmrefs in + complete_import ~__context vmrefs; + info "import_metadata successful"; + with e -> + Backtrace.is_important e; + error "Caught exception during import: %s" (ExnHelper.string_of_exn e); + if force + then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) + else begin + debug "Cleaning up after 
import failure: %s" (ExnHelper.string_of_exn e); + cleanup on_cleanup_stack; + end; + raise e + ) + ))) let stream_import __context rpc session_id s content_length refresh_session config = - let sr = match config.import_type with - | Full_import sr -> sr - | _ -> failwith "Internal error: stream_import called without correct import_type" - in - with_open_archive s ?length:content_length - (fun metadata s -> - debug "Got XML"; - let old_zurich_or_geneva = try ignore(Xva.of_xml metadata); true with _ -> false in - let vmrefs = - if old_zurich_or_geneva - then Import_xva.from_xml refresh_session s __context rpc session_id sr metadata - else begin - debug "importing new style VM"; - let header = header_of_xmlrpc metadata in - assert_compatable ~__context header.version; - if config.full_restore then assert_can_restore_backup ~__context rpc session_id header; - - (* objects created here: *) - let state = handle_all __context config rpc session_id header.objects in - let table, on_cleanup_stack = state.table, state.cleanup in - - (* signal to GUI that object have been created and they can now go off and remapp networks *) - TaskHelper.add_to_other_config ~__context "object_creation" "complete"; - - try - List.iter (fun (cls, id, r) -> - debug "Imported object type %s: external ref: %s internal ref: %s" cls id r) - table; - - (* now stream the disks. 
We expect not to stream CDROMs *) - let all_vdis = non_cdrom_vdis header in - (* some CDROMs might be in as disks, don't stream them either *) - let all_vdis = List.filter (fun x -> exists (Ref.of_string x.id) table) all_vdis in - let vdis = List.map (fun x -> - let vdir = API.Legacy.From.vDI_t "" (find_in_export x.id state.export) in - x.id, lookup (Ref.of_string x.id) table, vdir.API.vDI_virtual_size) all_vdis in - List.iter (fun (extid, intid, size) -> debug "Expecting to import VDI %s into %s (size=%Ld)" extid (Ref.string_of intid) size) vdis; - let checksum_table = Stream_vdi.recv_all refresh_session s __context rpc session_id header.version config.force vdis in - - (* CA-48768: Stream_vdi.recv_all only checks for task cancellation - every ten seconds, so we need to check again now. After this - point, we disable cancellation for this task. *) - TaskHelper.exn_if_cancelling ~__context; - TaskHelper.set_not_cancellable ~__context; - - (* Pre-miami GA exports have a checksum table at the end of the export. Check the calculated checksums *) - (* against the table here. Nb. Rio GA-Miami B2 exports get their checksums checked twice! 
*) - if header.version.export_vsn < 2 then begin - let xml = Tar_unix.Archive.with_next_file s (fun s hdr -> read_xml hdr s) in - let expected_checksums = checksum_table_of_xmlrpc xml in - if not(compare_checksums checksum_table expected_checksums) then begin - error "Some data checksums were incorrect: VM may be corrupt"; - if not(config.force) - then raise (IFailure Some_checksums_failed) - else error "Ignoring incorrect checksums since 'force' flag was supplied" - end; - end; - (* return vmrefs *) - Listext.List.setify (List.map (fun (cls,id,r) -> Ref.of_string r) state.created_vms) - - with e -> - Backtrace.is_important e; - error "Caught exception during import: %s" (ExnHelper.string_of_exn e); - if config.force - then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) - else begin - debug "Cleaning up after import failure: %s" (ExnHelper.string_of_exn e); - cleanup on_cleanup_stack; - end; - raise e - end - in - complete_import ~__context vmrefs; - debug "import successful"; - vmrefs - ) + let sr = match config.import_type with + | Full_import sr -> sr + | _ -> failwith "Internal error: stream_import called without correct import_type" + in + with_open_archive s ?length:content_length + (fun metadata s -> + debug "Got XML"; + let old_zurich_or_geneva = try ignore(Xva.of_xml metadata); true with _ -> false in + let vmrefs = + if old_zurich_or_geneva + then Import_xva.from_xml refresh_session s __context rpc session_id sr metadata + else begin + debug "importing new style VM"; + let header = header_of_xmlrpc metadata in + assert_compatable ~__context header.version; + if config.full_restore then assert_can_restore_backup ~__context rpc session_id header; + + (* objects created here: *) + let state = handle_all __context config rpc session_id header.objects in + let table, on_cleanup_stack = state.table, state.cleanup in + + (* signal to GUI that object have been created and they can now go off and remapp networks 
*) + TaskHelper.add_to_other_config ~__context "object_creation" "complete"; + + try + List.iter (fun (cls, id, r) -> + debug "Imported object type %s: external ref: %s internal ref: %s" cls id r) + table; + + (* now stream the disks. We expect not to stream CDROMs *) + let all_vdis = non_cdrom_vdis header in + (* some CDROMs might be in as disks, don't stream them either *) + let all_vdis = List.filter (fun x -> exists (Ref.of_string x.id) table) all_vdis in + let vdis = List.map (fun x -> + let vdir = API.Legacy.From.vDI_t "" (find_in_export x.id state.export) in + x.id, lookup (Ref.of_string x.id) table, vdir.API.vDI_virtual_size) all_vdis in + List.iter (fun (extid, intid, size) -> debug "Expecting to import VDI %s into %s (size=%Ld)" extid (Ref.string_of intid) size) vdis; + let checksum_table = Stream_vdi.recv_all refresh_session s __context rpc session_id header.version config.force vdis in + + (* CA-48768: Stream_vdi.recv_all only checks for task cancellation + every ten seconds, so we need to check again now. After this + point, we disable cancellation for this task. *) + TaskHelper.exn_if_cancelling ~__context; + TaskHelper.set_not_cancellable ~__context; + + (* Pre-miami GA exports have a checksum table at the end of the export. Check the calculated checksums *) + (* against the table here. Nb. Rio GA-Miami B2 exports get their checksums checked twice! 
*) + if header.version.export_vsn < 2 then begin + let xml = Tar_unix.Archive.with_next_file s (fun s hdr -> read_xml hdr s) in + let expected_checksums = checksum_table_of_xmlrpc xml in + if not(compare_checksums checksum_table expected_checksums) then begin + error "Some data checksums were incorrect: VM may be corrupt"; + if not(config.force) + then raise (IFailure Some_checksums_failed) + else error "Ignoring incorrect checksums since 'force' flag was supplied" + end; + end; + (* return vmrefs *) + Listext.List.setify (List.map (fun (cls,id,r) -> Ref.of_string r) state.created_vms) + + with e -> + Backtrace.is_important e; + error "Caught exception during import: %s" (ExnHelper.string_of_exn e); + if config.force + then warn "Not cleaning up after import failure since --force provided: %s" (ExnHelper.string_of_exn e) + else begin + debug "Cleaning up after import failure: %s" (ExnHelper.string_of_exn e); + cleanup on_cleanup_stack; + end; + raise e + end + in + complete_import ~__context vmrefs; + debug "import successful"; + vmrefs + ) let handler (req: Request.t) s _ = - req.Request.close <- true; - - Xapi_http.assert_credentials_ok "VM.import" ~http_action:"put_import" req s; - - debug "import handler"; - - let full_restore = find_query_flag req.Request.query "restore" in - let force = find_query_flag req.Request.query "force" in - - let all = req.Request.cookie @ req.Request.query in - let subtask_of = - if List.mem_assoc "subtask_of" all - then Some (Ref.of_string (List.assoc "subtask_of" all)) - else None in - - (* Perform the SR reachability check using a fresh context/task because - we don't want to complete the task in the forwarding case *) - - Server_helpers.exec_with_new_task ?subtask_of "VM.import" - (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> - let sr = - if List.mem_assoc "sr_id" all - then Ref.of_string (List.assoc "sr_id" all) - else - if List.mem_assoc "sr_uuid" all - then Db.SR.get_by_uuid ~__context 
~uuid:(List.assoc "sr_uuid" all) - else - log_reraise - "request was missing both sr_id and sr_uuid: one must be provided" - (Helpers.call_api_functions ~__context) - get_default_sr - in - info "VM.import: SR = '%s%s'; force = %b; full_restore = %b" - (try Db.SR.get_uuid ~__context ~self:sr with _ -> "invalid") - (try Printf.sprintf " (%s)" (Db.SR.get_name_label ~__context ~self:sr) with _ -> "") - force full_restore; - if not(check_sr_availability ~__context sr) - then - (debug "sr not available - redirecting"; - let host = find_host_for_sr ~__context sr in - let address = Db.Host.get_address ~__context ~self:host in - let url = Printf.sprintf "https://%s%s?%s" address req.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Request.query)) in - let headers = Http.http_302_redirect url in - debug "new location: %s" url; - Http_svr.headers s headers) - else - Xapi_http.with_context "VM.import" req s - (fun __context -> - (* This is the signal to say we've taken responsibility from the CLI server for completing the task *) - (* The GUI can deal with this itself, but the CLI is complicated by the thin cli/cli server split *) - TaskHelper.set_progress ~__context 0.0; - - if force then warn "Force option supplied: will ignore checksum failures"; - - (* Let's check that we're not trying to import into an iso library! 
*) - if Db.SR.get_content_type ~__context ~self:sr = "iso" - then - begin - Http_svr.headers s (Http.http_400_badrequest ()); - raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [])) - end; - with_error_handling (fun () -> - let refresh_external = - if List.mem_assoc "session_id" all then begin - let external_session_id = List.assoc "session_id" all in - Xapi_session.consider_touching_session rpc (Ref.of_string external_session_id) - end else - fun () -> () - in - let refresh_internal = - Xapi_session.consider_touching_session rpc session_id - in - let refresh_session () = - refresh_external (); - refresh_internal () - in - - debug "Importing %s" (if full_restore then "(as 'restore')" else "(as new VM)"); - let config = { import_type = Full_import sr; full_restore = full_restore; force = force } in - - match req.Request.transfer_encoding, req.Request.content_length with - | Some x, _ -> - error "Encoding not yet implemented in the import code: %s" x; - Http_svr.headers s (http_403_forbidden ()); - raise (IFailure Cannot_handle_chunked) - | None, content_length -> - let headers = Http.http_200_ok ~keep_alive:false () @ - [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of (Context.get_task_id __context)); - content_type ] in - Http_svr.headers s headers; - debug "Reading XML"; - ignore(stream_import __context rpc session_id s content_length refresh_session config); - ) - ) - ); - debug "import successful") + req.Request.close <- true; + + Xapi_http.assert_credentials_ok "VM.import" ~http_action:"put_import" req s; + + debug "import handler"; + + let full_restore = find_query_flag req.Request.query "restore" in + let force = find_query_flag req.Request.query "force" in + + let all = req.Request.cookie @ req.Request.query in + let subtask_of = + if List.mem_assoc "subtask_of" all + then Some (Ref.of_string (List.assoc "subtask_of" all)) + else None in + + (* Perform the SR reachability check using a fresh context/task because + we don't want to complete the 
task in the forwarding case *) + + Server_helpers.exec_with_new_task ?subtask_of "VM.import" + (fun __context -> Helpers.call_api_functions ~__context (fun rpc session_id -> + let sr = + if List.mem_assoc "sr_id" all + then Ref.of_string (List.assoc "sr_id" all) + else + if List.mem_assoc "sr_uuid" all + then Db.SR.get_by_uuid ~__context ~uuid:(List.assoc "sr_uuid" all) + else + log_reraise + "request was missing both sr_id and sr_uuid: one must be provided" + (Helpers.call_api_functions ~__context) + get_default_sr + in + info "VM.import: SR = '%s%s'; force = %b; full_restore = %b" + (try Db.SR.get_uuid ~__context ~self:sr with _ -> "invalid") + (try Printf.sprintf " (%s)" (Db.SR.get_name_label ~__context ~self:sr) with _ -> "") + force full_restore; + if not(check_sr_availability ~__context sr) + then + (debug "sr not available - redirecting"; + let host = find_host_for_sr ~__context sr in + let address = Db.Host.get_address ~__context ~self:host in + let url = Printf.sprintf "https://%s%s?%s" address req.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Request.query)) in + let headers = Http.http_302_redirect url in + debug "new location: %s" url; + Http_svr.headers s headers) + else + Xapi_http.with_context "VM.import" req s + (fun __context -> + (* This is the signal to say we've taken responsibility from the CLI server for completing the task *) + (* The GUI can deal with this itself, but the CLI is complicated by the thin cli/cli server split *) + TaskHelper.set_progress ~__context 0.0; + + if force then warn "Force option supplied: will ignore checksum failures"; + + (* Let's check that we're not trying to import into an iso library! 
*) + if Db.SR.get_content_type ~__context ~self:sr = "iso" + then + begin + Http_svr.headers s (Http.http_400_badrequest ()); + raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [])) + end; + with_error_handling (fun () -> + let refresh_external = + if List.mem_assoc "session_id" all then begin + let external_session_id = List.assoc "session_id" all in + Xapi_session.consider_touching_session rpc (Ref.of_string external_session_id) + end else + fun () -> () + in + let refresh_internal = + Xapi_session.consider_touching_session rpc session_id + in + let refresh_session () = + refresh_external (); + refresh_internal () + in + + debug "Importing %s" (if full_restore then "(as 'restore')" else "(as new VM)"); + let config = { import_type = Full_import sr; full_restore = full_restore; force = force } in + + match req.Request.transfer_encoding, req.Request.content_length with + | Some x, _ -> + error "Encoding not yet implemented in the import code: %s" x; + Http_svr.headers s (http_403_forbidden ()); + raise (IFailure Cannot_handle_chunked) + | None, content_length -> + let headers = Http.http_200_ok ~keep_alive:false () @ + [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of (Context.get_task_id __context)); + content_type ] in + Http_svr.headers s headers; + debug "Reading XML"; + ignore(stream_import __context rpc session_id s content_length refresh_session config); + ) + ) + ); + debug "import successful") diff --git a/ocaml/xapi/import_raw_vdi.ml b/ocaml/xapi/import_raw_vdi.ml index 642a9e0ffd5..76b07b4f818 100644 --- a/ocaml/xapi/import_raw_vdi.ml +++ b/ocaml/xapi/import_raw_vdi.ml @@ -13,7 +13,7 @@ *) (** HTTP handler for importing a raw VDI. 
* @group Import and Export - *) +*) module D=Debug.Make(struct let name="import" end) open D @@ -27,76 +27,76 @@ open Pervasiveext open Client let localhost_handler rpc session_id vdi (req: Request.t) (s: Unix.file_descr) = - req.Request.close <- true; - Xapi_http.with_context "Importing raw VDI" req s - (fun __context -> - let prezeroed = not (Sm_fs_ops.must_write_zeroes_into_new_vdi ~__context vdi) in - let all = req.Request.query @ req.Request.cookie in - let chunked = List.mem_assoc "chunked" all in - let task_id = Context.get_task_id __context in - match Importexport.Format.of_req req with - | `Unknown x -> - error "import_raw_vdi task_id = %s; vdi = %s; unknown disk format = %s" - (Ref.string_of task_id) (Ref.string_of vdi) x; - TaskHelper.failed ~__context (Api_errors.Server_error(Api_errors.internal_error, ["Unknown format " ^ x])); - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()) - | `Ok format when format <> Importexport.Format.Raw && chunked -> - error "import_raw_vdi task_id = %s; vdi = %s; unable to import a .vhd using chunked encoding" - (Ref.string_of task_id) (Ref.string_of vdi) - | `Ok format -> - debug "import_raw_vdi task_id = %s vdi = %s; chunked = %b; format = %s" - (Ref.string_of task_id) (Ref.string_of vdi) chunked (Importexport.Format.to_string format); - try - match req.Request.transfer_encoding with - | Some x -> - error "Chunked encoding not yet implemented in the import code"; - Http_svr.headers s (http_403_forbidden ()); - raise (Failure (Printf.sprintf "import code cannot handle encoding: %s" x)) - | None -> - Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RW - (fun path -> - let headers = Http.http_200_ok ~keep_alive:false () @ - [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of task_id); - content_type ] in - Http_svr.headers s headers; - if chunked - then Vhd_tool_wrapper.receive (Vhd_tool_wrapper.update_task_progress __context) "raw" "chunked" s None path "" prezeroed - else Vhd_tool_wrapper.receive 
(Vhd_tool_wrapper.update_task_progress __context) (Importexport.Format.to_string format) "none" s req.Request.content_length path "" prezeroed - ); - TaskHelper.complete ~__context None; - with e -> - Backtrace.is_important e; - error "Caught exception: %s" (ExnHelper.string_of_exn e); - TaskHelper.failed ~__context e; - raise e) + req.Request.close <- true; + Xapi_http.with_context "Importing raw VDI" req s + (fun __context -> + let prezeroed = not (Sm_fs_ops.must_write_zeroes_into_new_vdi ~__context vdi) in + let all = req.Request.query @ req.Request.cookie in + let chunked = List.mem_assoc "chunked" all in + let task_id = Context.get_task_id __context in + match Importexport.Format.of_req req with + | `Unknown x -> + error "import_raw_vdi task_id = %s; vdi = %s; unknown disk format = %s" + (Ref.string_of task_id) (Ref.string_of vdi) x; + TaskHelper.failed ~__context (Api_errors.Server_error(Api_errors.internal_error, ["Unknown format " ^ x])); + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()) + | `Ok format when format <> Importexport.Format.Raw && chunked -> + error "import_raw_vdi task_id = %s; vdi = %s; unable to import a .vhd using chunked encoding" + (Ref.string_of task_id) (Ref.string_of vdi) + | `Ok format -> + debug "import_raw_vdi task_id = %s vdi = %s; chunked = %b; format = %s" + (Ref.string_of task_id) (Ref.string_of vdi) chunked (Importexport.Format.to_string format); + try + match req.Request.transfer_encoding with + | Some x -> + error "Chunked encoding not yet implemented in the import code"; + Http_svr.headers s (http_403_forbidden ()); + raise (Failure (Printf.sprintf "import code cannot handle encoding: %s" x)) + | None -> + Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RW + (fun path -> + let headers = Http.http_200_ok ~keep_alive:false () @ + [ Http.Hdr.task_id ^ ":" ^ (Ref.string_of task_id); + content_type ] in + Http_svr.headers s headers; + if chunked + then Vhd_tool_wrapper.receive 
(Vhd_tool_wrapper.update_task_progress __context) "raw" "chunked" s None path "" prezeroed + else Vhd_tool_wrapper.receive (Vhd_tool_wrapper.update_task_progress __context) (Importexport.Format.to_string format) "none" s req.Request.content_length path "" prezeroed + ); + TaskHelper.complete ~__context None; + with e -> + Backtrace.is_important e; + error "Caught exception: %s" (ExnHelper.string_of_exn e); + TaskHelper.failed ~__context e; + raise e) let import vdi (req: Request.t) (s: Unix.file_descr) _ = - Xapi_http.assert_credentials_ok "VDI.import" ~http_action:"put_import_raw_vdi" req s; + Xapi_http.assert_credentials_ok "VDI.import" ~http_action:"put_import_raw_vdi" req s; - (* Perform the SR reachability check using a fresh context/task because - we don't want to complete the task in the forwarding case *) - Server_helpers.exec_with_new_task "VDI.import" - (fun __context -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - debug "Checking whether localhost can see SR: %s" (Ref.string_of sr); - if (Importexport.check_sr_availability ~__context sr) - then localhost_handler rpc session_id vdi req s - else - let host = Importexport.find_host_for_sr ~__context sr in - let address = Db.Host.get_address ~__context ~self:host in - return_302_redirect req s address - ) - ) + (* Perform the SR reachability check using a fresh context/task because + we don't want to complete the task in the forwarding case *) + Server_helpers.exec_with_new_task "VDI.import" + (fun __context -> + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let sr = Db.VDI.get_SR ~__context ~self:vdi in + debug "Checking whether localhost can see SR: %s" (Ref.string_of sr); + if (Importexport.check_sr_availability ~__context sr) + then localhost_handler rpc session_id vdi req s + else + let host = Importexport.find_host_for_sr ~__context sr in + let address = Db.Host.get_address ~__context ~self:host in + 
return_302_redirect req s address + ) + ) let handler (req: Request.t) (s: Unix.file_descr) _ = - Xapi_http.assert_credentials_ok "VDI.import" ~http_action:"put_import_raw_vdi" req s; + Xapi_http.assert_credentials_ok "VDI.import" ~http_action:"put_import_raw_vdi" req s; - (* Using a fresh context/task because we don't want to complete the - task in the forwarding case *) - Server_helpers.exec_with_new_task "VDI.import" - (fun __context -> - import (vdi_of_req ~__context req) req s () - ) + (* Using a fresh context/task because we don't want to complete the + task in the forwarding case *) + Server_helpers.exec_with_new_task "VDI.import" + (fun __context -> + import (vdi_of_req ~__context req) req s () + ) diff --git a/ocaml/xapi/import_xva.ml b/ocaml/xapi/import_xva.ml index 3614424f59e..966045d0a3d 100644 --- a/ocaml/xapi/import_xva.ml +++ b/ocaml/xapi/import_xva.ml @@ -13,7 +13,7 @@ *) (** Import code specific to Zurich/Geneva-style XVA VM exports * @group Import and Export - *) +*) open Stdext open Xstringext @@ -27,133 +27,133 @@ open D open Client (** Connect to an XAPI server on host:port and construct the VMs *) -let make __context rpc session_id srid (vms, vdis) = +let make __context rpc session_id srid (vms, vdis) = let task_id = Ref.string_of (Context.get_task_id __context) in - + (* On error, destroy all objects we have created *) let clean_up_stack = ref [] in try debug "Creating all the VDIs inside SR: %s (%s)" (Db.SR.get_name_label ~__context ~self:srid) (Ref.string_of srid); let vdi_refs = List.map (fun vdi -> - let vdi = Client.VDI.create ~rpc ~session_id ~name_label:vdi.vdi_name - ~name_description:"" ~sR:srid ~virtual_size:vdi.size - ~_type:(vdi.variety:>API.vdi_type) ~sharable:false ~read_only:false ~xenstore_data:[] - ~sm_config:[] ~other_config:[] ~tags:[] in - clean_up_stack := - (fun _ rpc session_id -> Client.VDI.destroy rpc session_id vdi) :: !clean_up_stack; - vdi) vdis in + let vdi = Client.VDI.create ~rpc ~session_id 
~name_label:vdi.vdi_name + ~name_description:"" ~sR:srid ~virtual_size:vdi.size + ~_type:(vdi.variety:>API.vdi_type) ~sharable:false ~read_only:false ~xenstore_data:[] + ~sm_config:[] ~other_config:[] ~tags:[] in + clean_up_stack := + (fun _ rpc session_id -> Client.VDI.destroy rpc session_id vdi) :: !clean_up_stack; + vdi) vdis in debug("Now creating all the VMs"); let vm_refs = List.map (fun vm -> - let user_version = 0L in - let memory_b = vm.memory in - - let w2k_platform = ["acpi","false"; "apic","false"; "nx","false"; "pae","true"] in - let other_platform = ["acpi","true"; "apic","true"; "nx","false"; "pae","true"] in - - let platform = - match (vm.distrib,vm.distrib_version) with - Some d, Some d_v -> - if d="windows" && d_v="win2k" - then w2k_platform - else other_platform - | _ -> - other_platform - in - - let vm_ref = Client.VM.create ~rpc ~session_id ~name_label:(vm.vm_name ^ " import") - ~blocked_operations:[] - ~name_description:vm.description ~user_version ~is_a_template:false - ~affinity:Ref.null - ~memory_static_max:memory_b - ~memory_dynamic_max:memory_b - ~memory_target:memory_b - ~memory_dynamic_min:memory_b - ~memory_static_min:(Int64.mul 16L (Int64.mul 1024L 1024L)) - ~vCPUs_max:1L ~vCPUs_at_startup:1L - ~vCPUs_params:[] - ~actions_after_shutdown:`destroy ~actions_after_reboot:`restart - ~actions_after_crash:`destroy - ~hVM_boot_policy:(if vm.is_hvm then "BIOS order" else "") - ~hVM_boot_params:(if vm.is_hvm then [("order","cd")] else []) - ~hVM_shadow_multiplier:1. 
- ~platform - ~pV_kernel:"" ~pV_ramdisk:"" ~pV_bootloader:"pygrub" - ~pV_legacy_args:vm.kernel_boot_cmdline - ~pV_bootloader_args:"" - ~pV_args:"" - ~pCI_bus:"" ~other_config:[] ~xenstore_data:[] ~recommendations:"" - ~ha_always_run:false ~ha_restart_priority:"" ~tags:[] - ~protection_policy:Ref.null ~is_snapshot_from_vmpp:false - ~appliance:Ref.null - ~start_delay:0L - ~shutdown_delay:0L - ~order:0L - ~suspend_SR:Ref.null - ~version:0L - ~generation_id:"" - ~hardware_platform_version:0L - ~has_vendor_device:false - in - - TaskHelper.operate_on_db_task ~__context - (fun task -> Client.VM.add_to_other_config ~rpc ~session_id - ~self:vm_ref ~key:Xapi_globs.import_task ~value:(Ref.string_of task)); - - clean_up_stack := - (fun __context rpc session_id -> - Helpers.log_exn_continue - (Printf.sprintf "Attempting to remove import from current_operations of VM: %s" (Ref.string_of vm_ref)) - (fun () -> Db.VM.remove_from_current_operations ~__context ~self:vm_ref ~key:task_id) (); - Client.VM.destroy rpc session_id vm_ref) :: !clean_up_stack; - - (* Although someone could sneak in here and attempt to power on the VM, it - doesn't really matter since no VBDs have been created yet... 
*) - Db.VM.add_to_current_operations ~__context ~self:vm_ref ~key:task_id ~value:`import; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm_ref; - - (* make VBDs *) - List.iter (fun vbd -> - let vdi = List.assoc vbd.vdi (List.combine vdis vdi_refs) in - let vbd_ref = Client.VBD.create ~rpc ~session_id ~vM:vm_ref ~vDI:vdi ~other_config:[Xapi_globs.owner_key,""] - ~userdevice:vbd.device ~bootable:(vbd.funct = Root) ~mode:vbd.mode - ~_type:`Disk - ~empty:false - ~unpluggable:(vbd.vdi.variety <> `system) - ~qos_algorithm_type:"" ~qos_algorithm_params:[] in - clean_up_stack := - (fun __context rpc session_id -> - Client.VBD.destroy rpc session_id vbd_ref) :: !clean_up_stack) vm.vbds; - (* attempt to make CD drive *) - begin - try - ignore (Client.VBD.create ~rpc ~session_id ~vM:vm_ref ~vDI:Ref.null ~other_config:[] ~userdevice:"autodetect" - ~bootable:false ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:true ~qos_algorithm_type:"" ~qos_algorithm_params:[]) - with e -> warn "could not create CD drive on imported XVA: %s" (Printexc.to_string e) - end; - (vm,vm_ref) - ) vms in - (vm_refs, List.combine vdis vdi_refs, !clean_up_stack) + let user_version = 0L in + let memory_b = vm.memory in + + let w2k_platform = ["acpi","false"; "apic","false"; "nx","false"; "pae","true"] in + let other_platform = ["acpi","true"; "apic","true"; "nx","false"; "pae","true"] in + + let platform = + match (vm.distrib,vm.distrib_version) with + Some d, Some d_v -> + if d="windows" && d_v="win2k" + then w2k_platform + else other_platform + | _ -> + other_platform + in + + let vm_ref = Client.VM.create ~rpc ~session_id ~name_label:(vm.vm_name ^ " import") + ~blocked_operations:[] + ~name_description:vm.description ~user_version ~is_a_template:false + ~affinity:Ref.null + ~memory_static_max:memory_b + ~memory_dynamic_max:memory_b + ~memory_target:memory_b + ~memory_dynamic_min:memory_b + ~memory_static_min:(Int64.mul 16L (Int64.mul 1024L 1024L)) + ~vCPUs_max:1L ~vCPUs_at_startup:1L + 
~vCPUs_params:[] + ~actions_after_shutdown:`destroy ~actions_after_reboot:`restart + ~actions_after_crash:`destroy + ~hVM_boot_policy:(if vm.is_hvm then "BIOS order" else "") + ~hVM_boot_params:(if vm.is_hvm then [("order","cd")] else []) + ~hVM_shadow_multiplier:1. + ~platform + ~pV_kernel:"" ~pV_ramdisk:"" ~pV_bootloader:"pygrub" + ~pV_legacy_args:vm.kernel_boot_cmdline + ~pV_bootloader_args:"" + ~pV_args:"" + ~pCI_bus:"" ~other_config:[] ~xenstore_data:[] ~recommendations:"" + ~ha_always_run:false ~ha_restart_priority:"" ~tags:[] + ~protection_policy:Ref.null ~is_snapshot_from_vmpp:false + ~appliance:Ref.null + ~start_delay:0L + ~shutdown_delay:0L + ~order:0L + ~suspend_SR:Ref.null + ~version:0L + ~generation_id:"" + ~hardware_platform_version:0L + ~has_vendor_device:false + in + + TaskHelper.operate_on_db_task ~__context + (fun task -> Client.VM.add_to_other_config ~rpc ~session_id + ~self:vm_ref ~key:Xapi_globs.import_task ~value:(Ref.string_of task)); + + clean_up_stack := + (fun __context rpc session_id -> + Helpers.log_exn_continue + (Printf.sprintf "Attempting to remove import from current_operations of VM: %s" (Ref.string_of vm_ref)) + (fun () -> Db.VM.remove_from_current_operations ~__context ~self:vm_ref ~key:task_id) (); + Client.VM.destroy rpc session_id vm_ref) :: !clean_up_stack; + + (* Although someone could sneak in here and attempt to power on the VM, it + doesn't really matter since no VBDs have been created yet... 
*) + Db.VM.add_to_current_operations ~__context ~self:vm_ref ~key:task_id ~value:`import; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm_ref; + + (* make VBDs *) + List.iter (fun vbd -> + let vdi = List.assoc vbd.vdi (List.combine vdis vdi_refs) in + let vbd_ref = Client.VBD.create ~rpc ~session_id ~vM:vm_ref ~vDI:vdi ~other_config:[Xapi_globs.owner_key,""] + ~userdevice:vbd.device ~bootable:(vbd.funct = Root) ~mode:vbd.mode + ~_type:`Disk + ~empty:false + ~unpluggable:(vbd.vdi.variety <> `system) + ~qos_algorithm_type:"" ~qos_algorithm_params:[] in + clean_up_stack := + (fun __context rpc session_id -> + Client.VBD.destroy rpc session_id vbd_ref) :: !clean_up_stack) vm.vbds; + (* attempt to make CD drive *) + begin + try + ignore (Client.VBD.create ~rpc ~session_id ~vM:vm_ref ~vDI:Ref.null ~other_config:[] ~userdevice:"autodetect" + ~bootable:false ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:true ~qos_algorithm_type:"" ~qos_algorithm_params:[]) + with e -> warn "could not create CD drive on imported XVA: %s" (Printexc.to_string e) + end; + (vm,vm_ref) + ) vms in + (vm_refs, List.combine vdis vdi_refs, !clean_up_stack) with e -> debug "Caught exception while importing objects from XVA: %s" (ExnHelper.string_of_exn e); cleanup !clean_up_stack; raise e - + (** Take the XML (already extracted from the tar stream), process it to create all the relevant records and then stream in and uncompress the disk fragments. 
*) -let from_xml refresh_session s __context rpc session_id srid xml = +let from_xml refresh_session s __context rpc session_id srid xml = let vms, vdis = of_xml xml in - + let (vms,vdis,clean_up_stack) = make __context rpc session_id srid (vms, vdis) in try (* signal to GUI that object have been created and they can now go off and remapp networks *) TaskHelper.add_to_other_config ~__context "object_creation" "complete"; - let prefix_vdis = List.map - (fun (vdi, vdi_ref) -> - if not(String.startswith "file://" vdi.source) - then failwith "VDI source must be a file:// URL"; - String.sub vdi.source 7 (String.length vdi.source - 7), vdi_ref, vdi.size) vdis in + let prefix_vdis = List.map + (fun (vdi, vdi_ref) -> + if not(String.startswith "file://" vdi.source) + then failwith "VDI source must be a file:// URL"; + String.sub vdi.source 7 (String.length vdi.source - 7), vdi_ref, vdi.size) vdis in Stream_vdi.recv_all_zurich refresh_session s __context rpc session_id prefix_vdis; List.map snd vms with e -> diff --git a/ocaml/xapi/importexport.ml b/ocaml/xapi/importexport.ml index ae3c15c1594..134207e146d 100644 --- a/ocaml/xapi/importexport.ml +++ b/ocaml/xapi/importexport.ml @@ -13,37 +13,37 @@ *) (** Common definitions and functions shared between the import and export code. 
* @group Import and Export - *) +*) (** Represents a database record (the reference gets converted to a small string) *) type obj = { cls: string; id: string; snapshot: XMLRPC.xmlrpc } (** Version information attached to each export and checked on import *) -type version = - { hostname: string; - date: string; - product_version: string; - product_brand: string; - build_number: string; - xapi_vsn_major: int; - xapi_vsn_minor: int; - export_vsn: int; (* 0 if missing, indicates eg whether to expect sha1sums in the stream *) - } +type version = + { hostname: string; + date: string; + product_version: string; + product_brand: string; + build_number: string; + xapi_vsn_major: int; + xapi_vsn_minor: int; + export_vsn: int; (* 0 if missing, indicates eg whether to expect sha1sums in the stream *) + } (** An exported VM has a header record: *) -type header = - { version: version; - objects: obj list } +type header = + { version: version; + objects: obj list } exception Version_mismatch of string module D=Debug.Make(struct let name="importexport" end) open D -let find kvpairs where x = - if not(List.mem_assoc x kvpairs) - then raise (Failure (Printf.sprintf "Failed to find key '%s' in %s" x where)) - else List.assoc x kvpairs +let find kvpairs where x = + if not(List.mem_assoc x kvpairs) + then raise (Failure (Printf.sprintf "Failed to find key '%s' in %s" x where)) + else List.assoc x kvpairs let string_of_obj x = x.cls ^ " " ^ x.id @@ -52,11 +52,11 @@ let _id = "id" let _snapshot = "snapshot" let xmlrpc_of_obj x = XMLRPC.To.structure - [ _class, XMLRPC.To.string x.cls; - _id, XMLRPC.To.string x.id; - _snapshot, x.snapshot ] + [ _class, XMLRPC.To.string x.cls; + _id, XMLRPC.To.string x.id; + _snapshot, x.snapshot ] -let obj_of_xmlrpc x = +let obj_of_xmlrpc x = let kvpairs = XMLRPC.From.structure x in let find = find kvpairs "object data" in { cls = XMLRPC.From.string (find _class); @@ -64,9 +64,9 @@ let obj_of_xmlrpc x = snapshot = find _snapshot } (** Return a version 
struct corresponding to this host *) -let this_version __context = +let this_version __context = let host = Helpers.get_localhost ~__context in - let (_: API.host_t) = Db.Host.get_record ~__context ~self:host in + let (_: API.host_t) = Db.Host.get_record ~__context ~self:host in { hostname = Version.hostname; date = Version.date; product_version = Version.product_version (); @@ -79,9 +79,9 @@ let this_version __context = (** Raises an exception if a prospective import cannot be handled by this code. This will get complicated over time... *) -let assert_compatable ~__context other_version = +let assert_compatable ~__context other_version = let this_version = this_version __context in - let error() = + let error() = error "Import version is incompatible"; raise (Api_errors.Server_error(Api_errors.import_incompatible_version, [])) in (* error if major versions differ; also error if this host has a @@ -103,7 +103,7 @@ let xmlrpc_of_version x = ] exception Failure of string -let version_of_xmlrpc x = +let version_of_xmlrpc x = let kvpairs = XMLRPC.From.structure x in let find = find kvpairs "version data" in { hostname = XMLRPC.From.string (find _hostname); @@ -119,13 +119,13 @@ let version_of_xmlrpc x = let _version = "version" let _objects = "objects" -let xmlrpc_of_header x = +let xmlrpc_of_header x = XMLRPC.To.structure [ _version, xmlrpc_of_version x.version; _objects, XMLRPC.To.array (List.map xmlrpc_of_obj x.objects); ] -let header_of_xmlrpc x = +let header_of_xmlrpc x = let kvpairs = XMLRPC.From.structure x in let find = find kvpairs "contents data" in { version = version_of_xmlrpc (find _version); @@ -137,7 +137,7 @@ let vm_has_field ~(x: obj) ~name = List.mem_assoc name structure (* This function returns true when the VM record was created pre-ballooning. *) -let vm_exported_pre_dmc (x: obj) = +let vm_exported_pre_dmc (x: obj) = (* The VM.parent field was added in rel_midnight_ride, at the same time as ballooning. 
XXX: Replace this with something specific to the ballooning feature if possible. *) not (vm_has_field ~x ~name:"parent") @@ -150,53 +150,53 @@ let content_type = Http.Hdr.content_type ^ ": application/octet-stream" let xmlrpc_of_checksum_table table = API.Legacy.To.string_to_string_map table let checksum_table_of_xmlrpc xml = API.Legacy.From.string_to_string_map "" xml -let compare_checksums a b = +let compare_checksums a b = let success = ref true in List.iter (fun (filename, csum) -> - if List.mem_assoc filename b - then (let expected = List.assoc filename b in - if csum <> expected - then begin - error "File %s checksum mismatch (%s <> %s)" filename csum expected; - success := false - end - else debug "File %s checksum ok (%s = %s)" filename csum expected; - ) - else begin - error "Missing checksum for file %s (expected %s)" filename csum; - success := false; - end) a; + if List.mem_assoc filename b + then (let expected = List.assoc filename b in + if csum <> expected + then begin + error "File %s checksum mismatch (%s <> %s)" filename csum expected; + success := false + end + else debug "File %s checksum ok (%s = %s)" filename csum expected; + ) + else begin + error "Missing checksum for file %s (expected %s)" filename csum; + success := false; + end) a; !success -let get_default_sr rpc session_id = +let get_default_sr rpc session_id = let pool = List.hd (Client.Pool.get_all rpc session_id) in let sr = Client.Pool.get_default_SR rpc session_id pool in - try ignore(Client.SR.get_uuid rpc session_id sr); sr + try ignore(Client.SR.get_uuid rpc session_id sr); sr with _ -> raise (Api_errors.Server_error(Api_errors.default_sr_not_found, [ Ref.string_of sr ])) -(** Check that the SR is visible on the specified host *) +(** Check that the SR is visible on the specified host *) let check_sr_availability_host ~__context sr host = - try + try ignore(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:[sr] ~host); true with _ -> false - + let 
check_sr_availability ~__context sr = let localhost = Helpers.get_localhost ~__context in check_sr_availability_host ~__context sr localhost - + let find_host_for_sr ~__context ?(prefer_slaves=false) sr = - let choose_fn ~host = - Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:[sr] ~host in + let choose_fn ~host = + Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:[sr] ~host in Xapi_vm_helpers.choose_host ~__context ~choose_fn ~prefer_slaves () let check_vm_host_SRs ~__context vm host = - try + try Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm ~host; Xapi_vm_helpers.assert_host_is_live ~__context ~host; true - with - _ -> false + with + _ -> false let find_host_for_VM ~__context vm = Xapi_vm_helpers.choose_host ~__context ~vm:vm ~choose_fn:(Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm) () @@ -204,160 +204,160 @@ let find_host_for_VM ~__context vm = (* On any import error, we try to cleanup the bits we have created *) type cleanup_stack = (Context.t -> (Rpc.call -> Rpc.response) -> API.ref_session -> unit) list -let cleanup (x: cleanup_stack) = +let cleanup (x: cleanup_stack) = (* Always perform the cleanup with a fresh login + context to prevent problems with any user-supplied one being invalidated *) Server_helpers.exec_with_new_task "VM.import (cleanup)" ~task_in_database:true (fun __context -> Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter (fun action -> - Helpers.log_exn_continue "executing cleanup action" (action __context rpc) session_id) x - ) + (fun rpc session_id -> + List.iter (fun action -> + Helpers.log_exn_continue "executing cleanup action" (action __context rpc) session_id) x + ) ) open Stdext.Pervasiveext type vm_export_import = { - vm: API.ref_VM; - dry_run: bool; - live: bool; - send_snapshots: bool; + vm: API.ref_VM; + dry_run: bool; + live: bool; + send_snapshots: bool; } (* Copy VM metadata to a remote pool *) let remote_metadata_export_import ~__context ~rpc 
~session_id ~remote_address ~restore which = - let subtask_of = (Ref.string_of (Context.get_task_id __context)) in - - let open Xmlrpc_client in - - let local_export_request = match which with - | `All -> "all=true" - | `Only {vm=vm; send_snapshots=send_snapshots} -> Printf.sprintf "export_snapshots=%b&ref=%s" send_snapshots (Ref.string_of vm) in - - let remote_import_request = - let params = match which with - | `All -> [] - | `Only {vm=vm; live=live; dry_run=dry_run; send_snapshots=send_snapshots} -> [Printf.sprintf "live=%b" live; Printf.sprintf "dry_run=%b" dry_run; Printf.sprintf "export_snapshots=%b" send_snapshots] in - let params = Printf.sprintf "restore=%b" restore :: params in - Printf.sprintf "%s?%s" Constants.import_metadata_uri (String.concat "&" params) in - - Helpers.call_api_functions ~__context (fun my_rpc my_session_id -> - let get = Xapi_http.http_request ~version:"1.0" ~subtask_of - ~cookie:["session_id", Ref.string_of my_session_id] ~keep_alive:false - Http.Get - (Printf.sprintf "%s?%s" Constants.export_metadata_uri local_export_request) in - let remote_task = Client.Task.create rpc session_id "VM metadata import" "" in - finally - (fun () -> - let put = Xapi_http.http_request ~version:"1.0" ~subtask_of - ~cookie:[ - "session_id", Ref.string_of session_id; - "task_id", Ref.string_of remote_task - ] ~keep_alive:false - Http.Put remote_import_request in - debug "Piping HTTP %s to %s" (Http.Request.to_string get) (Http.Request.to_string put); - with_transport (Unix Xapi_globs.unix_domain_socket) - (with_http get - (fun (r, ifd) -> - debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); - let put = { put with Http.Request.content_length = r.Http.Response.content_length } in - debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; - (* Spawn a cached stunnel instance. 
Otherwise, once metadata tranmission completes, the connection - between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance - will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the - ripple effect is that remote xapi fails to parse metadata xml file. Using a cached stunnel can - not always avoid the problem since any cached stunnel entry might be evicted. However, it is - unlikely to happen in practice because the cache size is large enough.*) - with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) - (with_http put - (fun (_, ofd) -> - let (n: int64) = Stdext.Unixext.copy_file ?limit:r.Http.Response.content_length ifd ofd in - debug "Written %Ld bytes" n - ) - ) - ) - ); - (* Wait for remote task to succeed or fail *) - Cli_util.wait_for_task_completion rpc session_id remote_task; - match Client.Task.get_status rpc session_id remote_task with - | `cancelling | `cancelled -> - raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of remote_task ])) - | `pending -> - failwith "wait_for_task_completion failed; task is still pending" - | `failure -> begin - let error_info = Client.Task.get_error_info rpc session_id remote_task in - match error_info with - | code :: params when Hashtbl.mem Datamodel.errors code -> - raise (Api_errors.Server_error(code, params)) - | _ -> failwith (Printf.sprintf "VM metadata import failed: %s" (String.concat " " error_info)); - end - | `success -> begin - debug "Remote metadata import succeeded"; - let result = Client.Task.get_result rpc session_id remote_task in - API.Legacy.From.ref_VM_set "" (Xml.parse_string result) - end - ) - (fun () -> Client.Task.destroy rpc session_id remote_task ) - ) + let subtask_of = (Ref.string_of (Context.get_task_id __context)) in + + let open Xmlrpc_client in + + let local_export_request = match which with + | `All -> "all=true" + | `Only {vm=vm; 
send_snapshots=send_snapshots} -> Printf.sprintf "export_snapshots=%b&ref=%s" send_snapshots (Ref.string_of vm) in + + let remote_import_request = + let params = match which with + | `All -> [] + | `Only {vm=vm; live=live; dry_run=dry_run; send_snapshots=send_snapshots} -> [Printf.sprintf "live=%b" live; Printf.sprintf "dry_run=%b" dry_run; Printf.sprintf "export_snapshots=%b" send_snapshots] in + let params = Printf.sprintf "restore=%b" restore :: params in + Printf.sprintf "%s?%s" Constants.import_metadata_uri (String.concat "&" params) in + + Helpers.call_api_functions ~__context (fun my_rpc my_session_id -> + let get = Xapi_http.http_request ~version:"1.0" ~subtask_of + ~cookie:["session_id", Ref.string_of my_session_id] ~keep_alive:false + Http.Get + (Printf.sprintf "%s?%s" Constants.export_metadata_uri local_export_request) in + let remote_task = Client.Task.create rpc session_id "VM metadata import" "" in + finally + (fun () -> + let put = Xapi_http.http_request ~version:"1.0" ~subtask_of + ~cookie:[ + "session_id", Ref.string_of session_id; + "task_id", Ref.string_of remote_task + ] ~keep_alive:false + Http.Put remote_import_request in + debug "Piping HTTP %s to %s" (Http.Request.to_string get) (Http.Request.to_string put); + with_transport (Unix Xapi_globs.unix_domain_socket) + (with_http get + (fun (r, ifd) -> + debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); + let put = { put with Http.Request.content_length = r.Http.Response.content_length } in + debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; + (* Spawn a cached stunnel instance. Otherwise, once metadata tranmission completes, the connection + between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance + will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the + ripple effect is that remote xapi fails to parse metadata xml file. 
Using a cached stunnel can + not always avoid the problem since any cached stunnel entry might be evicted. However, it is + unlikely to happen in practice because the cache size is large enough.*) + with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) + (with_http put + (fun (_, ofd) -> + let (n: int64) = Stdext.Unixext.copy_file ?limit:r.Http.Response.content_length ifd ofd in + debug "Written %Ld bytes" n + ) + ) + ) + ); + (* Wait for remote task to succeed or fail *) + Cli_util.wait_for_task_completion rpc session_id remote_task; + match Client.Task.get_status rpc session_id remote_task with + | `cancelling | `cancelled -> + raise (Api_errors.Server_error(Api_errors.task_cancelled, [ Ref.string_of remote_task ])) + | `pending -> + failwith "wait_for_task_completion failed; task is still pending" + | `failure -> begin + let error_info = Client.Task.get_error_info rpc session_id remote_task in + match error_info with + | code :: params when Hashtbl.mem Datamodel.errors code -> + raise (Api_errors.Server_error(code, params)) + | _ -> failwith (Printf.sprintf "VM metadata import failed: %s" (String.concat " " error_info)); + end + | `success -> begin + debug "Remote metadata import succeeded"; + let result = Client.Task.get_result rpc session_id remote_task in + API.Legacy.From.ref_VM_set "" (Xml.parse_string result) + end + ) + (fun () -> Client.Task.destroy rpc session_id remote_task ) + ) let vdi_of_req ~__context (req: Http.Request.t) = - let all = req.Http.Request.query @ req.Http.Request.cookie in - let vdi = - if List.mem_assoc "vdi" all - then List.assoc "vdi" all - else raise (Failure "Missing vdi query parameter") in - if Db.is_valid_ref __context (Ref.of_string vdi) - then Ref.of_string vdi - else Db.VDI.get_by_uuid ~__context ~uuid:vdi + let all = req.Http.Request.query @ req.Http.Request.cookie in + let vdi = + if List.mem_assoc "vdi" all + then List.assoc "vdi" all + else raise (Failure "Missing vdi query 
parameter") in + if Db.is_valid_ref __context (Ref.of_string vdi) + then Ref.of_string vdi + else Db.VDI.get_by_uuid ~__context ~uuid:vdi let base_vdi_of_req ~__context (req: Http.Request.t) = - let all = req.Http.Request.query @ req.Http.Request.cookie in - if List.mem_assoc "base" all then begin - let base = List.assoc "base" all in - Some (if Db.is_valid_ref __context (Ref.of_string base) - then Ref.of_string base - else Db.VDI.get_by_uuid ~__context ~uuid:base) - end else None + let all = req.Http.Request.query @ req.Http.Request.cookie in + if List.mem_assoc "base" all then begin + let base = List.assoc "base" all in + Some (if Db.is_valid_ref __context (Ref.of_string base) + then Ref.of_string base + else Db.VDI.get_by_uuid ~__context ~uuid:base) + end else None module Format = struct - type t = - | Raw - | Vhd - - let to_string = function - | Raw -> "raw" - | Vhd -> "vhd" - - let of_string x = match String.lowercase x with - | "raw" -> Some Raw - | "vhd" -> Some Vhd - | _ -> None - - let filename ~__context vdi format = - Printf.sprintf "%s.%s" - (Db.VDI.get_uuid ~__context ~self:vdi) - (to_string format) - - let content_type = function - | Raw -> "application/octet-stream" - | Vhd -> "application/vhd" - - let _key = "format" - - let of_req (req: Http.Request.t) = - let all = req.Http.Request.query @ req.Http.Request.cookie in - if List.mem_assoc _key all then begin - let x = List.assoc _key all in - match of_string x with - | Some x -> `Ok x - | None -> `Unknown x - end else `Ok Raw (* default *) + type t = + | Raw + | Vhd + + let to_string = function + | Raw -> "raw" + | Vhd -> "vhd" + + let of_string x = match String.lowercase x with + | "raw" -> Some Raw + | "vhd" -> Some Vhd + | _ -> None + + let filename ~__context vdi format = + Printf.sprintf "%s.%s" + (Db.VDI.get_uuid ~__context ~self:vdi) + (to_string format) + + let content_type = function + | Raw -> "application/octet-stream" + | Vhd -> "application/vhd" + + let _key = "format" + + let of_req 
(req: Http.Request.t) = + let all = req.Http.Request.query @ req.Http.Request.cookie in + if List.mem_assoc _key all then begin + let x = List.assoc _key all in + match of_string x with + | Some x -> `Ok x + | None -> `Unknown x + end else `Ok Raw (* default *) end let return_302_redirect (req: Http.Request.t) s address = - let url = Printf.sprintf "%s://%s%s?%s" (if Context.is_unencrypted s then "http" else "https") address req.Http.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Http.Request.query)) in - let headers = Http.http_302_redirect url in - debug "HTTP 302 redirect to: %s" url; - Http_svr.headers s headers + let url = Printf.sprintf "%s://%s%s?%s" (if Context.is_unencrypted s then "http" else "https") address req.Http.Request.uri (String.concat "&" (List.map (fun (a,b) -> a^"="^b) req.Http.Request.query)) in + let headers = Http.http_302_redirect url in + debug "HTTP 302 redirect to: %s" url; + Http_svr.headers s headers diff --git a/ocaml/xapi/ipq.ml b/ocaml/xapi/ipq.ml index dd5913e6788..aabc14bfd83 100644 --- a/ocaml/xapi/ipq.ml +++ b/ocaml/xapi/ipq.ml @@ -14,27 +14,27 @@ (* Imperative priority queue *) type 'a event = { ev: 'a; - time: float } + time: float } type 'a t = {mutable size : int; mutable data : 'a event array } - + exception EmptyHeap let create n = if n<=0 then invalid_arg "create" else - { size = -n; data=[| |] } + { size = -n; data=[| |] } let is_empty h = h.size <= 0 - + let resize h = let n = h.size in assert (n>0); let n'=2*n in let d = h.data in let d' = Array.create n' d.(0) in - Array.blit d 0 d' 0 n; - h.data <- d' + Array.blit d 0 d' 0 n; + h.data <- d' let add h x = (* first addition: we allocate the array *) @@ -56,11 +56,11 @@ let add h x = in moveup n; h.size <- n + 1 - + let maximum h = if h.size <= 0 then raise EmptyHeap; h.data.(0) - + let remove h s = if h.size <= 0 then raise EmptyHeap; let n = h.size - 1 in @@ -71,15 +71,15 @@ let remove h s = let rec movedown i = let j = 2 * i + 1 in if j < n then 
- let j = - let j' = j + 1 in - if j' < n && (d.(j').time < d.(j).time) then j' else j + let j = + let j' = j + 1 in + if j' < n && (d.(j').time < d.(j).time) then j' else j in - if (d.(j).time < x.time) then begin - d.(i) <- d.(j); - movedown j + if (d.(j).time < x.time) then begin + d.(i) <- d.(j); + movedown j end else - d.(i) <- x + d.(i) <- x else d.(i) <- x in @@ -91,20 +91,20 @@ let find h ev = else if ev = h.data.(n).ev then n else iter (n-1) in iter (h.size-1) - + let find_p h f = let rec iter n = if n < 0 then -1 else if f h.data.(n).ev then n else iter (n-1) in iter (h.size-1) - + let pop_maximum h = let m = maximum h in remove h 0; m - -let iter f h = + +let iter f h = let d = h.data in for i = 0 to h.size - 1 do f d.(i) done - + let fold f h x0 = let n = h.size in let d = h.data in @@ -122,7 +122,7 @@ let _ = done; for i=0 to 49 do let xx=find test i in - remove test xx + remove test xx done; (* remove test xx;*) for i=0 to 49 do diff --git a/ocaml/xapi/license_check.ml b/ocaml/xapi/license_check.ml index 251bc29129e..f4cc39c0e3d 100644 --- a/ocaml/xapi/license_check.ml +++ b/ocaml/xapi/license_check.ml @@ -14,27 +14,27 @@ module L = Debug.Make(struct let name="license" end) let never, _ = - let start_of_epoch = Unix.gmtime 0. in - Unix.mktime {start_of_epoch with Unix.tm_year = 130} + let start_of_epoch = Unix.gmtime 0. 
in + Unix.mktime {start_of_epoch with Unix.tm_year = 130} let get_expiry_date ~__context ~host = - let license = Db.Host.get_license_params ~__context ~self:host in - if List.mem_assoc "expiry" license - then Some (Stdext.Date.of_string (List.assoc "expiry" license)) - else None + let license = Db.Host.get_license_params ~__context ~self:host in + if List.mem_assoc "expiry" license + then Some (Stdext.Date.of_string (List.assoc "expiry" license)) + else None let check_expiry ~__context ~host = - let expired = - match get_expiry_date ~__context ~host with - | None -> false (* No expiry date means no expiry :) *) - | Some date -> Unix.time () > (Stdext.Date.to_float date) - in - if expired then raise (Api_errors.Server_error (Api_errors.license_expired, [])) + let expired = + match get_expiry_date ~__context ~host with + | None -> false (* No expiry date means no expiry :) *) + | Some date -> Unix.time () > (Stdext.Date.to_float date) + in + if expired then raise (Api_errors.Server_error (Api_errors.license_expired, [])) let vm ~__context vm = - (* Here we check that the license is still valid - this should be the only place where this happens *) - let host = Helpers.get_localhost ~__context in - check_expiry ~__context ~host + (* Here we check that the license is still valid - this should be the only place where this happens *) + let host = Helpers.get_localhost ~__context in + check_expiry ~__context ~host (* XXX: why use a "with_" style function here? *) let with_vm_license_check ~__context v f = diff --git a/ocaml/xapi/license_check.mli b/ocaml/xapi/license_check.mli index 13997177358..d2efbed1119 100644 --- a/ocaml/xapi/license_check.mli +++ b/ocaml/xapi/license_check.mli @@ -11,10 +11,10 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * Verifying whether the current license is still valid * @group Licensing - *) +*) (** The expiry date that is considered to be "never". 
*) val never : float diff --git a/ocaml/xapi/local_work_queue.ml b/ocaml/xapi/local_work_queue.ml index cddd8d28aed..68741c3eaa9 100644 --- a/ocaml/xapi/local_work_queue.ml +++ b/ocaml/xapi/local_work_queue.ml @@ -21,11 +21,11 @@ let m = Mutex.create () let c = Condition.create () -let vm_lifecycle_queue_process_fn f = +let vm_lifecycle_queue_process_fn f = Mutex.execute m (fun () -> while not !vm_lifecycle_queue_started do Condition.wait c m done); f () -let start_vm_lifecycle_queue () = +let start_vm_lifecycle_queue () = Mutex.execute m (fun () -> vm_lifecycle_queue_started := true; Condition.signal c) (* NB VM.start for PV guests performs VBD.unplug operations which require the dom0 device resync ops @@ -33,7 +33,7 @@ let start_vm_lifecycle_queue () = (** Put "long running/streaming operations" into their own queue, so vm lifecycle ops can be parallelized with them *) let long_running_queue = Thread_queue.make ~name:"long_running_op" vm_lifecycle_queue_process_fn - + (** VM.{start,shutdown,copy,clone} etc are queued here *) let normal_vm_queue = Thread_queue.make ~name:"vm_lifecycle_op" vm_lifecycle_queue_process_fn @@ -47,27 +47,27 @@ open Pervasiveext (** Join a given queue and execute the function 'f' when its our turn. Actually perform the computation in this thread so we can return a result. 
*) -let wait_in_line q description f = +let wait_in_line q description f = let m = Mutex.create () in let c = Condition.create () in let state = ref `Pending in Locking_helpers.Thread_state.waiting_for (Locking_helpers.Lock q.Thread_queue.name); let ok = q.Thread_queue.push_fn description - (fun () -> - (* Signal the mothership to run the computation now *) - Mutex.execute m - (fun () -> - state := `Running; - Condition.signal c - ); - (* Wait for the computation to complete *) - Mutex.execute m (fun () -> while !state = `Running do Condition.wait c m done) - ) in + (fun () -> + (* Signal the mothership to run the computation now *) + Mutex.execute m + (fun () -> + state := `Running; + Condition.signal c + ); + (* Wait for the computation to complete *) + Mutex.execute m (fun () -> while !state = `Running do Condition.wait c m done) + ) in assert ok; (* queue has no length limit *) (* Wait for the signal from the queue processor *) Mutex.execute m (fun () -> while !state = `Pending do Condition.wait c m done); Locking_helpers.Thread_state.acquired (Locking_helpers.Lock q.Thread_queue.name); - finally f - (fun () -> - Locking_helpers.Thread_state.released (Locking_helpers.Lock q.Thread_queue.name); - Mutex.execute m (fun () -> state := `Finished; Condition.signal c)) + finally f + (fun () -> + Locking_helpers.Thread_state.released (Locking_helpers.Lock q.Thread_queue.name); + Mutex.execute m (fun () -> state := `Finished; Condition.signal c)) diff --git a/ocaml/xapi/localdb.ml b/ocaml/xapi/localdb.ml index d908b6dc7d4..450373a9399 100644 --- a/ocaml/xapi/localdb.ml +++ b/ocaml/xapi/localdb.ml @@ -21,31 +21,31 @@ module D=Debug.Make(struct let name="localdb" end) open D let db = Hashtbl.create 10 (* in-memory cache *) -let loaded = ref false +let loaded = ref false let to_db (output: Xmlm.output) = Hashtbl_xml.to_xml db output -let of_db (input: Xmlm.input) = +let of_db (input: Xmlm.input) = let db' = Hashtbl_xml.of_xml input in Hashtbl.clear db; Hashtbl.iter (fun k 
v -> Hashtbl.add db k v) db' - + let assert_loaded () = - if not(!loaded) then begin + if not(!loaded) then begin try ignore(Unix.stat Xapi_globs.local_database); let ic = open_in Xapi_globs.local_database in - finally - (fun () -> of_db (Xmlm.make_input (`Channel ic)); loaded := true) - (fun () -> close_in ic); + finally + (fun () -> of_db (Xmlm.make_input (`Channel ic)); loaded := true) + (fun () -> close_in ic); Hashtbl.iter (fun k v -> debug "%s = %s" k v) db - with - | Unix.Unix_error (Unix.ENOENT, _, _) -> - debug "Local database %s doesn't currently exist. Continuing." Xapi_globs.local_database - | Xmlm.Error _ -> - debug "Xml error processing local database %s. Moving it out of the way." Xapi_globs.local_database; - let corrupt_fname = Xapi_globs.local_database^".corrupt" in - Stdext.Unixext.unlink_safe corrupt_fname; - Unix.rename Xapi_globs.local_database corrupt_fname + with + | Unix.Unix_error (Unix.ENOENT, _, _) -> + debug "Local database %s doesn't currently exist. Continuing." Xapi_globs.local_database + | Xmlm.Error _ -> + debug "Xml error processing local database %s. Moving it out of the way." 
Xapi_globs.local_database; + let corrupt_fname = Xapi_globs.local_database^".corrupt" in + Stdext.Unixext.unlink_safe corrupt_fname; + Unix.rename Xapi_globs.local_database corrupt_fname end exception Missing_key of string @@ -58,35 +58,35 @@ let get (key: string) = (fun () -> assert_loaded (); try - Hashtbl.find db key + Hashtbl.find db key with Not_found -> raise (Missing_key key) ) -let get_with_default (key: string) (default: string) = +let get_with_default (key: string) (default: string) = try - get key + get key with Missing_key _ -> default (* Returns true if a change was made and should be flushed *) -let put_one (key: string) (v: string) = +let put_one (key: string) (v: string) = if Hashtbl.mem db key && Hashtbl.find db key = v then false (* no change necessary *) else (Hashtbl.replace db key v; true) -let flush () = +let flush () = let b = Buffer.create 256 in to_db (Xmlm.make_output (`Buffer b)); let s = Buffer.contents b in Stdext.Unixext.write_string_to_file Xapi_globs.local_database s -let put (key: string) (v: string) = +let put (key: string) (v: string) = Mutex.execute m (fun () -> assert_loaded (); if put_one key v; then flush ()) -let putv (all: (string * string) list) = +let putv (all: (string * string) list) = Mutex.execute m (fun () -> assert_loaded (); @@ -95,8 +95,8 @@ let putv (all: (string * string) list) = then flush ()) let del (key : string) = - Mutex.execute m + Mutex.execute m (fun () -> - assert_loaded (); - Hashtbl.remove db key; (* Does nothing if the key isn't there *) - flush ()) + assert_loaded (); + Hashtbl.remove db key; (* Does nothing if the key isn't there *) + flush ()) diff --git a/ocaml/xapi/localdb.mli b/ocaml/xapi/localdb.mli index ed000468e3c..aacce59c896 100644 --- a/ocaml/xapi/localdb.mli +++ b/ocaml/xapi/localdb.mli @@ -21,8 +21,8 @@ exception Missing_key of string (** Retrieves a value *) val get: string -> string -(** [get_with_default key default] returns the value associated with [key], - or [default] if the key 
is missing. *) +(** [get_with_default key default] returns the value associated with [key], + or [default] if the key is missing. *) val get_with_default: string -> string -> string (** Inserts a value into the database, only returns when the insertion has diff --git a/ocaml/xapi/map_check.ml b/ocaml/xapi/map_check.ml index a02eb81fe23..679889f020b 100644 --- a/ocaml/xapi/map_check.ml +++ b/ocaml/xapi/map_check.ml @@ -1,47 +1,47 @@ (* Represents a key-value pair and its allowed values. *) type requirement = { - key : string; - default_value : string option; - is_valid_value : string -> bool; + key : string; + default_value : string option; + is_valid_value : string -> bool; } (* Check that a key-value pair is present for each requirement. *) (* If any are not, add the default value. *) let add_defaults requirements kvpairs = - let key_is_present requirement = List.exists - (fun (key, _) -> key = requirement.key) - kvpairs - in - List.fold_left - (fun acc requirement -> - if key_is_present requirement - then acc - else match requirement.default_value with - | None -> acc - | Some default_value -> - (requirement.key, default_value)::acc) - kvpairs requirements + let key_is_present requirement = List.exists + (fun (key, _) -> key = requirement.key) + kvpairs + in + List.fold_left + (fun acc requirement -> + if key_is_present requirement + then acc + else match requirement.default_value with + | None -> acc + | Some default_value -> + (requirement.key, default_value)::acc) + kvpairs requirements (* Validate a key-value pair against a list of requirements. *) let validate_kvpair field_name requirements (key, value) = - let fail () = raise Api_errors.(Server_error - (invalid_value, [field_name; Printf.sprintf "%s = %s" key value])) - in - (* Try to find a required property requirement with this name. 
*) - let requirement = - try - List.find - (fun requirement -> requirement.key = key) - requirements - with Not_found -> fail () - in - (* Check whether the proposed value for this property is allowed. *) - if not (requirement.is_valid_value value) then fail () + let fail () = raise Api_errors.(Server_error + (invalid_value, [field_name; Printf.sprintf "%s = %s" key value])) + in + (* Try to find a required property requirement with this name. *) + let requirement = + try + List.find + (fun requirement -> requirement.key = key) + requirements + with Not_found -> fail () + in + (* Check whether the proposed value for this property is allowed. *) + if not (requirement.is_valid_value value) then fail () (** Combinators for validated map access *) type 'a pickler = (string -> 'a) * ('a -> string) -let pickler : (string -> 'a) -> ('a -> string) -> 'a pickler = - fun of_string to_string -> (of_string, to_string) +let pickler : (string -> 'a) -> ('a -> string) -> 'a pickler = + fun of_string to_string -> (of_string, to_string) let string : string pickler = (fun x -> x), (fun x -> x) let int : int pickler = int_of_string, string_of_int @@ -51,25 +51,25 @@ let cons x xs = x :: xs type assoc_list = (string * string) list type 'a field = (assoc_list -> 'a) * ('a -> assoc_list -> assoc_list) -let field : string -> 'a pickler -> 'a field = - fun name (of_string, to_string) -> - (fun assoc_list -> assoc_list - |> List.assoc name - |> of_string), - (fun value assoc_list -> assoc_list - |> List.remove_assoc name - |> cons (name, to_string value)) +let field : string -> 'a pickler -> 'a field = + fun name (of_string, to_string) -> + (fun assoc_list -> assoc_list + |> List.assoc name + |> of_string), + (fun value assoc_list -> assoc_list + |> List.remove_assoc name + |> cons (name, to_string value)) let getf : ?default:'a -> 'a field -> assoc_list -> 'a = - fun ?default (of_string, _) record -> - try of_string record - with Not_found as e -> - Backtrace.is_important e; - match 
default with - | None -> - raise e - | Some d -> d + fun ?default (of_string, _) record -> + try of_string record + with Not_found as e -> + Backtrace.is_important e; + match default with + | None -> + raise e + | Some d -> d -let setf : 'a field -> 'a -> assoc_list -> assoc_list = - fun (_, to_string) value record -> to_string value record +let setf : 'a field -> 'a -> assoc_list -> assoc_list = + fun (_, to_string) value record -> to_string value record diff --git a/ocaml/xapi/memory_check.ml b/ocaml/xapi/memory_check.ml index 3772f44be6e..ad2121f2e86 100644 --- a/ocaml/xapi/memory_check.ml +++ b/ocaml/xapi/memory_check.ml @@ -22,235 +22,235 @@ let ( /// ) = Int64.div (** Calculates the amounts of 'normal' and 'shadow' host memory needed *) (** to run the given guest with the given amount of guest memory. *) let vm_compute_required_memory vm_record guest_memory_kib = - let vcpu_count = Int64.to_int vm_record.API.vM_VCPUs_max in - let multiplier = - if Helpers.is_hvm vm_record - then vm_record.API.vM_HVM_shadow_multiplier - else XenopsMemory.Linux.shadow_multiplier_default in - let target_mib = XenopsMemory.mib_of_kib_used guest_memory_kib in - let max_mib = XenopsMemory.mib_of_bytes_used vm_record.API.vM_memory_static_max in - let footprint_mib = ( - if Helpers.is_hvm vm_record - then XenopsMemory.HVM.footprint_mib - else XenopsMemory.Linux.footprint_mib) - target_mib max_mib vcpu_count multiplier in - let shadow_mib = ( - if Helpers.is_hvm vm_record - then XenopsMemory.HVM.shadow_mib - else XenopsMemory.Linux.shadow_mib) - max_mib vcpu_count multiplier in - let normal_mib = - footprint_mib --- shadow_mib in - let normal_bytes = XenopsMemory.bytes_of_mib normal_mib in - let shadow_bytes = XenopsMemory.bytes_of_mib shadow_mib in - (normal_bytes, shadow_bytes) + let vcpu_count = Int64.to_int vm_record.API.vM_VCPUs_max in + let multiplier = + if Helpers.is_hvm vm_record + then vm_record.API.vM_HVM_shadow_multiplier + else XenopsMemory.Linux.shadow_multiplier_default 
in + let target_mib = XenopsMemory.mib_of_kib_used guest_memory_kib in + let max_mib = XenopsMemory.mib_of_bytes_used vm_record.API.vM_memory_static_max in + let footprint_mib = ( + if Helpers.is_hvm vm_record + then XenopsMemory.HVM.footprint_mib + else XenopsMemory.Linux.footprint_mib) + target_mib max_mib vcpu_count multiplier in + let shadow_mib = ( + if Helpers.is_hvm vm_record + then XenopsMemory.HVM.shadow_mib + else XenopsMemory.Linux.shadow_mib) + max_mib vcpu_count multiplier in + let normal_mib = + footprint_mib --- shadow_mib in + let normal_bytes = XenopsMemory.bytes_of_mib normal_mib in + let shadow_bytes = XenopsMemory.bytes_of_mib shadow_mib in + (normal_bytes, shadow_bytes) (** Different users will wish to use a different VM accounting policy, depending -on how conservative or liberal they are. *) + on how conservative or liberal they are. *) type accounting_policy = - | Static_max - (** use static_max: conservative: useful for HA. *) - | Dynamic_max - (** use dynamic_max: fairly conservative: useful for dom0 for HA. *) - | Dynamic_min - (** use dynamic_min: liberal: assumes that guests always co-operate. *) + | Static_max + (** use static_max: conservative: useful for HA. *) + | Dynamic_max + (** use dynamic_max: fairly conservative: useful for dom0 for HA. *) + | Dynamic_min + (** use dynamic_min: liberal: assumes that guests always co-operate. *) (** Common logic of vm_compute_start_memory and vm_compute_used_memory *) let choose_memory_required ~policy ~memory_dynamic_min ~memory_dynamic_max ~memory_static_max = - match policy with - | Dynamic_min -> memory_dynamic_min - | Dynamic_max -> memory_dynamic_max - | Static_max -> memory_static_max + match policy with + | Dynamic_min -> memory_dynamic_min + | Dynamic_max -> memory_dynamic_max + | Static_max -> memory_static_max (** Calculates the amount of memory required in both 'normal' and 'shadow' -memory, to start a VM. 
If the given VM is a PV guest and if memory ballooning -is enabled, this function returns values derived from the VM's dynamic memory -target (since PV guests are able to start in a pre-ballooned state). If memory -ballooning is not enabled or if the VM is an HVM guest, this function returns -values derived from the VM's static memory maximum (since currently HVM guests -are not able to start in a pre-ballooned state). *) + memory, to start a VM. If the given VM is a PV guest and if memory ballooning + is enabled, this function returns values derived from the VM's dynamic memory + target (since PV guests are able to start in a pre-ballooned state). If memory + ballooning is not enabled or if the VM is an HVM guest, this function returns + values derived from the VM's static memory maximum (since currently HVM guests + are not able to start in a pre-ballooned state). *) let vm_compute_start_memory ~__context ?(policy=Dynamic_min) vm_record = - if Xapi_fist.disable_memory_checks () - then (0L, 0L) - else - let memory_required = choose_memory_required - ~policy: policy - ~memory_dynamic_min: vm_record.API.vM_memory_dynamic_min - ~memory_dynamic_max: vm_record.API.vM_memory_dynamic_max - ~memory_static_max: vm_record.API.vM_memory_static_max in - vm_compute_required_memory vm_record - (XenopsMemory.kib_of_bytes_used memory_required) + if Xapi_fist.disable_memory_checks () + then (0L, 0L) + else + let memory_required = choose_memory_required + ~policy: policy + ~memory_dynamic_min: vm_record.API.vM_memory_dynamic_min + ~memory_dynamic_max: vm_record.API.vM_memory_dynamic_max + ~memory_static_max: vm_record.API.vM_memory_static_max in + vm_compute_required_memory vm_record + (XenopsMemory.kib_of_bytes_used memory_required) (** Calculates the amount of memory required in both 'normal' and 'shadow' -memory, for a running VM. 
If the VM is currently subject to a memory balloon -operation, this function returns the maximum amount of memory that the VM will -need between now, and the point in future time when the operation completes. *) + memory, for a running VM. If the VM is currently subject to a memory balloon + operation, this function returns the maximum amount of memory that the VM will + need between now, and the point in future time when the operation completes. *) let vm_compute_used_memory ~__context policy vm_ref = - if Xapi_fist.disable_memory_checks () then 0L else - let vm_main_record = Db.VM.get_record ~__context ~self:vm_ref in - let vm_boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in + if Xapi_fist.disable_memory_checks () then 0L else + let vm_main_record = Db.VM.get_record ~__context ~self:vm_ref in + let vm_boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in - let memory_required = choose_memory_required - ~policy: policy - ~memory_dynamic_min: vm_main_record.API.vM_memory_dynamic_min - (* ToDo: Is vm_main_record or vm_boot_record the right thing for dynamic_max? *) - ~memory_dynamic_max: vm_main_record.API.vM_memory_dynamic_max - ~memory_static_max: vm_boot_record.API.vM_memory_static_max in - memory_required +++ vm_main_record.API.vM_memory_overhead + let memory_required = choose_memory_required + ~policy: policy + ~memory_dynamic_min: vm_main_record.API.vM_memory_dynamic_min + (* ToDo: Is vm_main_record or vm_boot_record the right thing for dynamic_max? 
*) + ~memory_dynamic_max: vm_main_record.API.vM_memory_dynamic_max + ~memory_static_max: vm_boot_record.API.vM_memory_static_max in + memory_required +++ vm_main_record.API.vM_memory_overhead let vm_compute_resume_memory ~__context vm_ref = - if Xapi_fist.disable_memory_checks () then 0L else - let vm_boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in - let (_, shadow_bytes) = vm_compute_required_memory - vm_boot_record vm_boot_record.API.vM_memory_static_max in - (* CA-31759: use the live target field for this *) - (* rather than the LBR to make upgrade easy. *) - let suspended_memory_usage_bytes = - Db.VM.get_memory_target ~__context ~self:vm_ref in - Int64.add suspended_memory_usage_bytes shadow_bytes + if Xapi_fist.disable_memory_checks () then 0L else + let vm_boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in + let (_, shadow_bytes) = vm_compute_required_memory + vm_boot_record vm_boot_record.API.vM_memory_static_max in + (* CA-31759: use the live target field for this *) + (* rather than the LBR to make upgrade easy. 
*) + let suspended_memory_usage_bytes = + Db.VM.get_memory_target ~__context ~self:vm_ref in + Int64.add suspended_memory_usage_bytes shadow_bytes let vm_compute_migrate_memory ~__context vm_ref = - if Xapi_fist.disable_memory_checks () then 0L else - let vm_record = Db.VM.get_record ~__context ~self:vm_ref in - let (_, shadow_bytes) = vm_compute_required_memory - vm_record vm_record.API.vM_memory_static_max in - (* Only used when in rolling upgrade mode (from a pre-ballooning product) *) - let current_memory_usage_bytes = vm_record.API.vM_memory_static_max in - Int64.add current_memory_usage_bytes shadow_bytes + if Xapi_fist.disable_memory_checks () then 0L else + let vm_record = Db.VM.get_record ~__context ~self:vm_ref in + let (_, shadow_bytes) = vm_compute_required_memory + vm_record vm_record.API.vM_memory_static_max in + (* Only used when in rolling upgrade mode (from a pre-ballooning product) *) + let current_memory_usage_bytes = vm_record.API.vM_memory_static_max in + Int64.add current_memory_usage_bytes shadow_bytes (** - The Pool master's view of the total memory and memory consumers on a host. - This doesn't take into account dynamic changes i.e. those caused by - ballooning. Therefore if we ask a question like, 'is there amount of - memory free to boot VM ' we will get one of 3 different answers: - 1. yes: - the sum of the static_max's of all VMs with domains + the request - is less than the total free. - 2. maybe: - depending on the behaviour of the balloon drivers in the guest we - may be able to free the memory. - 3. no: - the sum of the dynamic_min's of all the VMs with domains + the - request is more than the total free. + The Pool master's view of the total memory and memory consumers on a host. + This doesn't take into account dynamic changes i.e. those caused by + ballooning. Therefore if we ask a question like, 'is there amount of + memory free to boot VM ' we will get one of 3 different answers: + 1. 
yes: + the sum of the static_max's of all VMs with domains + the request + is less than the total free. + 2. maybe: + depending on the behaviour of the balloon drivers in the guest we + may be able to free the memory. + 3. no: + the sum of the dynamic_min's of all the VMs with domains + the + request is more than the total free. *) type host_memory_summary = { - (** The maximum amount of memory that guests can use on this host. *) - host_maximum_guest_memory_bytes: int64; - (** list of VMs which have a domain running here *) - resident: API.ref_VM list; - (** list of VMs which are in the process of having a domain created here *) - scheduled: API.ref_VM list; + (** The maximum amount of memory that guests can use on this host. *) + host_maximum_guest_memory_bytes: int64; + (** list of VMs which have a domain running here *) + resident: API.ref_VM list; + (** list of VMs which are in the process of having a domain created here *) + scheduled: API.ref_VM list; } open Db_filter_types (** Return a host's memory summary from live database contents. 
*) let get_host_memory_summary ~__context ~host = - let metrics = Db.Host.get_metrics ~__context ~self:host in - let host_memory_total_bytes = - Db.Host_metrics.get_memory_total ~__context ~self:metrics in - let host_memory_overhead_bytes = - Db.Host.get_memory_overhead ~__context ~self:host in - let host_maximum_guest_memory_bytes = - host_memory_total_bytes --- host_memory_overhead_bytes in - let resident = Db.VM.get_refs_where ~__context - ~expr:(Eq (Field "resident_on", Literal (Ref.string_of host))) in - let scheduled = Db.VM.get_refs_where ~__context - ~expr:(Eq (Field "scheduled_to_be_resident_on", Literal ( - Ref.string_of host))) in - { - host_maximum_guest_memory_bytes = host_maximum_guest_memory_bytes; - resident = resident; - scheduled = scheduled; - } + let metrics = Db.Host.get_metrics ~__context ~self:host in + let host_memory_total_bytes = + Db.Host_metrics.get_memory_total ~__context ~self:metrics in + let host_memory_overhead_bytes = + Db.Host.get_memory_overhead ~__context ~self:host in + let host_maximum_guest_memory_bytes = + host_memory_total_bytes --- host_memory_overhead_bytes in + let resident = Db.VM.get_refs_where ~__context + ~expr:(Eq (Field "resident_on", Literal (Ref.string_of host))) in + let scheduled = Db.VM.get_refs_where ~__context + ~expr:(Eq (Field "scheduled_to_be_resident_on", Literal ( + Ref.string_of host))) in + { + host_maximum_guest_memory_bytes = host_maximum_guest_memory_bytes; + resident = resident; + scheduled = scheduled; + } (** - Given a host's memory summary and a policy flag (i.e. whether to only - consider static_max or to consider dynamic balloon data) it returns the - amount of free memory on the host. + Given a host's memory summary and a policy flag (i.e. whether to only + consider static_max or to consider dynamic balloon data) it returns the + amount of free memory on the host. 
*) let host_compute_free_memory_with_policy ~__context summary policy = - let all_vms = summary.resident @ summary.scheduled in - let all_vm_memories = List.map (vm_compute_used_memory ~__context policy) - all_vms in - let total_vm_memory = List.fold_left Int64.add 0L all_vm_memories in - let host_mem_available = Int64.sub - summary.host_maximum_guest_memory_bytes total_vm_memory in - max 0L host_mem_available + let all_vms = summary.resident @ summary.scheduled in + let all_vm_memories = List.map (vm_compute_used_memory ~__context policy) + all_vms in + let total_vm_memory = List.fold_left Int64.add 0L all_vm_memories in + let host_mem_available = Int64.sub + summary.host_maximum_guest_memory_bytes total_vm_memory in + max 0L host_mem_available (** - Compute, from our managed data, how much memory is available on a host; this - takes into account both VMs that are resident_on the host and also VMs that - are scheduled_to_be_resident_on the host. + Compute, from our managed data, how much memory is available on a host; this + takes into account both VMs that are resident_on the host and also VMs that + are scheduled_to_be_resident_on the host. - If ignore_scheduled_vm is set then we do not consider this VM as having any - resources allocated via the scheduled_to_be_resident_on mechanism. This is - used to ensure that, when we're executing this function with a view to - starting a VM, v, and further that v is scheduled_to_be_resident on the - specified host, that we do not count the resources required for v twice. + If ignore_scheduled_vm is set then we do not consider this VM as having any + resources allocated via the scheduled_to_be_resident_on mechanism. This is + used to ensure that, when we're executing this function with a view to + starting a VM, v, and further that v is scheduled_to_be_resident on the + specified host, that we do not count the resources required for v twice. 
- If 'dump_stats=true' then we write to the debug log where we think the - memory is being used. + If 'dump_stats=true' then we write to the debug log where we think the + memory is being used. *) let host_compute_free_memory_with_maximum_compression - ?(dump_stats=false) ~__context ~host - ignore_scheduled_vm = - (* + ?(dump_stats=false) ~__context ~host + ignore_scheduled_vm = + (* Compute host free memory from what is actually running. Don't rely on reported free memory, since this is an asychronously-computed metric that's liable to change or be out of date. *) - let summary = get_host_memory_summary ~__context ~host in - (* + let summary = get_host_memory_summary ~__context ~host in + (* When we're considering starting ourselves, and the host has reserved resources ready for us, then we need to make sure we don't count these reserved resources twice. *) - let summary = { summary with scheduled = - match ignore_scheduled_vm with - | None -> summary.scheduled (* no change *) - | Some ignore_me -> - List.filter (fun x -> x <> ignore_me) summary.scheduled - } in - let host_mem_available = host_compute_free_memory_with_policy - ~__context summary Dynamic_min (* consider ballooning *) in + let summary = { summary with scheduled = + match ignore_scheduled_vm with + | None -> summary.scheduled (* no change *) + | Some ignore_me -> + List.filter (fun x -> x <> ignore_me) summary.scheduled + } in + let host_mem_available = host_compute_free_memory_with_policy + ~__context summary Dynamic_min (* consider ballooning *) in - if dump_stats then begin - let mib x = Int64.div (Int64.div x 1024L) 1024L in - debug "Memory_check: total host memory: %Ld (%Ld MiB)" - summary.host_maximum_guest_memory_bytes - (mib summary.host_maximum_guest_memory_bytes); - List.iter - (fun v -> - let reqd = vm_compute_used_memory ~__context Static_max v in - debug "Memory_check: VM %s (%s): memory %Ld (%Ld MiB)" - (Db.VM.get_uuid ~__context ~self:v) - (if List.mem v summary.resident - then 
"resident here" - else "scheduled to be resident here" - ) - reqd (mib reqd) - ) - (summary.scheduled @ summary.resident); - debug "Memory_check: available memory: %Ld (%Ld MiB)" - host_mem_available (mib host_mem_available) - end; + if dump_stats then begin + let mib x = Int64.div (Int64.div x 1024L) 1024L in + debug "Memory_check: total host memory: %Ld (%Ld MiB)" + summary.host_maximum_guest_memory_bytes + (mib summary.host_maximum_guest_memory_bytes); + List.iter + (fun v -> + let reqd = vm_compute_used_memory ~__context Static_max v in + debug "Memory_check: VM %s (%s): memory %Ld (%Ld MiB)" + (Db.VM.get_uuid ~__context ~self:v) + (if List.mem v summary.resident + then "resident here" + else "scheduled to be resident here" + ) + reqd (mib reqd) + ) + (summary.scheduled @ summary.resident); + debug "Memory_check: available memory: %Ld (%Ld MiB)" + host_mem_available (mib host_mem_available) + end; - host_mem_available + host_mem_available let host_compute_memory_overhead ~__context ~host = - (* We assume that the memory overhead of a host is constant with respect *) - (* to time and simply fetch the existing cached value from the database. *) - Db.Host.get_memory_overhead ~__context ~self:host + (* We assume that the memory overhead of a host is constant with respect *) + (* to time and simply fetch the existing cached value from the database. 
*) + Db.Host.get_memory_overhead ~__context ~self:host let vm_compute_memory_overhead snapshot = - let static_max_bytes = snapshot.API.vM_memory_static_max in - let static_max_mib = XenopsMemory.mib_of_bytes_used static_max_bytes in - let multiplier = snapshot.API.vM_HVM_shadow_multiplier in - let vcpu_count = Int64.to_int (snapshot.API.vM_VCPUs_max) in - let memory_overhead_mib = ( - if Helpers.is_hvm snapshot - then XenopsMemory.HVM.overhead_mib - else XenopsMemory.Linux.overhead_mib) - static_max_mib vcpu_count multiplier in - XenopsMemory.bytes_of_mib memory_overhead_mib + let static_max_bytes = snapshot.API.vM_memory_static_max in + let static_max_mib = XenopsMemory.mib_of_bytes_used static_max_bytes in + let multiplier = snapshot.API.vM_HVM_shadow_multiplier in + let vcpu_count = Int64.to_int (snapshot.API.vM_VCPUs_max) in + let memory_overhead_mib = ( + if Helpers.is_hvm snapshot + then XenopsMemory.HVM.overhead_mib + else XenopsMemory.Linux.overhead_mib) + static_max_mib vcpu_count multiplier in + XenopsMemory.bytes_of_mib memory_overhead_mib diff --git a/ocaml/xapi/memory_check.mli b/ocaml/xapi/memory_check.mli index 849f9581be8..192b7b1a740 100644 --- a/ocaml/xapi/memory_check.mli +++ b/ocaml/xapi/memory_check.mli @@ -13,85 +13,85 @@ *) (** * @group Memory Management - *) - +*) + (** - The Pool master's view of the total memory and memory consumers on a host. - This doesn't take into account dynamic changes i.e. those caused by - ballooning. Therefore if we ask a question like, 'is there amount of - memory free to boot VM ' we will get one of 3 different answers: - 1. yes: - the sum of the static_max's of all VMs with domains + the request - is less than the total free. - 2. maybe: - depending on the behaviour of the balloon drivers in the guest we - may be able to free the memory. - 3. no: - the sum of the dynamic_min's of all the VMs with domains + the - request is more than the total free. 
+ The Pool master's view of the total memory and memory consumers on a host. + This doesn't take into account dynamic changes i.e. those caused by + ballooning. Therefore if we ask a question like, 'is there amount of + memory free to boot VM ' we will get one of 3 different answers: + 1. yes: + the sum of the static_max's of all VMs with domains + the request + is less than the total free. + 2. maybe: + depending on the behaviour of the balloon drivers in the guest we + may be able to free the memory. + 3. no: + the sum of the dynamic_min's of all the VMs with domains + the + request is more than the total free. *) type host_memory_summary = { - (** The maximum amount of memory that guests can use on this host. *) - host_maximum_guest_memory_bytes: int64; - (** list of VMs which have a domain running here *) - resident: API.ref_VM list; - (** list of VMs which are in the process of having a domain created here *) - scheduled: API.ref_VM list; + (** The maximum amount of memory that guests can use on this host. *) + host_maximum_guest_memory_bytes: int64; + (** list of VMs which have a domain running here *) + resident: API.ref_VM list; + (** list of VMs which are in the process of having a domain created here *) + scheduled: API.ref_VM list; } (** Different users will wish to use a different VM accounting policy, depending -on how conservative or liberal they are. *) + on how conservative or liberal they are. *) type accounting_policy = - | Static_max - (** use static_max: conservative: useful for HA. *) - | Dynamic_max - (** use dynamic_max: fairly conservative: useful for dom0 for HA. *) - | Dynamic_min - (** use dynamic_min: liberal: assumes that guests always co-operate. *) + | Static_max + (** use static_max: conservative: useful for HA. *) + | Dynamic_max + (** use dynamic_max: fairly conservative: useful for dom0 for HA. *) + | Dynamic_min + (** use dynamic_min: liberal: assumes that guests always co-operate. 
*) (** Return a host's memory summary from live database contents. *) val get_host_memory_summary : __context:Context.t -> host:API.ref_host -> - host_memory_summary + host_memory_summary val vm_compute_required_memory : API.vM_t -> int64 -> int64 * int64 val vm_compute_start_memory : __context:Context.t -> - ?policy:accounting_policy -> API.vM_t -> int64 * int64 + ?policy:accounting_policy -> API.vM_t -> int64 * int64 val vm_compute_used_memory : __context:Context.t -> accounting_policy -> - [`VM] Ref.t -> int64 + [`VM] Ref.t -> int64 val vm_compute_resume_memory : __context:Context.t -> [`VM] Ref.t -> int64 val vm_compute_migrate_memory : __context:Context.t -> [`VM] Ref.t -> int64 (** - Given a host's memory summary and a policy flag (i.e. whether to only - consider static_max or to consider dynamic balloon data) it returns a - hypothetical amount of free memory on the host. + Given a host's memory summary and a policy flag (i.e. whether to only + consider static_max or to consider dynamic balloon data) it returns a + hypothetical amount of free memory on the host. *) val host_compute_free_memory_with_policy : __context:Context.t -> - host_memory_summary -> accounting_policy -> int64 + host_memory_summary -> accounting_policy -> int64 (** - Compute, from our managed data, how much memory is available on a host; this - takes into account both VMs that are resident_on the host and also VMs that - are scheduled_to_be_resident_on the host. + Compute, from our managed data, how much memory is available on a host; this + takes into account both VMs that are resident_on the host and also VMs that + are scheduled_to_be_resident_on the host. - If ignore_scheduled_vm is set then we do not consider this VM as having any - resources allocated via the scheduled_to_be_resident_on mechanism. 
This is - used to ensure that, when we're executing this function with a view to - starting a VM, v, and further that v is scheduled_to_be_resident on the - specified host, that we do not count the resources required for v twice. + If ignore_scheduled_vm is set then we do not consider this VM as having any + resources allocated via the scheduled_to_be_resident_on mechanism. This is + used to ensure that, when we're executing this function with a view to + starting a VM, v, and further that v is scheduled_to_be_resident on the + specified host, that we do not count the resources required for v twice. - If 'dump_stats=true' then we write to the debug log where we think the - memory is being used. + If 'dump_stats=true' then we write to the debug log where we think the + memory is being used. *) val host_compute_free_memory_with_maximum_compression : ?dump_stats:bool -> - __context:Context.t -> host:[`host] Ref.t -> [`VM] Ref.t option -> int64 + __context:Context.t -> host:[`host] Ref.t -> [`VM] Ref.t option -> int64 val host_compute_memory_overhead : __context:Context.t -> host:[`host] Ref.t -> - int64 + int64 val vm_compute_memory_overhead : API.vM_t -> int64 - + diff --git a/ocaml/xapi/message_forwarding.ml b/ocaml/xapi/message_forwarding.ml index d0664740d3c..44e954fb49b 100644 --- a/ocaml/xapi/message_forwarding.ml +++ b/ocaml/xapi/message_forwarding.ml @@ -13,7 +13,7 @@ *) (** * @group API Messaging - *) +*) open Stdext open Threadext @@ -46,3852 +46,3852 @@ let info = Audit.debug (* Use HTTP 1.0, don't use the connection cache and don't pre-verify the connection *) let remote_rpc_no_retry context hostname (task_opt: API.ref_task option) xml = - let open Xmlrpc_client in - let transport = SSL(SSL.make ?task_id:(may Ref.string_of task_opt) (), - hostname, !Xapi_globs.https_port) in - let http = xmlrpc ?task_id:(may Ref.string_of task_opt) ~version:"1.0" "/" in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"dst_xapi" ~transport ~http xml + let open Xmlrpc_client in 
+ let transport = SSL(SSL.make ?task_id:(may Ref.string_of task_opt) (), + hostname, !Xapi_globs.https_port) in + let http = xmlrpc ?task_id:(may Ref.string_of task_opt) ~version:"1.0" "/" in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"dst_xapi" ~transport ~http xml (* Use HTTP 1.1, use the stunnel cache and pre-verify the connection *) let remote_rpc_retry context hostname (task_opt: API.ref_task option) xml = - let open Xmlrpc_client in - let transport = SSL(SSL.make ~use_stunnel_cache:true ?task_id:(may Ref.string_of task_opt) (), - hostname, !Xapi_globs.https_port) in - let http = xmlrpc ?task_id:(may Ref.string_of task_opt) ~version:"1.1" "/" in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"dst_xapi" ~transport ~http xml + let open Xmlrpc_client in + let transport = SSL(SSL.make ~use_stunnel_cache:true ?task_id:(may Ref.string_of task_opt) (), + hostname, !Xapi_globs.https_port) in + let http = xmlrpc ?task_id:(may Ref.string_of task_opt) ~version:"1.1" "/" in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"dst_xapi" ~transport ~http xml let call_slave_with_session remote_rpc_fn __context host (task_opt: API.ref_task option) f = - let hostname = Db.Host.get_address ~__context ~self:host in - let session_id = Xapi_session.login_no_password ~__context ~uname:None ~host ~pool:true ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:"" ~rbac_permissions:[] in - Pervasiveext.finally - (fun ()->f session_id (remote_rpc_fn __context hostname task_opt)) - (fun () -> Xapi_session.destroy_db_session ~__context ~self:session_id) + let hostname = Db.Host.get_address ~__context ~self:host in + let session_id = Xapi_session.login_no_password ~__context ~uname:None ~host ~pool:true ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:"" ~rbac_permissions:[] in + Pervasiveext.finally + (fun ()->f session_id (remote_rpc_fn __context hostname task_opt)) + (fun () -> Xapi_session.destroy_db_session ~__context ~self:session_id) 
let call_slave_with_local_session remote_rpc_fn __context host (task_opt: API.ref_task option) f = - let hostname = Db.Host.get_address ~__context ~self:host in - let session_id = Client.Session.slave_local_login ~rpc:(remote_rpc_fn __context hostname None) - ~psecret:!Xapi_globs.pool_secret in - Pervasiveext.finally - (fun () -> f session_id (remote_rpc_fn __context hostname task_opt)) - (fun () -> Client.Session.local_logout ~rpc:(remote_rpc_fn __context hostname None) ~session_id) + let hostname = Db.Host.get_address ~__context ~self:host in + let session_id = Client.Session.slave_local_login ~rpc:(remote_rpc_fn __context hostname None) + ~psecret:!Xapi_globs.pool_secret in + Pervasiveext.finally + (fun () -> f session_id (remote_rpc_fn __context hostname task_opt)) + (fun () -> Client.Session.local_logout ~rpc:(remote_rpc_fn __context hostname None) ~session_id) (* set the fields on the task record to indicate that forwarding has taken place and creates a task id for the slave to use *) let set_forwarding_on_task ~__context ~host = - if Context.task_in_database __context - then begin - let rt = Context.get_task_id __context in - Db.Task.set_forwarded ~__context ~self:rt ~value:true; - Db.Task.set_forwarded_to ~__context ~self:rt ~value:host; - Some rt (* slave uses this task for progress/status etc. *) - end else None + if Context.task_in_database __context + then begin + let rt = Context.get_task_id __context in + Db.Task.set_forwarded ~__context ~self:rt ~value:true; + Db.Task.set_forwarded_to ~__context ~self:rt ~value:host; + Some rt (* slave uses this task for progress/status etc. 
*) + end else None let check_live ~__context h = - (* assume that localhost is always live *) - if true - && (Helpers.get_localhost ~__context <> h) - && (not (Xapi_vm_helpers.is_host_live ~__context h)) - then raise (Api_errors.Server_error (Api_errors.host_offline, [Ref.string_of h])) + (* assume that localhost is always live *) + if true + && (Helpers.get_localhost ~__context <> h) + && (not (Xapi_vm_helpers.is_host_live ~__context h)) + then raise (Api_errors.Server_error (Api_errors.host_offline, [Ref.string_of h])) let check_enabled ~__context h = - (* check host is enabled *) - Xapi_vm_helpers.assert_host_is_enabled ~__context ~host:h + (* check host is enabled *) + Xapi_vm_helpers.assert_host_is_enabled ~__context ~host:h (* Forward op to one of the specified hosts if host!=localhost *) let do_op_on_common ~local_fn ~__context ~host op f = - try - let localhost=Helpers.get_localhost ~__context in - if localhost=host then local_fn ~__context - else - let task_opt = set_forwarding_on_task ~__context ~host in - f __context host task_opt op - with - | Xmlrpc_client.Connection_reset | Http_client.Http_request_rejected _ -> - warn "Caught Connection_reset when contacting host %s; converting into CANNOT_CONTACT_HOST" (Ref.string_of host); - raise (Api_errors.Server_error (Api_errors.cannot_contact_host, [Ref.string_of host])) - | Xmlrpc_client.Stunnel_connection_failed -> - warn "Caught Stunnel_connection_failed while contacting host %s; converting into CANNOT_CONTACT_HOST" (Ref.string_of host); - raise (Api_errors.Server_error (Api_errors.cannot_contact_host, [Ref.string_of host])) + try + let localhost=Helpers.get_localhost ~__context in + if localhost=host then local_fn ~__context + else + let task_opt = set_forwarding_on_task ~__context ~host in + f __context host task_opt op + with + | Xmlrpc_client.Connection_reset | Http_client.Http_request_rejected _ -> + warn "Caught Connection_reset when contacting host %s; converting into CANNOT_CONTACT_HOST" 
(Ref.string_of host); + raise (Api_errors.Server_error (Api_errors.cannot_contact_host, [Ref.string_of host])) + | Xmlrpc_client.Stunnel_connection_failed -> + warn "Caught Stunnel_connection_failed while contacting host %s; converting into CANNOT_CONTACT_HOST" (Ref.string_of host); + raise (Api_errors.Server_error (Api_errors.cannot_contact_host, [Ref.string_of host])) (* regular forwarding fn, with session and live-check. Used by most calls, will use the connection cache. *) (* we don't check "host.enabled" here, because for most messages we want to be able to forward them even when the host is disabled; vm.start_on and resume_on do their own check for enabled *) let do_op_on ~local_fn ~__context ~host op = - check_live ~__context host; - do_op_on_common ~local_fn ~__context ~host op - (call_slave_with_session remote_rpc_retry) + check_live ~__context host; + do_op_on_common ~local_fn ~__context ~host op + (call_slave_with_session remote_rpc_retry) (* with session but no live check. Used by the Pool.hello calling back ONLY Don't use the connection cache or retry logic. *) let do_op_on_nolivecheck_no_retry ~local_fn ~__context ~host op = - do_op_on_common ~local_fn ~__context ~host op - (call_slave_with_session remote_rpc_no_retry) + do_op_on_common ~local_fn ~__context ~host op + (call_slave_with_session remote_rpc_no_retry) (* with a local session and no checking. This is used for forwarding messages to hosts that we don't know are alive/dead -- e.g. the pool_emergency_* messages. Don't use the connection cache or retry logic. 
*) let do_op_on_localsession_nolivecheck ~local_fn ~__context ~host op = - do_op_on_common ~local_fn ~__context ~host op - (call_slave_with_local_session remote_rpc_no_retry) + do_op_on_common ~local_fn ~__context ~host op + (call_slave_with_local_session remote_rpc_no_retry) (* Map a function across a list, remove elements which throw an exception *) let map_with_drop ?(doc = "performing unknown operation") f xs = - let one x = - try [ f x ] - with e -> - debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e); [] in - List.concat (List.map one xs) - (* Iterate a function across a list, ignoring applications which throw an exception *) + let one x = + try [ f x ] + with e -> + debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e); [] in + List.concat (List.map one xs) +(* Iterate a function across a list, ignoring applications which throw an exception *) let iter_with_drop ?(doc = "performing unknown operation") f xs = - let one x = - try f x - with e -> - debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e) in - List.iter one xs + let one x = + try f x + with e -> + debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e) in + List.iter one xs let log_exn ?(doc = "performing unknown operation") f x = - try f x - with e -> - debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e); - raise e + try f x + with e -> + debug "Caught exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e); + raise e let log_exn_ignore ?(doc = "performing unknown operation") f x = - try f x - with e -> - debug "Ignoring exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e) + try f x + with e -> + debug "Ignoring exception while %s in message forwarder: %s" doc (ExnHelper.string_of_exn e) 
(**************************************************************************************) let hosts_with_several_srs ~__context srs = - let hosts = Db.Host.get_all ~__context in - let filterfn host = - try - Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:srs ~host; - true - with - _ -> false in - List.filter filterfn hosts + let hosts = Db.Host.get_all ~__context in + let filterfn host = + try + Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:srs ~host; + true + with + _ -> false in + List.filter filterfn hosts (* Given an SR, return a PBD to use for some storage operation. *) (* In the case of SR.destroy we need to be able to forward the SR operation when all PBDs are unplugged - this is the reason for the consider_unplugged_pbds optional argument below. All other SR ops only consider plugged PBDs... *) let choose_pbd_for_sr ?(consider_unplugged_pbds=false) ~__context ~self () = - let all_pbds = Db.SR.get_PBDs ~__context ~self in - let plugged_pbds = List.filter (fun pbd->Db.PBD.get_currently_attached ~__context ~self:pbd) all_pbds in - let pbds_to_consider = if consider_unplugged_pbds then all_pbds else plugged_pbds in - if Helpers.is_sr_shared ~__context ~self then - let master = Helpers.get_master ~__context in - let master_pbds = Db.Host.get_PBDs ~__context ~self:master in - (* shared SR operations must happen on the master *) - match Listext.List.intersect pbds_to_consider master_pbds with - | pbd :: _ -> pbd (* ok, master plugged *) - | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of self ])) (* can't do op, master pbd not plugged *) - else - match pbds_to_consider with - | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of self ])) - | pdb :: _ -> pdb + let all_pbds = Db.SR.get_PBDs ~__context ~self in + let plugged_pbds = List.filter (fun pbd->Db.PBD.get_currently_attached ~__context ~self:pbd) all_pbds in + let pbds_to_consider = if consider_unplugged_pbds then 
all_pbds else plugged_pbds in + if Helpers.is_sr_shared ~__context ~self then + let master = Helpers.get_master ~__context in + let master_pbds = Db.Host.get_PBDs ~__context ~self:master in + (* shared SR operations must happen on the master *) + match Listext.List.intersect pbds_to_consider master_pbds with + | pbd :: _ -> pbd (* ok, master plugged *) + | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of self ])) (* can't do op, master pbd not plugged *) + else + match pbds_to_consider with + | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of self ])) + | pdb :: _ -> pdb let loadbalance_host_operation ~__context ~hosts ~doc ~op (f: API.ref_host -> unit) = - let task_id = Ref.string_of (Context.get_task_id __context) in - let choice = Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - let possibilities = List.filter - (fun self -> try Xapi_host_helpers.assert_operation_valid ~__context ~self ~op; true - with _ -> false) hosts in - if possibilities = [] - then raise (Api_errors.Server_error(Api_errors.other_operation_in_progress, [ "host"; Ref.string_of (List.hd hosts) ])); - let choice = List.nth possibilities (Random.int (List.length possibilities)) in - Xapi_host_helpers.assert_operation_valid ~__context ~self:choice ~op; - Db.Host.add_to_current_operations ~__context ~self:choice ~key:task_id ~value:op; - Xapi_host_helpers.update_allowed_operations ~__context ~self:choice; - choice) in - - (* Then do the action with the lock released *) - finally - (fun () -> f choice) - (* Make sure to clean up at the end *) - (fun () -> - try - Db.Host.remove_from_current_operations ~__context ~self:choice ~key:task_id; - Xapi_host_helpers.update_allowed_operations ~__context ~self:choice; - Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of choice); - with - _ -> ()) + let task_id = Ref.string_of (Context.get_task_id __context) in + let choice = Helpers.retry_with_global_lock ~__context ~doc + (fun () 
-> + let possibilities = List.filter + (fun self -> try Xapi_host_helpers.assert_operation_valid ~__context ~self ~op; true + with _ -> false) hosts in + if possibilities = [] + then raise (Api_errors.Server_error(Api_errors.other_operation_in_progress, [ "host"; Ref.string_of (List.hd hosts) ])); + let choice = List.nth possibilities (Random.int (List.length possibilities)) in + Xapi_host_helpers.assert_operation_valid ~__context ~self:choice ~op; + Db.Host.add_to_current_operations ~__context ~self:choice ~key:task_id ~value:op; + Xapi_host_helpers.update_allowed_operations ~__context ~self:choice; + choice) in + + (* Then do the action with the lock released *) + finally + (fun () -> f choice) + (* Make sure to clean up at the end *) + (fun () -> + try + Db.Host.remove_from_current_operations ~__context ~self:choice ~key:task_id; + Xapi_host_helpers.update_allowed_operations ~__context ~self:choice; + Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of choice); + with + _ -> ()) module Forward = functor(Local: Custom_actions.CUSTOM_ACTIONS) -> struct - (* During certain operations that are executed on a pool slave, the slave management can reconfigure - * its management interface, we can lose connection with the slave. - * This function catches any "host cannot be contacted" exceptions during such calls and polls - * periodically to see whether the operation has completed on the slave. *) - let tolerate_connection_loss fn success timeout = - try - fn () - with - | Api_errors.Server_error (ercode, params) when ercode=Api_errors.cannot_contact_host -> - debug "Lost connection with slave during call (expected). Waiting for slave to come up again."; - let time_between_retries = 1. (* seconds *) in - let num_retries = int_of_float (timeout /. 
time_between_retries) in - let rec poll i = - match i with - | 0 -> raise (Api_errors.Server_error (ercode, params)) (* give up and re-raise exn *) - | i -> - begin - match success () with - | Some result -> debug "Slave is back and has completed the operation!"; result (* success *) - | None -> Thread.delay time_between_retries; poll (i-1) - end - in - poll num_retries - - let add_brackets s = - if s = "" then - "" - else - Printf.sprintf " (%s)" s - - let pool_uuid ~__context pool = - try if Pool_role.is_master () then - let name = Db.Pool.get_name_label __context pool in - Printf.sprintf "%s%s" (Db.Pool.get_uuid __context pool) (add_brackets name) - else - Ref.string_of pool - with _ -> "invalid" - - let current_pool_uuid ~__context = - if Pool_role.is_master () then - let _, pool = List.hd (Db.Pool.get_all_records ~__context) in - Printf.sprintf "%s%s" pool.API.pool_uuid (add_brackets pool.API.pool_name_label) - else - "invalid" - - let host_uuid ~__context host = - try if Pool_role.is_master () then - let name = Db.Host.get_name_label __context host in - Printf.sprintf "%s%s" (Db.Host.get_uuid __context host) (add_brackets name) - else - Ref.string_of host - with _ -> "invalid" - - let vm_uuid ~__context vm = - try if Pool_role.is_master () then - let name = Db.VM.get_name_label __context vm in - Printf.sprintf "%s%s" (Db.VM.get_uuid __context vm) (add_brackets name) - else - Ref.string_of vm - with _ -> "invalid" - - let vm_appliance_uuid ~__context vm_appliance = - try if Pool_role.is_master () then - let name = Db.VM_appliance.get_name_label __context vm_appliance in - Printf.sprintf "%s%s" (Db.VM_appliance.get_uuid __context vm_appliance) (add_brackets name) - else - Ref.string_of vm_appliance - with _ -> "invalid" - - let sr_uuid ~__context sr = - try if Pool_role.is_master () then - let name = Db.SR.get_name_label __context sr in - Printf.sprintf "%s%s" (Db.SR.get_uuid __context sr) (add_brackets name) - else - Ref.string_of sr - with _ -> "invalid" - - 
let vdi_uuid ~__context vdi = - try if Pool_role.is_master () then - Db.VDI.get_uuid __context vdi - else - Ref.string_of vdi - with _ -> "invalid" - - let vif_uuid ~__context vif = - try if Pool_role.is_master () then - Db.VIF.get_uuid __context vif - else - Ref.string_of vif - with _ -> "invalid" - - let vlan_uuid ~__context vlan = - try if Pool_role.is_master () then - Db.VLAN.get_uuid __context vlan - else - Ref.string_of vlan - with _ -> "invalid" - - let tunnel_uuid ~__context tunnel = - try if Pool_role.is_master () then - Db.Tunnel.get_uuid __context tunnel - else - Ref.string_of tunnel - with _ -> "invalid" - - let bond_uuid ~__context bond = - try if Pool_role.is_master () then - Db.Bond.get_uuid __context bond - else - Ref.string_of bond - with _ -> "invalid" - - - let pif_uuid ~__context pif = - try if Pool_role.is_master () then - Db.PIF.get_uuid __context pif - else - Ref.string_of pif - with _ -> "invalid" - - let vbd_uuid ~__context vbd = - try if Pool_role.is_master () then - Db.VBD.get_uuid __context vbd - else - Ref.string_of vbd - with _ -> "invalid" - - let pbd_uuid ~__context pbd = - try if Pool_role.is_master () then - Db.PBD.get_uuid __context pbd - else - Ref.string_of pbd - with _ -> "invalid" - - let task_uuid ~__context task = - try if Pool_role.is_master () then - Db.Task.get_uuid __context task - else - Ref.string_of task - with _ -> "invalid" - - let crashdump_uuid ~__context cd = - try if Pool_role.is_master () then - Db.Crashdump.get_uuid __context cd - else - Ref.string_of cd - with _ -> "invalid" - - let host_crashdump_uuid ~__context hcd = - try if Pool_role.is_master () then - Db.Host_crashdump.get_uuid __context hcd - else - Ref.string_of hcd - with _ -> "invalid" - - let network_uuid ~__context network = - try if Pool_role.is_master () then - Db.Network.get_uuid __context network - else - Ref.string_of network - with _ -> "invalid" - - let host_patch_uuid ~__context patch = - try if Pool_role.is_master () then - 
Db.Host_patch.get_uuid __context patch - else - Ref.string_of patch - with _ -> "invalid" - - let pool_patch_uuid ~__context patch = - try if Pool_role.is_master () then - Db.Pool_patch.get_uuid __context patch - else - Ref.string_of patch - with _ -> "invalid" - - let pci_uuid ~__context pci = - try if Pool_role.is_master () then - Db.PCI.get_uuid __context pci - else - Ref.string_of pci - with _ -> "invalid" - - let pgpu_uuid ~__context pgpu = - try if Pool_role.is_master () then - Db.PGPU.get_uuid __context pgpu - else - Ref.string_of pgpu - with _ -> "invalid" - - let gpu_group_uuid ~__context gpu_group = - try if Pool_role.is_master () then - Db.GPU_group.get_uuid __context gpu_group - else - Ref.string_of gpu_group - with _ -> "invalid" - - let vgpu_uuid ~__context vgpu = - try if Pool_role.is_master () then - Db.VGPU.get_uuid __context vgpu - else - Ref.string_of vgpu - with _ -> "invalid" - - let vgpu_type_uuid ~__context vgpu_type = - try if Pool_role.is_master () then - Db.VGPU_type.get_uuid __context vgpu_type - else - Ref.string_of vgpu_type - with _ -> "invalid" - - module Session = Local.Session - module Auth = Local.Auth - module Subject = Local.Subject - module Role = Local.Role - module Task = struct - include Local.Task - - let cancel ~__context ~task = - let local_fn = cancel ~task in - let forwarded_to = Db.Task.get_forwarded_to ~__context ~self:task in - if Db.is_valid_ref __context forwarded_to - then do_op_on ~local_fn ~__context ~host:(Db.Task.get_forwarded_to ~__context ~self:task) - (fun session_id rpc -> - Client.Task.cancel rpc session_id task - ) - else local_fn ~__context - end - module Event = Local.Event - module VMPP = Local.VMPP - module VM_appliance = struct - include Local.VM_appliance - (* Add to the VM_appliance's current operations, call a function and then remove from the *) - (* current operations. Ensure the allowed_operations are kept up to date. 
*) - let with_vm_appliance_operation ~__context ~self ~doc ~op f = - let task_id = Ref.string_of (Context.get_task_id __context) in - Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - Xapi_vm_appliance.assert_operation_valid ~__context ~self ~op; - Db.VM_appliance.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_vm_appliance.update_allowed_operations ~__context ~self); - (* Then do the action with the lock released *) - finally f - (* Make sure to clean up at the end *) - (fun () -> - try - Db.VM_appliance.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vm_appliance.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vm_appliance, Ref.string_of self); - with - _ -> ()) - - let start ~__context ~self ~paused = - info "VM_appliance.start: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.start" ~op:`start - (fun () -> - Local.VM_appliance.start ~__context ~self ~paused) - - let clean_shutdown ~__context ~self = - info "VM_appliance.clean_shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.clean_shutdown" ~op:`clean_shutdown - (fun () -> - Local.VM_appliance.clean_shutdown ~__context ~self) - - let hard_shutdown ~__context ~self = - info "VM_appliance.hard_shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.hard_shutdown" ~op:`hard_shutdown - (fun () -> - Local.VM_appliance.hard_shutdown ~__context ~self) - - let shutdown ~__context ~self = - info "VM_appliance.shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.shutdown" ~op:`shutdown - (fun () -> - Local.VM_appliance.shutdown ~__context ~self) - - let assert_can_be_recovered ~__context ~self ~session_to = - 
info "VM_appliance.assert_can_be_recovered: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - Local.VM_appliance.assert_can_be_recovered ~__context ~self ~session_to - - let get_SRs_required_for_recovery ~__context ~self ~session_to = - info "VM_appliance.get_SRs_required_for_recovery: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - Local.VM_appliance.get_SRs_required_for_recovery ~__context ~self ~session_to - - let recover ~__context ~self ~session_to ~force = - info "VM_appliance.recover: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); - Local.VM_appliance.recover ~__context ~self ~session_to ~force - end - module DR_task = Local.DR_task - (* module Alert = Local.Alert *) - - module Pool = struct - include Local.Pool - - (** Add to the Pool's current operations, call a function and then remove from the - current operations. Ensure the allowed_operations are kept up to date. *) - let with_pool_operation ~__context ~self ~doc ~op f = - let task_id = Ref.string_of (Context.get_task_id __context) in - Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - Xapi_pool_helpers.assert_operation_valid ~__context ~self ~op; - Db.Pool.add_to_current_operations ~__context ~self ~key:task_id ~value:op); - Xapi_pool_helpers.update_allowed_operations ~__context ~self; - (* Then do the action with the lock released *) - finally f - (* Make sure to clean up at the end *) - (fun () -> - try - Db.Pool.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_pool_helpers.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._pool, Ref.string_of self); - with - _ -> ()) - - let eject ~__context ~host = - info "Pool.eject: pool = '%s'; host = '%s'" (current_pool_uuid ~__context) (host_uuid ~__context host); - let local_fn = Local.Pool.eject ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Pool.eject rpc session_id host) - - let designate_new_master ~__context ~host = - info 
"Pool.designate_new_master: pool = '%s'; host = '%s'" (current_pool_uuid ~__context) (host_uuid ~__context host); - (* Sync the RRDs from localhost to new master *) - Xapi_sync.sync_host __context host; - let local_fn = Local.Pool.designate_new_master ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Pool.designate_new_master rpc session_id host) - - let enable_ha ~__context ~heartbeat_srs ~configuration = - info "Pool.enable_ha: pool = '%s'; heartbeat_srs = [ %s ]; configuration = [ %s ]" - (current_pool_uuid ~__context) - (String.concat ", " (List.map Ref.string_of heartbeat_srs)) - (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) configuration)); - let pool = Helpers.get_pool ~__context in - with_pool_operation ~__context ~doc:"Pool.ha_enable" ~self:pool ~op:`ha_enable - (fun () -> - Local.Pool.enable_ha __context heartbeat_srs configuration - ) - - let disable_ha ~__context = - info "Pool.disable_ha: pool = '%s'" (current_pool_uuid ~__context); - let pool = Helpers.get_pool ~__context in - with_pool_operation ~__context ~doc:"Pool.ha_disable" ~self:pool ~op:`ha_disable - (fun () -> - Local.Pool.disable_ha __context - ) - - let ha_prevent_restarts_for ~__context ~seconds = - info "Pool.ha_prevent_restarts_for: pool = '%s'; seconds = %Ld" (current_pool_uuid ~__context) seconds; - Local.Pool.ha_prevent_restarts_for ~__context ~seconds - - let ha_failover_plan_exists ~__context ~n = - info "Pool.ha_failover_plan_exists: pool = '%s'; n = %Ld" (current_pool_uuid ~__context) n; - Local.Pool.ha_failover_plan_exists ~__context ~n - - let ha_compute_max_host_failures_to_tolerate ~__context = - info "Pool.ha_compute_max_host_failures_to_tolerate: pool = '%s'" (current_pool_uuid ~__context); - Local.Pool.ha_compute_max_host_failures_to_tolerate ~__context - - let ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration = - info "Pool.ha_compute_hypothetical_max_host_failures_to_tolerate: pool = '%s'; configuration 
= [ %s ]" - (current_pool_uuid ~__context) - (String.concat "; " (List.map (fun (vm, p) -> Ref.string_of vm ^ " " ^ p) configuration)); - Local.Pool.ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration - - let ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms = - info "Pool.ha_compute_vm_failover_plan: pool = '%s'; failed_hosts = [ %s ]; failed_vms = [ %s ]" - (current_pool_uuid ~__context) - (String.concat "; " (List.map Ref.string_of failed_hosts)) - (String.concat "; " (List.map Ref.string_of failed_vms)); - Local.Pool.ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms - - let set_ha_host_failures_to_tolerate ~__context ~self ~value = - info "Pool.set_ha_host_failures_to_tolerate: pool = '%s'; value = %Ld" (pool_uuid ~__context self) value; - Local.Pool.set_ha_host_failures_to_tolerate ~__context ~self ~value - - let ha_schedule_plan_recomputation ~__context = - info "Pool.ha_schedule_plan_recomputation: pool = '%s'" (current_pool_uuid ~__context); - Local.Pool.ha_schedule_plan_recomputation ~__context - - let enable_external_auth ~__context ~pool ~config ~service_name ~auth_type = - info "Pool.enable_external_auth: pool = '%s'; service name = '%s'; auth_type = '%s'" (pool_uuid ~__context pool) service_name auth_type; - Local.Pool.enable_external_auth ~__context ~pool ~config ~service_name ~auth_type - - let disable_external_auth ~__context ~pool = - info "Pool.disable_external_auth: pool = '%s'" (pool_uuid ~__context pool); - Local.Pool.disable_external_auth ~__context ~pool - - let enable_redo_log ~__context ~sr = - info "Pool.enable_redo_log: pool = '%s'; sr_uuid = '%s'" - (current_pool_uuid ~__context) (sr_uuid __context sr); - Local.Pool.enable_redo_log ~__context ~sr - - let disable_redo_log ~__context = - info "Pool.disable_redo_log: pool = '%s'" (current_pool_uuid ~__context); - Local.Pool.disable_redo_log ~__context - - let set_vswitch_controller ~__context ~address = - info 
"Pool.set_vswitch_controller: pool = '%s'; address = '%s'" (current_pool_uuid ~__context) address; - Local.Pool.set_vswitch_controller ~__context ~address - - let get_license_state ~__context ~self = - info "Pool.get_license_state: pool = '%s'" (pool_uuid ~__context self); - Local.Pool.get_license_state ~__context ~self - - let apply_edition ~__context ~self ~edition = - info "Pool.apply_edition: pool = '%s'; edition = '%s'" (pool_uuid ~__context self) edition; - Local.Pool.apply_edition ~__context ~self ~edition - - let enable_ssl_legacy ~__context ~self = - info "Pool.enable_ssl_legacy: pool = '%s'" (pool_uuid ~__context self); - Local.Pool.enable_ssl_legacy ~__context ~self - - let disable_ssl_legacy ~__context ~self = - info "Pool.disable_ssl_legacy: pool = '%s'" (pool_uuid ~__context self); - Local.Pool.disable_ssl_legacy ~__context ~self - - let has_extension ~__context ~self ~name = - info "Pool.has_extension: pool = '%s'; name = '%s'" (pool_uuid ~__context self) name; - Local.Pool.has_extension ~__context ~self ~name - - let add_to_guest_agent_config ~__context ~self ~key ~value = - info "Pool.add_to_guest_agent_config: pool = '%s'; key = '%s'; value = '%s'" - (pool_uuid ~__context self) key value; - Local.Pool.add_to_guest_agent_config ~__context ~self ~key ~value - - let remove_from_guest_agent_config ~__context ~self ~key = - info "Pool.remove_from_guest_agent_config: pool = '%s'; key = '%s'" - (pool_uuid ~__context self) key; - Local.Pool.remove_from_guest_agent_config ~__context ~self ~key - end - - module VM = struct - (* Defined in Xapi_vm_helpers so it can be used from elsewhere without circular dependency. *) - let with_vm_operation = Xapi_vm_helpers.with_vm_operation - - (* Nb, we're not using the snapshots returned in 'Event.from' here because - * the tasks might get deleted. 
The standard mechanism for dealing with - * deleted events assumes you have a full database replica locally, and - * deletions are handled by checking your valid_ref_counts table against - * your local database. In this case, we're only interested in a subset of - * events, so this mechanism doesn't work. There will only be a few outstanding - * tasks anyway, so we're safe to just iterate through the references when an - * event happens - ie, we use the event API simply to wake us up when something - * interesting has happened. *) - - let wait_for_tasks ~__context ~tasks = - let our_task = Context.get_task_id __context in - let classes = List.map (fun x -> Printf.sprintf "task/%s" (Ref.string_of x)) (our_task::tasks) in - - let rec process token = - TaskHelper.exn_if_cancelling ~__context; (* First check if _we_ have been cancelled *) - let statuses = List.filter_map (fun task -> try Some (Db.Task.get_status ~__context ~self:task) with _ -> None) tasks in - let unfinished = List.exists (fun state -> state = `pending) statuses in - if unfinished - then begin - let from = Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Event.from ~rpc ~session_id ~classes ~token ~timeout:30.0) in - debug "Using events to wait for tasks: %s" (String.concat "," classes); - let from = Event_types.event_from_of_rpc from in - process from.Event_types.token - end else - () - in - process "" - - let cancel ~__context ~vm ~ops = - let cancelled = List.filter_map (fun (task,op) -> - if List.mem op ops then begin - info "Cancelling VM.%s for VM.hard_shutdown/reboot" (Record_util.vm_operation_to_string op); - Helpers.call_api_functions ~__context - (fun rpc session_id -> try Client.Task.cancel ~rpc ~session_id ~task:(Ref.of_string task) with _ -> ()); - Some (Ref.of_string task) - end else None - ) (Db.VM.get_current_operations ~__context ~self:vm) in - wait_for_tasks ~__context ~tasks:cancelled - - let unmark_vbds ~__context ~vbds ~doc ~op = - let task_id = Ref.string_of 
(Context.get_task_id __context) in - iter_with_drop ~doc:("unmarking VBDs after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.VBD.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vbd_helpers.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vbd, Ref.string_of self); - end) - vbds - - let mark_vbds ~__context ~vm ~doc ~op : API.ref_VBD list = - let task_id = Ref.string_of (Context.get_task_id __context) in - let vbds = Db.VM.get_VBDs ~__context ~self:vm in - let marked = ref [] in - (* CA-26575: paper over transient VBD glitches caused by SR.lvhd_stop_the_world by throwing the - first OTHER_OPERATION_IN_PROGRESS (or whatever) we encounter and let the caller deal with it *) - try - List.iter - (fun vbd -> - Xapi_vbd_helpers.assert_operation_valid ~__context ~self:vbd ~op; - Db.VBD.add_to_current_operations ~__context ~self:vbd ~key:task_id ~value:op; - Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; - marked := vbd :: !marked; - ) vbds; - vbds - with e -> - debug "Caught exception marking VBD for %s on VM %s: %s" doc (Ref.string_of vm) (ExnHelper.string_of_exn e); - unmark_vbds ~__context ~vbds:!marked ~doc ~op; - raise e - - let with_vbds_marked ~__context ~vm ~doc ~op f = - (* CA-26575: paper over transient VBD glitches caused by SR.lvhd_stop_the_world *) - let vbds = Helpers.retry_with_global_lock ~__context ~doc ~policy:Helpers.Policy.fail_quickly (fun () -> - mark_vbds ~__context ~vm ~doc ~op) in - finally - (fun () -> f vbds) - (fun () -> Helpers.with_global_lock (fun () -> unmark_vbds ~__context ~vbds ~doc ~op)) - - let unmark_vifs ~__context ~vifs ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - iter_with_drop ~doc:("unmarking VIFs after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.VIF.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vif_helpers.update_allowed_operations 
~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vif, Ref.string_of self); - end) - vifs - - let mark_vifs ~__context ~vm ~doc ~op : API.ref_VIF list = - let task_id = Ref.string_of (Context.get_task_id __context) in - let vifs = Db.VM.get_VIFs ~__context ~self:vm in - let marked = map_with_drop ~doc:("marking VIFs for " ^ doc) - (fun vif -> - Xapi_vif_helpers.assert_operation_valid ~__context ~self:vif ~op; - Db.VIF.add_to_current_operations ~__context ~self:vif ~key:task_id ~value:op; - Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif; - vif) vifs in - (* Did we mark them all? *) - if List.length marked <> List.length vifs then begin - unmark_vifs ~__context ~vifs:marked ~doc ~op; - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Failed to lock all VIFs"])) - end else marked - - let with_vifs_marked ~__context ~vm ~doc ~op f = - let vifs = Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vifs ~__context ~vm ~doc ~op) in - finally - (fun () -> f vifs) - (fun () -> Helpers.with_global_lock (fun () -> unmark_vifs ~__context ~vifs ~doc ~op)) - - (* Some VM operations have side-effects on VBD allowed_operations but don't actually - lock the VBDs themselves (eg suspend) *) - let update_vbd_operations ~__context ~vm = - Helpers.with_global_lock - (fun () -> - List.iter (fun self -> - Xapi_vbd_helpers.update_allowed_operations ~__context ~self; - try - let vdi = Db.VBD.get_VDI ~__context ~self in - Xapi_vdi.update_allowed_operations ~__context ~self:vdi - with _ -> ()) - (Db.VM.get_VBDs ~__context ~self:vm)) - - let update_vif_operations ~__context ~vm = - Helpers.with_global_lock - (fun () -> - List.iter (fun self -> Xapi_vif_helpers.update_allowed_operations ~__context ~self) - (Db.VM.get_VIFs ~__context ~self:vm)) - - (* -------- Forwarding helper functions: ------------------------------------ *) - - (* Read resisdent-on field from vm to determine who to forward to *) - let forward_vm_op ~local_fn 
~__context ~vm op = - let power_state = Db.VM.get_power_state ~__context ~self:vm in - if List.mem power_state [`Running; `Paused] then - do_op_on ~local_fn ~__context ~host:(Db.VM.get_resident_on ~__context ~self:vm) op - else - local_fn ~__context - - (* Clear scheduled_to_be_resident_on for a VM and all its vGPUs. *) - let clear_scheduled_to_be_resident_on ~__context ~vm = - Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:Ref.null; - List.iter - (fun vgpu -> - Db.VGPU.set_scheduled_to_be_resident_on ~__context - ~self:vgpu - ~value:Ref.null) - (Db.VM.get_VGPUs ~__context ~self:vm) - - (* Notes on memory checking/reservation logic: - When computing the hosts free memory we consider all VMs resident_on (ie running - and consuming resources NOW) and scheduled_to_be_resident_on (ie those which are - starting/resuming/migrating, whose memory has been reserved but may not all be being - used atm). - We generally call 'assert_can_boot_here' with the master forwarding lock held, - which verifies that a host has enough free memory to support the VM and then we - set 'scheduled_to_be_resident_on' which prevents concurrent competing attempts to - use the same resources from succeeding. *) - - (* Reserves the resources for a VM by setting it as 'scheduled_to_be_resident_on' a host *) - let allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op () = - begin match host_op with - | Some x -> - let task_id = Ref.string_of (Context.get_task_id __context) in - Xapi_host_helpers.assert_operation_valid ~__context ~self:host ~op:x; - Db.Host.add_to_current_operations ~__context ~self:host ~key:task_id ~value:x; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - | None -> () - end; - (* Make sure the last_booted record has useful values for later use in memory checking - code. 
*) - if snapshot.API.vM_power_state = `Halted then begin - Helpers.set_boot_record ~__context ~self:vm snapshot - end; - (* Once this is set concurrent VM.start calls will start checking the memory used by this VM *) - Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:host; - try - Vgpuops.create_vgpus ~__context host (vm, snapshot) - (Helpers.will_boot_hvm ~__context ~self:vm) - with e -> - clear_scheduled_to_be_resident_on ~__context ~vm; - raise e - - (* For start/start_on/resume/resume_on/migrate *) - let finally_clear_host_operation ~__context ~host ?host_op () = match host_op with - | Some x -> - let task_id = Ref.string_of (Context.get_task_id __context) in - Db.Host.remove_from_current_operations ~__context ~self:host ~key:task_id; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host; - Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of host); - | None -> () - - let check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host = - if true - && (snapshot.API.vM_ha_restart_priority = Constants.ha_restart) - && (not snapshot.API.vM_ha_always_run) - then - Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context vm - else - Xapi_ha_vm_failover.assert_vm_placement_preserves_ha_plan ~__context ~arriving:[host, (vm, snapshot)] () - - (* README: Note on locking -- forward_to_suitable_host and reserve_memory_for_vm are only - called in a context where the current_operations field for the VM object contains the - operation we're considering. Thus the global_lock in this context is _not_ used to cover - the period where current_operations are set, but is used to ensure that (i) choose_host_for_vm - is executed under mutual exclusion with other incoming operations; and (ii) that scheduled_to_be_resident_on - (which must not change whilst someone is calling choose_host_for_vm) only executes in exclusion with - choose_host_for_vm. 
- *) - - (* Used by VM.start and VM.resume to choose a host with enough resource and to - 'allocate_vm_to_host' (ie set the 'scheduled_to_be_resident_on' field) *) - let forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ?host_op op = - let suitable_host = Helpers.with_global_lock - (fun () -> - let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in - if host <> Ref.null then host else - let host = Xapi_vm_helpers.choose_host_for_vm ~__context ~vm ~snapshot in - (* HA overcommit protection: we can either perform 'n' HA plans by including this in - the 'choose_host_for_vm' function or we can be cheapskates by doing it here: *) - check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host; - allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op (); - host) in - finally - (fun () -> do_op_on ~local_fn ~__context ~host:suitable_host op, suitable_host) - (fun () -> - Helpers.with_global_lock - (fun () -> - finally_clear_host_operation ~__context ~host:suitable_host ?host_op (); - (* In certain cases, VM might have been destroyed as a consequence of operation *) - if Db.is_valid_ref __context vm - then clear_scheduled_to_be_resident_on ~__context ~vm)) - - (* Used by VM.start_on, VM.resume_on, VM.migrate to verify a host has enough resource and to - 'allocate_vm_to_host' (ie set the 'scheduled_to_be_resident_on' field) *) - let reserve_memory_for_vm ~__context ~vm ~snapshot ~host ?host_op f = - Helpers.with_global_lock - (fun () -> - Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host:host ~snapshot (); - (* NB in the case of migrate although we are about to increase free memory on the sending host - we ignore this because if a failure happens while a VM is in-flight it will still be considered - on both hosts, potentially breaking the failover plan. 
*) - check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host; - allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op ()); - finally f - (fun () -> - Helpers.with_global_lock - (fun () -> - finally_clear_host_operation ~__context ~host ?host_op (); - clear_scheduled_to_be_resident_on ~__context ~vm)) - - (** - Used by VM.set_memory_dynamic_range to reserve enough memory for - increasing dynamic_min. Although a VM may actually be technically - outside the range [dynamic_min, dynamic_max] we still ensure that *if* - all VMs are obeying our commands and ballooning to dynamic_min if we ask - *then* the sum of the dynamic_mins will fit on the host. - *) - let reserve_memory_for_dynamic_change ~__context ~vm - new_dynamic_min new_dynamic_max f = - let host = Db.VM.get_resident_on ~__context ~self:vm in - let old_dynamic_min = Db.VM.get_memory_dynamic_min ~__context ~self:vm in - let old_dynamic_max = Db.VM.get_memory_dynamic_max ~__context ~self:vm in - let restore_old_values_on_error = ref false in - Helpers.with_global_lock - (fun () -> - let host_mem_available = - Memory_check.host_compute_free_memory_with_maximum_compression - ~__context ~host None in - let dynamic_min_change = Int64.sub old_dynamic_min - new_dynamic_min in - let new_host_mem_available = Int64.add host_mem_available - dynamic_min_change in - if new_host_mem_available < 0L - then raise (Api_errors.Server_error ( - Api_errors.host_not_enough_free_memory, [ - Int64.to_string (Int64.div (Int64.sub 0L dynamic_min_change) 1024L); - Int64.to_string (Int64.div host_mem_available 1024L); - ])); - if dynamic_min_change < 0L then begin - restore_old_values_on_error := true; - Db.VM.set_memory_dynamic_min ~__context ~self:vm - ~value:new_dynamic_min; - Db.VM.set_memory_dynamic_max ~__context ~self:vm - ~value:new_dynamic_max; - end - ); - try - f () - with exn -> - if !restore_old_values_on_error then begin - Db.VM.set_memory_dynamic_min ~__context ~self:vm - ~value:old_dynamic_min; - 
Db.VM.set_memory_dynamic_max ~__context ~self:vm - ~value:old_dynamic_max; - end; - raise exn - - let forward_to_access_srs ~local_fn ~__context ~vm op = - let suitable_host = - Xapi_vm_helpers.choose_host ~__context ~vm - ~choose_fn:(Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm) () in - do_op_on ~local_fn ~__context ~host:suitable_host op - - (* Used for the VM.copy when an SR is specified *) - let forward_to_access_srs_and ~local_fn ~__context ?vm ?extra_sr op = - let choose_fn ~host = - begin match vm with - | Some vm -> - Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm ~host - | _ -> () end; - begin match extra_sr with - | Some extra_sr -> - Xapi_vm_helpers.assert_can_see_specified_SRs ~__context - ~reqd_srs:[extra_sr] ~host - | _ -> () end in - let suitable_host = Xapi_vm_helpers.choose_host ~__context ?vm ~choose_fn () in - do_op_on ~local_fn ~__context ~host:suitable_host op - - (* -------------------------------------------------------------------------- *) - - (* don't forward create. this just makes a db record *) - let create ~__context ~name_label ~name_description = - info "VM.create: name_label = '%s' name_description = '%s'" name_label name_description; - (* Partial application: return a function which will take the dozens of remaining params *) - Local.VM.create ~__context ~name_label ~name_description - - (* don't forward destroy. 
this just deletes db record *) - let destroy ~__context ~self = - info "VM.destroy: VM = '%s'" (vm_uuid ~__context self); - with_vm_operation ~__context ~self ~doc:"VM.destroy" ~op:`destroy - (fun () -> - Local.VM.destroy ~__context ~self) - - let set_actions_after_shutdown ~__context ~self ~value = - info "VM.set_actions_after_shutdown: VM = '%s'" (vm_uuid ~__context self); - Local.VM.set_actions_after_shutdown ~__context ~self ~value - - let set_actions_after_reboot ~__context ~self ~value = - info "VM.set_actions_after_reboot: VM = '%s'" (vm_uuid ~__context self); - Local.VM.set_actions_after_reboot ~__context ~self ~value - - let set_actions_after_crash ~__context ~self ~value = - info "VM.set_actions_after_crash: VM = '%s'" (vm_uuid ~__context self); - Local.VM.set_actions_after_crash ~__context ~self ~value - - let set_ha_always_run ~__context ~self ~value = - info "VM.set_ha_always_run: VM = '%s'; value = '%b'" (vm_uuid ~__context self) value; - Local.VM.set_ha_always_run ~__context ~self ~value; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self - - let set_ha_restart_priority ~__context ~self ~value = - info "VM.set_ha_restart_priority: VM = '%s'; value = '%s'" (vm_uuid ~__context self) value; - Local.VM.set_ha_restart_priority ~__context ~self ~value; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self - - let set_is_a_template ~__context ~self ~value = - if value - then with_vm_operation ~__context ~self ~doc:"VM.set_is_a_template" ~op:`make_into_template - (fun () -> - Local.VM.set_is_a_template ~__context ~self ~value:true) - else Local.VM.set_is_a_template ~__context ~self ~value - (* + (* During certain operations that are executed on a pool slave, the slave management can reconfigure + * its management interface, we can lose connection with the slave. + * This function catches any "host cannot be contacted" exceptions during such calls and polls + * periodically to see whether the operation has completed on the slave. 
*) + let tolerate_connection_loss fn success timeout = + try + fn () + with + | Api_errors.Server_error (ercode, params) when ercode=Api_errors.cannot_contact_host -> + debug "Lost connection with slave during call (expected). Waiting for slave to come up again."; + let time_between_retries = 1. (* seconds *) in + let num_retries = int_of_float (timeout /. time_between_retries) in + let rec poll i = + match i with + | 0 -> raise (Api_errors.Server_error (ercode, params)) (* give up and re-raise exn *) + | i -> + begin + match success () with + | Some result -> debug "Slave is back and has completed the operation!"; result (* success *) + | None -> Thread.delay time_between_retries; poll (i-1) + end + in + poll num_retries + + let add_brackets s = + if s = "" then + "" + else + Printf.sprintf " (%s)" s + + let pool_uuid ~__context pool = + try if Pool_role.is_master () then + let name = Db.Pool.get_name_label __context pool in + Printf.sprintf "%s%s" (Db.Pool.get_uuid __context pool) (add_brackets name) + else + Ref.string_of pool + with _ -> "invalid" + + let current_pool_uuid ~__context = + if Pool_role.is_master () then + let _, pool = List.hd (Db.Pool.get_all_records ~__context) in + Printf.sprintf "%s%s" pool.API.pool_uuid (add_brackets pool.API.pool_name_label) + else + "invalid" + + let host_uuid ~__context host = + try if Pool_role.is_master () then + let name = Db.Host.get_name_label __context host in + Printf.sprintf "%s%s" (Db.Host.get_uuid __context host) (add_brackets name) + else + Ref.string_of host + with _ -> "invalid" + + let vm_uuid ~__context vm = + try if Pool_role.is_master () then + let name = Db.VM.get_name_label __context vm in + Printf.sprintf "%s%s" (Db.VM.get_uuid __context vm) (add_brackets name) + else + Ref.string_of vm + with _ -> "invalid" + + let vm_appliance_uuid ~__context vm_appliance = + try if Pool_role.is_master () then + let name = Db.VM_appliance.get_name_label __context vm_appliance in + Printf.sprintf "%s%s" 
(Db.VM_appliance.get_uuid __context vm_appliance) (add_brackets name) + else + Ref.string_of vm_appliance + with _ -> "invalid" + + let sr_uuid ~__context sr = + try if Pool_role.is_master () then + let name = Db.SR.get_name_label __context sr in + Printf.sprintf "%s%s" (Db.SR.get_uuid __context sr) (add_brackets name) + else + Ref.string_of sr + with _ -> "invalid" + + let vdi_uuid ~__context vdi = + try if Pool_role.is_master () then + Db.VDI.get_uuid __context vdi + else + Ref.string_of vdi + with _ -> "invalid" + + let vif_uuid ~__context vif = + try if Pool_role.is_master () then + Db.VIF.get_uuid __context vif + else + Ref.string_of vif + with _ -> "invalid" + + let vlan_uuid ~__context vlan = + try if Pool_role.is_master () then + Db.VLAN.get_uuid __context vlan + else + Ref.string_of vlan + with _ -> "invalid" + + let tunnel_uuid ~__context tunnel = + try if Pool_role.is_master () then + Db.Tunnel.get_uuid __context tunnel + else + Ref.string_of tunnel + with _ -> "invalid" + + let bond_uuid ~__context bond = + try if Pool_role.is_master () then + Db.Bond.get_uuid __context bond + else + Ref.string_of bond + with _ -> "invalid" + + + let pif_uuid ~__context pif = + try if Pool_role.is_master () then + Db.PIF.get_uuid __context pif + else + Ref.string_of pif + with _ -> "invalid" + + let vbd_uuid ~__context vbd = + try if Pool_role.is_master () then + Db.VBD.get_uuid __context vbd + else + Ref.string_of vbd + with _ -> "invalid" + + let pbd_uuid ~__context pbd = + try if Pool_role.is_master () then + Db.PBD.get_uuid __context pbd + else + Ref.string_of pbd + with _ -> "invalid" + + let task_uuid ~__context task = + try if Pool_role.is_master () then + Db.Task.get_uuid __context task + else + Ref.string_of task + with _ -> "invalid" + + let crashdump_uuid ~__context cd = + try if Pool_role.is_master () then + Db.Crashdump.get_uuid __context cd + else + Ref.string_of cd + with _ -> "invalid" + + let host_crashdump_uuid ~__context hcd = + try if 
Pool_role.is_master () then + Db.Host_crashdump.get_uuid __context hcd + else + Ref.string_of hcd + with _ -> "invalid" + + let network_uuid ~__context network = + try if Pool_role.is_master () then + Db.Network.get_uuid __context network + else + Ref.string_of network + with _ -> "invalid" + + let host_patch_uuid ~__context patch = + try if Pool_role.is_master () then + Db.Host_patch.get_uuid __context patch + else + Ref.string_of patch + with _ -> "invalid" + + let pool_patch_uuid ~__context patch = + try if Pool_role.is_master () then + Db.Pool_patch.get_uuid __context patch + else + Ref.string_of patch + with _ -> "invalid" + + let pci_uuid ~__context pci = + try if Pool_role.is_master () then + Db.PCI.get_uuid __context pci + else + Ref.string_of pci + with _ -> "invalid" + + let pgpu_uuid ~__context pgpu = + try if Pool_role.is_master () then + Db.PGPU.get_uuid __context pgpu + else + Ref.string_of pgpu + with _ -> "invalid" + + let gpu_group_uuid ~__context gpu_group = + try if Pool_role.is_master () then + Db.GPU_group.get_uuid __context gpu_group + else + Ref.string_of gpu_group + with _ -> "invalid" + + let vgpu_uuid ~__context vgpu = + try if Pool_role.is_master () then + Db.VGPU.get_uuid __context vgpu + else + Ref.string_of vgpu + with _ -> "invalid" + + let vgpu_type_uuid ~__context vgpu_type = + try if Pool_role.is_master () then + Db.VGPU_type.get_uuid __context vgpu_type + else + Ref.string_of vgpu_type + with _ -> "invalid" + + module Session = Local.Session + module Auth = Local.Auth + module Subject = Local.Subject + module Role = Local.Role + module Task = struct + include Local.Task + + let cancel ~__context ~task = + let local_fn = cancel ~task in + let forwarded_to = Db.Task.get_forwarded_to ~__context ~self:task in + if Db.is_valid_ref __context forwarded_to + then do_op_on ~local_fn ~__context ~host:(Db.Task.get_forwarded_to ~__context ~self:task) + (fun session_id rpc -> + Client.Task.cancel rpc session_id task + ) + else local_fn 
~__context + end + module Event = Local.Event + module VMPP = Local.VMPP + module VM_appliance = struct + include Local.VM_appliance + (* Add to the VM_appliance's current operations, call a function and then remove from the *) + (* current operations. Ensure the allowed_operations are kept up to date. *) + let with_vm_appliance_operation ~__context ~self ~doc ~op f = + let task_id = Ref.string_of (Context.get_task_id __context) in + Helpers.retry_with_global_lock ~__context ~doc + (fun () -> + Xapi_vm_appliance.assert_operation_valid ~__context ~self ~op; + Db.VM_appliance.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_vm_appliance.update_allowed_operations ~__context ~self); + (* Then do the action with the lock released *) + finally f + (* Make sure to clean up at the end *) + (fun () -> + try + Db.VM_appliance.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vm_appliance.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vm_appliance, Ref.string_of self); + with + _ -> ()) + + let start ~__context ~self ~paused = + info "VM_appliance.start: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.start" ~op:`start + (fun () -> + Local.VM_appliance.start ~__context ~self ~paused) + + let clean_shutdown ~__context ~self = + info "VM_appliance.clean_shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.clean_shutdown" ~op:`clean_shutdown + (fun () -> + Local.VM_appliance.clean_shutdown ~__context ~self) + + let hard_shutdown ~__context ~self = + info "VM_appliance.hard_shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.hard_shutdown" ~op:`hard_shutdown + (fun () -> + Local.VM_appliance.hard_shutdown ~__context ~self) + + let shutdown ~__context ~self = + 
info "VM_appliance.shutdown: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + with_vm_appliance_operation ~__context ~self ~doc:"VM_appliance.shutdown" ~op:`shutdown + (fun () -> + Local.VM_appliance.shutdown ~__context ~self) + + let assert_can_be_recovered ~__context ~self ~session_to = + info "VM_appliance.assert_can_be_recovered: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + Local.VM_appliance.assert_can_be_recovered ~__context ~self ~session_to + + let get_SRs_required_for_recovery ~__context ~self ~session_to = + info "VM_appliance.get_SRs_required_for_recovery: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + Local.VM_appliance.get_SRs_required_for_recovery ~__context ~self ~session_to + + let recover ~__context ~self ~session_to ~force = + info "VM_appliance.recover: VM_appliance = '%s'" (vm_appliance_uuid ~__context self); + Local.VM_appliance.recover ~__context ~self ~session_to ~force + end + module DR_task = Local.DR_task + (* module Alert = Local.Alert *) + + module Pool = struct + include Local.Pool + + (** Add to the Pool's current operations, call a function and then remove from the + current operations. Ensure the allowed_operations are kept up to date. 
*) + let with_pool_operation ~__context ~self ~doc ~op f = + let task_id = Ref.string_of (Context.get_task_id __context) in + Helpers.retry_with_global_lock ~__context ~doc + (fun () -> + Xapi_pool_helpers.assert_operation_valid ~__context ~self ~op; + Db.Pool.add_to_current_operations ~__context ~self ~key:task_id ~value:op); + Xapi_pool_helpers.update_allowed_operations ~__context ~self; + (* Then do the action with the lock released *) + finally f + (* Make sure to clean up at the end *) + (fun () -> + try + Db.Pool.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_pool_helpers.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._pool, Ref.string_of self); + with + _ -> ()) + + let eject ~__context ~host = + info "Pool.eject: pool = '%s'; host = '%s'" (current_pool_uuid ~__context) (host_uuid ~__context host); + let local_fn = Local.Pool.eject ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Pool.eject rpc session_id host) + + let designate_new_master ~__context ~host = + info "Pool.designate_new_master: pool = '%s'; host = '%s'" (current_pool_uuid ~__context) (host_uuid ~__context host); + (* Sync the RRDs from localhost to new master *) + Xapi_sync.sync_host __context host; + let local_fn = Local.Pool.designate_new_master ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Pool.designate_new_master rpc session_id host) + + let enable_ha ~__context ~heartbeat_srs ~configuration = + info "Pool.enable_ha: pool = '%s'; heartbeat_srs = [ %s ]; configuration = [ %s ]" + (current_pool_uuid ~__context) + (String.concat ", " (List.map Ref.string_of heartbeat_srs)) + (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) configuration)); + let pool = Helpers.get_pool ~__context in + with_pool_operation ~__context ~doc:"Pool.ha_enable" ~self:pool ~op:`ha_enable + (fun () -> + Local.Pool.enable_ha __context heartbeat_srs configuration + ) + + let disable_ha 
~__context = + info "Pool.disable_ha: pool = '%s'" (current_pool_uuid ~__context); + let pool = Helpers.get_pool ~__context in + with_pool_operation ~__context ~doc:"Pool.ha_disable" ~self:pool ~op:`ha_disable + (fun () -> + Local.Pool.disable_ha __context + ) + + let ha_prevent_restarts_for ~__context ~seconds = + info "Pool.ha_prevent_restarts_for: pool = '%s'; seconds = %Ld" (current_pool_uuid ~__context) seconds; + Local.Pool.ha_prevent_restarts_for ~__context ~seconds + + let ha_failover_plan_exists ~__context ~n = + info "Pool.ha_failover_plan_exists: pool = '%s'; n = %Ld" (current_pool_uuid ~__context) n; + Local.Pool.ha_failover_plan_exists ~__context ~n + + let ha_compute_max_host_failures_to_tolerate ~__context = + info "Pool.ha_compute_max_host_failures_to_tolerate: pool = '%s'" (current_pool_uuid ~__context); + Local.Pool.ha_compute_max_host_failures_to_tolerate ~__context + + let ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration = + info "Pool.ha_compute_hypothetical_max_host_failures_to_tolerate: pool = '%s'; configuration = [ %s ]" + (current_pool_uuid ~__context) + (String.concat "; " (List.map (fun (vm, p) -> Ref.string_of vm ^ " " ^ p) configuration)); + Local.Pool.ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration + + let ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms = + info "Pool.ha_compute_vm_failover_plan: pool = '%s'; failed_hosts = [ %s ]; failed_vms = [ %s ]" + (current_pool_uuid ~__context) + (String.concat "; " (List.map Ref.string_of failed_hosts)) + (String.concat "; " (List.map Ref.string_of failed_vms)); + Local.Pool.ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms + + let set_ha_host_failures_to_tolerate ~__context ~self ~value = + info "Pool.set_ha_host_failures_to_tolerate: pool = '%s'; value = %Ld" (pool_uuid ~__context self) value; + Local.Pool.set_ha_host_failures_to_tolerate ~__context ~self ~value + + let 
ha_schedule_plan_recomputation ~__context = + info "Pool.ha_schedule_plan_recomputation: pool = '%s'" (current_pool_uuid ~__context); + Local.Pool.ha_schedule_plan_recomputation ~__context + + let enable_external_auth ~__context ~pool ~config ~service_name ~auth_type = + info "Pool.enable_external_auth: pool = '%s'; service name = '%s'; auth_type = '%s'" (pool_uuid ~__context pool) service_name auth_type; + Local.Pool.enable_external_auth ~__context ~pool ~config ~service_name ~auth_type + + let disable_external_auth ~__context ~pool = + info "Pool.disable_external_auth: pool = '%s'" (pool_uuid ~__context pool); + Local.Pool.disable_external_auth ~__context ~pool + + let enable_redo_log ~__context ~sr = + info "Pool.enable_redo_log: pool = '%s'; sr_uuid = '%s'" + (current_pool_uuid ~__context) (sr_uuid __context sr); + Local.Pool.enable_redo_log ~__context ~sr + + let disable_redo_log ~__context = + info "Pool.disable_redo_log: pool = '%s'" (current_pool_uuid ~__context); + Local.Pool.disable_redo_log ~__context + + let set_vswitch_controller ~__context ~address = + info "Pool.set_vswitch_controller: pool = '%s'; address = '%s'" (current_pool_uuid ~__context) address; + Local.Pool.set_vswitch_controller ~__context ~address + + let get_license_state ~__context ~self = + info "Pool.get_license_state: pool = '%s'" (pool_uuid ~__context self); + Local.Pool.get_license_state ~__context ~self + + let apply_edition ~__context ~self ~edition = + info "Pool.apply_edition: pool = '%s'; edition = '%s'" (pool_uuid ~__context self) edition; + Local.Pool.apply_edition ~__context ~self ~edition + + let enable_ssl_legacy ~__context ~self = + info "Pool.enable_ssl_legacy: pool = '%s'" (pool_uuid ~__context self); + Local.Pool.enable_ssl_legacy ~__context ~self + + let disable_ssl_legacy ~__context ~self = + info "Pool.disable_ssl_legacy: pool = '%s'" (pool_uuid ~__context self); + Local.Pool.disable_ssl_legacy ~__context ~self + + let has_extension ~__context ~self ~name = + info 
"Pool.has_extension: pool = '%s'; name = '%s'" (pool_uuid ~__context self) name; + Local.Pool.has_extension ~__context ~self ~name + + let add_to_guest_agent_config ~__context ~self ~key ~value = + info "Pool.add_to_guest_agent_config: pool = '%s'; key = '%s'; value = '%s'" + (pool_uuid ~__context self) key value; + Local.Pool.add_to_guest_agent_config ~__context ~self ~key ~value + + let remove_from_guest_agent_config ~__context ~self ~key = + info "Pool.remove_from_guest_agent_config: pool = '%s'; key = '%s'" + (pool_uuid ~__context self) key; + Local.Pool.remove_from_guest_agent_config ~__context ~self ~key + end + + module VM = struct + (* Defined in Xapi_vm_helpers so it can be used from elsewhere without circular dependency. *) + let with_vm_operation = Xapi_vm_helpers.with_vm_operation + + (* Nb, we're not using the snapshots returned in 'Event.from' here because + * the tasks might get deleted. The standard mechanism for dealing with + * deleted events assumes you have a full database replica locally, and + * deletions are handled by checking your valid_ref_counts table against + * your local database. In this case, we're only interested in a subset of + * events, so this mechanism doesn't work. There will only be a few outstanding + * tasks anyway, so we're safe to just iterate through the references when an + * event happens - ie, we use the event API simply to wake us up when something + * interesting has happened. 
*) + + let wait_for_tasks ~__context ~tasks = + let our_task = Context.get_task_id __context in + let classes = List.map (fun x -> Printf.sprintf "task/%s" (Ref.string_of x)) (our_task::tasks) in + + let rec process token = + TaskHelper.exn_if_cancelling ~__context; (* First check if _we_ have been cancelled *) + let statuses = List.filter_map (fun task -> try Some (Db.Task.get_status ~__context ~self:task) with _ -> None) tasks in + let unfinished = List.exists (fun state -> state = `pending) statuses in + if unfinished + then begin + let from = Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Event.from ~rpc ~session_id ~classes ~token ~timeout:30.0) in + debug "Using events to wait for tasks: %s" (String.concat "," classes); + let from = Event_types.event_from_of_rpc from in + process from.Event_types.token + end else + () + in + process "" + + let cancel ~__context ~vm ~ops = + let cancelled = List.filter_map (fun (task,op) -> + if List.mem op ops then begin + info "Cancelling VM.%s for VM.hard_shutdown/reboot" (Record_util.vm_operation_to_string op); + Helpers.call_api_functions ~__context + (fun rpc session_id -> try Client.Task.cancel ~rpc ~session_id ~task:(Ref.of_string task) with _ -> ()); + Some (Ref.of_string task) + end else None + ) (Db.VM.get_current_operations ~__context ~self:vm) in + wait_for_tasks ~__context ~tasks:cancelled + + let unmark_vbds ~__context ~vbds ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + iter_with_drop ~doc:("unmarking VBDs after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.VBD.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vbd_helpers.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vbd, Ref.string_of self); + end) + vbds + + let mark_vbds ~__context ~vm ~doc ~op : API.ref_VBD list = + let task_id = Ref.string_of (Context.get_task_id __context) in + let vbds = Db.VM.get_VBDs 
~__context ~self:vm in + let marked = ref [] in + (* CA-26575: paper over transient VBD glitches caused by SR.lvhd_stop_the_world by throwing the + first OTHER_OPERATION_IN_PROGRESS (or whatever) we encounter and let the caller deal with it *) + try + List.iter + (fun vbd -> + Xapi_vbd_helpers.assert_operation_valid ~__context ~self:vbd ~op; + Db.VBD.add_to_current_operations ~__context ~self:vbd ~key:task_id ~value:op; + Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; + marked := vbd :: !marked; + ) vbds; + vbds + with e -> + debug "Caught exception marking VBD for %s on VM %s: %s" doc (Ref.string_of vm) (ExnHelper.string_of_exn e); + unmark_vbds ~__context ~vbds:!marked ~doc ~op; + raise e + + let with_vbds_marked ~__context ~vm ~doc ~op f = + (* CA-26575: paper over transient VBD glitches caused by SR.lvhd_stop_the_world *) + let vbds = Helpers.retry_with_global_lock ~__context ~doc ~policy:Helpers.Policy.fail_quickly (fun () -> + mark_vbds ~__context ~vm ~doc ~op) in + finally + (fun () -> f vbds) + (fun () -> Helpers.with_global_lock (fun () -> unmark_vbds ~__context ~vbds ~doc ~op)) + + let unmark_vifs ~__context ~vifs ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + iter_with_drop ~doc:("unmarking VIFs after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.VIF.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vif_helpers.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vif, Ref.string_of self); + end) + vifs + + let mark_vifs ~__context ~vm ~doc ~op : API.ref_VIF list = + let task_id = Ref.string_of (Context.get_task_id __context) in + let vifs = Db.VM.get_VIFs ~__context ~self:vm in + let marked = map_with_drop ~doc:("marking VIFs for " ^ doc) + (fun vif -> + Xapi_vif_helpers.assert_operation_valid ~__context ~self:vif ~op; + Db.VIF.add_to_current_operations ~__context ~self:vif ~key:task_id ~value:op; + 
Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif; + vif) vifs in + (* Did we mark them all? *) + if List.length marked <> List.length vifs then begin + unmark_vifs ~__context ~vifs:marked ~doc ~op; + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Failed to lock all VIFs"])) + end else marked + + let with_vifs_marked ~__context ~vm ~doc ~op f = + let vifs = Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vifs ~__context ~vm ~doc ~op) in + finally + (fun () -> f vifs) + (fun () -> Helpers.with_global_lock (fun () -> unmark_vifs ~__context ~vifs ~doc ~op)) + + (* Some VM operations have side-effects on VBD allowed_operations but don't actually + lock the VBDs themselves (eg suspend) *) + let update_vbd_operations ~__context ~vm = + Helpers.with_global_lock + (fun () -> + List.iter (fun self -> + Xapi_vbd_helpers.update_allowed_operations ~__context ~self; + try + let vdi = Db.VBD.get_VDI ~__context ~self in + Xapi_vdi.update_allowed_operations ~__context ~self:vdi + with _ -> ()) + (Db.VM.get_VBDs ~__context ~self:vm)) + + let update_vif_operations ~__context ~vm = + Helpers.with_global_lock + (fun () -> + List.iter (fun self -> Xapi_vif_helpers.update_allowed_operations ~__context ~self) + (Db.VM.get_VIFs ~__context ~self:vm)) + + (* -------- Forwarding helper functions: ------------------------------------ *) + + (* Read resisdent-on field from vm to determine who to forward to *) + let forward_vm_op ~local_fn ~__context ~vm op = + let power_state = Db.VM.get_power_state ~__context ~self:vm in + if List.mem power_state [`Running; `Paused] then + do_op_on ~local_fn ~__context ~host:(Db.VM.get_resident_on ~__context ~self:vm) op + else + local_fn ~__context + + (* Clear scheduled_to_be_resident_on for a VM and all its vGPUs. 
*) + let clear_scheduled_to_be_resident_on ~__context ~vm = + Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:Ref.null; + List.iter + (fun vgpu -> + Db.VGPU.set_scheduled_to_be_resident_on ~__context + ~self:vgpu + ~value:Ref.null) + (Db.VM.get_VGPUs ~__context ~self:vm) + + (* Notes on memory checking/reservation logic: + When computing the hosts free memory we consider all VMs resident_on (ie running + and consuming resources NOW) and scheduled_to_be_resident_on (ie those which are + starting/resuming/migrating, whose memory has been reserved but may not all be being + used atm). + We generally call 'assert_can_boot_here' with the master forwarding lock held, + which verifies that a host has enough free memory to support the VM and then we + set 'scheduled_to_be_resident_on' which prevents concurrent competing attempts to + use the same resources from succeeding. *) + + (* Reserves the resources for a VM by setting it as 'scheduled_to_be_resident_on' a host *) + let allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op () = + begin match host_op with + | Some x -> + let task_id = Ref.string_of (Context.get_task_id __context) in + Xapi_host_helpers.assert_operation_valid ~__context ~self:host ~op:x; + Db.Host.add_to_current_operations ~__context ~self:host ~key:task_id ~value:x; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + | None -> () + end; + (* Make sure the last_booted record has useful values for later use in memory checking + code. 
*) + if snapshot.API.vM_power_state = `Halted then begin + Helpers.set_boot_record ~__context ~self:vm snapshot + end; + (* Once this is set concurrent VM.start calls will start checking the memory used by this VM *) + Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:host; + try + Vgpuops.create_vgpus ~__context host (vm, snapshot) + (Helpers.will_boot_hvm ~__context ~self:vm) + with e -> + clear_scheduled_to_be_resident_on ~__context ~vm; + raise e + + (* For start/start_on/resume/resume_on/migrate *) + let finally_clear_host_operation ~__context ~host ?host_op () = match host_op with + | Some x -> + let task_id = Ref.string_of (Context.get_task_id __context) in + Db.Host.remove_from_current_operations ~__context ~self:host ~key:task_id; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host; + Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of host); + | None -> () + + let check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host = + if true + && (snapshot.API.vM_ha_restart_priority = Constants.ha_restart) + && (not snapshot.API.vM_ha_always_run) + then + Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context vm + else + Xapi_ha_vm_failover.assert_vm_placement_preserves_ha_plan ~__context ~arriving:[host, (vm, snapshot)] () + + (* README: Note on locking -- forward_to_suitable_host and reserve_memory_for_vm are only + called in a context where the current_operations field for the VM object contains the + operation we're considering. Thus the global_lock in this context is _not_ used to cover + the period where current_operations are set, but is used to ensure that (i) choose_host_for_vm + is executed under mutual exclusion with other incoming operations; and (ii) that scheduled_to_be_resident_on + (which must not change whilst someone is calling choose_host_for_vm) only executes in exclusion with + choose_host_for_vm. 
+ *) + + (* Used by VM.start and VM.resume to choose a host with enough resource and to + 'allocate_vm_to_host' (ie set the 'scheduled_to_be_resident_on' field) *) + let forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ?host_op op = + let suitable_host = Helpers.with_global_lock + (fun () -> + let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in + if host <> Ref.null then host else + let host = Xapi_vm_helpers.choose_host_for_vm ~__context ~vm ~snapshot in + (* HA overcommit protection: we can either perform 'n' HA plans by including this in + the 'choose_host_for_vm' function or we can be cheapskates by doing it here: *) + check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host; + allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op (); + host) in + finally + (fun () -> do_op_on ~local_fn ~__context ~host:suitable_host op, suitable_host) + (fun () -> + Helpers.with_global_lock + (fun () -> + finally_clear_host_operation ~__context ~host:suitable_host ?host_op (); + (* In certain cases, VM might have been destroyed as a consequence of operation *) + if Db.is_valid_ref __context vm + then clear_scheduled_to_be_resident_on ~__context ~vm)) + + (* Used by VM.start_on, VM.resume_on, VM.migrate to verify a host has enough resource and to + 'allocate_vm_to_host' (ie set the 'scheduled_to_be_resident_on' field) *) + let reserve_memory_for_vm ~__context ~vm ~snapshot ~host ?host_op f = + Helpers.with_global_lock + (fun () -> + Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host:host ~snapshot (); + (* NB in the case of migrate although we are about to increase free memory on the sending host + we ignore this because if a failure happens while a VM is in-flight it will still be considered + on both hosts, potentially breaking the failover plan. 
*) + check_vm_preserves_ha_plan ~__context ~vm ~snapshot ~host; + allocate_vm_to_host ~__context ~vm ~host ~snapshot ?host_op ()); + finally f + (fun () -> + Helpers.with_global_lock + (fun () -> + finally_clear_host_operation ~__context ~host ?host_op (); + clear_scheduled_to_be_resident_on ~__context ~vm)) + + (** + Used by VM.set_memory_dynamic_range to reserve enough memory for + increasing dynamic_min. Although a VM may actually be technically + outside the range [dynamic_min, dynamic_max] we still ensure that *if* + all VMs are obeying our commands and ballooning to dynamic_min if we ask + *then* the sum of the dynamic_mins will fit on the host. + *) + let reserve_memory_for_dynamic_change ~__context ~vm + new_dynamic_min new_dynamic_max f = + let host = Db.VM.get_resident_on ~__context ~self:vm in + let old_dynamic_min = Db.VM.get_memory_dynamic_min ~__context ~self:vm in + let old_dynamic_max = Db.VM.get_memory_dynamic_max ~__context ~self:vm in + let restore_old_values_on_error = ref false in + Helpers.with_global_lock + (fun () -> + let host_mem_available = + Memory_check.host_compute_free_memory_with_maximum_compression + ~__context ~host None in + let dynamic_min_change = Int64.sub old_dynamic_min + new_dynamic_min in + let new_host_mem_available = Int64.add host_mem_available + dynamic_min_change in + if new_host_mem_available < 0L + then raise (Api_errors.Server_error ( + Api_errors.host_not_enough_free_memory, [ + Int64.to_string (Int64.div (Int64.sub 0L dynamic_min_change) 1024L); + Int64.to_string (Int64.div host_mem_available 1024L); + ])); + if dynamic_min_change < 0L then begin + restore_old_values_on_error := true; + Db.VM.set_memory_dynamic_min ~__context ~self:vm + ~value:new_dynamic_min; + Db.VM.set_memory_dynamic_max ~__context ~self:vm + ~value:new_dynamic_max; + end + ); + try + f () + with exn -> + if !restore_old_values_on_error then begin + Db.VM.set_memory_dynamic_min ~__context ~self:vm + ~value:old_dynamic_min; + 
Db.VM.set_memory_dynamic_max ~__context ~self:vm + ~value:old_dynamic_max; + end; + raise exn + + let forward_to_access_srs ~local_fn ~__context ~vm op = + let suitable_host = + Xapi_vm_helpers.choose_host ~__context ~vm + ~choose_fn:(Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm) () in + do_op_on ~local_fn ~__context ~host:suitable_host op + + (* Used for the VM.copy when an SR is specified *) + let forward_to_access_srs_and ~local_fn ~__context ?vm ?extra_sr op = + let choose_fn ~host = + begin match vm with + | Some vm -> + Xapi_vm_helpers.assert_can_see_SRs ~__context ~self:vm ~host + | _ -> () end; + begin match extra_sr with + | Some extra_sr -> + Xapi_vm_helpers.assert_can_see_specified_SRs ~__context + ~reqd_srs:[extra_sr] ~host + | _ -> () end in + let suitable_host = Xapi_vm_helpers.choose_host ~__context ?vm ~choose_fn () in + do_op_on ~local_fn ~__context ~host:suitable_host op + + (* -------------------------------------------------------------------------- *) + + (* don't forward create. this just makes a db record *) + let create ~__context ~name_label ~name_description = + info "VM.create: name_label = '%s' name_description = '%s'" name_label name_description; + (* Partial application: return a function which will take the dozens of remaining params *) + Local.VM.create ~__context ~name_label ~name_description + + (* don't forward destroy. 
this just deletes db record *) + let destroy ~__context ~self = + info "VM.destroy: VM = '%s'" (vm_uuid ~__context self); + with_vm_operation ~__context ~self ~doc:"VM.destroy" ~op:`destroy + (fun () -> + Local.VM.destroy ~__context ~self) + + let set_actions_after_shutdown ~__context ~self ~value = + info "VM.set_actions_after_shutdown: VM = '%s'" (vm_uuid ~__context self); + Local.VM.set_actions_after_shutdown ~__context ~self ~value + + let set_actions_after_reboot ~__context ~self ~value = + info "VM.set_actions_after_reboot: VM = '%s'" (vm_uuid ~__context self); + Local.VM.set_actions_after_reboot ~__context ~self ~value + + let set_actions_after_crash ~__context ~self ~value = + info "VM.set_actions_after_crash: VM = '%s'" (vm_uuid ~__context self); + Local.VM.set_actions_after_crash ~__context ~self ~value + + let set_ha_always_run ~__context ~self ~value = + info "VM.set_ha_always_run: VM = '%s'; value = '%b'" (vm_uuid ~__context self) value; + Local.VM.set_ha_always_run ~__context ~self ~value; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let set_ha_restart_priority ~__context ~self ~value = + info "VM.set_ha_restart_priority: VM = '%s'; value = '%s'" (vm_uuid ~__context self) value; + Local.VM.set_ha_restart_priority ~__context ~self ~value; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + + let set_is_a_template ~__context ~self ~value = + if value + then with_vm_operation ~__context ~self ~doc:"VM.set_is_a_template" ~op:`make_into_template + (fun () -> + Local.VM.set_is_a_template ~__context ~self ~value:true) + else Local.VM.set_is_a_template ~__context ~self ~value + (* else raise (Api_errors.Server_error(Api_errors.operation_not_allowed, [ "Must use VM.provision" ])) *) - let maximise_memory ~__context ~self ~total ~approximate = - info "VM.maximise_memory: VM = '%s'; total = '%Ld'; approximate = '%b'" (vm_uuid ~__context self) total approximate; - Local.VM.maximise_memory ~__context ~self ~total ~approximate - - 
let clone ~__context ~vm ~new_name = - info "VM.clone: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; - let local_fn = Local.VM.clone ~vm ~new_name in - (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone - uses the API to clone and lock the individual VDIs. We don't give any atomicity - guarantees here but we do prevent disk corruption. *) - with_vm_operation ~__context ~self:vm ~doc:"VM.clone" ~op:`clone - (fun () -> - forward_to_access_srs ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.clone rpc session_id vm new_name)) - - let update_snapshot_metadata ~__context ~vm ~snapshot_of ~snapshot_time ~transportable_snapshot_id = - Db.VM.set_is_a_snapshot ~__context ~self:vm ~value:true; - Db.VM.set_snapshot_time ~__context ~self:vm ~value:snapshot_time; - Db.VM.set_snapshot_of ~__context ~self:vm ~value:snapshot_of; - Db.VM.set_transportable_snapshot_id ~__context ~self:vm ~value:transportable_snapshot_id - - (* almost a copy of the clone function *) - let snapshot ~__context ~vm ~new_name = - info "VM.snapshot: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; - let local_fn = Local.VM.snapshot ~vm ~new_name in - (* We mark the VM as snapshoting. We don't mark the disks; the implementation of the snapshot uses the API *) - (* to snapshot and lock the individual VDIs. We don't give any atomicity guarantees here but we do prevent *) - (* disk corruption. *) - with_vm_operation ~__context ~self: vm ~doc:"VM.snapshot" ~op:`snapshot - (fun () -> - forward_to_access_srs ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.snapshot rpc session_id vm new_name)) - - let snapshot_with_quiesce ~__context ~vm ~new_name = - info "VM.snapshot_with_quiesce: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; - let local_fn = Local.VM.snapshot_with_quiesce ~vm ~new_name in - (* We mark the VM as snapshoting. 
We don't mark the disks; the implementation of the snapshot uses the API *) - (* to snapshot and lock the individual VDIs. We don't give any atomicity guarantees here but we do prevent *) - (* disk corruption. *) - with_vm_operation ~__context ~self: vm ~doc:"VM.snapshot_with_quiesce" ~op:`snapshot_with_quiesce - (fun () -> - let power_state = Db.VM.get_power_state ~__context ~self:vm in - let forward = - if power_state = `Running - then forward_vm_op - else forward_to_access_srs - in forward ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.snapshot_with_quiesce rpc session_id vm new_name)) - - let checkpoint ~__context ~vm ~new_name = - info "VM.checkpoint: VM = '%s'; new_name=' %s'" (vm_uuid ~__context vm) new_name; - let local_fn = Local.VM.checkpoint ~vm ~new_name in - let forward_fn session_id rpc = Client.VM.checkpoint rpc session_id vm new_name in - - with_vm_operation ~__context ~self: vm ~doc:"VM.checkpoint" ~op:`checkpoint (fun () -> - if Db.VM.get_power_state __context vm = `Running then - forward_vm_op ~local_fn ~__context ~vm forward_fn - else - forward_to_access_srs ~local_fn ~__context ~vm forward_fn) - - let copy ~__context ~vm ~new_name ~sr = - info "VM.copy: VM = '%s'; new_name = '%s'; SR = '%s'" (vm_uuid ~__context vm) new_name (sr_uuid ~__context sr); - (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone - uses the API to clone and lock the individual VDIs. We don't give any atomicity - guarantees here but we do prevent disk corruption. - VM.copy is always run on the master - the VDI.copy subtask(s) will be - forwarded to suitable hosts. 
*) - with_vm_operation ~__context ~self:vm ~doc:"VM.copy" ~op:`copy - (fun () -> Local.VM.copy ~__context ~vm ~new_name ~sr) - - exception Ambigious_provision_spec - exception Not_forwarding - - let provision ~__context ~vm = - info "VM.provision: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.provision ~vm in - let localhost = Helpers.get_localhost ~__context in - - with_vm_operation ~__context ~self:vm ~doc:"VM.provision" ~op:`provision - (fun () -> - let template = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Xapi_templates.get_template_record rpc session_id vm) in - (* Compute the set of hosts which can see the SRs mentioned in the provision spec *) - let possible_hosts = - try - let srs_in_provision_spec = - match template with - None -> [] - | Some template -> - let srs = List.map (fun d->d.Xapi_templates.sr) template.Xapi_templates.disks in - let srs = - List.map - (fun sr-> - try - Db.SR.get_by_uuid ~__context ~uuid:sr - with - Db_exn.Read_missing_uuid (_,_,_) - | Db_exn.Too_many_values (_,_,_) -> - begin - match (Db.SR.get_by_name_label ~__context ~label:sr) with - [] -> raise Not_forwarding (* couldn't find it. 
Do it locally and will report correct error *) - | [x] -> info "VM.provision: VM = '%s'; SR = '%s'" (vm_uuid ~__context vm) (sr_uuid ~__context x); x - | _ -> raise Ambigious_provision_spec - end) - srs in - srs in - Xapi_vm_helpers.possible_hosts ~__context ~vm - ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context - ~reqd_srs:srs_in_provision_spec) () - with - | Not_forwarding -> [ ] - | Api_errors.Server_error (code, _) when code = Api_errors.no_hosts_available -> [] in - let hosts = if possible_hosts = [] then [ localhost ] else possible_hosts in - loadbalance_host_operation ~__context ~hosts ~doc:"VM.provision" ~op:`provision - (fun host -> - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.VM.provision rpc session_id vm) - ) - ) - - let query_services ~__context ~self = - info "VM.query_services: VM = '%s'" (vm_uuid ~__context self); - with_vm_operation ~__context ~self ~doc:"VM.query_services" ~op:`query_services - (fun () -> - Local.VM.query_services ~__context ~self - ) - - let start ~__context ~vm ~start_paused ~force = - info "VM.start: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.start ~vm ~start_paused ~force in - let host = - with_vm_operation ~__context ~self:vm ~doc:"VM.start" ~op:`start - (fun () -> - with_vbds_marked ~__context ~vm ~doc:"VM.start" ~op:`attach - (fun vbds -> - with_vifs_marked ~__context ~vm ~doc:"VM.start" ~op:`attach - (fun vifs -> - (* The start operation makes use of the cached memory overhead *) - (* value when reserving memory. It's important to recalculate *) - (* the cached value before performing the start since there's *) - (* no guarantee that the cached value is valid. In particular, *) - (* we must recalculate the value BEFORE creating the snapshot. 
*) - Xapi_vm_helpers.update_memory_overhead ~__context ~vm; - Xapi_vm_helpers.consider_generic_bios_strings ~__context ~vm; - let snapshot = Db.VM.get_record ~__context ~self:vm in - let (), host = forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_start - (fun session_id rpc -> - Client.VM.start rpc session_id vm start_paused force) in - host - ))) in - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' started on host: %s (uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.Host.get_name_label ~__context ~self:host) - (Db.Host.get_uuid ~__context ~self:host) - in - let (name, priority) = Api_messages.vm_started in - (try ignore - (Xapi_message.create - ~__context - ~name - ~priority - ~cls:`VM - ~obj_uuid:uuid - ~body:message_body) - with _ -> ()); - Rrdd_proxy.push_rrd ~__context ~vm_uuid:uuid - - let start_on ~__context ~vm ~host ~start_paused ~force = - if Helpers.rolling_upgrade_in_progress ~__context - then Helpers.assert_host_has_highest_version_in_pool - ~__context ~host ; - (* Prevent VM start on a host that is evacuating *) - List.iter (fun op -> - match op with - | ( _ , `evacuate ) -> raise (Api_errors.Server_error(Api_errors.host_evacuate_in_progress, [(Ref.string_of host)])); - | _ -> ()) - (Db.Host.get_current_operations ~__context ~self:host); - info "VM.start_on: VM = '%s'; host '%s'" - (vm_uuid ~__context vm) (host_uuid ~__context host); - let local_fn = Local.VM.start_on ~vm ~host ~start_paused ~force in - with_vm_operation ~__context ~self:vm ~doc:"VM.start_on" ~op:`start_on - (fun () -> - with_vbds_marked ~__context ~vm ~doc:"VM.start_on" ~op:`attach - (fun vbds -> - with_vifs_marked ~__context ~vm ~doc:"VM.start_on" ~op:`attach - (fun vifs -> - (* The start operation makes use of the cached memory overhead *) - (* value when reserving memory. 
It's important to recalculate *) - (* the cached value before performing the start since there's *) - (* no guarantee that the cached value is valid. In particular, *) - (* we must recalculate the value BEFORE creating the snapshot. *) - Xapi_vm_helpers.update_memory_overhead ~__context ~vm; - Xapi_vm_helpers.consider_generic_bios_strings ~__context ~vm; - let snapshot = Db.VM.get_record ~__context ~self:vm in - reserve_memory_for_vm ~__context ~vm ~host ~snapshot ~host_op:`vm_start - (fun () -> - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> - Client.VM.start - rpc session_id vm start_paused force) - ); - Xapi_vm_helpers.start_delay ~__context ~vm; - ))); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - let _ (* uuid *) = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' started on host: %s (uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.Host.get_name_label ~__context ~self:host) - (Db.Host.get_uuid ~__context ~self:host) in - let (name, priority) = Api_messages.vm_started in - (try ignore - (Xapi_message.create - ~__context - ~name - ~priority - ~cls:`VM - ~obj_uuid:(Db.VM.get_uuid ~__context ~self:vm) - ~body:message_body) - with _ -> ()); - Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) - - let pause ~__context ~vm = - info "VM.pause: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.pause ~vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.pause" ~op:`pause - (fun () -> - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.pause rpc session_id vm)); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - - let unpause ~__context ~vm = - info "VM.unpause: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.unpause ~vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.unpause" ~op:`unpause - (fun () -> - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc 
-> Client.VM.unpause rpc session_id vm)); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let call_plugin ~__context ~vm ~plugin ~fn ~args = - let censor_kws = ["password"] in (* We could censor "username" too, but the current decision was to leave it there. *) - let argstrs = List.map (fun (k, v) -> Printf.sprintf "args:%s = '%s'" k (if List.exists (String.has_substr k) censor_kws then "(omitted)" else v)) args in - info "VM.call_plugin: VM = '%s'; plugin = '%s'; fn = '%s'; %s" (vm_uuid ~__context vm) plugin fn (String.concat "; " argstrs); - let local_fn = Local.VM.call_plugin ~vm ~plugin ~fn ~args in - with_vm_operation ~__context ~self:vm ~doc:"VM.call_plugin" ~op:`call_plugin ~policy:Helpers.Policy.fail_immediately - (fun () -> - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.call_plugin rpc session_id vm plugin fn args)) - - let set_has_vendor_device ~__context ~self ~value = - info "VM.set_has_vendor_device: VM = '%s' to %b" (vm_uuid ~__context self) value; - Local.VM.set_has_vendor_device ~__context ~self ~value - - let set_xenstore_data ~__context ~self ~value = - info "VM.set_xenstore_data: VM = '%s'" (vm_uuid ~__context self); - Db.VM.set_xenstore_data ~__context ~self ~value; - let power_state = Db.VM.get_power_state ~__context ~self in - if power_state = `Running then - let local_fn = Local.VM.set_xenstore_data ~self ~value in - forward_vm_op ~local_fn ~__context ~vm:self (fun session_id rpc -> Client.VM.set_xenstore_data rpc session_id self value) - - let clean_shutdown ~__context ~vm = - info "VM.clean_shutdown: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.clean_shutdown ~vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.clean_shutdown" ~op:`clean_shutdown - (fun () -> - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.clean_shutdown rpc session_id vm) - ); - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf 
"VM '%s' shutdown" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_shutdown in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let shutdown ~__context ~vm = - info "VM.shutdown: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.shutdown ~vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.shutdown" ~op:`shutdown - (fun () -> - if Db.VM.get_power_state ~__context ~self:vm = `Suspended - then - begin - debug "VM '%s' is suspended. Shutdown will just delete suspend VDI" (Ref.string_of vm); - let all_vm_srs = Xapi_vm_helpers.compute_required_SRs_for_shutting_down_suspended_domains ~__context ~vm in - let suitable_host = Xapi_vm_helpers.choose_host ~__context ~vm:vm - ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:all_vm_srs) () in - do_op_on ~__context ~local_fn:(Local.VM.hard_shutdown ~vm) ~host:suitable_host (fun session_id rpc -> Client.VM.hard_shutdown rpc session_id vm) - end - else - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.shutdown rpc session_id vm) - ); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' shutdown" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_shutdown in - (try ignore(Xapi_message.create ~__context ~name - ~priority ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()) - - let clean_reboot ~__context ~vm = - info "VM.clean_reboot: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.clean_reboot ~vm in - (* Mark all the VBDs to prevent someone nicking one of the VDIs (or attaching - a conflicting VBD) while the devices are detached *) - with_vm_operation ~__context ~self:vm 
~doc:"VM.clean_reboot" ~op:`clean_reboot - (fun () -> - with_vbds_marked ~__context ~vm ~doc:"VM.clean_reboot" ~op:`attach - (fun vbds -> - with_vifs_marked ~__context ~vm ~doc:"VM.clean_reboot" ~op:`attach - (fun vifs -> - (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't - change across reboot. *) - forward_vm_op ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.clean_reboot rpc session_id vm)))); - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' rebooted cleanly" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_rebooted in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - (* don't forward power_state_reset; the whole point is that this can be performed when a host is down *) - let power_state_reset ~__context ~vm = - info "VM.power_state_reset: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.power_state_reset ~__context ~vm - - let hard_shutdown ~__context ~vm = - info "VM.hard_shutdown: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.hard_shutdown ~vm in - let host = Db.VM.get_resident_on ~__context ~self:vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.hard_shutdown" ~op:`hard_shutdown - (fun () -> - cancel ~__context ~vm ~ops:[ `clean_shutdown; `clean_reboot; `hard_reboot; `pool_migrate; `call_plugin; `suspend ]; - (* If VM is actually suspended and we ask to hard_shutdown, we need to - forward to any host that can see the VDIs *) - let policy = - if Db.VM.get_power_state ~__context ~self:vm = `Suspended - then - begin - debug "VM '%s' is suspended. 
Shutdown will just delete suspend VDI" (Ref.string_of vm); - (* this expression evaluates to a fn that forwards to a host that can see all vdis: *) - let all_vm_srs = Xapi_vm_helpers.compute_required_SRs_for_shutting_down_suspended_domains ~__context ~vm in - let suitable_host = Xapi_vm_helpers.choose_host ~__context ~vm:vm - ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:all_vm_srs) () in - do_op_on ~host:suitable_host - end - else - (* if we're nt suspended then just forward to host that has vm running on it: *) - do_op_on ~host:host - in - policy ~local_fn ~__context (fun session_id rpc -> Client.VM.hard_shutdown rpc session_id vm) - ); - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' shutdown forcibly" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_shutdown in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let hard_reboot ~__context ~vm = - info "VM.hard_reboot: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.hard_reboot ~vm in - let host = Db.VM.get_resident_on ~__context ~self:vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.hard_reboot" ~op:`hard_reboot - (fun () -> - cancel ~__context ~vm ~ops:[ `clean_shutdown; `clean_reboot; `pool_migrate; `call_plugin; `suspend ]; - with_vbds_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach - (fun vbds -> - with_vifs_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach - (fun vifs -> - (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't - change across reboot. 
*) - do_op_on ~host:host ~local_fn ~__context - (fun session_id rpc -> Client.VM.hard_reboot rpc session_id vm)))); - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' rebooted forcibly" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_rebooted in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let hard_reboot_internal ~__context ~vm = - info "VM.hard_reboot_internal: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.hard_reboot_internal ~vm in - (* no VM operation: we assume the VM is still Running *) - with_vbds_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach - (fun vbds -> - with_vifs_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach - (fun vifs -> - (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't - change across reboot. 
*) - forward_vm_op ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.hard_reboot_internal rpc session_id vm))); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let suspend ~__context ~vm = - info "VM.suspend: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.suspend ~vm in - with_vm_operation ~__context ~self:vm ~doc:"VM.suspend" ~op:`suspend - (fun () -> - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.suspend rpc session_id vm)); - let uuid = Db.VM.get_uuid ~__context ~self:vm in - (* debug "placeholder for retrieving the current value of memory-actual";*) - let message_body = - Printf.sprintf "VM '%s' suspended" - (Db.VM.get_name_label ~__context ~self:vm) - in - let (name, priority) = Api_messages.vm_suspended in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let revert ~__context ~snapshot = - info "VM.revert: snapshot = '%s'" (vm_uuid ~__context snapshot); - - let vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in - let vm = - if Db.is_valid_ref __context vm - then vm - else Xapi_vm_snapshot.create_vm_from_snapshot ~__context ~snapshot in - - let local_fn = Local.VM.revert ~snapshot in - let forward_fn session_id rpc = Local.VM.revert ~__context ~snapshot in - - with_vm_operation ~__context ~self:snapshot ~doc:"VM.revert" ~op:`revert - (fun () -> with_vm_operation ~__context ~self:vm ~doc:"VM.reverting" ~op:`reverting - (fun () -> - (* We need to do a best-effort check that any suspend_VDI referenced by - the snapshot (not the current VM) is currently accessible. This is because - the revert code first clears space by deleting current VDIs before cloning - the suspend VDI: we want to minimise the probability that the operation fails - part-way through. 
*) - if Db.VM.get_power_state ~__context ~self:snapshot = `Suspended then begin - let suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:snapshot in - let sr = Db.VDI.get_SR ~__context ~self:suspend_VDI in - let pbd = choose_pbd_for_sr ~__context ~self:sr () in - let host = Db.PBD.get_host ~__context ~self:pbd in - let metrics = Db.Host.get_metrics ~__context ~self:host in - let live = Db.is_valid_ref __context metrics && (Db.Host_metrics.get_live ~__context ~self:metrics) in - if not live - then raise (Api_errors.Server_error(Api_errors.host_not_live, [ Ref.string_of host ])) - end; - (* first of all, destroy the domain if needed. *) - if Db.VM.get_power_state ~__context ~self:vm <> `Halted then begin - debug "VM %s (domid %Ld) which is reverted is not halted: shutting it down first" - (Db.VM.get_uuid __context vm) - (Db.VM.get_domid __context vm); - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VM.hard_shutdown rpc session_id vm); - end; - - Xapi_vm_snapshot.revert_vm_fields ~__context ~snapshot ~vm; - if Db.VM.get_power_state __context vm = `Running then - forward_vm_op ~local_fn ~__context ~vm forward_fn - else - forward_to_access_srs ~local_fn ~__context ~vm forward_fn)) - - (* same forwarding logic as clone *) - let csvm ~__context ~vm = - info "VM.csvm: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.csvm ~vm in - (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone - uses the API to clone and lock the individual VDIs. We don't give any atomicity - guarantees here but we do prevent disk corruption. 
*) - let suspend_sr = Db.VDI.get_SR ~__context ~self:(Db.VM.get_suspend_VDI ~__context ~self:vm) in - let result = with_vm_operation ~__context ~self:vm ~doc:"VM.csvm" ~op:`csvm - (fun () -> - forward_to_access_srs_and ~extra_sr:suspend_sr ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.csvm rpc session_id vm)) in - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' cloned (new uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.VM.get_uuid ~__context ~self:result) - in - let (name, priority) = Api_messages.vm_cloned in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - result - - (* Like start.. resume on any suitable host *) - let resume ~__context ~vm ~start_paused ~force = - info "VM.resume: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.resume ~vm ~start_paused ~force in - let host = - with_vm_operation ~__context ~self:vm ~doc:"VM.resume" ~op:`resume - (fun () -> - with_vbds_marked ~__context ~vm ~doc:"VM.resume" ~op:`attach - (fun vbds -> - let snapshot = Helpers.get_boot_record ~__context ~self:vm in - let (), host = forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_resume - (fun session_id rpc -> Client.VM.resume rpc session_id vm start_paused force) in - host - ); - ) - in - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' resumed on host: %s (uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.Host.get_name_label ~__context ~self:host) - (Db.Host.get_uuid ~__context ~self:host) - in - let (name, priority) = Api_messages.vm_resumed in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) - - let 
resume_on ~__context ~vm ~host ~start_paused ~force = - if Helpers.rolling_upgrade_in_progress ~__context - then Helpers.assert_host_has_highest_version_in_pool - ~__context ~host ; - info "VM.resume_on: VM = '%s'; host = '%s'" (vm_uuid ~__context vm) (host_uuid ~__context host); - let local_fn = Local.VM.resume_on ~vm ~host ~start_paused ~force in - with_vm_operation ~__context ~self:vm ~doc:"VM.resume_on" ~op:`resume_on - (fun () -> - with_vbds_marked ~__context ~vm ~doc:"VM.resume_on" ~op:`attach - (fun vbds -> - let snapshot = Helpers.get_boot_record ~__context ~self:vm in - reserve_memory_for_vm ~__context ~vm ~host ~snapshot ~host_op:`vm_resume - (fun () -> - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.VM.resume_on rpc session_id vm host start_paused force)); - ); - ); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf "VM '%s' resumed on host: %s (uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.Host.get_name_label ~__context ~self:host) - (Db.Host.get_uuid ~__context ~self:host) - in - let (name, priority) = Api_messages.vm_resumed in - (try ignore(Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); - Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) - - let pool_migrate_complete ~__context ~vm ~host = - info "VM.pool_migrate_complete: VM = '%s'; host = '%s'" - (vm_uuid ~__context vm) (host_uuid ~__context host); - let local_fn = Local.VM.pool_migrate_complete ~vm ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> - Client.VM.pool_migrate_complete rpc session_id vm host); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm - - let pool_migrate ~__context ~vm ~host ~options = - info "VM.pool_migrate: VM = '%s'; host = '%s'" - (vm_uuid ~__context vm) (host_uuid ~__context host); 
- if Helpers.rolling_upgrade_in_progress ~__context - then begin - let source_host = Db.VM.get_resident_on ~__context ~self:vm in - Helpers.assert_host_versions_not_decreasing - ~__context - ~host_from:(Helpers.LocalObject source_host) - ~host_to:(Helpers.LocalObject host); - end; - let local_fn = Local.VM.pool_migrate ~vm ~host ~options in - - (* Check that the VM is compatible with the host it is being migrated to. *) - let force = try bool_of_string (List.assoc "force" options) with _ -> false in - if not force then Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); - - with_vm_operation ~__context ~self:vm ~doc:"VM.pool_migrate" ~op:`pool_migrate ~strict:(not force) - (fun () -> - (* Make sure the target has enough memory to receive the VM *) - let snapshot = Helpers.get_boot_record ~__context ~self:vm in - (* MTC: An MTC-protected VM has a peer VM on the destination host to which - it migrates to. When reserving memory, we must substitute the source VM - with this peer VM. If is not an MTC-protected VM, then this call will - simply return the same VM. Note that the call below not only accounts for - the destination VM's memory footprint but it also sets its set_scheduled_to_be_resident_on - field so we must make sure that we pass the destination VM and not the source. - Note: TBD: when migration into an existing VM is implemented, this section will - have to be revisited since the destination VM would already be occupying memory - and there won't be any need to account for its memory. 
*) - let dest_vm = Mtc.get_peer_vm_or_self ~__context ~self:vm in - reserve_memory_for_vm ~__context ~vm:dest_vm ~host ~snapshot ~host_op:`vm_migrate - (fun () -> - forward_vm_op ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.pool_migrate rpc session_id vm host options))); - update_vbd_operations ~__context ~vm; - update_vif_operations ~__context ~vm; - Cpuid_helpers.update_cpu_flags ~__context ~vm ~host - - let migrate_send ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = - info "VM.migrate_send: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.migrate_send ~vm ~dest ~live ~vdi_map ~vif_map ~options in - let forwarder = - if Xapi_vm_lifecycle.is_live ~__context ~self:vm then forward_vm_op else - let snapshot = Db.VM.get_record ~__context ~self:vm in - (fun ~local_fn ~__context ~vm op -> - fst (forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_migrate op)) in - with_vm_operation ~__context ~self:vm ~doc:"VM.migrate_send" ~op:`migrate_send - (fun () -> - Local.VM.assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options; - forwarder ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.migrate_send rpc session_id vm dest live vdi_map vif_map options) - ) - - let assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = - info "VM.assert_can_migrate: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options - - let send_trigger ~__context ~vm ~trigger = - info "VM.send_trigger: VM = '%s'; trigger = '%s'" (vm_uuid ~__context vm) trigger; - let local_fn = Local.VM.send_trigger ~vm ~trigger in - with_vm_operation ~__context ~self:vm ~doc:"VM.send_trigger" ~op:`send_trigger - (fun () -> - forward_vm_op ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.send_trigger rpc session_id vm trigger)) - - let send_sysrq ~__context ~vm ~key = - info "VM.send_sysrq: VM = '%s'; sysrq = '%s'" (vm_uuid ~__context vm) key; 
- let local_fn = Local.VM.send_sysrq ~vm ~key in - with_vm_operation ~__context ~self:vm ~doc:"VM.send_sysrq" ~op:`send_sysrq - (fun () -> - forward_vm_op ~local_fn ~__context ~vm - (fun session_id rpc -> Client.VM.send_sysrq rpc session_id vm key)) - - let set_VCPUs_number_live ~__context ~self ~nvcpu = - info "VM.set_VCPUs_number_live: VM = '%s'; number_of_VCPU = %Ld" (vm_uuid ~__context self) nvcpu; - let local_fn = Local.VM.set_VCPUs_number_live ~self ~nvcpu in - with_vm_operation ~__context ~self ~doc:"VM.set_VCPUs_number_live" ~op:`changing_VCPUs_live - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.set_VCPUs_number_live rpc session_id self nvcpu)) - - let add_to_VCPUs_params_live ~__context ~self ~key ~value = - info "VM.add_to_VCPUs_params_live: VM = '%s'; params = ('%s','%s')" (vm_uuid ~__context self) key value; - let local_fn = Local.VM.add_to_VCPUs_params_live ~self ~key ~value in - with_vm_operation ~__context ~self ~doc:"VM.add_to_VCPUs_params_live" ~op:`changing_VCPUs_live - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.add_to_VCPUs_params_live rpc session_id self key value)) - - let set_VCPUs_max ~__context ~self ~value = - info "VM.set_VCPUs_max: self = %s; value = %Ld" - (vm_uuid ~__context self) value; - with_vm_operation ~__context ~self ~doc:"VM.set_VCPUs_max" - ~op:`changing_VCPUs - (fun () -> Local.VM.set_VCPUs_max ~__context ~self ~value) - - let set_VCPUs_at_startup ~__context ~self ~value = - info "VM.set_VCPUs_at_startup: self = %s; value = %Ld" - (vm_uuid ~__context self) value; - Local.VM.set_VCPUs_at_startup ~__context ~self ~value - - let compute_memory_overhead ~__context ~vm = - info "VM.compute_memory_overhead: vm = '%s'" - (vm_uuid ~__context vm); - Local.VM.compute_memory_overhead ~__context ~vm - - let set_memory_dynamic_range ~__context ~self ~min ~max = - info "VM.set_memory_dynamic_range: VM = '%s'; min = %Ld; max = %Ld" - 
(Ref.string_of self) min max; - let local_fn = Local.VM.set_memory_dynamic_range ~self ~min ~max in - with_vm_operation ~__context ~self ~doc:"VM.set_memory_dynamic_range" - ~op:`changing_dynamic_range - (fun () -> - (* XXX: Perform basic parameter validation, before forwarding *) - (* to the slave. Do this after sorting out the last boot *) - (* record via set_static_range. *) - let power_state = Db.VM.get_power_state ~__context ~self in - match power_state with - | `Running -> - (* If current dynamic_min is lower *) - (* then we will block the operation *) - reserve_memory_for_dynamic_change ~__context ~vm:self - min max - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> - Client.VM.set_memory_dynamic_range - rpc session_id self min max - ) - ) - | `Halted -> - local_fn ~__context - | _ -> - failwith - "assertion_failure: set_memory_dynamic_range: \ - power_state should be Halted or Running" - ) - - let set_memory_dynamic_max ~__context ~self ~value = - info "VM.set_memory_dynamic_max: VM = '%s'; value = %Ld" - (vm_uuid ~__context self) value; - set_memory_dynamic_range ~__context ~self ~max:value - ~min:(Db.VM.get_memory_dynamic_min ~__context ~self) - - let set_memory_dynamic_min ~__context ~self ~value = - info "VM.set_memory_dynamic_min: VM = '%s'; value = %Ld" - (vm_uuid ~__context self) value; - set_memory_dynamic_range ~__context ~self ~min:value - ~max:(Db.VM.get_memory_dynamic_max ~__context ~self) - - let set_memory_static_range ~__context ~self ~min ~max = - info "VM.set_memory_static_range: self = %s; min = %Ld; max = %Ld" - (vm_uuid ~__context self) min max; - with_vm_operation ~__context ~self ~doc:"VM.set_memory_static_range" - ~op:`changing_static_range - (fun () -> Local.VM.set_memory_static_range ~__context ~self ~min ~max) - - let set_memory_static_max ~__context ~self ~value = - info "VM.set_memory_static_max: VM = '%s'; value = %Ld" - (vm_uuid ~__context self) value; - set_memory_static_range ~__context ~self 
~max:value - ~min:(Db.VM.get_memory_static_min ~__context ~self) - - let set_memory_static_min ~__context ~self ~value = - info "VM.set_memory_static_min: VM = '%s'; value = %Ld" - (vm_uuid ~__context self) value; - set_memory_static_range ~__context ~self ~min:value - ~max:(Db.VM.get_memory_static_max ~__context ~self) - - let set_memory_limits ~__context ~self - ~static_min ~static_max ~dynamic_min ~dynamic_max = - info - "VM.set_memory_limits: self = %s; \ - static_min = %Ld; static_max = %Ld; \ - dynamic_min = %Ld; dynamic_max = %Ld" - (vm_uuid ~__context self) - static_min static_max dynamic_min dynamic_max; - let local_fn = Local.VM.set_memory_limits ~self - ~static_min ~static_max ~dynamic_min ~dynamic_max in - with_vm_operation ~__context ~self ~doc:"VM.set_memory_limits" ~op:`changing_memory_limits - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.set_memory_limits rpc session_id self - static_min static_max dynamic_min dynamic_max)) - - let set_memory ~__context ~self ~value = - info "VM.set_memory: self = %s; value = %Ld" (vm_uuid ~__context self) value; - let local_fn = Local.VM.set_memory ~self ~value in - with_vm_operation ~__context ~self ~doc:"VM.set_memory" ~op:`changing_memory_limits - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.set_memory rpc session_id self value)) - - let set_memory_target_live ~__context ~self ~target = - info "VM.set_memory_target_live: VM = '%s'; min = %Ld" (vm_uuid ~__context self) target; - let local_fn = Local.VM.set_memory_target_live ~self ~target in - with_vm_operation ~__context ~self ~doc:"VM.set_memory_target_live" ~op:`changing_memory_live - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.set_memory_target_live rpc session_id self target)) - - let wait_memory_target_live ~__context ~self = - info "VM.wait_memory_target_live: VM = '%s'" (vm_uuid ~__context self); - let local_fn = 
Local.VM.wait_memory_target_live ~self in - with_vm_operation ~__context ~self ~doc:"VM.wait_memory_target_live" ~op:`awaiting_memory_live - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.wait_memory_target_live rpc session_id self)) - - (* Dummy implementation for a deprecated API method. *) - let get_cooperative ~__context ~self = - info "VM.get_cooperative: VM = '%s'" (vm_uuid ~__context self); - Local.VM.get_cooperative ~__context ~self - - let set_HVM_shadow_multiplier ~__context ~self ~value = - info "VM.set_HVM_shadow_multiplier: self = %s; multiplier = %f" - (vm_uuid ~__context self) value; - with_vm_operation ~__context ~self ~doc:"VM.set_HVM_shadow_multiplier" - ~op:`changing_shadow_memory - (fun () -> - Local.VM.set_HVM_shadow_multiplier ~__context ~self ~value) - - let set_shadow_multiplier_live ~__context ~self ~multiplier = - info "VM.set_shadow_multiplier_live: VM = '%s'; min = %f" (vm_uuid ~__context self) multiplier; - let local_fn = Local.VM.set_shadow_multiplier_live ~self ~multiplier in - with_vm_operation ~__context ~self ~doc:"VM.set_shadow_multiplier_live" ~op:`changing_shadow_memory_live - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> - (* No need to perform a memory calculation here: the real code will tell us if the - new value is too big. 
*) - Client.VM.set_shadow_multiplier_live rpc session_id self multiplier - ) - ) - - (* this is in db *) - let get_boot_record ~__context ~self = - info "VM.get_boot_record: VM = '%s'" (vm_uuid ~__context self); - with_vm_operation ~__context ~self ~doc:"VM.get_boot_record" ~op:`get_boot_record - (fun () -> - Local.VM.get_boot_record ~__context ~self) - - let get_data_sources ~__context ~self = - info "VM.get_data_sources: VM = '%s'" (vm_uuid ~__context self); - let local_fn = Local.VM.get_data_sources ~self in - with_vm_operation ~__context ~self ~doc:"VM.get_data_source" ~op:`data_source_op - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.get_data_sources rpc session_id self)) - - let record_data_source ~__context ~self ~data_source = - info "VM.record_data_source: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; - let local_fn = Local.VM.record_data_source ~self ~data_source in - with_vm_operation ~__context ~self ~doc:"VM.record_data_source" ~op:`data_source_op - (fun () -> - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.record_data_source rpc session_id self data_source)) - - let query_data_source ~__context ~self ~data_source = - info "VM.query_data_source: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; - Xapi_vm_lifecycle.assert_power_state_in ~__context ~self ~allowed:[`Running; `Paused]; - let local_fn = Local.VM.query_data_source ~self ~data_source in - forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.query_data_source rpc session_id self data_source) - - let forget_data_source_archives ~__context ~self ~data_source = - info "VM.forget_data_source_archives: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; - let local_fn = Local.VM.forget_data_source_archives ~self ~data_source in - with_vm_operation ~__context ~self ~doc:"VM.forget_data_source_archives" ~op:`data_source_op - (fun () -> 
- forward_vm_op ~local_fn ~__context ~vm:self - (fun session_id rpc -> Client.VM.forget_data_source_archives rpc session_id self data_source)) - - let get_possible_hosts ~__context ~vm = - info "VM.get_possible_hosts: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.get_possible_hosts ~__context ~vm - - let assert_operation_valid ~__context ~self ~op = - info "VM.assert_operation_valid: VM = '%s'" (vm_uuid ~__context self); - Local.VM.assert_operation_valid ~__context ~self ~op - - let update_allowed_operations ~__context ~self = - info "VM.update_allowed_operations: VM = '%s'" (vm_uuid ~__context self); - Local.VM.update_allowed_operations ~__context ~self - - let assert_can_boot_here ~__context ~self ~host = - info "VM.assert_can_boot_here: VM = '%s'; host = '%s'" (vm_uuid ~__context self) (host_uuid ~__context host); - Local.VM.assert_can_boot_here ~__context ~self ~host - - let retrieve_wlb_recommendations ~__context ~vm = - info "VM.retrieve_wlb_recommendations: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.retrieve_wlb_recommendations ~__context ~vm - - let assert_agile ~__context ~self = - info "VM.assert_agile: VM = '%s'" (vm_uuid ~__context self); - Local.VM.assert_agile ~__context ~self - - let get_allowed_VBD_devices ~__context ~vm = - info "VM.get_allowed_VBD_devices: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.get_allowed_VBD_devices ~__context ~vm - - let get_allowed_VIF_devices ~__context ~vm = - info "VM.get_allowed_VIF_devices: VM = '%s'" (vm_uuid ~__context vm); - Local.VM.get_allowed_VIF_devices ~__context ~vm - - let atomic_set_resident_on ~__context ~vm ~host = - info "VM.atomic_set_resident_on: VM = '%s'" (vm_uuid ~__context vm); - (* Need to prevent the host chooser being run while these fields are being modified *) - Helpers.with_global_lock - (fun () -> - Db.VM.set_resident_on ~__context ~self:vm ~value:host; - Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:Ref.null - ) - - let create_new_blob ~__context ~vm ~name 
~mime_type ~public = - info "VM.create_new_blob: VM = '%s'; name = '%s'; MIME type = '%s' public = %b" (vm_uuid ~__context vm) name mime_type public; - Local.VM.create_new_blob ~__context ~vm ~name ~mime_type ~public - - let s3_suspend ~__context ~vm = - info "VM.s3_suspend: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.s3_suspend ~vm in - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.s3_suspend rpc session_id vm) - - let s3_resume ~__context ~vm = - info "VM.s3_resume: VM = '%s'" (vm_uuid ~__context vm); - let local_fn = Local.VM.s3_resume ~vm in - forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.s3_resume rpc session_id vm) - - - let copy_bios_strings ~__context ~vm ~host = - info "VM.copy_bios_strings: VM = '%s'; host = '%s'" (vm_uuid ~__context vm) (host_uuid ~__context host); - Local.VM.copy_bios_strings ~__context ~vm ~host - - let set_protection_policy ~__context ~self ~value = - info "VM.set_protection_policy: self = '%s'; " (vm_uuid ~__context self); - Local.VM.set_protection_policy ~__context ~self ~value - - let set_start_delay ~__context ~self ~value = - info "VM.set_start_delay: self = '%s';" (vm_uuid ~__context self); - Local.VM.set_start_delay ~__context ~self ~value - - let set_shutdown_delay ~__context ~self ~value = - info "VM.set_shutdown_delay: self = '%s';" (vm_uuid ~__context self); - Local.VM.set_shutdown_delay ~__context ~self ~value - - let set_order ~__context ~self ~value = - info "VM.set_order: self = '%s';" (vm_uuid ~__context self); - Local.VM.set_order ~__context ~self ~value - - let set_suspend_VDI ~__context ~self ~value = - info "VM.set_suspend_VDI: self = '%s';" (vm_uuid ~__context self); - Local.VM.set_suspend_VDI ~__context ~self ~value - - let assert_can_be_recovered ~__context ~self ~session_to = - info "VM.assert_can_be_recovered: self = '%s';" (vm_uuid ~__context self); - Local.VM.assert_can_be_recovered ~__context ~self ~session_to - - let 
get_SRs_required_for_recovery ~__context ~self ~session_to = - info "VM.get_SRs_required_for_recovery: self = '%s';" (vm_uuid ~__context self); - Local.VM.get_SRs_required_for_recovery ~__context ~self ~session_to - - let recover ~__context ~self ~session_to ~force = - info "VM.recover: self = '%s'; force = %b;" (vm_uuid ~__context self) force; - (* If a VM is part of an appliance, the appliance *) - (* should be recovered using VM_appliance.recover *) - let appliance = Db.VM.get_appliance ~__context ~self in - if Db.is_valid_ref __context appliance then - raise (Api_errors.Server_error(Api_errors.vm_is_part_of_an_appliance, - [Ref.string_of self; Ref.string_of appliance])); - Local.VM.recover ~__context ~self ~session_to ~force - - let set_appliance ~__context ~self ~value = - info "VM.set_appliance: self = '%s'; value = '%s';" (vm_uuid ~__context self) (vm_appliance_uuid ~__context value); - Local.VM.set_appliance ~__context ~self ~value - - let import_convert ~__context ~_type ~username ~password ~sr ~remote_config = - info "VM.import_convert: type = '%s'; remote_config = '%s;'" - _type (String.concat "," (List.map (fun (k,v) -> k ^ "=" ^ v) remote_config)); - Local.VM.import_convert ~__context ~_type ~username ~password ~sr ~remote_config - - let import ~__context ~url ~sr ~full_restore ~force = - info "VM.import: url = '%s' sr='%s' force='%b'" url (Ref.string_of sr) force; - let pbd = choose_pbd_for_sr ~__context ~self:sr () in - let host = Db.PBD.get_host ~__context ~self:pbd in - do_op_on ~local_fn:(Local.VM.import ~url ~sr ~full_restore ~force) ~__context ~host (fun session_id rpc -> Client.VM.import rpc session_id url sr full_restore force) - - end - - module VM_metrics = struct - end - - module VM_guest_metrics = struct - end - - module Host = struct - - (** Add to the Host's current operations, call a function and then remove from the - current operations. Ensure the allowed_operations are kept up to date. 
*) - let with_host_operation ~__context ~self ~doc ~op f = - let task_id = Ref.string_of (Context.get_task_id __context) in - (* CA-18377: If there's a rolling upgrade in progress, only send Miami keys across the wire. *) - let operation_allowed ~op = false - || not (Helpers.rolling_upgrade_in_progress ~__context) - || List.mem op Xapi_globs.host_operations_miami in - Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - Xapi_host_helpers.assert_operation_valid ~__context ~self ~op; - if operation_allowed ~op then - Db.Host.add_to_current_operations ~__context ~self ~key: task_id ~value: op; - Xapi_host_helpers.update_allowed_operations ~__context ~self); - (* Then do the action with the lock released *) - finally f - (* Make sure to clean up at the end *) - (fun () -> - try - if operation_allowed ~op then begin - Db.Host.remove_from_current_operations ~__context ~self ~key: task_id; - Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of self); - end; - let clustered_srs = Db.SR.get_refs_where ~__context ~expr:(Eq (Field "clustered", Literal "true")) in - if clustered_srs <> [] then - (* Host powerstate operations on one host may affect all other hosts if - * a clustered SR is in use, so update all hosts' allowed operations. 
*) - Xapi_host_helpers.update_allowed_operations_all_hosts ~__context - else - Xapi_host_helpers.update_allowed_operations ~__context ~self - with - _ -> ()) - - let create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration = - info "Host.create: uuid='%s' name_label='%s' hostname='%s' address='%s'" uuid name_label hostname address; - Local.Host.create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration - - let destroy ~__context ~self = - info "Host.destroy: host = '%s'" (host_uuid __context self); - Local.Host.destroy ~__context ~self - - let set_power_on_mode ~__context ~self ~power_on_mode ~power_on_config = - info "Host.set_power_on_mode: host = '%s'; power_on_mode = '%s' ; power_on_config = [ %s ]" - (host_uuid ~__context self) power_on_mode (String.concat ", " (List.map (fun (k, v) -> k ^ "=" ^ v) power_on_config)); - Local.Host.set_power_on_mode ~__context ~self ~power_on_mode ~power_on_config - - let set_license_params ~__context ~self ~value = - info "Host.set_license_params: host = '%s'; license_params = [ %s ]" (host_uuid ~__context self) (String.concat ", " (List.map (fun (k, v) -> k ^ "=" ^ v) value)); - Local.Host.set_license_params ~__context ~self ~value - - let set_ssl_legacy ~__context ~self ~value = - info "Host.set_ssl_legacy: host = '%s'; value = %b" (host_uuid ~__context self) value; - let success () = - if Db.Host.get_ssl_legacy ~__context ~self = value - then Some () - else None - in - let local_fn = Local.Host.set_ssl_legacy ~self ~value in - let fn () = - do_op_on ~local_fn ~__context ~host:self - (fun session_id rpc -> - Client.Host.set_ssl_legacy rpc session_id self value) - in - tolerate_connection_loss fn success 30. 
- - let ha_disable_failover_decisions ~__context ~host = - info "Host.ha_disable_failover_decisions: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_disable_failover_decisions ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_disable_failover_decisions rpc session_id host) - - let ha_disarm_fencing ~__context ~host = - info "Host.ha_disarm_fencing: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_disarm_fencing ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_disarm_fencing rpc session_id host) - - let ha_stop_daemon ~__context ~host = - info "Host.ha_stop_daemon: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_stop_daemon ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_stop_daemon rpc session_id host) - - let ha_release_resources ~__context ~host = - info "Host.ha_release_resources: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_release_resources ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_release_resources rpc session_id host) - - let ha_wait_for_shutdown_via_statefile ~__context ~host = - info "Host.ha_wait_for_shutdown_via_statefile: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_wait_for_shutdown_via_statefile ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_wait_for_shutdown_via_statefile rpc session_id host) - - let preconfigure_ha ~__context ~host ~statefiles ~metadata_vdi ~generation = - info "Host.preconfigure_ha: host = '%s'; statefiles =[ %s ]; metadata_vdi = '%s'; generation = '%s'" - (host_uuid ~__context host) (String.concat "; " (List.map Ref.string_of statefiles)) (vdi_uuid ~__context metadata_vdi) generation; - let local_fn = Local.Host.preconfigure_ha ~host ~statefiles ~metadata_vdi ~generation in - do_op_on ~local_fn ~__context ~host (fun 
session_id rpc -> Client.Host.preconfigure_ha rpc session_id host statefiles metadata_vdi generation) - - let ha_join_liveset ~__context ~host = - info "Host.ha_join_liveset: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.ha_join_liveset ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_join_liveset rpc session_id host) - - let request_backup ~__context ~host ~generation ~force = - debug "Host.request_backup: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.request_backup ~host ~generation ~force in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.request_backup rpc session_id host generation force) - - let request_config_file_sync ~__context ~host ~hash = - debug "Host.request_config_file_sync: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.request_config_file_sync ~host ~hash in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.request_config_file_sync rpc session_id host hash) - - (* Call never forwarded *) - let ha_xapi_healthcheck ~__context = - Local.Host.ha_xapi_healthcheck ~__context - - (* Call never forwarded *) - let local_assert_healthy ~__context = - info "Host.local_assert_healthy"; - Local.Host.local_assert_healthy ~__context - - (* Call never forwarded *) - let propose_new_master ~__context ~address ~manual = - info "Host.propose_new_master: type = '%s'; host address = '%s'" - (if manual then "manual" else "automatic") address; - Local.Host.propose_new_master ~__context ~address ~manual - - (* If someone aborts the transaction *) - let abort_new_master ~__context ~address = - info "Host.abort_new_master: host address = '%s'" address; - Local.Host.abort_new_master ~__context ~address - - (* Call never forwarded *) - let commit_new_master ~__context ~address = - info "Host.commit_new_master: host address = '%s'" address; - Local.Host.commit_new_master ~__context ~address - - (* Call never forwarded *) - 
let is_in_emergency_mode ~__context = - Local.Host.is_in_emergency_mode ~__context - - let local_management_reconfigure ~__context ~interface = - info "Host.local_management_reconfigure: interface = '%s'" interface; - Local.Host.local_management_reconfigure ~__context ~interface - - let emergency_ha_disable ~__context = - info "Host.emergency_ha_disable"; - Local.Host.emergency_ha_disable ~__context - - (* Dummy implementation for a deprecated API method. *) - let get_uncooperative_resident_VMs ~__context ~self = - info "Host.get_uncooperative_resident_VMs host=%s" (Ref.string_of self); - Local.Host.get_uncooperative_resident_VMs ~__context ~self - - (* Dummy implementation for a deprecated API method. *) - let get_uncooperative_domains ~__context ~self = - info "Host.get_uncooperative_domains host=%s" (Ref.string_of self); - Local.Host.get_uncooperative_domains ~__context ~self - - let management_reconfigure ~__context ~pif = - info "Host.management_reconfigure: management PIF = '%s'" (pif_uuid ~__context pif); - (* The management interface on the slave may change during this operation, so expect connection loss. - * Consider the operation successful if management flag was set on the PIF we're working with. Since the slave - * sets this flag after bringing up the management interface, this is a good indication of success. *) - let success () = - if Db.PIF.get_management ~__context ~self:pif then Some () else None in - let local_fn = Local.Host.management_reconfigure ~pif in - let fn () = - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:pif) (fun session_id rpc -> Client.Host.management_reconfigure rpc session_id pif) in - tolerate_connection_loss fn success 30. 
- - let management_disable ~__context = - info "Host.management_disable"; - Local.Host.management_disable ~__context - - let get_management_interface ~__context ~host = - info "Host.get_management_interface: host = '%s'" (host_uuid ~__context host); - Local.Host.get_management_interface ~__context ~host - - let disable ~__context ~host = - info "Host.disable: host = '%s'" (host_uuid ~__context host); - (* Block call if this would break our VM restart plan *) - Xapi_ha_vm_failover.assert_host_disable_preserves_ha_plan ~__context host; - let local_fn = Local.Host.disable ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.disable rpc session_id host); - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - - let declare_dead ~__context ~host = - info "Host.declare_dead: host = '%s'" (host_uuid ~__context host); - Local.Host.declare_dead ~__context ~host; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - - let enable ~__context ~host = - info "Host.enable: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.enable ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.enable rpc session_id host); - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - - let shutdown ~__context ~host = - info "Host.shutdown: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.shutdown ~host in - with_host_operation ~__context ~self:host ~doc:"Host.shutdown" ~op:`shutdown - (fun () -> - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.shutdown rpc session_id host) - ) - - let reboot ~__context ~host = - info "Host.reboot: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.reboot ~host in - with_host_operation ~__context ~self:host ~doc:"Host.reboot" ~op:`reboot - (fun () -> - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.reboot rpc session_id host) - ) - - let power_on ~__context 
~host = - info "Host.power_on: host = '%s'" (host_uuid ~__context host); - with_host_operation ~__context ~self:host ~doc:"Host.power_on" ~op:`power_on - (fun () -> - (* Always executed on the master *) - Local.Host.power_on ~__context ~host - ) - - let dmesg ~__context ~host = - info "Host.dmesg: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.dmesg ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.dmesg rpc session_id host) - - let dmesg_clear ~__context ~host = - info "Host.dmesg_clear: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.dmesg_clear ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.dmesg_clear rpc session_id host) - - let bugreport_upload ~__context ~host ~url ~options = - info "Host.bugreport_upload: host = '%s'; url = '%s'; options = [ %s ]" (host_uuid ~__context host) url (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) options)); - let local_fn = Local.Host.bugreport_upload ~host ~url ~options in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.bugreport_upload rpc session_id host url options) - - let list_methods ~__context = - info "Host.list_methods"; - Local.Host.list_methods ~__context - - let send_debug_keys ~__context ~host ~keys = - info "Host.send_debug_keys: host = '%s'; keys = '%s'" (host_uuid ~__context host) keys; - let local_fn = Local.Host.send_debug_keys ~host ~keys in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.send_debug_keys rpc session_id host keys) - - let get_log ~__context ~host = - info "Host.get_log: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.get_log ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_log rpc session_id host) - - let license_add ~__context ~host ~contents = - info "Host.license_add: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.license_add ~host 
~contents in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.license_add rpc session_id host contents) - - let license_remove ~__context ~host = - info "Host.license_remove: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.license_remove ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.license_remove rpc session_id host) - - let assert_can_evacuate ~__context ~host = - info "Host.assert_can_evacuate: host = '%s'" (host_uuid ~__context host); - Local.Host.assert_can_evacuate ~__context ~host - - let get_vms_which_prevent_evacuation ~__context ~self = - info "Host.get_vms_which_prevent_evacuation: host = '%s'" (host_uuid ~__context self); - Local.Host.get_vms_which_prevent_evacuation ~__context ~self - - let evacuate ~__context ~host = - info "Host.evacuate: host = '%s'" (host_uuid ~__context host); - (* Block call if this would break our VM restart plan (because the body of this sets enabled to false) *) - Xapi_ha_vm_failover.assert_host_disable_preserves_ha_plan ~__context host; - with_host_operation ~__context ~self:host ~doc:"Host.evacuate" ~op:`evacuate - (fun () -> - Local.Host.evacuate ~__context ~host - ) - - let retrieve_wlb_evacuate_recommendations ~__context ~self = - info "Host.retrieve_wlb_evacuate_recommendations: host = '%s'" (host_uuid ~__context self); - Local.Host.retrieve_wlb_evacuate_recommendations ~__context ~self - - let update_pool_secret ~__context ~host ~pool_secret = - info "Host.update_pool_secret: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.update_pool_secret ~host ~pool_secret in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.update_pool_secret rpc session_id host pool_secret) - - let update_master ~__context ~host ~master_address = - info "Host.update_master: host = '%s'; master = '%s'" (host_uuid ~__context host) master_address; - let local_fn = Local.Pool.emergency_reset_master ~master_address in - 
do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.update_master rpc session_id host master_address) - - let restart_agent ~__context ~host = - info "Host.restart_agent: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.restart_agent ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.restart_agent rpc session_id host) - - let shutdown_agent ~__context = - Local.Host.shutdown_agent ~__context - - let signal_networking_change ~__context = - info "Host.signal_networking_change"; - Local.Host.signal_networking_change ~__context - - let notify ~__context ~ty ~params = - info "Host.notify"; - Local.Host.notify ~__context ~ty ~params - - let syslog_reconfigure ~__context ~host = - info "Host.syslog_reconfigure: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.syslog_reconfigure ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.syslog_reconfigure rpc session_id host) - - let get_system_status_capabilities ~__context ~host = - info "Host.get_system_status_capabilities: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.get_system_status_capabilities ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.get_system_status_capabilities rpc - session_id host) - - let get_diagnostic_timing_stats ~__context ~host = - info "Host.get_diagnostic_timing_stats: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.get_diagnostic_timing_stats ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.get_diagnostic_timing_stats rpc session_id host) - - let set_hostname_live ~__context ~host ~hostname = - info "Host.set_hostname_live: host = '%s'; hostname = '%s'" (host_uuid ~__context host) hostname; - let local_fn = Local.Host.set_hostname_live ~host ~hostname in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.set_hostname_live rpc session_id 
host hostname) - - let get_data_sources ~__context ~host = - info "Host.get_data_sources: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.get_data_sources ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.get_data_sources rpc session_id host) - - let record_data_source ~__context ~host ~data_source = - info "Host.record_data_source: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; - let local_fn = Local.Host.record_data_source ~host ~data_source in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.record_data_source rpc session_id host data_source) - - let query_data_source ~__context ~host ~data_source = - info "Host.query_data_source: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; - let local_fn = Local.Host.query_data_source ~host ~data_source in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.query_data_source rpc session_id host data_source) - - let forget_data_source_archives ~__context ~host ~data_source = - info "Host.forget_data_source_archives: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; - let local_fn = Local.Host.forget_data_source_archives ~host ~data_source in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.forget_data_source_archives rpc session_id host data_source) - - let tickle_heartbeat ~__context ~host ~stuff = - (* info "Host.tickle_heartbeat: Incoming call from host '%s' with arguments [ %s ]" (Ref.string_of host) (String.concat "; " (List.map (fun (a, b) -> a ^ ": " ^ b) stuff)); *) - Local.Host.tickle_heartbeat ~__context ~host ~stuff - - let create_new_blob ~__context ~host ~name ~mime_type ~public = - info "Host.create_new_blob: host = '%s'; name = '%s' MIME type = '%s public = %b" (host_uuid ~__context host) name mime_type public; - Local.Host.create_new_blob ~__context ~host ~name ~mime_type ~public - - let call_plugin 
~__context ~host ~plugin ~fn ~args = - let plugins_to_protect = [ - "prepare_host_upgrade.py"; - ] in - if List.mem plugin plugins_to_protect - then - info "Host.call_plugin host = '%s'; plugin = '%s'; fn = '%s' args = [ 'hidden' ]" (host_uuid ~__context host) plugin fn - else - info "Host.call_plugin host = '%s'; plugin = '%s'; fn = '%s'; args = [ %s ]" (host_uuid ~__context host) plugin fn (String.concat "; " (List.map (fun (a, b) -> a ^ ": " ^ b) args)); - let local_fn = Local.Host.call_plugin ~host ~plugin ~fn ~args in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.call_plugin rpc session_id host plugin fn args) - - let call_extension ~__context ~host ~call = - info "Host.call_extension host = '%s'; call = '%s'" (host_uuid ~__context host) call; - let local_fn = Local.Host.call_extension ~host ~call in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.call_extension rpc session_id host call) - - let has_extension ~__context ~host ~name = - info "Host.has_extension: host = '%s'; name = '%s'" (host_uuid ~__context host) name; - let local_fn = Local.Host.has_extension ~host ~name in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.has_extension rpc session_id host name) - - let sync_data ~__context ~host = - info "Host.sync_data: host = '%s'" (host_uuid ~__context host); - Local.Host.sync_data ~__context ~host - - let backup_rrds ~__context ~host ~delay = - let local_fn = Local.Host.backup_rrds ~host ~delay in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.backup_rrds rpc session_id host delay) - - let compute_free_memory ~__context ~host = - info "Host.compute_free_memory: host = '%s'" (host_uuid ~__context host); - Local.Host.compute_free_memory ~__context ~host - - let compute_memory_overhead ~__context ~host = - info "Host.compute_memory_overhead: host = '%s'" - (host_uuid ~__context host); - Local.Host.compute_memory_overhead ~__context ~host - - let 
get_servertime ~__context ~host = - (* info "Host.get_servertime"; *) (* suppressed because the GUI calls this frequently and it isn't interesting for debugging *) - let local_fn = Local.Host.get_servertime ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.get_servertime rpc session_id host) - - let get_server_localtime ~__context ~host = - (* info "Host.get_servertime"; *) (* suppressed because the GUI calls this frequently and it isn't interesting for debugging *) - let local_fn = Local.Host.get_server_localtime ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.get_server_localtime rpc session_id host) - - let enable_binary_storage ~__context ~host = - info "Host.enable_binary_storage: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.enable_binary_storage ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.enable_binary_storage rpc session_id host) - - let disable_binary_storage ~__context ~host = - info "Host.disable_binary_storage: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.disable_binary_storage ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.disable_binary_storage rpc session_id host) - - let enable_external_auth ~__context ~host ~config ~service_name ~auth_type = - info "Host.enable_external_auth: host = '%s'; service_name = '%s'; auth_type = '%s'" (host_uuid ~__context host) service_name auth_type; - (* First assert that the AD feature is enabled if AD is requested *) - if auth_type = Extauth.auth_type_AD_Likewise then - Pool_features.assert_enabled ~__context ~f:Features.AD; - let local_fn = Local.Host.enable_external_auth ~host ~config ~service_name ~auth_type in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.enable_external_auth rpc session_id host config service_name auth_type) - - let disable_external_auth ~__context ~host ~config = - info 
"Host.disable_external_auth: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.disable_external_auth ~host ~config in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.disable_external_auth rpc session_id host config) - - let certificate_install ~__context ~host ~name ~cert = - let local_fn = Local.Host.certificate_install ~host ~name ~cert in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.certificate_install rpc session_id host name cert) - - let certificate_uninstall ~__context ~host ~name = - let local_fn = Local.Host.certificate_uninstall ~host ~name in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.certificate_uninstall rpc session_id host name) - - let certificate_list ~__context ~host = - let local_fn = Local.Host.certificate_list ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.certificate_list rpc session_id host) - - let crl_install ~__context ~host ~name ~crl = - let local_fn = Local.Host.crl_install ~host ~name ~crl in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.crl_install rpc session_id host name crl) - - let crl_uninstall ~__context ~host ~name = - let local_fn = Local.Host.crl_uninstall ~host ~name in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.crl_uninstall rpc session_id host name) - - let crl_list ~__context ~host = - let local_fn = Local.Host.crl_list ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.crl_list rpc session_id host) - - let certificate_sync ~__context ~host = - let local_fn = Local.Host.certificate_sync ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.certificate_sync rpc session_id host) - - let get_server_certificate ~__context ~host = - let local_fn = Local.Host.get_server_certificate ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> - 
Client.Host.get_server_certificate rpc session_id host) - - let attach_static_vdis ~__context ~host ~vdi_reason_map = - info "Host.attach_static_vdis: host = '%s'; vdi/reason pairs = [ %s ]" (host_uuid ~__context host) - (String.concat "; " (List.map (fun (a, b) -> Ref.string_of a ^ "/" ^ b) vdi_reason_map)); - let local_fn = Local.Host.attach_static_vdis ~host ~vdi_reason_map in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.attach_static_vdis rpc session_id host vdi_reason_map) - - let detach_static_vdis ~__context ~host ~vdis = - info "Host.detach_static_vdis: host = '%s'; vdis =[ %s ]" (host_uuid ~__context host) (String.concat "; " (List.map Ref.string_of vdis)); - let local_fn = Local.Host.detach_static_vdis ~host ~vdis in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.detach_static_vdis rpc session_id host vdis) - - let set_localdb_key ~__context ~host ~key ~value = - info "Host.set_localdb_key: host = '%s'; key = '%s'; value = '%s'" (host_uuid ~__context host) key value; - let local_fn = Local.Host.set_localdb_key ~host ~key ~value in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.set_localdb_key rpc session_id host key value) - - let apply_edition ~__context ~host ~edition ~force = - info "Host.apply_edition: host = '%s'; edition = '%s'; force = '%s'" (host_uuid ~__context host) edition (string_of_bool force); - let local_fn = Local.Host.apply_edition ~host ~edition ~force in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.apply_edition rpc session_id host edition force) - - let refresh_pack_info ~__context ~host = - info "Host.refresh_pack_info: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.refresh_pack_info ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.refresh_pack_info rpc session_id host) - - let reset_networking ~__context ~host = - info "Host.reset_networking: host = '%s'" (host_uuid 
~__context host); - Local.Host.reset_networking ~__context ~host - - let enable_local_storage_caching ~__context ~host ~sr = - let local_fn = Local.Host.enable_local_storage_caching ~host ~sr in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.enable_local_storage_caching rpc session_id host sr) - - let disable_local_storage_caching ~__context ~host = - let local_fn = Local.Host.disable_local_storage_caching ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.disable_local_storage_caching rpc session_id host) - - let get_sm_diagnostics ~__context ~host = - let local_fn = Local.Host.get_sm_diagnostics ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_sm_diagnostics rpc session_id host) - - let get_thread_diagnostics ~__context ~host = - let local_fn = Local.Host.get_thread_diagnostics ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_thread_diagnostics rpc session_id host) - - let sm_dp_destroy ~__context ~host ~dp ~allow_leak = - let local_fn = Local.Host.sm_dp_destroy ~host ~dp ~allow_leak in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.sm_dp_destroy rpc session_id host dp allow_leak) - - let sync_vlans ~__context ~host = - info "Host.sync_vlans: host = '%s'" (host_uuid ~__context host); - Local.Host.sync_vlans ~__context ~host - - let sync_tunnels ~__context ~host = - info "Host.sync_tunnels: host = '%s'" (host_uuid ~__context host); - Local.Host.sync_tunnels ~__context ~host - - let sync_pif_currently_attached ~__context ~host ~bridges = - info "Host.sync_pif_currently_attached: host = '%s'" (host_uuid ~__context host); - Local.Host.sync_pif_currently_attached ~__context ~host ~bridges - - let migrate_receive ~__context ~host ~network ~options = - info "Host.migrate_receive: host = '%s'; network = '%s'" (host_uuid ~__context host) (network_uuid ~__context network); - Local.Host.migrate_receive ~__context ~host 
~network ~options - - let enable_display ~__context ~host = - info "Host.enable_display: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.enable_display ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.enable_display rpc session_id host) - - let disable_display ~__context ~host = - info "Host.disable_display: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.Host.disable_display ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Host.disable_display rpc session_id host) - - let apply_guest_agent_config ~__context ~host = - info "Host.apply_guest_agent_config: host = '%s'" - (host_uuid ~__context host); - let local_fn = Local.Host.apply_guest_agent_config ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> - Client.Host.apply_guest_agent_config rpc session_id host) - end - - module Host_crashdump = struct - let destroy ~__context ~self = - info "Host_crashdump.destroy: host crashdump = '%s'" (host_crashdump_uuid ~__context self); - let local_fn = Local.Host_crashdump.destroy ~self in - do_op_on ~local_fn ~__context ~host:(Db.Host_crashdump.get_host ~__context ~self) - (fun session_id rpc -> Client.Host_crashdump.destroy rpc session_id self) - - let upload ~__context ~self ~url ~options = - info "Host_crashdump.upload: host crashdump = '%s'; url = '%s'" (host_crashdump_uuid ~__context self) url; - let local_fn = Local.Host_crashdump.upload ~self ~url ~options in - do_op_on ~local_fn ~__context ~host:(Db.Host_crashdump.get_host ~__context ~self) - (fun session_id rpc -> Client.Host_crashdump.upload rpc session_id self url options) - end - - module Host_patch = struct - let destroy ~__context ~self = - info "Host_patch.destroy: host patch = '%s'" (host_patch_uuid ~__context self); - Xapi_host_patch.destroy ~__context ~self - - let apply ~__context ~self = - info "Host_patch.apply: host patch = '%s'" (host_patch_uuid ~__context self); - let local_fn = 
Local.Host_patch.apply ~self in - do_op_on ~local_fn ~__context ~host:(Db.Host_patch.get_host ~__context ~self) - (fun session_id rpc -> Client.Host_patch.apply rpc session_id self) - end - - module Pool_patch = struct - let apply ~__context ~self ~host = - info "Pool_patch.apply: pool patch = '%s'; host = '%s'" (pool_patch_uuid ~__context self) (host_uuid ~__context host); - let local_fn = Local.Pool_patch.apply ~self ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Pool_patch.apply rpc session_id self host) - - let precheck ~__context ~self ~host = - info "Pool_patch.precheck: pool patch = '%s'; host = '%s'" (pool_patch_uuid ~__context self) (host_uuid ~__context host); - let local_fn = Local.Pool_patch.precheck ~self ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Pool_patch.precheck rpc session_id self host) - - let pool_apply ~__context ~self = - info "Pool_patch.pool_apply: pool patch = '%s'" (pool_patch_uuid ~__context self); - Xapi_pool_patch.pool_apply ~__context ~self - - let clean ~__context ~self = - info "Pool_patch.clean: pool patch = '%s'" (pool_patch_uuid ~__context self); - Xapi_pool_patch.clean ~__context ~self - - let clean_on_host ~__context ~self ~host = - info "Pool_patch.clean_on_host: pool patch = '%s'" (pool_patch_uuid ~__context self); - let local_fn = Local.Pool_patch.clean ~self in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Pool_patch.clean rpc session_id self) - - let pool_clean ~__context ~self = - info "Pool_patch.pool_clean: pool patch = '%s'" (pool_patch_uuid ~__context self); - Xapi_pool_patch.pool_clean ~__context ~self - - let destroy ~__context ~self = - info "Pool_patch.destroy: pool patch = '%s'" (pool_patch_uuid ~__context self); - Xapi_pool_patch.destroy ~__context ~self - end - - module Host_metrics = struct - end - - module Host_cpu = struct - end - - module Network = struct - - (* Don't forward. These are just db operations. 
Networks are "attached" when required by hosts that read db entries. - Bridges corresponding to networks are removed by per-host GC threads that read from db. *) - let create ~__context ~name_label ~name_description ~mTU ~other_config ~tags = - info "Network.create: name_label = '%s'" name_label; - Local.Network.create ~__context ~name_label ~name_description ~mTU ~other_config ~tags - - let attach ~__context ~network ~host = - info "Network.attach: network = '%s'; host = '%s'" (network_uuid ~__context network) (host_uuid ~__context host); - let local_fn = Local.Network.attach ~network ~host in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Network.attach rpc session_id network host) - - let pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge = - Local.Network.pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge - - let destroy ~__context ~self = - info "Network.destroy: network = '%s'" (network_uuid ~__context self); - (* WARNING WARNING WARNING: directly call Network.destroy with the global lock since it does - only database operations *) - Helpers.with_global_lock - (fun () -> - Local.Network.destroy ~__context ~self) - - let create_new_blob ~__context ~network ~name ~mime_type ~public = - info "Network.create_new_blob: network = '%s'; name = %s; MIME type = '%s' public = %b" (network_uuid ~__context network) name mime_type public; - Local.Network.create_new_blob ~__context ~network ~name ~mime_type ~public - - let set_default_locking_mode ~__context ~network ~value = - info "Network.set_default_locking_mode: network = '%s'; value = %s" (network_uuid ~__context network) (Record_util.network_default_locking_mode_to_string value); - Local.Network.set_default_locking_mode ~__context ~network ~value - - let attach_for_vm ~__context ~host ~vm = - info "Network.attach_for_vm: host = '%s'; VM = '%s'" (host_uuid ~__context host) (vm_uuid ~__context vm); - let local_fn = 
Local.Network.attach_for_vm ~host ~vm in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Network.attach_for_vm rpc session_id host vm) - - let detach_for_vm ~__context ~host ~vm = - info "Network.detach_for_vm: host = '%s'; VM = '%s'" (host_uuid ~__context host) (vm_uuid ~__context vm); - let local_fn = Local.Network.detach_for_vm ~host ~vm in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.Network.detach_for_vm rpc session_id host vm) - end - - module VIF = struct - - let unmark_vif ~__context ~vif ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - log_exn ~doc:("unmarking VIF after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.VIF.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vif_helpers.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vif, Ref.string_of self); - end) - vif - - let mark_vif ~__context ~vif ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - log_exn ~doc:("marking VIF for " ^ doc) - (fun self -> - Xapi_vif_helpers.assert_operation_valid ~__context ~self ~op; - Db.VIF.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_vif_helpers.update_allowed_operations ~__context ~self) vif - - let with_vif_marked ~__context ~vif ~doc ~op f = - Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vif ~__context ~vif ~doc ~op); - finally - (fun () -> f ()) - (fun () -> Helpers.with_global_lock (fun () -> unmark_vif ~__context ~vif ~doc ~op)) - - (* -------- Forwarding helper functions: ------------------------------------ *) - - let forward_vif_op ~local_fn ~__context ~self op = - let vm = Db.VIF.get_VM ~__context ~self in - let host_resident_on = Db.VM.get_resident_on ~__context ~self:vm in - if host_resident_on = Ref.null - then local_fn ~__context - else do_op_on ~local_fn ~__context ~host:host_resident_on op - - (* 
-------------------------------------------------------------------------- *) - - let create ~__context ~device ~network ~vM ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params = - info "VIF.create: VM = '%s'; network = '%s'" (vm_uuid ~__context vM) (network_uuid ~__context network); - Local.VIF.create ~__context ~device ~network ~vM ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params - - let destroy ~__context ~self = - info "VIF.destroy: VIF = '%s'" (vif_uuid ~__context self); - Local.VIF.destroy ~__context ~self - - let plug ~__context ~self = - info "VIF.plug: VIF = '%s'" (vif_uuid ~__context self); - let local_fn = Local.VIF.plug ~self in - with_vif_marked ~__context ~vif:self ~doc:"VIF.plug" ~op:`plug - (fun () -> - forward_vif_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VIF.plug rpc session_id self)) - - let unplug_common ~__context ~self ~force = - let op = `unplug in - let name = "VIF." ^ (Record_util.vif_operation_to_string op) in - info "%s: VIF = '%s'" name (vif_uuid ~__context self); - let local_fn, remote_fn = - if force then Local.VIF.unplug_force, Client.VIF.unplug_force - else Local.VIF.unplug, Client.VIF.unplug in - let local_fn = local_fn ~self in - with_vif_marked ~__context ~vif:self ~doc:name ~op - (fun () -> - forward_vif_op ~local_fn ~__context ~self (fun session_id rpc -> remote_fn rpc session_id self)) - - let unplug ~__context ~self = unplug_common ~__context ~self ~force:false - let unplug_force ~__context ~self = unplug_common ~__context ~self ~force:true - - let move ~__context ~self ~network = - info "VIF.move: VIF = '%s' network = '%s'" (vif_uuid ~__context self) (network_uuid ~__context network); - let local_fn = Local.VIF.move ~self ~network in - let remote_fn = (fun session_id rpc -> Client.VIF.move rpc session_id self network) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let set_locking_mode ~__context ~self ~value = - info "VIF.set_locking_mode: VIF = '%s'; value = 
'%s'" (vif_uuid ~__context self) (Record_util.vif_locking_mode_to_string value); - let local_fn = Local.VIF.set_locking_mode ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.set_locking_mode rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let set_ipv4_allowed ~__context ~self ~value = - info "VIF.set_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) (String.concat "," value); - let local_fn = Local.VIF.set_ipv4_allowed ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.set_ipv4_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let add_ipv4_allowed ~__context ~self ~value = - info "VIF.add_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; - let local_fn = Local.VIF.add_ipv4_allowed ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.add_ipv4_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let remove_ipv4_allowed ~__context ~self ~value = - info "VIF.remove_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; - let local_fn = Local.VIF.remove_ipv4_allowed ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.remove_ipv4_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let set_ipv6_allowed ~__context ~self ~value = - info "VIF.set_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) (String.concat "," value); - let local_fn = Local.VIF.set_ipv6_allowed ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.set_ipv6_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let add_ipv6_allowed ~__context ~self ~value = - info "VIF.add_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; - let local_fn = Local.VIF.add_ipv6_allowed ~self ~value in - let remote_fn = (fun session_id 
rpc -> Client.VIF.add_ipv6_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let remove_ipv6_allowed ~__context ~self ~value = - info "VIF.remove_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; - let local_fn = Local.VIF.remove_ipv6_allowed ~self ~value in - let remote_fn = (fun session_id rpc -> Client.VIF.remove_ipv6_allowed rpc session_id self value) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let configure_ipv4 ~__context ~self ~mode ~address ~gateway = - info "VIF.configure_ipv4: VIF = '%s'; mode = '%s'; address = '%s'; gateway = '%s'" - (vif_uuid ~__context self) - (Record_util.vif_ipv4_configuration_mode_to_string mode) address gateway; - let local_fn = Local.VIF.configure_ipv4 ~self ~mode ~address ~gateway in - let remote_fn = (fun session_id rpc -> Client.VIF.configure_ipv4 rpc session_id self mode address gateway) in - forward_vif_op ~local_fn ~__context ~self remote_fn - - let configure_ipv6 ~__context ~self ~mode ~address ~gateway = - info "VIF.configure_ipv6: VIF = '%s'; mode = '%s'; address = '%s'; gateway = '%s'" - (vif_uuid ~__context self) - (Record_util.vif_ipv6_configuration_mode_to_string mode) address gateway; - let local_fn = Local.VIF.configure_ipv6 ~self ~mode ~address ~gateway in - let remote_fn = (fun session_id rpc -> Client.VIF.configure_ipv6 rpc session_id self mode address gateway) in - forward_vif_op ~local_fn ~__context ~self remote_fn - end - - module VIF_metrics = struct - end - - module VLAN = struct - let create ~__context ~tagged_PIF ~tag ~network = - info "VLAN.create: network = '%s'; VLAN tag = %Ld" (network_uuid ~__context network) tag; - let local_fn = Local.VLAN.create ~tagged_PIF ~tag ~network in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:tagged_PIF) (fun session_id rpc -> Client.VLAN.create rpc session_id tagged_PIF tag network) - let destroy ~__context ~self = - info "VLAN.destroy: VLAN = '%s'" (vlan_uuid 
~__context self); - let local_fn = Local.VLAN.destroy ~self in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:(Db.VLAN.get_tagged_PIF ~__context ~self)) (fun session_id rpc -> Client.VLAN.destroy rpc session_id self) - end - - module Tunnel = struct - let create ~__context ~transport_PIF ~network = - info "Tunnel.create: network = '%s'" (network_uuid ~__context network); - let local_fn = Local.Tunnel.create ~transport_PIF ~network in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:transport_PIF) - (fun session_id rpc -> Client.Tunnel.create rpc session_id transport_PIF network) - - let destroy ~__context ~self = - info "Tunnel.destroy: tunnel = '%s'" (tunnel_uuid ~__context self); - let local_fn = Local.Tunnel.destroy ~self in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context - ~self:(Db.Tunnel.get_transport_PIF ~__context ~self)) - (fun session_id rpc -> Client.Tunnel.destroy rpc session_id self) - end - - module Bond = struct - let create ~__context ~network ~members ~mAC ~mode ~properties = - info "Bond.create: network = '%s'; members = [ %s ]" - (network_uuid ~__context network) (String.concat "; " (List.map (pif_uuid ~__context) members)); - if List.length members = 0 - then raise (Api_errors.Server_error(Api_errors.pif_bond_needs_more_members, [])); - let host = Db.PIF.get_host ~__context ~self:(List.hd members) in - let local_fn = Local.Bond.create ~network ~members ~mAC ~mode ~properties in - (* The management interface on the slave may change during this operation, so expect connection loss. - * Consider the operation successful if task progress is set to 1.0. 
*) - let task = Context.get_task_id __context in - let success () = - let progress = Db.Task.get_progress ~__context ~self:task in - debug "Task progress %.1f" progress; - if progress = 1.0 then - Some (Db.PIF.get_bond_slave_of ~__context ~self:(List.hd members)) - else - None - in - let fn () = - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.create rpc session_id network members mAC mode properties) in - tolerate_connection_loss fn success 30. - - let destroy ~__context ~self = - info "Bond.destroy: bond = '%s'" (bond_uuid ~__context self); - let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in - (* The management interface on the slave may change during this operation, so expect connection loss. - * Consider the operation successful if task progress is set to 1.0. *) - let task = Context.get_task_id __context in - let success () = - let progress = Db.Task.get_progress ~__context ~self:task in - debug "Task progress %.1f" progress; - if progress = 1.0 then - Some () - else - None - in - let local_fn = Local.Bond.destroy ~self in - let fn () = do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.destroy rpc session_id self) in - tolerate_connection_loss fn success 30. 
- - let set_mode ~__context ~self ~value = - info "Bond.set_mode: bond = '%s'; value = '%s'" (bond_uuid ~__context self) (Record_util.bond_mode_to_string value); - let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in - let local_fn = Local.Bond.set_mode ~self ~value in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.set_mode rpc session_id self value) - - let set_property ~__context ~self ~name ~value = - info "Bond.set_property: bond = '%s'; name = '%s'; value = '%s'" (bond_uuid ~__context self) name value; - let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in - let local_fn = Local.Bond.set_property ~self ~name ~value in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.set_property rpc session_id self name value) - end - - module PIF = struct - - let pool_introduce ~__context - ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode ~iP - ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug = - Local.PIF.pool_introduce ~__context - ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode ~iP - ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug - - let db_introduce = Local.PIF.db_introduce - let db_forget ~__context ~self = - info "PIF.db_forget: PIF = '%s'" (pif_uuid ~__context self); - Local.PIF.db_forget ~__context ~self - - let create_VLAN ~__context ~device ~network ~host ~vLAN = - info "PIF.create_VLAN: network = '%s'; VLAN tag = %Ld" (network_uuid ~__context network) vLAN; - let local_fn = Local.PIF.create_VLAN ~device ~network ~host ~vLAN in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.create_VLAN rpc session_id device network host vLAN) - - let destroy ~__context ~self = - info "PIF.destroy: PIF = '%s'" (pif_uuid ~__context self); - let local_fn = Local.PIF.destroy ~self in - do_op_on ~local_fn 
~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.destroy rpc session_id self) - - let unplug ~__context ~self = - info "PIF.unplug: PIF = '%s'" (pif_uuid ~__context self); - let local_fn = Local.PIF.unplug ~self in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.unplug rpc session_id self) - - let plug ~__context ~self = - info "PIF.plug: PIF = '%s'" (pif_uuid ~__context self); - let local_fn = Local.PIF.plug ~self in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.plug rpc session_id self) - - let reconfigure_ip ~__context ~self ~mode ~iP ~netmask ~gateway ~dNS = - info "PIF.reconfigure_ip: PIF = '%s'; mode = '%s'; IP = '%s'; netmask = '%s'; gateway = '%s'; DNS = %s" - (pif_uuid ~__context self) - (Record_util.ip_configuration_mode_to_string mode) iP netmask gateway dNS; - let host = Db.PIF.get_host ~__context ~self in - let local_fn = Local.PIF.reconfigure_ip ~self ~mode ~iP ~netmask ~gateway ~dNS in - let task = Context.get_task_id __context in - let success () = - let status = Db.Task.get_status ~__context ~self:task in - if status <> `pending then - Some () - else - None - in - let fn () = - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> - Client.PIF.reconfigure_ip rpc session_id self mode iP netmask gateway dNS) in - tolerate_connection_loss fn success !Xapi_globs.pif_reconfigure_ip_timeout - - let reconfigure_ipv6 ~__context ~self ~mode ~iPv6 ~gateway ~dNS = - info "PIF.reconfigure_ipv6: PIF = '%s'; mode = '%s'; IPv6 = '%s'; gateway = '%s'; DNS = %s" - (pif_uuid ~__context self) - (Record_util.ipv6_configuration_mode_to_string mode) iPv6 gateway dNS; - let host = Db.PIF.get_host ~__context ~self in - let local_fn = Local.PIF.reconfigure_ipv6 ~self ~mode ~iPv6 ~gateway ~dNS in - let task = Context.get_task_id __context in - let success () = - let status = Db.Task.get_status ~__context ~self:task 
in - if status <> `pending then - Some () - else - None - in - let fn () = - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> - Client.PIF.reconfigure_ipv6 rpc session_id self mode iPv6 gateway dNS) in - tolerate_connection_loss fn success !Xapi_globs.pif_reconfigure_ip_timeout - - let set_primary_address_type ~__context ~self ~primary_address_type = - info "PIF.set_primary_address_type: PIF = '%s'; primary_address_type = '%s'" - (pif_uuid ~__context self) - (Record_util.primary_address_type_to_string primary_address_type); - let local_fn = Local.PIF.set_primary_address_type ~self ~primary_address_type in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) - (fun session_id rpc -> Client.PIF.set_primary_address_type rpc session_id self primary_address_type) - - let set_property ~__context ~self ~name ~value = - info "PIF.set_property: PIF = '%s'; name = '%s'; value = '%s'" (pif_uuid ~__context self) name value; - let host = Db.PIF.get_host ~__context ~self in - let local_fn = Local.PIF.set_property ~self ~name ~value in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.set_property rpc session_id self name value) - - let scan ~__context ~host = - info "PIF.scan: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.PIF.scan ~host in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.scan rpc session_id host) - - let introduce ~__context ~host ~mAC ~device ~managed = - info "PIF.introduce: host = '%s'; MAC address = '%s'; device = '%s'; managed = '%b'" - (host_uuid ~__context host) mAC device managed; - let local_fn = Local.PIF.introduce ~host ~mAC ~device ~managed in - do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.introduce rpc session_id host mAC device managed) - - let forget ~__context ~self= - info "PIF.forget: PIF = '%s'" (pif_uuid ~__context self); - let local_fn = Local.PIF.forget ~self in - do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host 
~__context ~self) (fun session_id rpc -> Client.PIF.forget rpc session_id self) - end - module PIF_metrics = struct - end - module SM = struct end - module SR = struct - - let unmark_sr ~__context ~sr ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - debug "Unmarking SR after %s (task=%s)" doc task_id; - log_exn_ignore ~doc:("unmarking SR after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.SR.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_sr_operations.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._sr, Ref.string_of self); - end) - sr - - let mark_sr ~__context ~sr ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - debug "Marking SR for %s (task=%s)" doc task_id; - log_exn ~doc:("marking SR for " ^ doc) - (fun self -> - Xapi_sr_operations.assert_operation_valid ~__context ~self ~op; - Db.SR.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_sr_operations.update_allowed_operations ~__context ~self) sr - - let with_sr_marked ~__context ~sr ~doc ~op f = - Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_sr ~__context ~sr ~doc ~op); - finally - (fun () -> f ()) - (fun () -> Helpers.with_global_lock (fun () -> unmark_sr ~__context ~sr ~doc ~op)) - - (* -------- Forwarding helper functions: ------------------------------------ *) - - (* Forward SR operation to host that has a suitable plugged (or unplugged) PBD *) - let forward_sr_op ?consider_unplugged_pbds ~local_fn ~__context ~self op = - let pbd = choose_pbd_for_sr ?consider_unplugged_pbds ~__context ~self () in - let host = Db.PBD.get_host ~__context ~self:pbd in - do_op_on ~local_fn ~__context ~host op - - (* do op on a host that can view multiple SRs, if none is found, an - exception of Not_found will be raised *) - let forward_sr_multiple_op ~local_fn ~__context ~srs ?(prefer_slaves=false) op = - let choose_fn ~host = - 
Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:srs ~host in - let host = - try Xapi_vm_helpers.choose_host ~__context ~choose_fn ~prefer_slaves () - with _ -> raise Not_found in - do_op_on ~local_fn ~__context ~host op - - let set_virtual_allocation ~__context ~self ~value = - Sm.assert_session_has_internal_sr_access ~__context ~sr:self; - Local.SR.set_virtual_allocation ~__context ~self ~value - - let set_physical_size ~__context ~self ~value = - Sm.assert_session_has_internal_sr_access ~__context ~sr:self; - Local.SR.set_physical_size ~__context ~self ~value - - let set_physical_utilisation ~__context ~self ~value = - Sm.assert_session_has_internal_sr_access ~__context ~sr:self; - Local.SR.set_physical_utilisation ~__context ~self ~value - - let create ~__context ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config = - info "SR.create: name label = '%s'" name_label; - let local_fn = Local.SR.create ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config in - (* if shared, then ignore host parameter and do create on the master.. *) - if shared then - local_fn ~__context - else - (* otherwise forward to specified host *) - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.SR.create ~rpc ~session_id ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config) - - (* -------------------------------------------------------------------------- *) - - (* don't forward. 
this is just a db call *) - let introduce ~__context ~uuid ~name_label ~name_description ~_type ~content_type = - info "SR.introduce: uuid = '%s'; name label = '%s'" uuid name_label; - Local.SR.introduce ~__context ~uuid ~name_label ~name_description ~_type ~content_type - - let make ~__context ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~sm_config = - info "SR.make: host = '%s'; name label = '%s'" (host_uuid ~__context host) name_label; - let local_fn = Local.SR.make ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~sm_config in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.SR.make rpc session_id host device_config physical_size name_label - name_description _type content_type sm_config) - - let destroy ~__context ~sr = - info "SR.destroy: SR = '%s'" (sr_uuid ~__context sr); - let local_fn = Local.SR.destroy ~sr in - with_sr_marked ~__context ~sr ~doc:"SR.destroy" ~op:`destroy - (fun () -> - forward_sr_op ~consider_unplugged_pbds:true ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.destroy rpc session_id sr)) - - (* don't forward this is just a db call *) - let forget ~__context ~sr = - info "SR.forget: SR = '%s'" (sr_uuid ~__context sr); - with_sr_marked ~__context ~sr ~doc:"SR.forget" ~op:`forget - (fun () -> - Local.SR.forget ~__context ~sr) - - let update ~__context ~sr = - info "SR.update: SR = '%s'" (sr_uuid ~__context sr); - let local_fn = Local.SR.update ~sr in - (* SR.update made lock free as of CA-27630 *) - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.update rpc session_id sr) - - let get_supported_types ~__context = - info "SR.get_supported_types"; - Local.SR.get_supported_types ~__context - - let scan ~__context ~sr = - (* since we periodically sr_scan, only log those that aren't internal ones.. 
otherwise logs just get spammed *) - let is_internal_scan = Db.Session.get_pool ~__context ~self:(Context.get_session_id __context) in - (if is_internal_scan then debug else info) "SR.scan: SR = '%s'" (sr_uuid ~__context sr); - let local_fn = Local.SR.scan ~sr in - with_sr_marked ~__context ~sr ~doc:"SR.scan" ~op:`scan - (fun () -> - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.scan rpc session_id sr)) - - let probe ~__context ~host ~device_config ~_type ~sm_config = - info "SR.probe: host = '%s'" (host_uuid ~__context host); - let local_fn = Local.SR.probe ~host ~device_config ~_type ~sm_config in - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.SR.probe ~rpc ~session_id ~host ~device_config ~_type ~sm_config) - - let set_shared ~__context ~sr ~value = - Local.SR.set_shared ~__context ~sr ~value - - let set_name_label ~__context ~sr ~value = - info "SR.set_name_label: SR = '%s' name-label = '%s'" - (sr_uuid ~__context sr) value; - let local_fn = Local.SR.set_name_label ~sr ~value in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.set_name_label rpc session_id sr value) - - let set_name_description ~__context ~sr ~value = - info "SR.set_name_description: SR = '%s' name-description = '%s'" - (sr_uuid ~__context sr) value; - let local_fn = Local.SR.set_name_description ~sr ~value in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.set_name_description rpc session_id sr value) - - let assert_can_host_ha_statefile ~__context ~sr = - info "SR.assert_can_host_ha_statefile: SR = '%s'" (sr_uuid ~__context sr); - Local.SR.assert_can_host_ha_statefile ~__context ~sr - - let assert_supports_database_replication ~__context ~sr = - info "SR.assert_supports_database_replication: SR '%s'" (sr_uuid ~__context sr); - Local.SR.assert_supports_database_replication ~__context ~sr - - let enable_database_replication ~__context ~sr = - info 
"SR.enable_database_replication: SR = '%s'" (sr_uuid ~__context sr); - Local.SR.enable_database_replication ~__context ~sr - - let disable_database_replication ~__context ~sr = - info "SR.disable_database_replication: SR = '%s'" (sr_uuid ~__context sr); - Local.SR.disable_database_replication ~__context ~sr - - let create_new_blob ~__context ~sr ~name ~mime_type ~public = - info "SR.create_new_blob: SR = '%s'" (sr_uuid ~__context sr); - Local.SR.create_new_blob ~__context ~sr ~name ~mime_type ~public - - (* SR Level RRDs *) - let get_data_sources ~__context ~sr = - info "SR.get_data_sources: SR = '%s'" (sr_uuid ~__context sr); - let local_fn = Local.SR.get_data_sources ~sr in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> Client.SR.get_data_sources rpc session_id sr) - - let record_data_source ~__context ~sr ~data_source = - info "SR.record_data_source: SR = '%s'; data source = '%s'" - (sr_uuid ~__context sr) data_source; - let local_fn = Local.SR.record_data_source ~sr ~data_source in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> - Client.SR.record_data_source rpc session_id sr data_source) - - let query_data_source ~__context ~sr ~data_source = - info "SR.query_data_source: SR = '%s'; data source = '%s'" - (sr_uuid ~__context sr) data_source; - let local_fn = Local.SR.query_data_source ~sr ~data_source in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> - Client.SR.query_data_source rpc session_id sr data_source) - - let forget_data_source_archives ~__context ~sr ~data_source = - info "SR.forget_data_source_archives: sr = '%s'; data source = '%s'" - (sr_uuid ~__context sr) data_source; - let local_fn = Local.SR.forget_data_source_archives ~sr ~data_source in - forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> - Client.SR.forget_data_source_archives rpc session_id sr data_source) - - end - module VDI = struct - - let unmark_vdi ~__context ~vdi ~doc ~op = - let task_id = 
Ref.string_of (Context.get_task_id __context) in - log_exn_ignore ~doc:("unmarking VDI after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.VDI.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vdi.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vdi, Ref.string_of self); - end) - vdi - - let mark_vdi ~__context ~vdi ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - log_exn ~doc:("marking VDI for " ^ doc) - (fun self -> - Xapi_vdi.assert_operation_valid ~__context ~self ~op; - Db.VDI.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_vdi.update_allowed_operations ~__context ~self) vdi - - (** Use this function to mark the SR and/or the individual VDI *) - let with_sr_andor_vdi ~__context ?sr ?vdi ~doc f = - Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - maybe (fun (sr, op) -> SR.mark_sr ~__context ~sr ~doc ~op) sr; - (* If we fail to acquire the VDI lock, unlock the SR *) - try - maybe (fun (vdi, op) -> mark_vdi ~__context ~vdi ~doc ~op) vdi - with e -> - maybe (fun (sr, op) -> SR.unmark_sr ~__context ~sr ~doc ~op) sr; - raise e - ); - finally - (fun () -> f ()) - (fun () -> - Helpers.with_global_lock - (fun () -> - maybe (fun (sr, op) -> SR.unmark_sr ~__context ~sr ~doc ~op) sr; - maybe (fun (vdi, op) -> unmark_vdi ~__context ~vdi ~doc ~op) vdi)) - - - (* -------- Forwarding helper functions: ------------------------------------ *) - - (* Read SR from VDI and use same forwarding mechanism as SR *) - let forward_vdi_op ~local_fn ~__context ~self op = - let sr = Db.VDI.get_SR ~__context ~self in - SR.forward_sr_op ~local_fn ~__context ~self:sr op - - (* -------------------------------------------------------------------------- *) - - let set_sharable ~__context ~self ~value = - if not (Mtc.is_vdi_accessed_by_protected_VM ~__context ~vdi:self) then begin - let sr = Db.VDI.get_SR ~__context ~self in - 
Sm.assert_session_has_internal_sr_access ~__context ~sr; - end; - Local.VDI.set_sharable ~__context ~self ~value - - let set_managed ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_managed ~__context ~self ~value - - let set_read_only ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_read_only ~__context ~self ~value - - let set_missing ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_missing ~__context ~self ~value - - let set_virtual_size ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_virtual_size ~__context ~self ~value - - let set_physical_utilisation ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_physical_utilisation ~__context ~self ~value - - let set_is_a_snapshot ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_is_a_snapshot ~__context ~self ~value - - let set_snapshot_of ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_snapshot_of ~__context ~self ~value - - let set_snapshot_time ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_snapshot_time ~__context ~self ~value - - let set_metadata_of_pool ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.set_metadata_of_pool ~__context ~self ~value - - let 
set_name_label ~__context ~self ~value = - info "VDI.set_name_label: VDI = '%s' name-label = '%s'" - (vdi_uuid ~__context self) value; - let local_fn = Local.VDI.set_name_label ~self ~value in - forward_vdi_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VDI.set_name_label rpc session_id self value) - - let set_name_description ~__context ~self ~value = - info "VDI.set_name_description: VDI = '%s' name-description = '%s'" - (vdi_uuid ~__context self) value; - let local_fn = Local.VDI.set_name_description ~self ~value in - forward_vdi_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VDI.set_name_description rpc session_id self value) - - let ensure_vdi_not_on_running_vm ~__context ~self = - let vbds = Db.VDI.get_VBDs ~__context ~self in - List.iter (fun vbd -> - let vm = Db.VBD.get_VM ~__context ~self:vbd in - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vm ~expected:`Halted - ) vbds - - let set_on_boot ~__context ~self ~value = - ensure_vdi_not_on_running_vm ~__context ~self; - let local_fn = Local.VDI.set_on_boot ~self ~value in - forward_vdi_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VDI.set_on_boot rpc session_id self value) - - let set_allow_caching ~__context ~self ~value = - ensure_vdi_not_on_running_vm ~__context ~self; - Local.VDI.set_allow_caching ~__context ~self ~value - - let open_database ~__context ~self = - Local.VDI.open_database ~__context ~self - - let read_database_pool_uuid ~__context ~self = - Local.VDI.read_database_pool_uuid ~__context ~self - - (* know sr so just use SR forwarding policy direct here *) - let create ~__context ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags = - info "VDI.create: SR = '%s'; name label = '%s'" (sr_uuid ~__context sR) name_label; - let local_fn = Local.VDI.create ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags 
in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_create) ~doc:"VDI.create" - (fun () -> - SR.forward_sr_op ~local_fn ~__context ~self:sR - (fun session_id rpc -> Client.VDI.create ~rpc ~session_id ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags)) - - (* Hidden call used in pool join only *) - let pool_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location = - Local.VDI.pool_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location - - (* Called from the SM backend *) - let db_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location = - Sm.assert_session_has_internal_sr_access ~__context ~sr:sR; - Local.VDI.db_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location - - (* Called from the SM backend *) - let db_forget ~__context ~vdi = - let sr = Db.VDI.get_SR ~__context ~self:vdi in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - Local.VDI.db_forget ~__context ~vdi - - let introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of= - info "VDI.introduce: SR = '%s'; name label = '%s'" (sr_uuid ~__context sR) name_label; - let local_fn = Local.VDI.introduce ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_introduce) ~doc:"VDI.introduce" - (fun () -> - SR.forward_sr_op ~local_fn ~__context ~self:sR - (fun session_id rpc -> - Client.VDI.introduce ~rpc ~session_id 
~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of)) - - let update ~__context ~vdi = - let local_fn = Local.VDI.update ~vdi in - let sr = Db.VDI.get_SR ~__context ~self:vdi in - with_sr_andor_vdi ~__context ~vdi:(vdi, `update) ~doc:"VDI.update" - (fun () -> - SR.forward_sr_op ~local_fn ~__context ~self:sr - (fun session_id rpc -> - Client.VDI.update ~rpc ~session_id ~vdi)) - - let forget ~__context ~vdi = - info "VDI.forget: VDI = '%s'" (vdi_uuid ~__context vdi); - with_sr_andor_vdi ~__context ~vdi:(vdi, `forget) ~doc:"VDI.forget" - (fun () -> - Local.VDI.forget ~__context ~vdi) - - let destroy ~__context ~self = - info "VDI.destroy: VDI = '%s'" (vdi_uuid ~__context self); - let local_fn = Local.VDI.destroy ~self in - let sR = Db.VDI.get_SR ~__context ~self in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_destroy) ~vdi:(self, `destroy) ~doc:"VDI.destroy" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VDI.destroy rpc session_id self)) - - (* !! FIXME - Depends on what we're doing here... 
*) - let snapshot ~__context ~vdi ~driver_params = - info "VDI.snapshot: VDI = '%s'" (vdi_uuid ~__context vdi); - let local_fn = Local.VDI.snapshot ~vdi ~driver_params in - let sR = Db.VDI.get_SR ~__context ~self:vdi in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_snapshot) ~vdi:(vdi, `snapshot) ~doc:"VDI.snapshot" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self:vdi - (fun session_id rpc -> Client.VDI.snapshot rpc session_id vdi driver_params)) - - let clone ~__context ~vdi ~driver_params = - info "VDI.clone: VDI = '%s'" (vdi_uuid ~__context vdi); - let local_fn = Local.VDI.clone ~vdi ~driver_params in - let sR = Db.VDI.get_SR ~__context ~self:vdi in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_clone) ~vdi:(vdi, `clone) ~doc:"VDI.clone" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self:vdi - (fun session_id rpc -> Client.VDI.clone rpc session_id vdi driver_params)) - - let copy ~__context ~vdi ~sr ~base_vdi ~into_vdi = - info "VDI.copy: VDI = '%s'; SR = '%s'; base_vdi = '%s'; into_vdi = '%s'" (vdi_uuid ~__context vdi) (sr_uuid ~__context sr) (vdi_uuid ~__context base_vdi) (vdi_uuid ~__context into_vdi); - let local_fn = Local.VDI.copy ~vdi ~sr ~base_vdi ~into_vdi in - let src_sr = Db.VDI.get_SR ~__context ~self:vdi in - (* No need to lock the VDI because the VBD.plug will do that for us *) - (* Try forward the request to a host which can have access to both source - and destination SR. 
*) - let op session_id rpc = Client.VDI.copy rpc session_id vdi sr base_vdi into_vdi in - with_sr_andor_vdi ~__context ~vdi:(vdi, `copy) ~doc:"VDI.copy" - (fun () -> - try - SR.forward_sr_multiple_op ~local_fn ~__context ~srs:[src_sr; sr] ~prefer_slaves:true op - with Not_found -> - SR.forward_sr_multiple_op ~local_fn ~__context ~srs:[src_sr] ~prefer_slaves:true op) - - let pool_migrate ~__context ~vdi ~sr ~options = - let vbds = Db.VBD.get_records_where ~__context - ~expr:(Db_filter_types.Eq(Db_filter_types.Field "VDI", - Db_filter_types.Literal (Ref.string_of vdi))) in - if List.length vbds < 1 - then raise (Api_errors.Server_error(Api_errors.vdi_needs_vm_for_migrate,[Ref.string_of vdi])); - - let vm = (snd (List.hd vbds)).API.vBD_VM in - - (* hackity hack *) - let options = ("__internal__vm",Ref.string_of vm) :: (List.remove_assoc "__internal__vm" options) in - let local_fn = Local.VDI.pool_migrate ~vdi ~sr ~options in - - info "VDI.pool_migrate: VDI = '%s'; SR = '%s'; VM = '%s'" - (vdi_uuid ~__context vdi) (sr_uuid ~__context sr) (vm_uuid ~__context vm); - - VM.with_vm_operation ~__context ~self:vm ~doc:"VDI.pool_migrate" ~op:`migrate_send - (fun () -> - let snapshot, host = - if Xapi_vm_lifecycle.is_live ~__context ~self:vm then - (Helpers.get_boot_record ~__context ~self:vm, - Db.VM.get_resident_on ~__context ~self:vm) - else - let snapshot = Db.VM.get_record ~__context ~self:vm in - let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in - let host = - if host <> Ref.null then host else - let choose_fn ~host = - Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~snapshot ~host (); - Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:[sr] ~host in - Xapi_vm_helpers.choose_host ~__context ~vm ~choose_fn () in - (snapshot, host) in - VM.reserve_memory_for_vm ~__context ~vm:vm ~host ~snapshot ~host_op:`vm_migrate - (fun () -> - with_sr_andor_vdi ~__context ~vdi:(vdi, `mirror) ~doc:"VDI.mirror" - (fun () -> - do_op_on 
~local_fn ~__context ~host - (fun session_id rpc -> Client.VDI.pool_migrate ~rpc ~session_id ~vdi ~sr ~options)))) - - let resize ~__context ~vdi ~size = - info "VDI.resize: VDI = '%s'; size = %Ld" (vdi_uuid ~__context vdi) size; - let local_fn = Local.VDI.resize ~vdi ~size in - let sR = Db.VDI.get_SR ~__context ~self:vdi in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_resize) ~vdi:(vdi, `resize) ~doc:"VDI.resize" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self:vdi - (fun session_id rpc -> Client.VDI.resize rpc session_id vdi size)) - - let resize_online ~__context ~vdi ~size = - info "VDI.resize_online: VDI = '%s'; size = %Ld" (vdi_uuid ~__context vdi) size; - let local_fn = Local.VDI.resize_online ~vdi ~size in - let sR = Db.VDI.get_SR ~__context ~self:vdi in - with_sr_andor_vdi ~__context ~sr:(sR, `vdi_resize) ~vdi:(vdi, `resize_online) ~doc:"VDI.resize_online" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self:vdi - (fun session_id rpc -> Client.VDI.resize_online rpc session_id vdi size)) - - let generate_config ~__context ~host ~vdi = - info "VDI.generate_config: VDI = '%s'; host = '%s'" (vdi_uuid ~__context vdi) (host_uuid ~__context host); - let local_fn = Local.VDI.generate_config ~host ~vdi in - with_sr_andor_vdi ~__context ~vdi:(vdi, `generate_config) ~doc:"VDI.generate_config" - (fun () -> - do_op_on ~local_fn ~__context ~host - (fun session_id rpc -> Client.VDI.generate_config rpc session_id host vdi) - ) - - let force_unlock ~__context ~vdi = - info "VDI.force_unlock: VDI = '%s'" (vdi_uuid ~__context vdi); - let local_fn = Local.VDI.force_unlock ~vdi in - with_sr_andor_vdi ~__context ~vdi:(vdi, `force_unlock) ~doc:"VDI.force_unlock" - (fun () -> - forward_vdi_op ~local_fn ~__context ~self:vdi - (fun session_id rpc -> Client.VDI.force_unlock rpc session_id vdi)) - - let checksum ~__context ~self = - VM.forward_to_access_srs_and ~local_fn:(Local.VDI.checksum ~self) ~__context - ~extra_sr:(Db.VDI.get_SR ~__context ~self) - (fun session_id 
rpc -> Client.VDI.checksum rpc session_id self) - - end - module VBD = struct - - let update_vbd_and_vdi_operations ~__context ~vbd = - Helpers.with_global_lock - (fun () -> - try - Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; - if not (Db.VBD.get_empty ~__context ~self:vbd) then - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - Xapi_vdi.update_allowed_operations ~__context ~self:vdi - with _ -> ()) - - let unmark_vbd ~__context ~vbd ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - log_exn ~doc:("unmarking VBD after " ^ doc) - (fun self -> - if Db.is_valid_ref __context self then begin - Db.VBD.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vbd_helpers.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vbd, Ref.string_of vbd) - end) - vbd - - let mark_vbd ~__context ~vbd ~doc ~op = - let task_id = Ref.string_of (Context.get_task_id __context) in - log_exn ~doc:("marking VBD for " ^ doc) - (fun self -> - Xapi_vbd_helpers.assert_operation_valid ~__context ~self ~op; - Db.VBD.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_vbd_helpers.update_allowed_operations ~__context ~self) vbd - - let with_vbd_marked ~__context ~vbd ~doc ~op f = - Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vbd ~__context ~vbd ~doc ~op); - finally - (fun () -> f ()) - (fun () -> Helpers.with_global_lock (fun () -> unmark_vbd ~__context ~vbd ~doc ~op)) - - - - (* -------- Forwarding helper functions: ------------------------------------ *) - - (* Forward to host that has resident VM that this VBD references *) - let forward_vbd_op ~local_fn ~__context ~self op = - let vm = Db.VBD.get_VM ~__context ~self in - let host_resident_on = Db.VM.get_resident_on ~__context ~self:vm in - if host_resident_on = Ref.null - then local_fn ~__context - else do_op_on ~local_fn ~__context ~host:host_resident_on op - - (* 
-------------------------------------------------------------------------- *) - - - (* these are db functions *) - let create ~__context ~vM ~vDI ~userdevice ~bootable ~mode ~_type ~unpluggable ~empty ~other_config ~qos_algorithm_type ~qos_algorithm_params = - info "VBD.create: VM = '%s'; VDI = '%s'" (vm_uuid ~__context vM) (vdi_uuid ~__context vDI); - (* NB must always execute this on the master because of the autodetect_mutex *) - Local.VBD.create ~__context ~vM ~vDI ~userdevice ~bootable ~mode ~_type ~unpluggable ~empty ~other_config ~qos_algorithm_type ~qos_algorithm_params - - let set_mode ~__context ~self ~value = - info "VBD.set_mode: VBD = '%s'; value = %s" (vbd_uuid ~__context self) (Record_util.vbd_mode_to_string value); - Local.VBD.set_mode ~__context ~self ~value - - let destroy ~__context ~self = - info "VBD.destroy: VBD = '%s'" (vbd_uuid ~__context self); - Local.VBD.destroy ~__context ~self - - let insert ~__context ~vbd ~vdi = - info "VBD.insert: VBD = '%s'; VDI = '%s'" (vbd_uuid ~__context vbd) (vdi_uuid ~__context vdi); - let local_fn = Local.VBD.insert ~vbd ~vdi in - with_vbd_marked ~__context ~vbd ~doc:"VBD.insert" ~op:`insert - (fun () -> - let vm = Db.VBD.get_VM ~__context ~self:vbd in - if Db.VM.get_power_state ~__context ~self:vm = `Halted then begin - Xapi_vbd.assert_ok_to_insert ~__context ~vbd ~vdi; - Db.VBD.set_VDI ~__context ~self:vbd ~value:vdi; - Db.VBD.set_empty ~__context ~self:vbd ~value:false - end - else forward_vbd_op ~local_fn ~__context ~self:vbd - (fun session_id rpc -> Client.VBD.insert rpc session_id vbd vdi)); - update_vbd_and_vdi_operations ~__context ~vbd - - let eject ~__context ~vbd = - info "VBD.eject: VBD = '%s'" (vbd_uuid ~__context vbd); - let local_fn = Local.VBD.eject ~vbd in - with_vbd_marked ~__context ~vbd ~doc:"VBD.eject" ~op:`eject - (fun () -> - let vm = Db.VBD.get_VM ~__context ~self:vbd in - if Db.VM.get_power_state ~__context ~self:vm = `Halted then begin - Xapi_vbd.assert_ok_to_eject ~__context ~vbd; - 
Db.VBD.set_empty ~__context ~self:vbd ~value:true; - Db.VBD.set_VDI ~__context ~self:vbd ~value:Ref.null; - end - else forward_vbd_op ~local_fn ~__context ~self:vbd - (fun session_id rpc -> Client.VBD.eject rpc session_id vbd)); - update_vbd_and_vdi_operations ~__context ~vbd - - let plug ~__context ~self = - info "VBD.plug: VBD = '%s'" (vbd_uuid ~__context self); - let local_fn = Local.VBD.plug ~self in - with_vbd_marked ~__context ~vbd:self ~doc:"VBD.plug" ~op:`plug - (fun () -> - forward_vbd_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VBD.plug rpc session_id self)); - update_vbd_and_vdi_operations ~__context ~vbd:self - - let unplug ~__context ~self = - info "VBD.unplug: VBD = '%s'" (vbd_uuid ~__context self); - let local_fn = Local.VBD.unplug ~self in - with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug" ~op:`unplug - (fun () -> - forward_vbd_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.VBD.unplug rpc session_id self)); - update_vbd_and_vdi_operations ~__context ~vbd:self - - let unplug_force ~__context ~self = - info "VBD.unplug_force: VBD = '%s'" (vbd_uuid ~__context self); - let local_fn = Local.VBD.unplug_force ~self in - with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug_force" ~op:`unplug_force - (fun () -> - forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unplug_force rpc session_id self)); - update_vbd_and_vdi_operations ~__context ~vbd:self - - let unplug_force_no_safety_check ~__context ~self = - warn "VBD.unplug_force_no_safety_check: VBD = '%s'" (vbd_uuid ~__context self); - let local_fn = Local.VBD.unplug_force_no_safety_check ~self in - with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug_force_no_safety_check" ~op:`unplug_force - (fun () -> - forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unplug_force_no_safety_check rpc session_id self)); - update_vbd_and_vdi_operations ~__context ~vbd:self - - let pause ~__context ~self = - info "VBD.pause: VBD = 
'%s'" (vbd_uuid ~__context self); - let local_fn = Local.VBD.pause ~self in - let result = with_vbd_marked ~__context ~vbd:self ~doc:"VBD.pause" ~op:`pause - (fun () -> - forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.pause rpc session_id self) - ) in - update_vbd_and_vdi_operations ~__context ~vbd:self; - result - - let unpause ~__context ~self ~token = - info "VBD.unpause: VBD = '%s'; token = '%s'" (vbd_uuid ~__context self) token; - let local_fn = Local.VBD.unpause ~self ~token in - with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unpause" ~op:`unpause - (fun () -> - forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unpause rpc session_id self token); - ); - update_vbd_and_vdi_operations ~__context ~vbd:self - - let assert_attachable ~__context ~self = - info "VBD.assert_attachable: VBD = '%s'" (vbd_uuid ~__context self); - Local.VBD.assert_attachable ~__context ~self - end - - module VBD_metrics = struct - end - - module PBD = struct - - (* Create and destroy are just db operations, no need to forward; *) - (* however, they can affect whether SR.destroy is allowed, so update SR.allowed_operations. 
*) - let create ~__context ~host ~sR ~device_config ~other_config = - info "PBD.create: SR = '%s'; host '%s'" (sr_uuid ~__context sR) (host_uuid ~__context host); - SR.with_sr_marked ~__context ~sr:sR ~doc:"PBD.create" ~op:`pbd_create - (fun () -> Local.PBD.create ~__context ~host ~sR ~device_config ~other_config) - - let destroy ~__context ~self = - info "PBD.destroy: PBD '%s'" (pbd_uuid ~__context self); - let sr = Db.PBD.get_SR ~__context ~self in - SR.with_sr_marked ~__context ~sr ~doc:"PBD.destroy" ~op:`pbd_destroy - (fun () -> Local.PBD.destroy ~__context ~self) - - (* -------- Forwarding helper functions: ------------------------------------ *) - - let forward_pbd_op ~local_fn ~__context ~self op = - do_op_on ~local_fn ~__context ~host:(Db.PBD.get_host ~__context ~self) op - - (* -------------------------------------------------------------------------- *) - - let sanitize (k, v) = - if String.endswith "transformed" k then - k ^ "=undisclosed" - else - k ^ "=" ^ v - - let set_device_config ~__context ~self ~value = - info "PBD.set_device_config: PBD = '%s'; device_config = [ %s ]" - (pbd_uuid ~__context self) (String.concat "; " (List.map sanitize value)); - let sr = Db.PBD.get_SR ~__context ~self in - Sm.assert_session_has_internal_sr_access ~__context ~sr; - - let local_fn = Local.PBD.set_device_config ~self ~value in - forward_pbd_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.PBD.set_device_config rpc session_id self value) - - (* Mark the SR and check, if we are the 'SRmaster' that no VDI - current_operations are present (eg snapshot, clone) since these are all - done on the SR master. 
*) - let with_unplug_locks ~__context ~pbd ~sr f = - let doc = "PBD.unplug" and op = `unplug in - Helpers.retry_with_global_lock ~__context ~doc - (fun () -> - if Helpers.i_am_srmaster ~__context ~sr - then - List.iter (fun vdi -> - if Db.VDI.get_current_operations ~__context ~self:vdi <> [] - then raise (Api_errors.Server_error(Api_errors.other_operation_in_progress, [ Datamodel._vdi; Ref.string_of vdi ]))) - (Db.SR.get_VDIs ~__context ~self:sr); - SR.mark_sr ~__context ~sr ~doc ~op - ); - finally - (fun () -> f ()) - (fun () -> Helpers.with_global_lock (fun () -> SR.unmark_sr ~__context ~sr ~doc ~op)) - - (* plug and unplug need to be executed on the host that the pbd is related to *) - let plug ~__context ~self = - info "PBD.plug: PBD = '%s'" (pbd_uuid ~__context self); - let local_fn = Local.PBD.plug ~self in - let sr = Db.PBD.get_SR ~__context ~self in - let is_shared_sr = Db.SR.get_shared ~__context ~self:sr in - let is_master_pbd = - let pbd_host = Db.PBD.get_host ~__context ~self in - let master_host = Helpers.get_localhost ~__context in - pbd_host = master_host in - - SR.with_sr_marked ~__context ~sr ~doc:"PBD.plug" ~op:`plug - (fun () -> - forward_pbd_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.PBD.plug rpc session_id self)); - - (* We always plug the master PBD first and unplug it last. If this is the - * first PBD plugged for this SR (proxy: the PBD being plugged is for the - * master) then we should perform an initial SR scan and perform some - * asynchronous start-of-day operations in the callback. - * Note the current context contains a completed real task and we should - * not reuse it for what is effectively another call. 
*) - if is_master_pbd then - Server_helpers.exec_with_new_task "PBD.plug initial SR scan" (fun __context -> - let should_handle_metadata_vdis = is_shared_sr in - - if should_handle_metadata_vdis then - Xapi_dr.signal_sr_is_processing ~__context ~sr; - - let sr_scan_callback () = - if is_shared_sr then begin - Xapi_dr.handle_metadata_vdis ~__context ~sr; - Xapi_dr.signal_sr_is_ready ~__context ~sr; - end; - Xapi_sr.maybe_push_sr_rrds ~__context ~sr; - Xapi_sr.update ~__context ~sr; - in - - Xapi_sr.scan_one ~__context ~callback:sr_scan_callback sr; - ) - - let unplug ~__context ~self = - info "PBD.unplug: PBD = '%s'" (pbd_uuid ~__context self); - let local_fn = Local.PBD.unplug ~self in - let sr = Db.PBD.get_SR ~__context ~self in - let is_master_pbd = - let pbd_host = Db.PBD.get_host ~__context ~self in - let master_host = Helpers.get_localhost ~__context in - pbd_host = master_host in - - with_unplug_locks ~__context ~sr ~pbd:self - (fun () -> - if is_master_pbd then - Xapi_sr.maybe_copy_sr_rrds ~__context ~sr; - forward_pbd_op ~local_fn ~__context ~self - (fun session_id rpc -> Client.PBD.unplug rpc session_id self)) - end - - module Crashdump = struct - - (* -------- Forwarding helper functions: ------------------------------------ *) - - (* Read VDI and then re-use VDI forwarding policy *) - let forward_crashdump_op ~local_fn ~__context ~self op = - let vdi = Db.Crashdump.get_VDI ~__context ~self in - VDI.forward_vdi_op ~local_fn ~__context ~self:vdi op - - (* -------------------------------------------------------------------------- *) - - let destroy ~__context ~self = - info "Crashdump.destroy: crashdump = '%s'" (crashdump_uuid ~__context self); - let local_fn = Local.Crashdump.destroy ~self in - forward_crashdump_op ~local_fn ~__context ~self (fun session_id rpc -> Client.Crashdump.destroy rpc session_id self) - end - - (* whatever *) - module VTPM = Local.VTPM - - module Console = Local.Console - - module User = Local.User - - module Blob = Local.Blob - - 
module Message = Local.Message - - module Data_source = struct end - - module Secret = Local.Secret - - module PCI = struct end - - module PGPU = struct - include Local.PGPU - - let enable_dom0_access ~__context ~self = - info "PGPU.enable_dom0_access: pgpu = '%s'" (pgpu_uuid ~__context self); - let host = Db.PGPU.get_host ~__context ~self in - let local_fn = Local.PGPU.enable_dom0_access ~self in - do_op_on ~__context ~local_fn ~host - (fun session_id rpc -> Client.PGPU.enable_dom0_access rpc session_id self) - - let disable_dom0_access ~__context ~self = - info "PGPU.disable_dom0_access: pgpu = '%s'" (pgpu_uuid ~__context self); - let host = Db.PGPU.get_host ~__context ~self in - let local_fn = Local.PGPU.disable_dom0_access ~self in - do_op_on ~__context ~local_fn ~host - (fun session_id rpc -> Client.PGPU.disable_dom0_access rpc session_id self) - end - - module GPU_group = struct - (* Don't forward. These are just db operations. *) - let create ~__context ~name_label ~name_description ~other_config = - info "GPU_group.create: name_label = '%s'" name_label; - Local.GPU_group.create ~__context ~name_label ~name_description ~other_config - - let destroy ~__context ~self = - info "GPU_group.destroy: gpu_group = '%s'" (gpu_group_uuid ~__context self); - (* WARNING WARNING WARNING: directly call destroy with the global lock since it does only database operations *) - Helpers.with_global_lock (fun () -> - Local.GPU_group.destroy ~__context ~self) - - let update_enabled_VGPU_types ~__context ~self = - info "GPU_group.update_enabled_VGPU_types: gpu_group = '%s'" (gpu_group_uuid ~__context self); - Local.GPU_group.update_enabled_VGPU_types ~__context ~self - - let update_supported_VGPU_types ~__context ~self = - info "GPU_group.update_supported_VGPU_types: gpu_group = '%s'" (gpu_group_uuid ~__context self); - Local.GPU_group.update_supported_VGPU_types ~__context ~self - - let get_remaining_capacity ~__context ~self ~vgpu_type = - info "GPU_group.get_remaining_capacity: 
gpu_group = '%s' vgpu_type = '%s'" - (gpu_group_uuid ~__context self) - (vgpu_type_uuid ~__context vgpu_type); - Local.GPU_group.get_remaining_capacity ~__context ~self ~vgpu_type - end - - module VGPU = struct - let create ~__context ~vM ~gPU_group ~device ~other_config ~_type = - info "VGPU.create: VM = '%s'; GPU_group = '%s'" (vm_uuid ~__context vM) (gpu_group_uuid ~__context gPU_group); - Local.VGPU.create ~__context ~vM ~gPU_group ~device ~other_config ~_type - - let destroy ~__context ~self = - info "VGPU.destroy: VGPU = '%s'" (vgpu_uuid ~__context self); - Local.VGPU.destroy ~__context ~self - - let atomic_set_resident_on ~__context ~self ~value = - info "VGPU.atomic_set_resident_on: VGPU = '%s'; PGPU = '%s'" - (vgpu_uuid ~__context self) (pgpu_uuid ~__context value); - (* Need to prevent the host chooser being run while these fields are being modified *) - Helpers.with_global_lock - (fun () -> - Db.VGPU.set_resident_on ~__context ~self ~value; - Db.VGPU.set_scheduled_to_be_resident_on ~__context ~self ~value:Ref.null - ) - end + let maximise_memory ~__context ~self ~total ~approximate = + info "VM.maximise_memory: VM = '%s'; total = '%Ld'; approximate = '%b'" (vm_uuid ~__context self) total approximate; + Local.VM.maximise_memory ~__context ~self ~total ~approximate + + let clone ~__context ~vm ~new_name = + info "VM.clone: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; + let local_fn = Local.VM.clone ~vm ~new_name in + (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone + uses the API to clone and lock the individual VDIs. We don't give any atomicity + guarantees here but we do prevent disk corruption. 
*) + with_vm_operation ~__context ~self:vm ~doc:"VM.clone" ~op:`clone + (fun () -> + forward_to_access_srs ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.clone rpc session_id vm new_name)) + + let update_snapshot_metadata ~__context ~vm ~snapshot_of ~snapshot_time ~transportable_snapshot_id = + Db.VM.set_is_a_snapshot ~__context ~self:vm ~value:true; + Db.VM.set_snapshot_time ~__context ~self:vm ~value:snapshot_time; + Db.VM.set_snapshot_of ~__context ~self:vm ~value:snapshot_of; + Db.VM.set_transportable_snapshot_id ~__context ~self:vm ~value:transportable_snapshot_id + + (* almost a copy of the clone function *) + let snapshot ~__context ~vm ~new_name = + info "VM.snapshot: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; + let local_fn = Local.VM.snapshot ~vm ~new_name in + (* We mark the VM as snapshotting. We don't mark the disks; the implementation of the snapshot uses the API *) + (* to snapshot and lock the individual VDIs. We don't give any atomicity guarantees here but we do prevent *) + (* disk corruption. *) + with_vm_operation ~__context ~self: vm ~doc:"VM.snapshot" ~op:`snapshot + (fun () -> + forward_to_access_srs ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.snapshot rpc session_id vm new_name)) + + let snapshot_with_quiesce ~__context ~vm ~new_name = + info "VM.snapshot_with_quiesce: VM = '%s'; new_name = '%s'" (vm_uuid ~__context vm) new_name; + let local_fn = Local.VM.snapshot_with_quiesce ~vm ~new_name in + (* We mark the VM as snapshotting. We don't mark the disks; the implementation of the snapshot uses the API *) + (* to snapshot and lock the individual VDIs. We don't give any atomicity guarantees here but we do prevent *) + (* disk corruption. 
*) + with_vm_operation ~__context ~self: vm ~doc:"VM.snapshot_with_quiesce" ~op:`snapshot_with_quiesce + (fun () -> + let power_state = Db.VM.get_power_state ~__context ~self:vm in + let forward = + if power_state = `Running + then forward_vm_op + else forward_to_access_srs + in forward ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.snapshot_with_quiesce rpc session_id vm new_name)) + + let checkpoint ~__context ~vm ~new_name = + info "VM.checkpoint: VM = '%s'; new_name=' %s'" (vm_uuid ~__context vm) new_name; + let local_fn = Local.VM.checkpoint ~vm ~new_name in + let forward_fn session_id rpc = Client.VM.checkpoint rpc session_id vm new_name in + + with_vm_operation ~__context ~self: vm ~doc:"VM.checkpoint" ~op:`checkpoint (fun () -> + if Db.VM.get_power_state __context vm = `Running then + forward_vm_op ~local_fn ~__context ~vm forward_fn + else + forward_to_access_srs ~local_fn ~__context ~vm forward_fn) + + let copy ~__context ~vm ~new_name ~sr = + info "VM.copy: VM = '%s'; new_name = '%s'; SR = '%s'" (vm_uuid ~__context vm) new_name (sr_uuid ~__context sr); + (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone + uses the API to clone and lock the individual VDIs. We don't give any atomicity + guarantees here but we do prevent disk corruption. + VM.copy is always run on the master - the VDI.copy subtask(s) will be + forwarded to suitable hosts. 
*) + with_vm_operation ~__context ~self:vm ~doc:"VM.copy" ~op:`copy + (fun () -> Local.VM.copy ~__context ~vm ~new_name ~sr) + + exception Ambigious_provision_spec + exception Not_forwarding + + let provision ~__context ~vm = + info "VM.provision: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.provision ~vm in + let localhost = Helpers.get_localhost ~__context in + + with_vm_operation ~__context ~self:vm ~doc:"VM.provision" ~op:`provision + (fun () -> + let template = + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Xapi_templates.get_template_record rpc session_id vm) in + (* Compute the set of hosts which can see the SRs mentioned in the provision spec *) + let possible_hosts = + try + let srs_in_provision_spec = + match template with + None -> [] + | Some template -> + let srs = List.map (fun d->d.Xapi_templates.sr) template.Xapi_templates.disks in + let srs = + List.map + (fun sr-> + try + Db.SR.get_by_uuid ~__context ~uuid:sr + with + Db_exn.Read_missing_uuid (_,_,_) + | Db_exn.Too_many_values (_,_,_) -> + begin + match (Db.SR.get_by_name_label ~__context ~label:sr) with + [] -> raise Not_forwarding (* couldn't find it. 
Do it locally and will report correct error *) + | [x] -> info "VM.provision: VM = '%s'; SR = '%s'" (vm_uuid ~__context vm) (sr_uuid ~__context x); x + | _ -> raise Ambigious_provision_spec + end) + srs in + srs in + Xapi_vm_helpers.possible_hosts ~__context ~vm + ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context + ~reqd_srs:srs_in_provision_spec) () + with + | Not_forwarding -> [ ] + | Api_errors.Server_error (code, _) when code = Api_errors.no_hosts_available -> [] in + let hosts = if possible_hosts = [] then [ localhost ] else possible_hosts in + loadbalance_host_operation ~__context ~hosts ~doc:"VM.provision" ~op:`provision + (fun host -> + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.VM.provision rpc session_id vm) + ) + ) + + let query_services ~__context ~self = + info "VM.query_services: VM = '%s'" (vm_uuid ~__context self); + with_vm_operation ~__context ~self ~doc:"VM.query_services" ~op:`query_services + (fun () -> + Local.VM.query_services ~__context ~self + ) + + let start ~__context ~vm ~start_paused ~force = + info "VM.start: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.start ~vm ~start_paused ~force in + let host = + with_vm_operation ~__context ~self:vm ~doc:"VM.start" ~op:`start + (fun () -> + with_vbds_marked ~__context ~vm ~doc:"VM.start" ~op:`attach + (fun vbds -> + with_vifs_marked ~__context ~vm ~doc:"VM.start" ~op:`attach + (fun vifs -> + (* The start operation makes use of the cached memory overhead *) + (* value when reserving memory. It's important to recalculate *) + (* the cached value before performing the start since there's *) + (* no guarantee that the cached value is valid. In particular, *) + (* we must recalculate the value BEFORE creating the snapshot. 
*) + Xapi_vm_helpers.update_memory_overhead ~__context ~vm; + Xapi_vm_helpers.consider_generic_bios_strings ~__context ~vm; + let snapshot = Db.VM.get_record ~__context ~self:vm in + let (), host = forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_start + (fun session_id rpc -> + Client.VM.start rpc session_id vm start_paused force) in + host + ))) in + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' started on host: %s (uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.Host.get_name_label ~__context ~self:host) + (Db.Host.get_uuid ~__context ~self:host) + in + let (name, priority) = Api_messages.vm_started in + (try ignore + (Xapi_message.create + ~__context + ~name + ~priority + ~cls:`VM + ~obj_uuid:uuid + ~body:message_body) + with _ -> ()); + Rrdd_proxy.push_rrd ~__context ~vm_uuid:uuid + + let start_on ~__context ~vm ~host ~start_paused ~force = + if Helpers.rolling_upgrade_in_progress ~__context + then Helpers.assert_host_has_highest_version_in_pool + ~__context ~host ; + (* Prevent VM start on a host that is evacuating *) + List.iter (fun op -> + match op with + | ( _ , `evacuate ) -> raise (Api_errors.Server_error(Api_errors.host_evacuate_in_progress, [(Ref.string_of host)])); + | _ -> ()) + (Db.Host.get_current_operations ~__context ~self:host); + info "VM.start_on: VM = '%s'; host '%s'" + (vm_uuid ~__context vm) (host_uuid ~__context host); + let local_fn = Local.VM.start_on ~vm ~host ~start_paused ~force in + with_vm_operation ~__context ~self:vm ~doc:"VM.start_on" ~op:`start_on + (fun () -> + with_vbds_marked ~__context ~vm ~doc:"VM.start_on" ~op:`attach + (fun vbds -> + with_vifs_marked ~__context ~vm ~doc:"VM.start_on" ~op:`attach + (fun vifs -> + (* The start operation makes use of the cached memory overhead *) + (* value when reserving memory. 
It's important to recalculate *) + (* the cached value before performing the start since there's *) + (* no guarantee that the cached value is valid. In particular, *) + (* we must recalculate the value BEFORE creating the snapshot. *) + Xapi_vm_helpers.update_memory_overhead ~__context ~vm; + Xapi_vm_helpers.consider_generic_bios_strings ~__context ~vm; + let snapshot = Db.VM.get_record ~__context ~self:vm in + reserve_memory_for_vm ~__context ~vm ~host ~snapshot ~host_op:`vm_start + (fun () -> + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> + Client.VM.start + rpc session_id vm start_paused force) + ); + Xapi_vm_helpers.start_delay ~__context ~vm; + ))); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + let _ (* uuid *) = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' started on host: %s (uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.Host.get_name_label ~__context ~self:host) + (Db.Host.get_uuid ~__context ~self:host) in + let (name, priority) = Api_messages.vm_started in + (try ignore + (Xapi_message.create + ~__context + ~name + ~priority + ~cls:`VM + ~obj_uuid:(Db.VM.get_uuid ~__context ~self:vm) + ~body:message_body) + with _ -> ()); + Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) + + let pause ~__context ~vm = + info "VM.pause: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.pause ~vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.pause" ~op:`pause + (fun () -> + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.pause rpc session_id vm)); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + + let unpause ~__context ~vm = + info "VM.unpause: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.unpause ~vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.unpause" ~op:`unpause + (fun () -> + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc 
-> Client.VM.unpause rpc session_id vm)); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let call_plugin ~__context ~vm ~plugin ~fn ~args = + let censor_kws = ["password"] in (* We could censor "username" too, but the current decision was to leave it there. *) + let argstrs = List.map (fun (k, v) -> Printf.sprintf "args:%s = '%s'" k (if List.exists (String.has_substr k) censor_kws then "(omitted)" else v)) args in + info "VM.call_plugin: VM = '%s'; plugin = '%s'; fn = '%s'; %s" (vm_uuid ~__context vm) plugin fn (String.concat "; " argstrs); + let local_fn = Local.VM.call_plugin ~vm ~plugin ~fn ~args in + with_vm_operation ~__context ~self:vm ~doc:"VM.call_plugin" ~op:`call_plugin ~policy:Helpers.Policy.fail_immediately + (fun () -> + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.call_plugin rpc session_id vm plugin fn args)) + + let set_has_vendor_device ~__context ~self ~value = + info "VM.set_has_vendor_device: VM = '%s' to %b" (vm_uuid ~__context self) value; + Local.VM.set_has_vendor_device ~__context ~self ~value + + let set_xenstore_data ~__context ~self ~value = + info "VM.set_xenstore_data: VM = '%s'" (vm_uuid ~__context self); + Db.VM.set_xenstore_data ~__context ~self ~value; + let power_state = Db.VM.get_power_state ~__context ~self in + if power_state = `Running then + let local_fn = Local.VM.set_xenstore_data ~self ~value in + forward_vm_op ~local_fn ~__context ~vm:self (fun session_id rpc -> Client.VM.set_xenstore_data rpc session_id self value) + + let clean_shutdown ~__context ~vm = + info "VM.clean_shutdown: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.clean_shutdown ~vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.clean_shutdown" ~op:`clean_shutdown + (fun () -> + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.clean_shutdown rpc session_id vm) + ); + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf 
"VM '%s' shutdown" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_shutdown in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let shutdown ~__context ~vm = + info "VM.shutdown: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.shutdown ~vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.shutdown" ~op:`shutdown + (fun () -> + if Db.VM.get_power_state ~__context ~self:vm = `Suspended + then + begin + debug "VM '%s' is suspended. Shutdown will just delete suspend VDI" (Ref.string_of vm); + let all_vm_srs = Xapi_vm_helpers.compute_required_SRs_for_shutting_down_suspended_domains ~__context ~vm in + let suitable_host = Xapi_vm_helpers.choose_host ~__context ~vm:vm + ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:all_vm_srs) () in + do_op_on ~__context ~local_fn:(Local.VM.hard_shutdown ~vm) ~host:suitable_host (fun session_id rpc -> Client.VM.hard_shutdown rpc session_id vm) + end + else + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.shutdown rpc session_id vm) + ); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' shutdown" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_shutdown in + (try ignore(Xapi_message.create ~__context ~name + ~priority ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()) + + let clean_reboot ~__context ~vm = + info "VM.clean_reboot: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.clean_reboot ~vm in + (* Mark all the VBDs to prevent someone nicking one of the VDIs (or attaching + a conflicting VBD) while the devices are detached *) + with_vm_operation ~__context ~self:vm 
~doc:"VM.clean_reboot" ~op:`clean_reboot + (fun () -> + with_vbds_marked ~__context ~vm ~doc:"VM.clean_reboot" ~op:`attach + (fun vbds -> + with_vifs_marked ~__context ~vm ~doc:"VM.clean_reboot" ~op:`attach + (fun vifs -> + (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't + change across reboot. *) + forward_vm_op ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.clean_reboot rpc session_id vm)))); + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' rebooted cleanly" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_rebooted in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + (* don't forward power_state_reset; the whole point is that this can be performed when a host is down *) + let power_state_reset ~__context ~vm = + info "VM.power_state_reset: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.power_state_reset ~__context ~vm + + let hard_shutdown ~__context ~vm = + info "VM.hard_shutdown: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.hard_shutdown ~vm in + let host = Db.VM.get_resident_on ~__context ~self:vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.hard_shutdown" ~op:`hard_shutdown + (fun () -> + cancel ~__context ~vm ~ops:[ `clean_shutdown; `clean_reboot; `hard_reboot; `pool_migrate; `call_plugin; `suspend ]; + (* If VM is actually suspended and we ask to hard_shutdown, we need to + forward to any host that can see the VDIs *) + let policy = + if Db.VM.get_power_state ~__context ~self:vm = `Suspended + then + begin + debug "VM '%s' is suspended. 
Shutdown will just delete suspend VDI" (Ref.string_of vm); + (* this expression evaluates to a fn that forwards to a host that can see all vdis: *) + let all_vm_srs = Xapi_vm_helpers.compute_required_SRs_for_shutting_down_suspended_domains ~__context ~vm in + let suitable_host = Xapi_vm_helpers.choose_host ~__context ~vm:vm + ~choose_fn:(Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:all_vm_srs) () in + do_op_on ~host:suitable_host + end + else + (* if we're not suspended then just forward to host that has vm running on it: *) + do_op_on ~host:host + in + policy ~local_fn ~__context (fun session_id rpc -> Client.VM.hard_shutdown rpc session_id vm) + ); + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' shutdown forcibly" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_shutdown in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let hard_reboot ~__context ~vm = + info "VM.hard_reboot: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.hard_reboot ~vm in + let host = Db.VM.get_resident_on ~__context ~self:vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.hard_reboot" ~op:`hard_reboot + (fun () -> + cancel ~__context ~vm ~ops:[ `clean_shutdown; `clean_reboot; `pool_migrate; `call_plugin; `suspend ]; + with_vbds_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach + (fun vbds -> + with_vifs_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach + (fun vifs -> + (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't + change across reboot. 
*) + do_op_on ~host:host ~local_fn ~__context + (fun session_id rpc -> Client.VM.hard_reboot rpc session_id vm)))); + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' rebooted forcibly" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_rebooted in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let hard_reboot_internal ~__context ~vm = + info "VM.hard_reboot_internal: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.hard_reboot_internal ~vm in + (* no VM operation: we assume the VM is still Running *) + with_vbds_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach + (fun vbds -> + with_vifs_marked ~__context ~vm ~doc:"VM.hard_reboot" ~op:`attach + (fun vifs -> + (* CA-31903: we don't need to reserve memory for reboot because the memory settings can't + change across reboot. 
*) + forward_vm_op ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.hard_reboot_internal rpc session_id vm))); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let suspend ~__context ~vm = + info "VM.suspend: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.suspend ~vm in + with_vm_operation ~__context ~self:vm ~doc:"VM.suspend" ~op:`suspend + (fun () -> + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.suspend rpc session_id vm)); + let uuid = Db.VM.get_uuid ~__context ~self:vm in + (* debug "placeholder for retrieving the current value of memory-actual";*) + let message_body = + Printf.sprintf "VM '%s' suspended" + (Db.VM.get_name_label ~__context ~self:vm) + in + let (name, priority) = Api_messages.vm_suspended in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let revert ~__context ~snapshot = + info "VM.revert: snapshot = '%s'" (vm_uuid ~__context snapshot); + + let vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in + let vm = + if Db.is_valid_ref __context vm + then vm + else Xapi_vm_snapshot.create_vm_from_snapshot ~__context ~snapshot in + + let local_fn = Local.VM.revert ~snapshot in + let forward_fn session_id rpc = Local.VM.revert ~__context ~snapshot in + + with_vm_operation ~__context ~self:snapshot ~doc:"VM.revert" ~op:`revert + (fun () -> with_vm_operation ~__context ~self:vm ~doc:"VM.reverting" ~op:`reverting + (fun () -> + (* We need to do a best-effort check that any suspend_VDI referenced by + the snapshot (not the current VM) is currently accessible. This is because + the revert code first clears space by deleting current VDIs before cloning + the suspend VDI: we want to minimise the probability that the operation fails + part-way through. 
*) + if Db.VM.get_power_state ~__context ~self:snapshot = `Suspended then begin + let suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:snapshot in + let sr = Db.VDI.get_SR ~__context ~self:suspend_VDI in + let pbd = choose_pbd_for_sr ~__context ~self:sr () in + let host = Db.PBD.get_host ~__context ~self:pbd in + let metrics = Db.Host.get_metrics ~__context ~self:host in + let live = Db.is_valid_ref __context metrics && (Db.Host_metrics.get_live ~__context ~self:metrics) in + if not live + then raise (Api_errors.Server_error(Api_errors.host_not_live, [ Ref.string_of host ])) + end; + (* first of all, destroy the domain if needed. *) + if Db.VM.get_power_state ~__context ~self:vm <> `Halted then begin + debug "VM %s (domid %Ld) which is reverted is not halted: shutting it down first" + (Db.VM.get_uuid __context vm) + (Db.VM.get_domid __context vm); + Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VM.hard_shutdown rpc session_id vm); + end; + + Xapi_vm_snapshot.revert_vm_fields ~__context ~snapshot ~vm; + if Db.VM.get_power_state __context vm = `Running then + forward_vm_op ~local_fn ~__context ~vm forward_fn + else + forward_to_access_srs ~local_fn ~__context ~vm forward_fn)) + + (* same forwarding logic as clone *) + let csvm ~__context ~vm = + info "VM.csvm: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.csvm ~vm in + (* We mark the VM as cloning. We don't mark the disks; the implementation of the clone + uses the API to clone and lock the individual VDIs. We don't give any atomicity + guarantees here but we do prevent disk corruption. 
*) + let suspend_sr = Db.VDI.get_SR ~__context ~self:(Db.VM.get_suspend_VDI ~__context ~self:vm) in + let result = with_vm_operation ~__context ~self:vm ~doc:"VM.csvm" ~op:`csvm + (fun () -> + forward_to_access_srs_and ~extra_sr:suspend_sr ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.csvm rpc session_id vm)) in + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' cloned (new uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.VM.get_uuid ~__context ~self:result) + in + let (name, priority) = Api_messages.vm_cloned in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + result + + (* Like start.. resume on any suitable host *) + let resume ~__context ~vm ~start_paused ~force = + info "VM.resume: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.resume ~vm ~start_paused ~force in + let host = + with_vm_operation ~__context ~self:vm ~doc:"VM.resume" ~op:`resume + (fun () -> + with_vbds_marked ~__context ~vm ~doc:"VM.resume" ~op:`attach + (fun vbds -> + let snapshot = Helpers.get_boot_record ~__context ~self:vm in + let (), host = forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_resume + (fun session_id rpc -> Client.VM.resume rpc session_id vm start_paused force) in + host + ); + ) + in + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' resumed on host: %s (uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.Host.get_name_label ~__context ~self:host) + (Db.Host.get_uuid ~__context ~self:host) + in + let (name, priority) = Api_messages.vm_resumed in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) + + let 
resume_on ~__context ~vm ~host ~start_paused ~force = + if Helpers.rolling_upgrade_in_progress ~__context + then Helpers.assert_host_has_highest_version_in_pool + ~__context ~host ; + info "VM.resume_on: VM = '%s'; host = '%s'" (vm_uuid ~__context vm) (host_uuid ~__context host); + let local_fn = Local.VM.resume_on ~vm ~host ~start_paused ~force in + with_vm_operation ~__context ~self:vm ~doc:"VM.resume_on" ~op:`resume_on + (fun () -> + with_vbds_marked ~__context ~vm ~doc:"VM.resume_on" ~op:`attach + (fun vbds -> + let snapshot = Helpers.get_boot_record ~__context ~self:vm in + reserve_memory_for_vm ~__context ~vm ~host ~snapshot ~host_op:`vm_resume + (fun () -> + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.VM.resume_on rpc session_id vm host start_paused force)); + ); + ); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf "VM '%s' resumed on host: %s (uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.Host.get_name_label ~__context ~self:host) + (Db.Host.get_uuid ~__context ~self:host) + in + let (name, priority) = Api_messages.vm_resumed in + (try ignore(Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) with _ -> ()); + Rrdd_proxy.push_rrd ~__context ~vm_uuid:(Db.VM.get_uuid ~__context ~self:vm) + + let pool_migrate_complete ~__context ~vm ~host = + info "VM.pool_migrate_complete: VM = '%s'; host = '%s'" + (vm_uuid ~__context vm) (host_uuid ~__context host); + let local_fn = Local.VM.pool_migrate_complete ~vm ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> + Client.VM.pool_migrate_complete rpc session_id vm host); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm + + let pool_migrate ~__context ~vm ~host ~options = + info "VM.pool_migrate: VM = '%s'; host = '%s'" + (vm_uuid ~__context vm) (host_uuid ~__context host); 
+ if Helpers.rolling_upgrade_in_progress ~__context + then begin + let source_host = Db.VM.get_resident_on ~__context ~self:vm in + Helpers.assert_host_versions_not_decreasing + ~__context + ~host_from:(Helpers.LocalObject source_host) + ~host_to:(Helpers.LocalObject host); + end; + let local_fn = Local.VM.pool_migrate ~vm ~host ~options in + + (* Check that the VM is compatible with the host it is being migrated to. *) + let force = try bool_of_string (List.assoc "force" options) with _ -> false in + if not force then Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); + + with_vm_operation ~__context ~self:vm ~doc:"VM.pool_migrate" ~op:`pool_migrate ~strict:(not force) + (fun () -> + (* Make sure the target has enough memory to receive the VM *) + let snapshot = Helpers.get_boot_record ~__context ~self:vm in + (* MTC: An MTC-protected VM has a peer VM on the destination host to which + it migrates to. When reserving memory, we must substitute the source VM + with this peer VM. If is not an MTC-protected VM, then this call will + simply return the same VM. Note that the call below not only accounts for + the destination VM's memory footprint but it also sets its set_scheduled_to_be_resident_on + field so we must make sure that we pass the destination VM and not the source. + Note: TBD: when migration into an existing VM is implemented, this section will + have to be revisited since the destination VM would already be occupying memory + and there won't be any need to account for its memory. 
*) + let dest_vm = Mtc.get_peer_vm_or_self ~__context ~self:vm in + reserve_memory_for_vm ~__context ~vm:dest_vm ~host ~snapshot ~host_op:`vm_migrate + (fun () -> + forward_vm_op ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.pool_migrate rpc session_id vm host options))); + update_vbd_operations ~__context ~vm; + update_vif_operations ~__context ~vm; + Cpuid_helpers.update_cpu_flags ~__context ~vm ~host + + let migrate_send ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = + info "VM.migrate_send: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.migrate_send ~vm ~dest ~live ~vdi_map ~vif_map ~options in + let forwarder = + if Xapi_vm_lifecycle.is_live ~__context ~self:vm then forward_vm_op else + let snapshot = Db.VM.get_record ~__context ~self:vm in + (fun ~local_fn ~__context ~vm op -> + fst (forward_to_suitable_host ~local_fn ~__context ~vm ~snapshot ~host_op:`vm_migrate op)) in + with_vm_operation ~__context ~self:vm ~doc:"VM.migrate_send" ~op:`migrate_send + (fun () -> + Local.VM.assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options; + forwarder ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.migrate_send rpc session_id vm dest live vdi_map vif_map options) + ) + + let assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = + info "VM.assert_can_migrate: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options + + let send_trigger ~__context ~vm ~trigger = + info "VM.send_trigger: VM = '%s'; trigger = '%s'" (vm_uuid ~__context vm) trigger; + let local_fn = Local.VM.send_trigger ~vm ~trigger in + with_vm_operation ~__context ~self:vm ~doc:"VM.send_trigger" ~op:`send_trigger + (fun () -> + forward_vm_op ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.send_trigger rpc session_id vm trigger)) + + let send_sysrq ~__context ~vm ~key = + info "VM.send_sysrq: VM = '%s'; sysrq = '%s'" (vm_uuid ~__context vm) key; 
+ let local_fn = Local.VM.send_sysrq ~vm ~key in + with_vm_operation ~__context ~self:vm ~doc:"VM.send_sysrq" ~op:`send_sysrq + (fun () -> + forward_vm_op ~local_fn ~__context ~vm + (fun session_id rpc -> Client.VM.send_sysrq rpc session_id vm key)) + + let set_VCPUs_number_live ~__context ~self ~nvcpu = + info "VM.set_VCPUs_number_live: VM = '%s'; number_of_VCPU = %Ld" (vm_uuid ~__context self) nvcpu; + let local_fn = Local.VM.set_VCPUs_number_live ~self ~nvcpu in + with_vm_operation ~__context ~self ~doc:"VM.set_VCPUs_number_live" ~op:`changing_VCPUs_live + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.set_VCPUs_number_live rpc session_id self nvcpu)) + + let add_to_VCPUs_params_live ~__context ~self ~key ~value = + info "VM.add_to_VCPUs_params_live: VM = '%s'; params = ('%s','%s')" (vm_uuid ~__context self) key value; + let local_fn = Local.VM.add_to_VCPUs_params_live ~self ~key ~value in + with_vm_operation ~__context ~self ~doc:"VM.add_to_VCPUs_params_live" ~op:`changing_VCPUs_live + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.add_to_VCPUs_params_live rpc session_id self key value)) + + let set_VCPUs_max ~__context ~self ~value = + info "VM.set_VCPUs_max: self = %s; value = %Ld" + (vm_uuid ~__context self) value; + with_vm_operation ~__context ~self ~doc:"VM.set_VCPUs_max" + ~op:`changing_VCPUs + (fun () -> Local.VM.set_VCPUs_max ~__context ~self ~value) + + let set_VCPUs_at_startup ~__context ~self ~value = + info "VM.set_VCPUs_at_startup: self = %s; value = %Ld" + (vm_uuid ~__context self) value; + Local.VM.set_VCPUs_at_startup ~__context ~self ~value + + let compute_memory_overhead ~__context ~vm = + info "VM.compute_memory_overhead: vm = '%s'" + (vm_uuid ~__context vm); + Local.VM.compute_memory_overhead ~__context ~vm + + let set_memory_dynamic_range ~__context ~self ~min ~max = + info "VM.set_memory_dynamic_range: VM = '%s'; min = %Ld; max = %Ld" + 
(Ref.string_of self) min max; + let local_fn = Local.VM.set_memory_dynamic_range ~self ~min ~max in + with_vm_operation ~__context ~self ~doc:"VM.set_memory_dynamic_range" + ~op:`changing_dynamic_range + (fun () -> + (* XXX: Perform basic parameter validation, before forwarding *) + (* to the slave. Do this after sorting out the last boot *) + (* record via set_static_range. *) + let power_state = Db.VM.get_power_state ~__context ~self in + match power_state with + | `Running -> + (* If current dynamic_min is lower *) + (* then we will block the operation *) + reserve_memory_for_dynamic_change ~__context ~vm:self + min max + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> + Client.VM.set_memory_dynamic_range + rpc session_id self min max + ) + ) + | `Halted -> + local_fn ~__context + | _ -> + failwith + "assertion_failure: set_memory_dynamic_range: \ + power_state should be Halted or Running" + ) + + let set_memory_dynamic_max ~__context ~self ~value = + info "VM.set_memory_dynamic_max: VM = '%s'; value = %Ld" + (vm_uuid ~__context self) value; + set_memory_dynamic_range ~__context ~self ~max:value + ~min:(Db.VM.get_memory_dynamic_min ~__context ~self) + + let set_memory_dynamic_min ~__context ~self ~value = + info "VM.set_memory_dynamic_min: VM = '%s'; value = %Ld" + (vm_uuid ~__context self) value; + set_memory_dynamic_range ~__context ~self ~min:value + ~max:(Db.VM.get_memory_dynamic_max ~__context ~self) + + let set_memory_static_range ~__context ~self ~min ~max = + info "VM.set_memory_static_range: self = %s; min = %Ld; max = %Ld" + (vm_uuid ~__context self) min max; + with_vm_operation ~__context ~self ~doc:"VM.set_memory_static_range" + ~op:`changing_static_range + (fun () -> Local.VM.set_memory_static_range ~__context ~self ~min ~max) + + let set_memory_static_max ~__context ~self ~value = + info "VM.set_memory_static_max: VM = '%s'; value = %Ld" + (vm_uuid ~__context self) value; + set_memory_static_range ~__context ~self 
~max:value + ~min:(Db.VM.get_memory_static_min ~__context ~self) + + let set_memory_static_min ~__context ~self ~value = + info "VM.set_memory_static_min: VM = '%s'; value = %Ld" + (vm_uuid ~__context self) value; + set_memory_static_range ~__context ~self ~min:value + ~max:(Db.VM.get_memory_static_max ~__context ~self) + + let set_memory_limits ~__context ~self + ~static_min ~static_max ~dynamic_min ~dynamic_max = + info + "VM.set_memory_limits: self = %s; \ + static_min = %Ld; static_max = %Ld; \ + dynamic_min = %Ld; dynamic_max = %Ld" + (vm_uuid ~__context self) + static_min static_max dynamic_min dynamic_max; + let local_fn = Local.VM.set_memory_limits ~self + ~static_min ~static_max ~dynamic_min ~dynamic_max in + with_vm_operation ~__context ~self ~doc:"VM.set_memory_limits" ~op:`changing_memory_limits + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.set_memory_limits rpc session_id self + static_min static_max dynamic_min dynamic_max)) + + let set_memory ~__context ~self ~value = + info "VM.set_memory: self = %s; value = %Ld" (vm_uuid ~__context self) value; + let local_fn = Local.VM.set_memory ~self ~value in + with_vm_operation ~__context ~self ~doc:"VM.set_memory" ~op:`changing_memory_limits + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.set_memory rpc session_id self value)) + + let set_memory_target_live ~__context ~self ~target = + info "VM.set_memory_target_live: VM = '%s'; min = %Ld" (vm_uuid ~__context self) target; + let local_fn = Local.VM.set_memory_target_live ~self ~target in + with_vm_operation ~__context ~self ~doc:"VM.set_memory_target_live" ~op:`changing_memory_live + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.set_memory_target_live rpc session_id self target)) + + let wait_memory_target_live ~__context ~self = + info "VM.wait_memory_target_live: VM = '%s'" (vm_uuid ~__context self); + let local_fn = 
Local.VM.wait_memory_target_live ~self in + with_vm_operation ~__context ~self ~doc:"VM.wait_memory_target_live" ~op:`awaiting_memory_live + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.wait_memory_target_live rpc session_id self)) + + (* Dummy implementation for a deprecated API method. *) + let get_cooperative ~__context ~self = + info "VM.get_cooperative: VM = '%s'" (vm_uuid ~__context self); + Local.VM.get_cooperative ~__context ~self + + let set_HVM_shadow_multiplier ~__context ~self ~value = + info "VM.set_HVM_shadow_multiplier: self = %s; multiplier = %f" + (vm_uuid ~__context self) value; + with_vm_operation ~__context ~self ~doc:"VM.set_HVM_shadow_multiplier" + ~op:`changing_shadow_memory + (fun () -> + Local.VM.set_HVM_shadow_multiplier ~__context ~self ~value) + + let set_shadow_multiplier_live ~__context ~self ~multiplier = + info "VM.set_shadow_multiplier_live: VM = '%s'; min = %f" (vm_uuid ~__context self) multiplier; + let local_fn = Local.VM.set_shadow_multiplier_live ~self ~multiplier in + with_vm_operation ~__context ~self ~doc:"VM.set_shadow_multiplier_live" ~op:`changing_shadow_memory_live + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> + (* No need to perform a memory calculation here: the real code will tell us if the + new value is too big. 
*) + Client.VM.set_shadow_multiplier_live rpc session_id self multiplier + ) + ) + + (* this is in db *) + let get_boot_record ~__context ~self = + info "VM.get_boot_record: VM = '%s'" (vm_uuid ~__context self); + with_vm_operation ~__context ~self ~doc:"VM.get_boot_record" ~op:`get_boot_record + (fun () -> + Local.VM.get_boot_record ~__context ~self) + + let get_data_sources ~__context ~self = + info "VM.get_data_sources: VM = '%s'" (vm_uuid ~__context self); + let local_fn = Local.VM.get_data_sources ~self in + with_vm_operation ~__context ~self ~doc:"VM.get_data_source" ~op:`data_source_op + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.get_data_sources rpc session_id self)) + + let record_data_source ~__context ~self ~data_source = + info "VM.record_data_source: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; + let local_fn = Local.VM.record_data_source ~self ~data_source in + with_vm_operation ~__context ~self ~doc:"VM.record_data_source" ~op:`data_source_op + (fun () -> + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.record_data_source rpc session_id self data_source)) + + let query_data_source ~__context ~self ~data_source = + info "VM.query_data_source: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; + Xapi_vm_lifecycle.assert_power_state_in ~__context ~self ~allowed:[`Running; `Paused]; + let local_fn = Local.VM.query_data_source ~self ~data_source in + forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.query_data_source rpc session_id self data_source) + + let forget_data_source_archives ~__context ~self ~data_source = + info "VM.forget_data_source_archives: VM = '%s'; data source = '%s'" (vm_uuid ~__context self) data_source; + let local_fn = Local.VM.forget_data_source_archives ~self ~data_source in + with_vm_operation ~__context ~self ~doc:"VM.forget_data_source_archives" ~op:`data_source_op + (fun () -> 
+ forward_vm_op ~local_fn ~__context ~vm:self + (fun session_id rpc -> Client.VM.forget_data_source_archives rpc session_id self data_source)) + + let get_possible_hosts ~__context ~vm = + info "VM.get_possible_hosts: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.get_possible_hosts ~__context ~vm + + let assert_operation_valid ~__context ~self ~op = + info "VM.assert_operation_valid: VM = '%s'" (vm_uuid ~__context self); + Local.VM.assert_operation_valid ~__context ~self ~op + + let update_allowed_operations ~__context ~self = + info "VM.update_allowed_operations: VM = '%s'" (vm_uuid ~__context self); + Local.VM.update_allowed_operations ~__context ~self + + let assert_can_boot_here ~__context ~self ~host = + info "VM.assert_can_boot_here: VM = '%s'; host = '%s'" (vm_uuid ~__context self) (host_uuid ~__context host); + Local.VM.assert_can_boot_here ~__context ~self ~host + + let retrieve_wlb_recommendations ~__context ~vm = + info "VM.retrieve_wlb_recommendations: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.retrieve_wlb_recommendations ~__context ~vm + + let assert_agile ~__context ~self = + info "VM.assert_agile: VM = '%s'" (vm_uuid ~__context self); + Local.VM.assert_agile ~__context ~self + + let get_allowed_VBD_devices ~__context ~vm = + info "VM.get_allowed_VBD_devices: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.get_allowed_VBD_devices ~__context ~vm + + let get_allowed_VIF_devices ~__context ~vm = + info "VM.get_allowed_VIF_devices: VM = '%s'" (vm_uuid ~__context vm); + Local.VM.get_allowed_VIF_devices ~__context ~vm + + let atomic_set_resident_on ~__context ~vm ~host = + info "VM.atomic_set_resident_on: VM = '%s'" (vm_uuid ~__context vm); + (* Need to prevent the host chooser being run while these fields are being modified *) + Helpers.with_global_lock + (fun () -> + Db.VM.set_resident_on ~__context ~self:vm ~value:host; + Db.VM.set_scheduled_to_be_resident_on ~__context ~self:vm ~value:Ref.null + ) + + let create_new_blob ~__context ~vm ~name 
~mime_type ~public = + info "VM.create_new_blob: VM = '%s'; name = '%s'; MIME type = '%s' public = %b" (vm_uuid ~__context vm) name mime_type public; + Local.VM.create_new_blob ~__context ~vm ~name ~mime_type ~public + + let s3_suspend ~__context ~vm = + info "VM.s3_suspend: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.s3_suspend ~vm in + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.s3_suspend rpc session_id vm) + + let s3_resume ~__context ~vm = + info "VM.s3_resume: VM = '%s'" (vm_uuid ~__context vm); + let local_fn = Local.VM.s3_resume ~vm in + forward_vm_op ~local_fn ~__context ~vm (fun session_id rpc -> Client.VM.s3_resume rpc session_id vm) + + + let copy_bios_strings ~__context ~vm ~host = + info "VM.copy_bios_strings: VM = '%s'; host = '%s'" (vm_uuid ~__context vm) (host_uuid ~__context host); + Local.VM.copy_bios_strings ~__context ~vm ~host + + let set_protection_policy ~__context ~self ~value = + info "VM.set_protection_policy: self = '%s'; " (vm_uuid ~__context self); + Local.VM.set_protection_policy ~__context ~self ~value + + let set_start_delay ~__context ~self ~value = + info "VM.set_start_delay: self = '%s';" (vm_uuid ~__context self); + Local.VM.set_start_delay ~__context ~self ~value + + let set_shutdown_delay ~__context ~self ~value = + info "VM.set_shutdown_delay: self = '%s';" (vm_uuid ~__context self); + Local.VM.set_shutdown_delay ~__context ~self ~value + + let set_order ~__context ~self ~value = + info "VM.set_order: self = '%s';" (vm_uuid ~__context self); + Local.VM.set_order ~__context ~self ~value + + let set_suspend_VDI ~__context ~self ~value = + info "VM.set_suspend_VDI: self = '%s';" (vm_uuid ~__context self); + Local.VM.set_suspend_VDI ~__context ~self ~value + + let assert_can_be_recovered ~__context ~self ~session_to = + info "VM.assert_can_be_recovered: self = '%s';" (vm_uuid ~__context self); + Local.VM.assert_can_be_recovered ~__context ~self ~session_to + + let 
get_SRs_required_for_recovery ~__context ~self ~session_to = + info "VM.get_SRs_required_for_recovery: self = '%s';" (vm_uuid ~__context self); + Local.VM.get_SRs_required_for_recovery ~__context ~self ~session_to + + let recover ~__context ~self ~session_to ~force = + info "VM.recover: self = '%s'; force = %b;" (vm_uuid ~__context self) force; + (* If a VM is part of an appliance, the appliance *) + (* should be recovered using VM_appliance.recover *) + let appliance = Db.VM.get_appliance ~__context ~self in + if Db.is_valid_ref __context appliance then + raise (Api_errors.Server_error(Api_errors.vm_is_part_of_an_appliance, + [Ref.string_of self; Ref.string_of appliance])); + Local.VM.recover ~__context ~self ~session_to ~force + + let set_appliance ~__context ~self ~value = + info "VM.set_appliance: self = '%s'; value = '%s';" (vm_uuid ~__context self) (vm_appliance_uuid ~__context value); + Local.VM.set_appliance ~__context ~self ~value + + let import_convert ~__context ~_type ~username ~password ~sr ~remote_config = + info "VM.import_convert: type = '%s'; remote_config = '%s;'" + _type (String.concat "," (List.map (fun (k,v) -> k ^ "=" ^ v) remote_config)); + Local.VM.import_convert ~__context ~_type ~username ~password ~sr ~remote_config + + let import ~__context ~url ~sr ~full_restore ~force = + info "VM.import: url = '%s' sr='%s' force='%b'" url (Ref.string_of sr) force; + let pbd = choose_pbd_for_sr ~__context ~self:sr () in + let host = Db.PBD.get_host ~__context ~self:pbd in + do_op_on ~local_fn:(Local.VM.import ~url ~sr ~full_restore ~force) ~__context ~host (fun session_id rpc -> Client.VM.import rpc session_id url sr full_restore force) + + end + + module VM_metrics = struct + end + + module VM_guest_metrics = struct + end + + module Host = struct + + (** Add to the Host's current operations, call a function and then remove from the + current operations. Ensure the allowed_operations are kept up to date. 
*) + let with_host_operation ~__context ~self ~doc ~op f = + let task_id = Ref.string_of (Context.get_task_id __context) in + (* CA-18377: If there's a rolling upgrade in progress, only send Miami keys across the wire. *) + let operation_allowed ~op = false + || not (Helpers.rolling_upgrade_in_progress ~__context) + || List.mem op Xapi_globs.host_operations_miami in + Helpers.retry_with_global_lock ~__context ~doc + (fun () -> + Xapi_host_helpers.assert_operation_valid ~__context ~self ~op; + if operation_allowed ~op then + Db.Host.add_to_current_operations ~__context ~self ~key: task_id ~value: op; + Xapi_host_helpers.update_allowed_operations ~__context ~self); + (* Then do the action with the lock released *) + finally f + (* Make sure to clean up at the end *) + (fun () -> + try + if operation_allowed ~op then begin + Db.Host.remove_from_current_operations ~__context ~self ~key: task_id; + Helpers.Early_wakeup.broadcast (Datamodel._host, Ref.string_of self); + end; + let clustered_srs = Db.SR.get_refs_where ~__context ~expr:(Eq (Field "clustered", Literal "true")) in + if clustered_srs <> [] then + (* Host powerstate operations on one host may affect all other hosts if + * a clustered SR is in use, so update all hosts' allowed operations. 
*) + Xapi_host_helpers.update_allowed_operations_all_hosts ~__context + else + Xapi_host_helpers.update_allowed_operations ~__context ~self + with + _ -> ()) + + let create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration = + info "Host.create: uuid='%s' name_label='%s' hostname='%s' address='%s'" uuid name_label hostname address; + Local.Host.create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration + + let destroy ~__context ~self = + info "Host.destroy: host = '%s'" (host_uuid __context self); + Local.Host.destroy ~__context ~self + + let set_power_on_mode ~__context ~self ~power_on_mode ~power_on_config = + info "Host.set_power_on_mode: host = '%s'; power_on_mode = '%s' ; power_on_config = [ %s ]" + (host_uuid ~__context self) power_on_mode (String.concat ", " (List.map (fun (k, v) -> k ^ "=" ^ v) power_on_config)); + Local.Host.set_power_on_mode ~__context ~self ~power_on_mode ~power_on_config + + let set_license_params ~__context ~self ~value = + info "Host.set_license_params: host = '%s'; license_params = [ %s ]" (host_uuid ~__context self) (String.concat ", " (List.map (fun (k, v) -> k ^ "=" ^ v) value)); + Local.Host.set_license_params ~__context ~self ~value + + let set_ssl_legacy ~__context ~self ~value = + info "Host.set_ssl_legacy: host = '%s'; value = %b" (host_uuid ~__context self) value; + let success () = + if Db.Host.get_ssl_legacy ~__context ~self = value + then Some () + else None + in + let local_fn = Local.Host.set_ssl_legacy ~self ~value in + let fn () = + do_op_on ~local_fn ~__context ~host:self + (fun session_id rpc -> + Client.Host.set_ssl_legacy rpc session_id self value) + in + tolerate_connection_loss fn success 30. 
+ + let ha_disable_failover_decisions ~__context ~host = + info "Host.ha_disable_failover_decisions: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_disable_failover_decisions ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_disable_failover_decisions rpc session_id host) + + let ha_disarm_fencing ~__context ~host = + info "Host.ha_disarm_fencing: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_disarm_fencing ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_disarm_fencing rpc session_id host) + + let ha_stop_daemon ~__context ~host = + info "Host.ha_stop_daemon: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_stop_daemon ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_stop_daemon rpc session_id host) + + let ha_release_resources ~__context ~host = + info "Host.ha_release_resources: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_release_resources ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_release_resources rpc session_id host) + + let ha_wait_for_shutdown_via_statefile ~__context ~host = + info "Host.ha_wait_for_shutdown_via_statefile: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_wait_for_shutdown_via_statefile ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_wait_for_shutdown_via_statefile rpc session_id host) + + let preconfigure_ha ~__context ~host ~statefiles ~metadata_vdi ~generation = + info "Host.preconfigure_ha: host = '%s'; statefiles =[ %s ]; metadata_vdi = '%s'; generation = '%s'" + (host_uuid ~__context host) (String.concat "; " (List.map Ref.string_of statefiles)) (vdi_uuid ~__context metadata_vdi) generation; + let local_fn = Local.Host.preconfigure_ha ~host ~statefiles ~metadata_vdi ~generation in + do_op_on ~local_fn ~__context ~host (fun 
session_id rpc -> Client.Host.preconfigure_ha rpc session_id host statefiles metadata_vdi generation) + + let ha_join_liveset ~__context ~host = + info "Host.ha_join_liveset: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.ha_join_liveset ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.ha_join_liveset rpc session_id host) + + let request_backup ~__context ~host ~generation ~force = + debug "Host.request_backup: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.request_backup ~host ~generation ~force in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.request_backup rpc session_id host generation force) + + let request_config_file_sync ~__context ~host ~hash = + debug "Host.request_config_file_sync: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.request_config_file_sync ~host ~hash in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.request_config_file_sync rpc session_id host hash) + + (* Call never forwarded *) + let ha_xapi_healthcheck ~__context = + Local.Host.ha_xapi_healthcheck ~__context + + (* Call never forwarded *) + let local_assert_healthy ~__context = + info "Host.local_assert_healthy"; + Local.Host.local_assert_healthy ~__context + + (* Call never forwarded *) + let propose_new_master ~__context ~address ~manual = + info "Host.propose_new_master: type = '%s'; host address = '%s'" + (if manual then "manual" else "automatic") address; + Local.Host.propose_new_master ~__context ~address ~manual + + (* If someone aborts the transaction *) + let abort_new_master ~__context ~address = + info "Host.abort_new_master: host address = '%s'" address; + Local.Host.abort_new_master ~__context ~address + + (* Call never forwarded *) + let commit_new_master ~__context ~address = + info "Host.commit_new_master: host address = '%s'" address; + Local.Host.commit_new_master ~__context ~address + + (* Call never forwarded *) + 
let is_in_emergency_mode ~__context = + Local.Host.is_in_emergency_mode ~__context + + let local_management_reconfigure ~__context ~interface = + info "Host.local_management_reconfigure: interface = '%s'" interface; + Local.Host.local_management_reconfigure ~__context ~interface + + let emergency_ha_disable ~__context = + info "Host.emergency_ha_disable"; + Local.Host.emergency_ha_disable ~__context + + (* Dummy implementation for a deprecated API method. *) + let get_uncooperative_resident_VMs ~__context ~self = + info "Host.get_uncooperative_resident_VMs host=%s" (Ref.string_of self); + Local.Host.get_uncooperative_resident_VMs ~__context ~self + + (* Dummy implementation for a deprecated API method. *) + let get_uncooperative_domains ~__context ~self = + info "Host.get_uncooperative_domains host=%s" (Ref.string_of self); + Local.Host.get_uncooperative_domains ~__context ~self + + let management_reconfigure ~__context ~pif = + info "Host.management_reconfigure: management PIF = '%s'" (pif_uuid ~__context pif); + (* The management interface on the slave may change during this operation, so expect connection loss. + * Consider the operation successful if management flag was set on the PIF we're working with. Since the slave + * sets this flag after bringing up the management interface, this is a good indication of success. *) + let success () = + if Db.PIF.get_management ~__context ~self:pif then Some () else None in + let local_fn = Local.Host.management_reconfigure ~pif in + let fn () = + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:pif) (fun session_id rpc -> Client.Host.management_reconfigure rpc session_id pif) in + tolerate_connection_loss fn success 30. 
+ + let management_disable ~__context = + info "Host.management_disable"; + Local.Host.management_disable ~__context + + let get_management_interface ~__context ~host = + info "Host.get_management_interface: host = '%s'" (host_uuid ~__context host); + Local.Host.get_management_interface ~__context ~host + + let disable ~__context ~host = + info "Host.disable: host = '%s'" (host_uuid ~__context host); + (* Block call if this would break our VM restart plan *) + Xapi_ha_vm_failover.assert_host_disable_preserves_ha_plan ~__context host; + let local_fn = Local.Host.disable ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.disable rpc session_id host); + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + + let declare_dead ~__context ~host = + info "Host.declare_dead: host = '%s'" (host_uuid ~__context host); + Local.Host.declare_dead ~__context ~host; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + + let enable ~__context ~host = + info "Host.enable: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.enable ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.enable rpc session_id host); + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + + let shutdown ~__context ~host = + info "Host.shutdown: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.shutdown ~host in + with_host_operation ~__context ~self:host ~doc:"Host.shutdown" ~op:`shutdown + (fun () -> + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.shutdown rpc session_id host) + ) + + let reboot ~__context ~host = + info "Host.reboot: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.reboot ~host in + with_host_operation ~__context ~self:host ~doc:"Host.reboot" ~op:`reboot + (fun () -> + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.reboot rpc session_id host) + ) + + let power_on ~__context 
~host = + info "Host.power_on: host = '%s'" (host_uuid ~__context host); + with_host_operation ~__context ~self:host ~doc:"Host.power_on" ~op:`power_on + (fun () -> + (* Always executed on the master *) + Local.Host.power_on ~__context ~host + ) + + let dmesg ~__context ~host = + info "Host.dmesg: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.dmesg ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.dmesg rpc session_id host) + + let dmesg_clear ~__context ~host = + info "Host.dmesg_clear: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.dmesg_clear ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.dmesg_clear rpc session_id host) + + let bugreport_upload ~__context ~host ~url ~options = + info "Host.bugreport_upload: host = '%s'; url = '%s'; options = [ %s ]" (host_uuid ~__context host) url (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) options)); + let local_fn = Local.Host.bugreport_upload ~host ~url ~options in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.bugreport_upload rpc session_id host url options) + + let list_methods ~__context = + info "Host.list_methods"; + Local.Host.list_methods ~__context + + let send_debug_keys ~__context ~host ~keys = + info "Host.send_debug_keys: host = '%s'; keys = '%s'" (host_uuid ~__context host) keys; + let local_fn = Local.Host.send_debug_keys ~host ~keys in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.send_debug_keys rpc session_id host keys) + + let get_log ~__context ~host = + info "Host.get_log: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.get_log ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_log rpc session_id host) + + let license_add ~__context ~host ~contents = + info "Host.license_add: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.license_add ~host 
~contents in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.license_add rpc session_id host contents) + + let license_remove ~__context ~host = + info "Host.license_remove: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.license_remove ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.license_remove rpc session_id host) + + let assert_can_evacuate ~__context ~host = + info "Host.assert_can_evacuate: host = '%s'" (host_uuid ~__context host); + Local.Host.assert_can_evacuate ~__context ~host + + let get_vms_which_prevent_evacuation ~__context ~self = + info "Host.get_vms_which_prevent_evacuation: host = '%s'" (host_uuid ~__context self); + Local.Host.get_vms_which_prevent_evacuation ~__context ~self + + let evacuate ~__context ~host = + info "Host.evacuate: host = '%s'" (host_uuid ~__context host); + (* Block call if this would break our VM restart plan (because the body of this sets enabled to false) *) + Xapi_ha_vm_failover.assert_host_disable_preserves_ha_plan ~__context host; + with_host_operation ~__context ~self:host ~doc:"Host.evacuate" ~op:`evacuate + (fun () -> + Local.Host.evacuate ~__context ~host + ) + + let retrieve_wlb_evacuate_recommendations ~__context ~self = + info "Host.retrieve_wlb_evacuate_recommendations: host = '%s'" (host_uuid ~__context self); + Local.Host.retrieve_wlb_evacuate_recommendations ~__context ~self + + let update_pool_secret ~__context ~host ~pool_secret = + info "Host.update_pool_secret: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.update_pool_secret ~host ~pool_secret in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.update_pool_secret rpc session_id host pool_secret) + + let update_master ~__context ~host ~master_address = + info "Host.update_master: host = '%s'; master = '%s'" (host_uuid ~__context host) master_address; + let local_fn = Local.Pool.emergency_reset_master ~master_address in + 
do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.update_master rpc session_id host master_address) + + let restart_agent ~__context ~host = + info "Host.restart_agent: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.restart_agent ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.restart_agent rpc session_id host) + + let shutdown_agent ~__context = + Local.Host.shutdown_agent ~__context + + let signal_networking_change ~__context = + info "Host.signal_networking_change"; + Local.Host.signal_networking_change ~__context + + let notify ~__context ~ty ~params = + info "Host.notify"; + Local.Host.notify ~__context ~ty ~params + + let syslog_reconfigure ~__context ~host = + info "Host.syslog_reconfigure: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.syslog_reconfigure ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.syslog_reconfigure rpc session_id host) + + let get_system_status_capabilities ~__context ~host = + info "Host.get_system_status_capabilities: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.get_system_status_capabilities ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.get_system_status_capabilities rpc + session_id host) + + let get_diagnostic_timing_stats ~__context ~host = + info "Host.get_diagnostic_timing_stats: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.get_diagnostic_timing_stats ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.get_diagnostic_timing_stats rpc session_id host) + + let set_hostname_live ~__context ~host ~hostname = + info "Host.set_hostname_live: host = '%s'; hostname = '%s'" (host_uuid ~__context host) hostname; + let local_fn = Local.Host.set_hostname_live ~host ~hostname in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.set_hostname_live rpc session_id 
host hostname) + + let get_data_sources ~__context ~host = + info "Host.get_data_sources: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.get_data_sources ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.get_data_sources rpc session_id host) + + let record_data_source ~__context ~host ~data_source = + info "Host.record_data_source: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; + let local_fn = Local.Host.record_data_source ~host ~data_source in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.record_data_source rpc session_id host data_source) + + let query_data_source ~__context ~host ~data_source = + info "Host.query_data_source: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; + let local_fn = Local.Host.query_data_source ~host ~data_source in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.query_data_source rpc session_id host data_source) + + let forget_data_source_archives ~__context ~host ~data_source = + info "Host.forget_data_source_archives: host = '%s'; data source = '%s'" (host_uuid ~__context host) data_source; + let local_fn = Local.Host.forget_data_source_archives ~host ~data_source in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.forget_data_source_archives rpc session_id host data_source) + + let tickle_heartbeat ~__context ~host ~stuff = + (* info "Host.tickle_heartbeat: Incoming call from host '%s' with arguments [ %s ]" (Ref.string_of host) (String.concat "; " (List.map (fun (a, b) -> a ^ ": " ^ b) stuff)); *) + Local.Host.tickle_heartbeat ~__context ~host ~stuff + + let create_new_blob ~__context ~host ~name ~mime_type ~public = + info "Host.create_new_blob: host = '%s'; name = '%s' MIME type = '%s public = %b" (host_uuid ~__context host) name mime_type public; + Local.Host.create_new_blob ~__context ~host ~name ~mime_type ~public + + let call_plugin 
~__context ~host ~plugin ~fn ~args = + let plugins_to_protect = [ + "prepare_host_upgrade.py"; + ] in + if List.mem plugin plugins_to_protect + then + info "Host.call_plugin host = '%s'; plugin = '%s'; fn = '%s' args = [ 'hidden' ]" (host_uuid ~__context host) plugin fn + else + info "Host.call_plugin host = '%s'; plugin = '%s'; fn = '%s'; args = [ %s ]" (host_uuid ~__context host) plugin fn (String.concat "; " (List.map (fun (a, b) -> a ^ ": " ^ b) args)); + let local_fn = Local.Host.call_plugin ~host ~plugin ~fn ~args in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.call_plugin rpc session_id host plugin fn args) + + let call_extension ~__context ~host ~call = + info "Host.call_extension host = '%s'; call = '%s'" (host_uuid ~__context host) call; + let local_fn = Local.Host.call_extension ~host ~call in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.call_extension rpc session_id host call) + + let has_extension ~__context ~host ~name = + info "Host.has_extension: host = '%s'; name = '%s'" (host_uuid ~__context host) name; + let local_fn = Local.Host.has_extension ~host ~name in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.has_extension rpc session_id host name) + + let sync_data ~__context ~host = + info "Host.sync_data: host = '%s'" (host_uuid ~__context host); + Local.Host.sync_data ~__context ~host + + let backup_rrds ~__context ~host ~delay = + let local_fn = Local.Host.backup_rrds ~host ~delay in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.backup_rrds rpc session_id host delay) + + let compute_free_memory ~__context ~host = + info "Host.compute_free_memory: host = '%s'" (host_uuid ~__context host); + Local.Host.compute_free_memory ~__context ~host + + let compute_memory_overhead ~__context ~host = + info "Host.compute_memory_overhead: host = '%s'" + (host_uuid ~__context host); + Local.Host.compute_memory_overhead ~__context ~host + + let 
get_servertime ~__context ~host = + (* info "Host.get_servertime"; *) (* suppressed because the GUI calls this frequently and it isn't interesting for debugging *) + let local_fn = Local.Host.get_servertime ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.get_servertime rpc session_id host) + + let get_server_localtime ~__context ~host = + (* info "Host.get_servertime"; *) (* suppressed because the GUI calls this frequently and it isn't interesting for debugging *) + let local_fn = Local.Host.get_server_localtime ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.get_server_localtime rpc session_id host) + + let enable_binary_storage ~__context ~host = + info "Host.enable_binary_storage: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.enable_binary_storage ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.enable_binary_storage rpc session_id host) + + let disable_binary_storage ~__context ~host = + info "Host.disable_binary_storage: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.disable_binary_storage ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.disable_binary_storage rpc session_id host) + + let enable_external_auth ~__context ~host ~config ~service_name ~auth_type = + info "Host.enable_external_auth: host = '%s'; service_name = '%s'; auth_type = '%s'" (host_uuid ~__context host) service_name auth_type; + (* First assert that the AD feature is enabled if AD is requested *) + if auth_type = Extauth.auth_type_AD_Likewise then + Pool_features.assert_enabled ~__context ~f:Features.AD; + let local_fn = Local.Host.enable_external_auth ~host ~config ~service_name ~auth_type in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.enable_external_auth rpc session_id host config service_name auth_type) + + let disable_external_auth ~__context ~host ~config = + info 
"Host.disable_external_auth: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.disable_external_auth ~host ~config in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.disable_external_auth rpc session_id host config) + + let certificate_install ~__context ~host ~name ~cert = + let local_fn = Local.Host.certificate_install ~host ~name ~cert in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.certificate_install rpc session_id host name cert) + + let certificate_uninstall ~__context ~host ~name = + let local_fn = Local.Host.certificate_uninstall ~host ~name in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.certificate_uninstall rpc session_id host name) + + let certificate_list ~__context ~host = + let local_fn = Local.Host.certificate_list ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.certificate_list rpc session_id host) + + let crl_install ~__context ~host ~name ~crl = + let local_fn = Local.Host.crl_install ~host ~name ~crl in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.crl_install rpc session_id host name crl) + + let crl_uninstall ~__context ~host ~name = + let local_fn = Local.Host.crl_uninstall ~host ~name in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.crl_uninstall rpc session_id host name) + + let crl_list ~__context ~host = + let local_fn = Local.Host.crl_list ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.crl_list rpc session_id host) + + let certificate_sync ~__context ~host = + let local_fn = Local.Host.certificate_sync ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.certificate_sync rpc session_id host) + + let get_server_certificate ~__context ~host = + let local_fn = Local.Host.get_server_certificate ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> + 
Client.Host.get_server_certificate rpc session_id host) + + let attach_static_vdis ~__context ~host ~vdi_reason_map = + info "Host.attach_static_vdis: host = '%s'; vdi/reason pairs = [ %s ]" (host_uuid ~__context host) + (String.concat "; " (List.map (fun (a, b) -> Ref.string_of a ^ "/" ^ b) vdi_reason_map)); + let local_fn = Local.Host.attach_static_vdis ~host ~vdi_reason_map in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.attach_static_vdis rpc session_id host vdi_reason_map) + + let detach_static_vdis ~__context ~host ~vdis = + info "Host.detach_static_vdis: host = '%s'; vdis =[ %s ]" (host_uuid ~__context host) (String.concat "; " (List.map Ref.string_of vdis)); + let local_fn = Local.Host.detach_static_vdis ~host ~vdis in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.detach_static_vdis rpc session_id host vdis) + + let set_localdb_key ~__context ~host ~key ~value = + info "Host.set_localdb_key: host = '%s'; key = '%s'; value = '%s'" (host_uuid ~__context host) key value; + let local_fn = Local.Host.set_localdb_key ~host ~key ~value in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.set_localdb_key rpc session_id host key value) + + let apply_edition ~__context ~host ~edition ~force = + info "Host.apply_edition: host = '%s'; edition = '%s'; force = '%s'" (host_uuid ~__context host) edition (string_of_bool force); + let local_fn = Local.Host.apply_edition ~host ~edition ~force in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.apply_edition rpc session_id host edition force) + + let refresh_pack_info ~__context ~host = + info "Host.refresh_pack_info: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.refresh_pack_info ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.refresh_pack_info rpc session_id host) + + let reset_networking ~__context ~host = + info "Host.reset_networking: host = '%s'" (host_uuid 
~__context host); + Local.Host.reset_networking ~__context ~host + + let enable_local_storage_caching ~__context ~host ~sr = + let local_fn = Local.Host.enable_local_storage_caching ~host ~sr in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.enable_local_storage_caching rpc session_id host sr) + + let disable_local_storage_caching ~__context ~host = + let local_fn = Local.Host.disable_local_storage_caching ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.disable_local_storage_caching rpc session_id host) + + let get_sm_diagnostics ~__context ~host = + let local_fn = Local.Host.get_sm_diagnostics ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_sm_diagnostics rpc session_id host) + + let get_thread_diagnostics ~__context ~host = + let local_fn = Local.Host.get_thread_diagnostics ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.get_thread_diagnostics rpc session_id host) + + let sm_dp_destroy ~__context ~host ~dp ~allow_leak = + let local_fn = Local.Host.sm_dp_destroy ~host ~dp ~allow_leak in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Host.sm_dp_destroy rpc session_id host dp allow_leak) + + let sync_vlans ~__context ~host = + info "Host.sync_vlans: host = '%s'" (host_uuid ~__context host); + Local.Host.sync_vlans ~__context ~host + + let sync_tunnels ~__context ~host = + info "Host.sync_tunnels: host = '%s'" (host_uuid ~__context host); + Local.Host.sync_tunnels ~__context ~host + + let sync_pif_currently_attached ~__context ~host ~bridges = + info "Host.sync_pif_currently_attached: host = '%s'" (host_uuid ~__context host); + Local.Host.sync_pif_currently_attached ~__context ~host ~bridges + + let migrate_receive ~__context ~host ~network ~options = + info "Host.migrate_receive: host = '%s'; network = '%s'" (host_uuid ~__context host) (network_uuid ~__context network); + Local.Host.migrate_receive ~__context ~host 
~network ~options + + let enable_display ~__context ~host = + info "Host.enable_display: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.enable_display ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.enable_display rpc session_id host) + + let disable_display ~__context ~host = + info "Host.disable_display: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.Host.disable_display ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Host.disable_display rpc session_id host) + + let apply_guest_agent_config ~__context ~host = + info "Host.apply_guest_agent_config: host = '%s'" + (host_uuid ~__context host); + let local_fn = Local.Host.apply_guest_agent_config ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> + Client.Host.apply_guest_agent_config rpc session_id host) + end + + module Host_crashdump = struct + let destroy ~__context ~self = + info "Host_crashdump.destroy: host crashdump = '%s'" (host_crashdump_uuid ~__context self); + let local_fn = Local.Host_crashdump.destroy ~self in + do_op_on ~local_fn ~__context ~host:(Db.Host_crashdump.get_host ~__context ~self) + (fun session_id rpc -> Client.Host_crashdump.destroy rpc session_id self) + + let upload ~__context ~self ~url ~options = + info "Host_crashdump.upload: host crashdump = '%s'; url = '%s'" (host_crashdump_uuid ~__context self) url; + let local_fn = Local.Host_crashdump.upload ~self ~url ~options in + do_op_on ~local_fn ~__context ~host:(Db.Host_crashdump.get_host ~__context ~self) + (fun session_id rpc -> Client.Host_crashdump.upload rpc session_id self url options) + end + + module Host_patch = struct + let destroy ~__context ~self = + info "Host_patch.destroy: host patch = '%s'" (host_patch_uuid ~__context self); + Xapi_host_patch.destroy ~__context ~self + + let apply ~__context ~self = + info "Host_patch.apply: host patch = '%s'" (host_patch_uuid ~__context self); + let local_fn = 
Local.Host_patch.apply ~self in + do_op_on ~local_fn ~__context ~host:(Db.Host_patch.get_host ~__context ~self) + (fun session_id rpc -> Client.Host_patch.apply rpc session_id self) + end + + module Pool_patch = struct + let apply ~__context ~self ~host = + info "Pool_patch.apply: pool patch = '%s'; host = '%s'" (pool_patch_uuid ~__context self) (host_uuid ~__context host); + let local_fn = Local.Pool_patch.apply ~self ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Pool_patch.apply rpc session_id self host) + + let precheck ~__context ~self ~host = + info "Pool_patch.precheck: pool patch = '%s'; host = '%s'" (pool_patch_uuid ~__context self) (host_uuid ~__context host); + let local_fn = Local.Pool_patch.precheck ~self ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Pool_patch.precheck rpc session_id self host) + + let pool_apply ~__context ~self = + info "Pool_patch.pool_apply: pool patch = '%s'" (pool_patch_uuid ~__context self); + Xapi_pool_patch.pool_apply ~__context ~self + + let clean ~__context ~self = + info "Pool_patch.clean: pool patch = '%s'" (pool_patch_uuid ~__context self); + Xapi_pool_patch.clean ~__context ~self + + let clean_on_host ~__context ~self ~host = + info "Pool_patch.clean_on_host: pool patch = '%s'" (pool_patch_uuid ~__context self); + let local_fn = Local.Pool_patch.clean ~self in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Pool_patch.clean rpc session_id self) + + let pool_clean ~__context ~self = + info "Pool_patch.pool_clean: pool patch = '%s'" (pool_patch_uuid ~__context self); + Xapi_pool_patch.pool_clean ~__context ~self + + let destroy ~__context ~self = + info "Pool_patch.destroy: pool patch = '%s'" (pool_patch_uuid ~__context self); + Xapi_pool_patch.destroy ~__context ~self + end + + module Host_metrics = struct + end + + module Host_cpu = struct + end + + module Network = struct + + (* Don't forward. These are just db operations. 
Networks are "attached" when required by hosts that read db entries. + Bridges corresponding to networks are removed by per-host GC threads that read from db. *) + let create ~__context ~name_label ~name_description ~mTU ~other_config ~tags = + info "Network.create: name_label = '%s'" name_label; + Local.Network.create ~__context ~name_label ~name_description ~mTU ~other_config ~tags + + let attach ~__context ~network ~host = + info "Network.attach: network = '%s'; host = '%s'" (network_uuid ~__context network) (host_uuid ~__context host); + let local_fn = Local.Network.attach ~network ~host in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Network.attach rpc session_id network host) + + let pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge = + Local.Network.pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~bridge + + let destroy ~__context ~self = + info "Network.destroy: network = '%s'" (network_uuid ~__context self); + (* WARNING WARNING WARNING: directly call Network.destroy with the global lock since it does + only database operations *) + Helpers.with_global_lock + (fun () -> + Local.Network.destroy ~__context ~self) + + let create_new_blob ~__context ~network ~name ~mime_type ~public = + info "Network.create_new_blob: network = '%s'; name = %s; MIME type = '%s' public = %b" (network_uuid ~__context network) name mime_type public; + Local.Network.create_new_blob ~__context ~network ~name ~mime_type ~public + + let set_default_locking_mode ~__context ~network ~value = + info "Network.set_default_locking_mode: network = '%s'; value = %s" (network_uuid ~__context network) (Record_util.network_default_locking_mode_to_string value); + Local.Network.set_default_locking_mode ~__context ~network ~value + + let attach_for_vm ~__context ~host ~vm = + info "Network.attach_for_vm: host = '%s'; VM = '%s'" (host_uuid ~__context host) (vm_uuid ~__context vm); + let local_fn = 
Local.Network.attach_for_vm ~host ~vm in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Network.attach_for_vm rpc session_id host vm) + + let detach_for_vm ~__context ~host ~vm = + info "Network.detach_for_vm: host = '%s'; VM = '%s'" (host_uuid ~__context host) (vm_uuid ~__context vm); + let local_fn = Local.Network.detach_for_vm ~host ~vm in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.Network.detach_for_vm rpc session_id host vm) + end + + module VIF = struct + + let unmark_vif ~__context ~vif ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + log_exn ~doc:("unmarking VIF after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.VIF.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vif_helpers.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vif, Ref.string_of self); + end) + vif + + let mark_vif ~__context ~vif ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + log_exn ~doc:("marking VIF for " ^ doc) + (fun self -> + Xapi_vif_helpers.assert_operation_valid ~__context ~self ~op; + Db.VIF.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_vif_helpers.update_allowed_operations ~__context ~self) vif + + let with_vif_marked ~__context ~vif ~doc ~op f = + Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vif ~__context ~vif ~doc ~op); + finally + (fun () -> f ()) + (fun () -> Helpers.with_global_lock (fun () -> unmark_vif ~__context ~vif ~doc ~op)) + + (* -------- Forwarding helper functions: ------------------------------------ *) + + let forward_vif_op ~local_fn ~__context ~self op = + let vm = Db.VIF.get_VM ~__context ~self in + let host_resident_on = Db.VM.get_resident_on ~__context ~self:vm in + if host_resident_on = Ref.null + then local_fn ~__context + else do_op_on ~local_fn ~__context ~host:host_resident_on op + + (* 
-------------------------------------------------------------------------- *) + + let create ~__context ~device ~network ~vM ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params = + info "VIF.create: VM = '%s'; network = '%s'" (vm_uuid ~__context vM) (network_uuid ~__context network); + Local.VIF.create ~__context ~device ~network ~vM ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params + + let destroy ~__context ~self = + info "VIF.destroy: VIF = '%s'" (vif_uuid ~__context self); + Local.VIF.destroy ~__context ~self + + let plug ~__context ~self = + info "VIF.plug: VIF = '%s'" (vif_uuid ~__context self); + let local_fn = Local.VIF.plug ~self in + with_vif_marked ~__context ~vif:self ~doc:"VIF.plug" ~op:`plug + (fun () -> + forward_vif_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VIF.plug rpc session_id self)) + + let unplug_common ~__context ~self ~force = + let op = `unplug in + let name = "VIF." ^ (Record_util.vif_operation_to_string op) in + info "%s: VIF = '%s'" name (vif_uuid ~__context self); + let local_fn, remote_fn = + if force then Local.VIF.unplug_force, Client.VIF.unplug_force + else Local.VIF.unplug, Client.VIF.unplug in + let local_fn = local_fn ~self in + with_vif_marked ~__context ~vif:self ~doc:name ~op + (fun () -> + forward_vif_op ~local_fn ~__context ~self (fun session_id rpc -> remote_fn rpc session_id self)) + + let unplug ~__context ~self = unplug_common ~__context ~self ~force:false + let unplug_force ~__context ~self = unplug_common ~__context ~self ~force:true + + let move ~__context ~self ~network = + info "VIF.move: VIF = '%s' network = '%s'" (vif_uuid ~__context self) (network_uuid ~__context network); + let local_fn = Local.VIF.move ~self ~network in + let remote_fn = (fun session_id rpc -> Client.VIF.move rpc session_id self network) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let set_locking_mode ~__context ~self ~value = + info "VIF.set_locking_mode: VIF = '%s'; value = 
'%s'" (vif_uuid ~__context self) (Record_util.vif_locking_mode_to_string value); + let local_fn = Local.VIF.set_locking_mode ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.set_locking_mode rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let set_ipv4_allowed ~__context ~self ~value = + info "VIF.set_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) (String.concat "," value); + let local_fn = Local.VIF.set_ipv4_allowed ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.set_ipv4_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let add_ipv4_allowed ~__context ~self ~value = + info "VIF.add_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; + let local_fn = Local.VIF.add_ipv4_allowed ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.add_ipv4_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let remove_ipv4_allowed ~__context ~self ~value = + info "VIF.remove_ipv4_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; + let local_fn = Local.VIF.remove_ipv4_allowed ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.remove_ipv4_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let set_ipv6_allowed ~__context ~self ~value = + info "VIF.set_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) (String.concat "," value); + let local_fn = Local.VIF.set_ipv6_allowed ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.set_ipv6_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let add_ipv6_allowed ~__context ~self ~value = + info "VIF.add_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; + let local_fn = Local.VIF.add_ipv6_allowed ~self ~value in + let remote_fn = (fun session_id 
rpc -> Client.VIF.add_ipv6_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let remove_ipv6_allowed ~__context ~self ~value = + info "VIF.remove_ipv6_allowed: VIF = '%s'; value = '%s'" (vif_uuid ~__context self) value; + let local_fn = Local.VIF.remove_ipv6_allowed ~self ~value in + let remote_fn = (fun session_id rpc -> Client.VIF.remove_ipv6_allowed rpc session_id self value) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let configure_ipv4 ~__context ~self ~mode ~address ~gateway = + info "VIF.configure_ipv4: VIF = '%s'; mode = '%s'; address = '%s'; gateway = '%s'" + (vif_uuid ~__context self) + (Record_util.vif_ipv4_configuration_mode_to_string mode) address gateway; + let local_fn = Local.VIF.configure_ipv4 ~self ~mode ~address ~gateway in + let remote_fn = (fun session_id rpc -> Client.VIF.configure_ipv4 rpc session_id self mode address gateway) in + forward_vif_op ~local_fn ~__context ~self remote_fn + + let configure_ipv6 ~__context ~self ~mode ~address ~gateway = + info "VIF.configure_ipv6: VIF = '%s'; mode = '%s'; address = '%s'; gateway = '%s'" + (vif_uuid ~__context self) + (Record_util.vif_ipv6_configuration_mode_to_string mode) address gateway; + let local_fn = Local.VIF.configure_ipv6 ~self ~mode ~address ~gateway in + let remote_fn = (fun session_id rpc -> Client.VIF.configure_ipv6 rpc session_id self mode address gateway) in + forward_vif_op ~local_fn ~__context ~self remote_fn + end + + module VIF_metrics = struct + end + + module VLAN = struct + let create ~__context ~tagged_PIF ~tag ~network = + info "VLAN.create: network = '%s'; VLAN tag = %Ld" (network_uuid ~__context network) tag; + let local_fn = Local.VLAN.create ~tagged_PIF ~tag ~network in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:tagged_PIF) (fun session_id rpc -> Client.VLAN.create rpc session_id tagged_PIF tag network) + let destroy ~__context ~self = + info "VLAN.destroy: VLAN = '%s'" (vlan_uuid 
~__context self); + let local_fn = Local.VLAN.destroy ~self in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:(Db.VLAN.get_tagged_PIF ~__context ~self)) (fun session_id rpc -> Client.VLAN.destroy rpc session_id self) + end + + module Tunnel = struct + let create ~__context ~transport_PIF ~network = + info "Tunnel.create: network = '%s'" (network_uuid ~__context network); + let local_fn = Local.Tunnel.create ~transport_PIF ~network in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self:transport_PIF) + (fun session_id rpc -> Client.Tunnel.create rpc session_id transport_PIF network) + + let destroy ~__context ~self = + info "Tunnel.destroy: tunnel = '%s'" (tunnel_uuid ~__context self); + let local_fn = Local.Tunnel.destroy ~self in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context + ~self:(Db.Tunnel.get_transport_PIF ~__context ~self)) + (fun session_id rpc -> Client.Tunnel.destroy rpc session_id self) + end + + module Bond = struct + let create ~__context ~network ~members ~mAC ~mode ~properties = + info "Bond.create: network = '%s'; members = [ %s ]" + (network_uuid ~__context network) (String.concat "; " (List.map (pif_uuid ~__context) members)); + if List.length members = 0 + then raise (Api_errors.Server_error(Api_errors.pif_bond_needs_more_members, [])); + let host = Db.PIF.get_host ~__context ~self:(List.hd members) in + let local_fn = Local.Bond.create ~network ~members ~mAC ~mode ~properties in + (* The management interface on the slave may change during this operation, so expect connection loss. + * Consider the operation successful if task progress is set to 1.0. 
*) + let task = Context.get_task_id __context in + let success () = + let progress = Db.Task.get_progress ~__context ~self:task in + debug "Task progress %.1f" progress; + if progress = 1.0 then + Some (Db.PIF.get_bond_slave_of ~__context ~self:(List.hd members)) + else + None + in + let fn () = + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.create rpc session_id network members mAC mode properties) in + tolerate_connection_loss fn success 30. + + let destroy ~__context ~self = + info "Bond.destroy: bond = '%s'" (bond_uuid ~__context self); + let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in + (* The management interface on the slave may change during this operation, so expect connection loss. + * Consider the operation successful if task progress is set to 1.0. *) + let task = Context.get_task_id __context in + let success () = + let progress = Db.Task.get_progress ~__context ~self:task in + debug "Task progress %.1f" progress; + if progress = 1.0 then + Some () + else + None + in + let local_fn = Local.Bond.destroy ~self in + let fn () = do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.destroy rpc session_id self) in + tolerate_connection_loss fn success 30. 
+ + let set_mode ~__context ~self ~value = + info "Bond.set_mode: bond = '%s'; value = '%s'" (bond_uuid ~__context self) (Record_util.bond_mode_to_string value); + let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in + let local_fn = Local.Bond.set_mode ~self ~value in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.set_mode rpc session_id self value) + + let set_property ~__context ~self ~name ~value = + info "Bond.set_property: bond = '%s'; name = '%s'; value = '%s'" (bond_uuid ~__context self) name value; + let host = Db.PIF.get_host ~__context ~self:(Db.Bond.get_master ~__context ~self) in + let local_fn = Local.Bond.set_property ~self ~name ~value in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.Bond.set_property rpc session_id self name value) + end + + module PIF = struct + + let pool_introduce ~__context + ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode ~iP + ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug = + Local.PIF.pool_introduce ~__context + ~device ~network ~host ~mAC ~mTU ~vLAN ~physical ~ip_configuration_mode ~iP + ~netmask ~gateway ~dNS ~bond_slave_of ~vLAN_master_of ~management ~other_config ~disallow_unplug + + let db_introduce = Local.PIF.db_introduce + let db_forget ~__context ~self = + info "PIF.db_forget: PIF = '%s'" (pif_uuid ~__context self); + Local.PIF.db_forget ~__context ~self + + let create_VLAN ~__context ~device ~network ~host ~vLAN = + info "PIF.create_VLAN: network = '%s'; VLAN tag = %Ld" (network_uuid ~__context network) vLAN; + let local_fn = Local.PIF.create_VLAN ~device ~network ~host ~vLAN in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.create_VLAN rpc session_id device network host vLAN) + + let destroy ~__context ~self = + info "PIF.destroy: PIF = '%s'" (pif_uuid ~__context self); + let local_fn = Local.PIF.destroy ~self in + do_op_on ~local_fn 
~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.destroy rpc session_id self) + + let unplug ~__context ~self = + info "PIF.unplug: PIF = '%s'" (pif_uuid ~__context self); + let local_fn = Local.PIF.unplug ~self in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.unplug rpc session_id self) + + let plug ~__context ~self = + info "PIF.plug: PIF = '%s'" (pif_uuid ~__context self); + let local_fn = Local.PIF.plug ~self in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) (fun session_id rpc -> Client.PIF.plug rpc session_id self) + + let reconfigure_ip ~__context ~self ~mode ~iP ~netmask ~gateway ~dNS = + info "PIF.reconfigure_ip: PIF = '%s'; mode = '%s'; IP = '%s'; netmask = '%s'; gateway = '%s'; DNS = %s" + (pif_uuid ~__context self) + (Record_util.ip_configuration_mode_to_string mode) iP netmask gateway dNS; + let host = Db.PIF.get_host ~__context ~self in + let local_fn = Local.PIF.reconfigure_ip ~self ~mode ~iP ~netmask ~gateway ~dNS in + let task = Context.get_task_id __context in + let success () = + let status = Db.Task.get_status ~__context ~self:task in + if status <> `pending then + Some () + else + None + in + let fn () = + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> + Client.PIF.reconfigure_ip rpc session_id self mode iP netmask gateway dNS) in + tolerate_connection_loss fn success !Xapi_globs.pif_reconfigure_ip_timeout + + let reconfigure_ipv6 ~__context ~self ~mode ~iPv6 ~gateway ~dNS = + info "PIF.reconfigure_ipv6: PIF = '%s'; mode = '%s'; IPv6 = '%s'; gateway = '%s'; DNS = %s" + (pif_uuid ~__context self) + (Record_util.ipv6_configuration_mode_to_string mode) iPv6 gateway dNS; + let host = Db.PIF.get_host ~__context ~self in + let local_fn = Local.PIF.reconfigure_ipv6 ~self ~mode ~iPv6 ~gateway ~dNS in + let task = Context.get_task_id __context in + let success () = + let status = Db.Task.get_status ~__context ~self:task 
in + if status <> `pending then + Some () + else + None + in + let fn () = + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> + Client.PIF.reconfigure_ipv6 rpc session_id self mode iPv6 gateway dNS) in + tolerate_connection_loss fn success !Xapi_globs.pif_reconfigure_ip_timeout + + let set_primary_address_type ~__context ~self ~primary_address_type = + info "PIF.set_primary_address_type: PIF = '%s'; primary_address_type = '%s'" + (pif_uuid ~__context self) + (Record_util.primary_address_type_to_string primary_address_type); + let local_fn = Local.PIF.set_primary_address_type ~self ~primary_address_type in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host ~__context ~self) + (fun session_id rpc -> Client.PIF.set_primary_address_type rpc session_id self primary_address_type) + + let set_property ~__context ~self ~name ~value = + info "PIF.set_property: PIF = '%s'; name = '%s'; value = '%s'" (pif_uuid ~__context self) name value; + let host = Db.PIF.get_host ~__context ~self in + let local_fn = Local.PIF.set_property ~self ~name ~value in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.set_property rpc session_id self name value) + + let scan ~__context ~host = + info "PIF.scan: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.PIF.scan ~host in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.scan rpc session_id host) + + let introduce ~__context ~host ~mAC ~device ~managed = + info "PIF.introduce: host = '%s'; MAC address = '%s'; device = '%s'; managed = '%b'" + (host_uuid ~__context host) mAC device managed; + let local_fn = Local.PIF.introduce ~host ~mAC ~device ~managed in + do_op_on ~local_fn ~__context ~host (fun session_id rpc -> Client.PIF.introduce rpc session_id host mAC device managed) + + let forget ~__context ~self= + info "PIF.forget: PIF = '%s'" (pif_uuid ~__context self); + let local_fn = Local.PIF.forget ~self in + do_op_on ~local_fn ~__context ~host:(Db.PIF.get_host 
~__context ~self) (fun session_id rpc -> Client.PIF.forget rpc session_id self) + end + module PIF_metrics = struct + end + module SM = struct end + module SR = struct + + let unmark_sr ~__context ~sr ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + debug "Unmarking SR after %s (task=%s)" doc task_id; + log_exn_ignore ~doc:("unmarking SR after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.SR.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_sr_operations.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._sr, Ref.string_of self); + end) + sr + + let mark_sr ~__context ~sr ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + debug "Marking SR for %s (task=%s)" doc task_id; + log_exn ~doc:("marking SR for " ^ doc) + (fun self -> + Xapi_sr_operations.assert_operation_valid ~__context ~self ~op; + Db.SR.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_sr_operations.update_allowed_operations ~__context ~self) sr + + let with_sr_marked ~__context ~sr ~doc ~op f = + Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_sr ~__context ~sr ~doc ~op); + finally + (fun () -> f ()) + (fun () -> Helpers.with_global_lock (fun () -> unmark_sr ~__context ~sr ~doc ~op)) + + (* -------- Forwarding helper functions: ------------------------------------ *) + + (* Forward SR operation to host that has a suitable plugged (or unplugged) PBD *) + let forward_sr_op ?consider_unplugged_pbds ~local_fn ~__context ~self op = + let pbd = choose_pbd_for_sr ?consider_unplugged_pbds ~__context ~self () in + let host = Db.PBD.get_host ~__context ~self:pbd in + do_op_on ~local_fn ~__context ~host op + + (* do op on a host that can view multiple SRs, if none is found, an + exception of Not_found will be raised *) + let forward_sr_multiple_op ~local_fn ~__context ~srs ?(prefer_slaves=false) op = + let choose_fn ~host = + 
Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:srs ~host in + let host = + try Xapi_vm_helpers.choose_host ~__context ~choose_fn ~prefer_slaves () + with _ -> raise Not_found in + do_op_on ~local_fn ~__context ~host op + + let set_virtual_allocation ~__context ~self ~value = + Sm.assert_session_has_internal_sr_access ~__context ~sr:self; + Local.SR.set_virtual_allocation ~__context ~self ~value + + let set_physical_size ~__context ~self ~value = + Sm.assert_session_has_internal_sr_access ~__context ~sr:self; + Local.SR.set_physical_size ~__context ~self ~value + + let set_physical_utilisation ~__context ~self ~value = + Sm.assert_session_has_internal_sr_access ~__context ~sr:self; + Local.SR.set_physical_utilisation ~__context ~self ~value + + let create ~__context ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config = + info "SR.create: name label = '%s'" name_label; + let local_fn = Local.SR.create ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config in + (* if shared, then ignore host parameter and do create on the master.. *) + if shared then + local_fn ~__context + else + (* otherwise forward to specified host *) + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.SR.create ~rpc ~session_id ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~shared ~sm_config) + + (* -------------------------------------------------------------------------- *) + + (* don't forward. 
this is just a db call *) + let introduce ~__context ~uuid ~name_label ~name_description ~_type ~content_type = + info "SR.introduce: uuid = '%s'; name label = '%s'" uuid name_label; + Local.SR.introduce ~__context ~uuid ~name_label ~name_description ~_type ~content_type + + let make ~__context ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~sm_config = + info "SR.make: host = '%s'; name label = '%s'" (host_uuid ~__context host) name_label; + let local_fn = Local.SR.make ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~sm_config in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.SR.make rpc session_id host device_config physical_size name_label + name_description _type content_type sm_config) + + let destroy ~__context ~sr = + info "SR.destroy: SR = '%s'" (sr_uuid ~__context sr); + let local_fn = Local.SR.destroy ~sr in + with_sr_marked ~__context ~sr ~doc:"SR.destroy" ~op:`destroy + (fun () -> + forward_sr_op ~consider_unplugged_pbds:true ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.destroy rpc session_id sr)) + + (* don't forward this is just a db call *) + let forget ~__context ~sr = + info "SR.forget: SR = '%s'" (sr_uuid ~__context sr); + with_sr_marked ~__context ~sr ~doc:"SR.forget" ~op:`forget + (fun () -> + Local.SR.forget ~__context ~sr) + + let update ~__context ~sr = + info "SR.update: SR = '%s'" (sr_uuid ~__context sr); + let local_fn = Local.SR.update ~sr in + (* SR.update made lock free as of CA-27630 *) + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.update rpc session_id sr) + + let get_supported_types ~__context = + info "SR.get_supported_types"; + Local.SR.get_supported_types ~__context + + let scan ~__context ~sr = + (* since we periodically sr_scan, only log those that aren't internal ones.. 
otherwise logs just get spammed *) + let is_internal_scan = Db.Session.get_pool ~__context ~self:(Context.get_session_id __context) in + (if is_internal_scan then debug else info) "SR.scan: SR = '%s'" (sr_uuid ~__context sr); + let local_fn = Local.SR.scan ~sr in + with_sr_marked ~__context ~sr ~doc:"SR.scan" ~op:`scan + (fun () -> + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.scan rpc session_id sr)) + + let probe ~__context ~host ~device_config ~_type ~sm_config = + info "SR.probe: host = '%s'" (host_uuid ~__context host); + let local_fn = Local.SR.probe ~host ~device_config ~_type ~sm_config in + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.SR.probe ~rpc ~session_id ~host ~device_config ~_type ~sm_config) + + let set_shared ~__context ~sr ~value = + Local.SR.set_shared ~__context ~sr ~value + + let set_name_label ~__context ~sr ~value = + info "SR.set_name_label: SR = '%s' name-label = '%s'" + (sr_uuid ~__context sr) value; + let local_fn = Local.SR.set_name_label ~sr ~value in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.set_name_label rpc session_id sr value) + + let set_name_description ~__context ~sr ~value = + info "SR.set_name_description: SR = '%s' name-description = '%s'" + (sr_uuid ~__context sr) value; + let local_fn = Local.SR.set_name_description ~sr ~value in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.set_name_description rpc session_id sr value) + + let assert_can_host_ha_statefile ~__context ~sr = + info "SR.assert_can_host_ha_statefile: SR = '%s'" (sr_uuid ~__context sr); + Local.SR.assert_can_host_ha_statefile ~__context ~sr + + let assert_supports_database_replication ~__context ~sr = + info "SR.assert_supports_database_replication: SR '%s'" (sr_uuid ~__context sr); + Local.SR.assert_supports_database_replication ~__context ~sr + + let enable_database_replication ~__context ~sr = + info 
"SR.enable_database_replication: SR = '%s'" (sr_uuid ~__context sr); + Local.SR.enable_database_replication ~__context ~sr + + let disable_database_replication ~__context ~sr = + info "SR.disable_database_replication: SR = '%s'" (sr_uuid ~__context sr); + Local.SR.disable_database_replication ~__context ~sr + + let create_new_blob ~__context ~sr ~name ~mime_type ~public = + info "SR.create_new_blob: SR = '%s'" (sr_uuid ~__context sr); + Local.SR.create_new_blob ~__context ~sr ~name ~mime_type ~public + + (* SR Level RRDs *) + let get_data_sources ~__context ~sr = + info "SR.get_data_sources: SR = '%s'" (sr_uuid ~__context sr); + let local_fn = Local.SR.get_data_sources ~sr in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> Client.SR.get_data_sources rpc session_id sr) + + let record_data_source ~__context ~sr ~data_source = + info "SR.record_data_source: SR = '%s'; data source = '%s'" + (sr_uuid ~__context sr) data_source; + let local_fn = Local.SR.record_data_source ~sr ~data_source in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> + Client.SR.record_data_source rpc session_id sr data_source) + + let query_data_source ~__context ~sr ~data_source = + info "SR.query_data_source: SR = '%s'; data source = '%s'" + (sr_uuid ~__context sr) data_source; + let local_fn = Local.SR.query_data_source ~sr ~data_source in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> + Client.SR.query_data_source rpc session_id sr data_source) + + let forget_data_source_archives ~__context ~sr ~data_source = + info "SR.forget_data_source_archives: sr = '%s'; data source = '%s'" + (sr_uuid ~__context sr) data_source; + let local_fn = Local.SR.forget_data_source_archives ~sr ~data_source in + forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> + Client.SR.forget_data_source_archives rpc session_id sr data_source) + + end + module VDI = struct + + let unmark_vdi ~__context ~vdi ~doc ~op = + let task_id = 
Ref.string_of (Context.get_task_id __context) in + log_exn_ignore ~doc:("unmarking VDI after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.VDI.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vdi.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vdi, Ref.string_of self); + end) + vdi + + let mark_vdi ~__context ~vdi ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + log_exn ~doc:("marking VDI for " ^ doc) + (fun self -> + Xapi_vdi.assert_operation_valid ~__context ~self ~op; + Db.VDI.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_vdi.update_allowed_operations ~__context ~self) vdi + + (** Use this function to mark the SR and/or the individual VDI *) + let with_sr_andor_vdi ~__context ?sr ?vdi ~doc f = + Helpers.retry_with_global_lock ~__context ~doc + (fun () -> + maybe (fun (sr, op) -> SR.mark_sr ~__context ~sr ~doc ~op) sr; + (* If we fail to acquire the VDI lock, unlock the SR *) + try + maybe (fun (vdi, op) -> mark_vdi ~__context ~vdi ~doc ~op) vdi + with e -> + maybe (fun (sr, op) -> SR.unmark_sr ~__context ~sr ~doc ~op) sr; + raise e + ); + finally + (fun () -> f ()) + (fun () -> + Helpers.with_global_lock + (fun () -> + maybe (fun (sr, op) -> SR.unmark_sr ~__context ~sr ~doc ~op) sr; + maybe (fun (vdi, op) -> unmark_vdi ~__context ~vdi ~doc ~op) vdi)) + + + (* -------- Forwarding helper functions: ------------------------------------ *) + + (* Read SR from VDI and use same forwarding mechanism as SR *) + let forward_vdi_op ~local_fn ~__context ~self op = + let sr = Db.VDI.get_SR ~__context ~self in + SR.forward_sr_op ~local_fn ~__context ~self:sr op + + (* -------------------------------------------------------------------------- *) + + let set_sharable ~__context ~self ~value = + if not (Mtc.is_vdi_accessed_by_protected_VM ~__context ~vdi:self) then begin + let sr = Db.VDI.get_SR ~__context ~self in + 
Sm.assert_session_has_internal_sr_access ~__context ~sr; + end; + Local.VDI.set_sharable ~__context ~self ~value + + let set_managed ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_managed ~__context ~self ~value + + let set_read_only ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_read_only ~__context ~self ~value + + let set_missing ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_missing ~__context ~self ~value + + let set_virtual_size ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_virtual_size ~__context ~self ~value + + let set_physical_utilisation ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_physical_utilisation ~__context ~self ~value + + let set_is_a_snapshot ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_is_a_snapshot ~__context ~self ~value + + let set_snapshot_of ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_snapshot_of ~__context ~self ~value + + let set_snapshot_time ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_snapshot_time ~__context ~self ~value + + let set_metadata_of_pool ~__context ~self ~value = + let sr = Db.VDI.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.set_metadata_of_pool ~__context ~self ~value + + let 
set_name_label ~__context ~self ~value = + info "VDI.set_name_label: VDI = '%s' name-label = '%s'" + (vdi_uuid ~__context self) value; + let local_fn = Local.VDI.set_name_label ~self ~value in + forward_vdi_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VDI.set_name_label rpc session_id self value) + + let set_name_description ~__context ~self ~value = + info "VDI.set_name_description: VDI = '%s' name-description = '%s'" + (vdi_uuid ~__context self) value; + let local_fn = Local.VDI.set_name_description ~self ~value in + forward_vdi_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VDI.set_name_description rpc session_id self value) + + let ensure_vdi_not_on_running_vm ~__context ~self = + let vbds = Db.VDI.get_VBDs ~__context ~self in + List.iter (fun vbd -> + let vm = Db.VBD.get_VM ~__context ~self:vbd in + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vm ~expected:`Halted + ) vbds + + let set_on_boot ~__context ~self ~value = + ensure_vdi_not_on_running_vm ~__context ~self; + let local_fn = Local.VDI.set_on_boot ~self ~value in + forward_vdi_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VDI.set_on_boot rpc session_id self value) + + let set_allow_caching ~__context ~self ~value = + ensure_vdi_not_on_running_vm ~__context ~self; + Local.VDI.set_allow_caching ~__context ~self ~value + + let open_database ~__context ~self = + Local.VDI.open_database ~__context ~self + + let read_database_pool_uuid ~__context ~self = + Local.VDI.read_database_pool_uuid ~__context ~self + + (* know sr so just use SR forwarding policy direct here *) + let create ~__context ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags = + info "VDI.create: SR = '%s'; name label = '%s'" (sr_uuid ~__context sR) name_label; + let local_fn = Local.VDI.create ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags 
in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_create) ~doc:"VDI.create" + (fun () -> + SR.forward_sr_op ~local_fn ~__context ~self:sR + (fun session_id rpc -> Client.VDI.create ~rpc ~session_id ~name_label ~name_description ~sR ~virtual_size ~_type ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags)) + + (* Hidden call used in pool join only *) + let pool_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location = + Local.VDI.pool_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location + + (* Called from the SM backend *) + let db_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location = + Sm.assert_session_has_internal_sr_access ~__context ~sr:sR; + Local.VDI.db_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location + + (* Called from the SM backend *) + let db_forget ~__context ~vdi = + let sr = Db.VDI.get_SR ~__context ~self:vdi in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + Local.VDI.db_forget ~__context ~vdi + + let introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of= + info "VDI.introduce: SR = '%s'; name label = '%s'" (sr_uuid ~__context sR) name_label; + let local_fn = Local.VDI.introduce ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_introduce) ~doc:"VDI.introduce" + (fun () -> + SR.forward_sr_op ~local_fn ~__context ~self:sR + (fun session_id rpc -> + Client.VDI.introduce ~rpc ~session_id 
~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of)) + + let update ~__context ~vdi = + let local_fn = Local.VDI.update ~vdi in + let sr = Db.VDI.get_SR ~__context ~self:vdi in + with_sr_andor_vdi ~__context ~vdi:(vdi, `update) ~doc:"VDI.update" + (fun () -> + SR.forward_sr_op ~local_fn ~__context ~self:sr + (fun session_id rpc -> + Client.VDI.update ~rpc ~session_id ~vdi)) + + let forget ~__context ~vdi = + info "VDI.forget: VDI = '%s'" (vdi_uuid ~__context vdi); + with_sr_andor_vdi ~__context ~vdi:(vdi, `forget) ~doc:"VDI.forget" + (fun () -> + Local.VDI.forget ~__context ~vdi) + + let destroy ~__context ~self = + info "VDI.destroy: VDI = '%s'" (vdi_uuid ~__context self); + let local_fn = Local.VDI.destroy ~self in + let sR = Db.VDI.get_SR ~__context ~self in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_destroy) ~vdi:(self, `destroy) ~doc:"VDI.destroy" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VDI.destroy rpc session_id self)) + + (* !! FIXME - Depends on what we're doing here... 
*) + let snapshot ~__context ~vdi ~driver_params = + info "VDI.snapshot: VDI = '%s'" (vdi_uuid ~__context vdi); + let local_fn = Local.VDI.snapshot ~vdi ~driver_params in + let sR = Db.VDI.get_SR ~__context ~self:vdi in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_snapshot) ~vdi:(vdi, `snapshot) ~doc:"VDI.snapshot" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self:vdi + (fun session_id rpc -> Client.VDI.snapshot rpc session_id vdi driver_params)) + + let clone ~__context ~vdi ~driver_params = + info "VDI.clone: VDI = '%s'" (vdi_uuid ~__context vdi); + let local_fn = Local.VDI.clone ~vdi ~driver_params in + let sR = Db.VDI.get_SR ~__context ~self:vdi in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_clone) ~vdi:(vdi, `clone) ~doc:"VDI.clone" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self:vdi + (fun session_id rpc -> Client.VDI.clone rpc session_id vdi driver_params)) + + let copy ~__context ~vdi ~sr ~base_vdi ~into_vdi = + info "VDI.copy: VDI = '%s'; SR = '%s'; base_vdi = '%s'; into_vdi = '%s'" (vdi_uuid ~__context vdi) (sr_uuid ~__context sr) (vdi_uuid ~__context base_vdi) (vdi_uuid ~__context into_vdi); + let local_fn = Local.VDI.copy ~vdi ~sr ~base_vdi ~into_vdi in + let src_sr = Db.VDI.get_SR ~__context ~self:vdi in + (* No need to lock the VDI because the VBD.plug will do that for us *) + (* Try forward the request to a host which can have access to both source + and destination SR. 
*) + let op session_id rpc = Client.VDI.copy rpc session_id vdi sr base_vdi into_vdi in + with_sr_andor_vdi ~__context ~vdi:(vdi, `copy) ~doc:"VDI.copy" + (fun () -> + try + SR.forward_sr_multiple_op ~local_fn ~__context ~srs:[src_sr; sr] ~prefer_slaves:true op + with Not_found -> + SR.forward_sr_multiple_op ~local_fn ~__context ~srs:[src_sr] ~prefer_slaves:true op) + + let pool_migrate ~__context ~vdi ~sr ~options = + let vbds = Db.VBD.get_records_where ~__context + ~expr:(Db_filter_types.Eq(Db_filter_types.Field "VDI", + Db_filter_types.Literal (Ref.string_of vdi))) in + if List.length vbds < 1 + then raise (Api_errors.Server_error(Api_errors.vdi_needs_vm_for_migrate,[Ref.string_of vdi])); + + let vm = (snd (List.hd vbds)).API.vBD_VM in + + (* hackity hack *) + let options = ("__internal__vm",Ref.string_of vm) :: (List.remove_assoc "__internal__vm" options) in + let local_fn = Local.VDI.pool_migrate ~vdi ~sr ~options in + + info "VDI.pool_migrate: VDI = '%s'; SR = '%s'; VM = '%s'" + (vdi_uuid ~__context vdi) (sr_uuid ~__context sr) (vm_uuid ~__context vm); + + VM.with_vm_operation ~__context ~self:vm ~doc:"VDI.pool_migrate" ~op:`migrate_send + (fun () -> + let snapshot, host = + if Xapi_vm_lifecycle.is_live ~__context ~self:vm then + (Helpers.get_boot_record ~__context ~self:vm, + Db.VM.get_resident_on ~__context ~self:vm) + else + let snapshot = Db.VM.get_record ~__context ~self:vm in + let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in + let host = + if host <> Ref.null then host else + let choose_fn ~host = + Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~snapshot ~host (); + Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs:[sr] ~host in + Xapi_vm_helpers.choose_host ~__context ~vm ~choose_fn () in + (snapshot, host) in + VM.reserve_memory_for_vm ~__context ~vm:vm ~host ~snapshot ~host_op:`vm_migrate + (fun () -> + with_sr_andor_vdi ~__context ~vdi:(vdi, `mirror) ~doc:"VDI.mirror" + (fun () -> + do_op_on 
~local_fn ~__context ~host + (fun session_id rpc -> Client.VDI.pool_migrate ~rpc ~session_id ~vdi ~sr ~options)))) + + let resize ~__context ~vdi ~size = + info "VDI.resize: VDI = '%s'; size = %Ld" (vdi_uuid ~__context vdi) size; + let local_fn = Local.VDI.resize ~vdi ~size in + let sR = Db.VDI.get_SR ~__context ~self:vdi in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_resize) ~vdi:(vdi, `resize) ~doc:"VDI.resize" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self:vdi + (fun session_id rpc -> Client.VDI.resize rpc session_id vdi size)) + + let resize_online ~__context ~vdi ~size = + info "VDI.resize_online: VDI = '%s'; size = %Ld" (vdi_uuid ~__context vdi) size; + let local_fn = Local.VDI.resize_online ~vdi ~size in + let sR = Db.VDI.get_SR ~__context ~self:vdi in + with_sr_andor_vdi ~__context ~sr:(sR, `vdi_resize) ~vdi:(vdi, `resize_online) ~doc:"VDI.resize_online" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self:vdi + (fun session_id rpc -> Client.VDI.resize_online rpc session_id vdi size)) + + let generate_config ~__context ~host ~vdi = + info "VDI.generate_config: VDI = '%s'; host = '%s'" (vdi_uuid ~__context vdi) (host_uuid ~__context host); + let local_fn = Local.VDI.generate_config ~host ~vdi in + with_sr_andor_vdi ~__context ~vdi:(vdi, `generate_config) ~doc:"VDI.generate_config" + (fun () -> + do_op_on ~local_fn ~__context ~host + (fun session_id rpc -> Client.VDI.generate_config rpc session_id host vdi) + ) + + let force_unlock ~__context ~vdi = + info "VDI.force_unlock: VDI = '%s'" (vdi_uuid ~__context vdi); + let local_fn = Local.VDI.force_unlock ~vdi in + with_sr_andor_vdi ~__context ~vdi:(vdi, `force_unlock) ~doc:"VDI.force_unlock" + (fun () -> + forward_vdi_op ~local_fn ~__context ~self:vdi + (fun session_id rpc -> Client.VDI.force_unlock rpc session_id vdi)) + + let checksum ~__context ~self = + VM.forward_to_access_srs_and ~local_fn:(Local.VDI.checksum ~self) ~__context + ~extra_sr:(Db.VDI.get_SR ~__context ~self) + (fun session_id 
rpc -> Client.VDI.checksum rpc session_id self) + + end + module VBD = struct + + let update_vbd_and_vdi_operations ~__context ~vbd = + Helpers.with_global_lock + (fun () -> + try + Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; + if not (Db.VBD.get_empty ~__context ~self:vbd) then + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + Xapi_vdi.update_allowed_operations ~__context ~self:vdi + with _ -> ()) + + let unmark_vbd ~__context ~vbd ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + log_exn ~doc:("unmarking VBD after " ^ doc) + (fun self -> + if Db.is_valid_ref __context self then begin + Db.VBD.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vbd_helpers.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vbd, Ref.string_of vbd) + end) + vbd + + let mark_vbd ~__context ~vbd ~doc ~op = + let task_id = Ref.string_of (Context.get_task_id __context) in + log_exn ~doc:("marking VBD for " ^ doc) + (fun self -> + Xapi_vbd_helpers.assert_operation_valid ~__context ~self ~op; + Db.VBD.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_vbd_helpers.update_allowed_operations ~__context ~self) vbd + + let with_vbd_marked ~__context ~vbd ~doc ~op f = + Helpers.retry_with_global_lock ~__context ~doc (fun () -> mark_vbd ~__context ~vbd ~doc ~op); + finally + (fun () -> f ()) + (fun () -> Helpers.with_global_lock (fun () -> unmark_vbd ~__context ~vbd ~doc ~op)) + + + + (* -------- Forwarding helper functions: ------------------------------------ *) + + (* Forward to host that has resident VM that this VBD references *) + let forward_vbd_op ~local_fn ~__context ~self op = + let vm = Db.VBD.get_VM ~__context ~self in + let host_resident_on = Db.VM.get_resident_on ~__context ~self:vm in + if host_resident_on = Ref.null + then local_fn ~__context + else do_op_on ~local_fn ~__context ~host:host_resident_on op + + (* 
-------------------------------------------------------------------------- *) + + + (* these are db functions *) + let create ~__context ~vM ~vDI ~userdevice ~bootable ~mode ~_type ~unpluggable ~empty ~other_config ~qos_algorithm_type ~qos_algorithm_params = + info "VBD.create: VM = '%s'; VDI = '%s'" (vm_uuid ~__context vM) (vdi_uuid ~__context vDI); + (* NB must always execute this on the master because of the autodetect_mutex *) + Local.VBD.create ~__context ~vM ~vDI ~userdevice ~bootable ~mode ~_type ~unpluggable ~empty ~other_config ~qos_algorithm_type ~qos_algorithm_params + + let set_mode ~__context ~self ~value = + info "VBD.set_mode: VBD = '%s'; value = %s" (vbd_uuid ~__context self) (Record_util.vbd_mode_to_string value); + Local.VBD.set_mode ~__context ~self ~value + + let destroy ~__context ~self = + info "VBD.destroy: VBD = '%s'" (vbd_uuid ~__context self); + Local.VBD.destroy ~__context ~self + + let insert ~__context ~vbd ~vdi = + info "VBD.insert: VBD = '%s'; VDI = '%s'" (vbd_uuid ~__context vbd) (vdi_uuid ~__context vdi); + let local_fn = Local.VBD.insert ~vbd ~vdi in + with_vbd_marked ~__context ~vbd ~doc:"VBD.insert" ~op:`insert + (fun () -> + let vm = Db.VBD.get_VM ~__context ~self:vbd in + if Db.VM.get_power_state ~__context ~self:vm = `Halted then begin + Xapi_vbd.assert_ok_to_insert ~__context ~vbd ~vdi; + Db.VBD.set_VDI ~__context ~self:vbd ~value:vdi; + Db.VBD.set_empty ~__context ~self:vbd ~value:false + end + else forward_vbd_op ~local_fn ~__context ~self:vbd + (fun session_id rpc -> Client.VBD.insert rpc session_id vbd vdi)); + update_vbd_and_vdi_operations ~__context ~vbd + + let eject ~__context ~vbd = + info "VBD.eject: VBD = '%s'" (vbd_uuid ~__context vbd); + let local_fn = Local.VBD.eject ~vbd in + with_vbd_marked ~__context ~vbd ~doc:"VBD.eject" ~op:`eject + (fun () -> + let vm = Db.VBD.get_VM ~__context ~self:vbd in + if Db.VM.get_power_state ~__context ~self:vm = `Halted then begin + Xapi_vbd.assert_ok_to_eject ~__context ~vbd; + 
Db.VBD.set_empty ~__context ~self:vbd ~value:true; + Db.VBD.set_VDI ~__context ~self:vbd ~value:Ref.null; + end + else forward_vbd_op ~local_fn ~__context ~self:vbd + (fun session_id rpc -> Client.VBD.eject rpc session_id vbd)); + update_vbd_and_vdi_operations ~__context ~vbd + + let plug ~__context ~self = + info "VBD.plug: VBD = '%s'" (vbd_uuid ~__context self); + let local_fn = Local.VBD.plug ~self in + with_vbd_marked ~__context ~vbd:self ~doc:"VBD.plug" ~op:`plug + (fun () -> + forward_vbd_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VBD.plug rpc session_id self)); + update_vbd_and_vdi_operations ~__context ~vbd:self + + let unplug ~__context ~self = + info "VBD.unplug: VBD = '%s'" (vbd_uuid ~__context self); + let local_fn = Local.VBD.unplug ~self in + with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug" ~op:`unplug + (fun () -> + forward_vbd_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.VBD.unplug rpc session_id self)); + update_vbd_and_vdi_operations ~__context ~vbd:self + + let unplug_force ~__context ~self = + info "VBD.unplug_force: VBD = '%s'" (vbd_uuid ~__context self); + let local_fn = Local.VBD.unplug_force ~self in + with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug_force" ~op:`unplug_force + (fun () -> + forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unplug_force rpc session_id self)); + update_vbd_and_vdi_operations ~__context ~vbd:self + + let unplug_force_no_safety_check ~__context ~self = + warn "VBD.unplug_force_no_safety_check: VBD = '%s'" (vbd_uuid ~__context self); + let local_fn = Local.VBD.unplug_force_no_safety_check ~self in + with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unplug_force_no_safety_check" ~op:`unplug_force + (fun () -> + forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unplug_force_no_safety_check rpc session_id self)); + update_vbd_and_vdi_operations ~__context ~vbd:self + + let pause ~__context ~self = + info "VBD.pause: VBD = 
'%s'" (vbd_uuid ~__context self); + let local_fn = Local.VBD.pause ~self in + let result = with_vbd_marked ~__context ~vbd:self ~doc:"VBD.pause" ~op:`pause + (fun () -> + forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.pause rpc session_id self) + ) in + update_vbd_and_vdi_operations ~__context ~vbd:self; + result + + let unpause ~__context ~self ~token = + info "VBD.unpause: VBD = '%s'; token = '%s'" (vbd_uuid ~__context self) token; + let local_fn = Local.VBD.unpause ~self ~token in + with_vbd_marked ~__context ~vbd:self ~doc:"VBD.unpause" ~op:`unpause + (fun () -> + forward_vbd_op ~local_fn ~__context ~self (fun session_id rpc -> Client.VBD.unpause rpc session_id self token); + ); + update_vbd_and_vdi_operations ~__context ~vbd:self + + let assert_attachable ~__context ~self = + info "VBD.assert_attachable: VBD = '%s'" (vbd_uuid ~__context self); + Local.VBD.assert_attachable ~__context ~self + end + + module VBD_metrics = struct + end + + module PBD = struct + + (* Create and destroy are just db operations, no need to forward; *) + (* however, they can affect whether SR.destroy is allowed, so update SR.allowed_operations. 
*) + let create ~__context ~host ~sR ~device_config ~other_config = + info "PBD.create: SR = '%s'; host '%s'" (sr_uuid ~__context sR) (host_uuid ~__context host); + SR.with_sr_marked ~__context ~sr:sR ~doc:"PBD.create" ~op:`pbd_create + (fun () -> Local.PBD.create ~__context ~host ~sR ~device_config ~other_config) + + let destroy ~__context ~self = + info "PBD.destroy: PBD '%s'" (pbd_uuid ~__context self); + let sr = Db.PBD.get_SR ~__context ~self in + SR.with_sr_marked ~__context ~sr ~doc:"PBD.destroy" ~op:`pbd_destroy + (fun () -> Local.PBD.destroy ~__context ~self) + + (* -------- Forwarding helper functions: ------------------------------------ *) + + let forward_pbd_op ~local_fn ~__context ~self op = + do_op_on ~local_fn ~__context ~host:(Db.PBD.get_host ~__context ~self) op + + (* -------------------------------------------------------------------------- *) + + let sanitize (k, v) = + if String.endswith "transformed" k then + k ^ "=undisclosed" + else + k ^ "=" ^ v + + let set_device_config ~__context ~self ~value = + info "PBD.set_device_config: PBD = '%s'; device_config = [ %s ]" + (pbd_uuid ~__context self) (String.concat "; " (List.map sanitize value)); + let sr = Db.PBD.get_SR ~__context ~self in + Sm.assert_session_has_internal_sr_access ~__context ~sr; + + let local_fn = Local.PBD.set_device_config ~self ~value in + forward_pbd_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.PBD.set_device_config rpc session_id self value) + + (* Mark the SR and check, if we are the 'SRmaster' that no VDI + current_operations are present (eg snapshot, clone) since these are all + done on the SR master. 
*) + let with_unplug_locks ~__context ~pbd ~sr f = + let doc = "PBD.unplug" and op = `unplug in + Helpers.retry_with_global_lock ~__context ~doc + (fun () -> + if Helpers.i_am_srmaster ~__context ~sr + then + List.iter (fun vdi -> + if Db.VDI.get_current_operations ~__context ~self:vdi <> [] + then raise (Api_errors.Server_error(Api_errors.other_operation_in_progress, [ Datamodel._vdi; Ref.string_of vdi ]))) + (Db.SR.get_VDIs ~__context ~self:sr); + SR.mark_sr ~__context ~sr ~doc ~op + ); + finally + (fun () -> f ()) + (fun () -> Helpers.with_global_lock (fun () -> SR.unmark_sr ~__context ~sr ~doc ~op)) + + (* plug and unplug need to be executed on the host that the pbd is related to *) + let plug ~__context ~self = + info "PBD.plug: PBD = '%s'" (pbd_uuid ~__context self); + let local_fn = Local.PBD.plug ~self in + let sr = Db.PBD.get_SR ~__context ~self in + let is_shared_sr = Db.SR.get_shared ~__context ~self:sr in + let is_master_pbd = + let pbd_host = Db.PBD.get_host ~__context ~self in + let master_host = Helpers.get_localhost ~__context in + pbd_host = master_host in + + SR.with_sr_marked ~__context ~sr ~doc:"PBD.plug" ~op:`plug + (fun () -> + forward_pbd_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.PBD.plug rpc session_id self)); + + (* We always plug the master PBD first and unplug it last. If this is the + * first PBD plugged for this SR (proxy: the PBD being plugged is for the + * master) then we should perform an initial SR scan and perform some + * asynchronous start-of-day operations in the callback. + * Note the current context contains a completed real task and we should + * not reuse it for what is effectively another call. 
*) + if is_master_pbd then + Server_helpers.exec_with_new_task "PBD.plug initial SR scan" (fun __context -> + let should_handle_metadata_vdis = is_shared_sr in + + if should_handle_metadata_vdis then + Xapi_dr.signal_sr_is_processing ~__context ~sr; + + let sr_scan_callback () = + if is_shared_sr then begin + Xapi_dr.handle_metadata_vdis ~__context ~sr; + Xapi_dr.signal_sr_is_ready ~__context ~sr; + end; + Xapi_sr.maybe_push_sr_rrds ~__context ~sr; + Xapi_sr.update ~__context ~sr; + in + + Xapi_sr.scan_one ~__context ~callback:sr_scan_callback sr; + ) + + let unplug ~__context ~self = + info "PBD.unplug: PBD = '%s'" (pbd_uuid ~__context self); + let local_fn = Local.PBD.unplug ~self in + let sr = Db.PBD.get_SR ~__context ~self in + let is_master_pbd = + let pbd_host = Db.PBD.get_host ~__context ~self in + let master_host = Helpers.get_localhost ~__context in + pbd_host = master_host in + + with_unplug_locks ~__context ~sr ~pbd:self + (fun () -> + if is_master_pbd then + Xapi_sr.maybe_copy_sr_rrds ~__context ~sr; + forward_pbd_op ~local_fn ~__context ~self + (fun session_id rpc -> Client.PBD.unplug rpc session_id self)) + end + + module Crashdump = struct + + (* -------- Forwarding helper functions: ------------------------------------ *) + + (* Read VDI and then re-use VDI forwarding policy *) + let forward_crashdump_op ~local_fn ~__context ~self op = + let vdi = Db.Crashdump.get_VDI ~__context ~self in + VDI.forward_vdi_op ~local_fn ~__context ~self:vdi op + + (* -------------------------------------------------------------------------- *) + + let destroy ~__context ~self = + info "Crashdump.destroy: crashdump = '%s'" (crashdump_uuid ~__context self); + let local_fn = Local.Crashdump.destroy ~self in + forward_crashdump_op ~local_fn ~__context ~self (fun session_id rpc -> Client.Crashdump.destroy rpc session_id self) + end + + (* whatever *) + module VTPM = Local.VTPM + + module Console = Local.Console + + module User = Local.User + + module Blob = Local.Blob + + 
module Message = Local.Message + + module Data_source = struct end + + module Secret = Local.Secret + + module PCI = struct end + + module PGPU = struct + include Local.PGPU + + let enable_dom0_access ~__context ~self = + info "PGPU.enable_dom0_access: pgpu = '%s'" (pgpu_uuid ~__context self); + let host = Db.PGPU.get_host ~__context ~self in + let local_fn = Local.PGPU.enable_dom0_access ~self in + do_op_on ~__context ~local_fn ~host + (fun session_id rpc -> Client.PGPU.enable_dom0_access rpc session_id self) + + let disable_dom0_access ~__context ~self = + info "PGPU.disable_dom0_access: pgpu = '%s'" (pgpu_uuid ~__context self); + let host = Db.PGPU.get_host ~__context ~self in + let local_fn = Local.PGPU.disable_dom0_access ~self in + do_op_on ~__context ~local_fn ~host + (fun session_id rpc -> Client.PGPU.disable_dom0_access rpc session_id self) + end + + module GPU_group = struct + (* Don't forward. These are just db operations. *) + let create ~__context ~name_label ~name_description ~other_config = + info "GPU_group.create: name_label = '%s'" name_label; + Local.GPU_group.create ~__context ~name_label ~name_description ~other_config + + let destroy ~__context ~self = + info "GPU_group.destroy: gpu_group = '%s'" (gpu_group_uuid ~__context self); + (* WARNING WARNING WARNING: directly call destroy with the global lock since it does only database operations *) + Helpers.with_global_lock (fun () -> + Local.GPU_group.destroy ~__context ~self) + + let update_enabled_VGPU_types ~__context ~self = + info "GPU_group.update_enabled_VGPU_types: gpu_group = '%s'" (gpu_group_uuid ~__context self); + Local.GPU_group.update_enabled_VGPU_types ~__context ~self + + let update_supported_VGPU_types ~__context ~self = + info "GPU_group.update_supported_VGPU_types: gpu_group = '%s'" (gpu_group_uuid ~__context self); + Local.GPU_group.update_supported_VGPU_types ~__context ~self + + let get_remaining_capacity ~__context ~self ~vgpu_type = + info "GPU_group.get_remaining_capacity: 
gpu_group = '%s' vgpu_type = '%s'" + (gpu_group_uuid ~__context self) + (vgpu_type_uuid ~__context vgpu_type); + Local.GPU_group.get_remaining_capacity ~__context ~self ~vgpu_type + end + + module VGPU = struct + let create ~__context ~vM ~gPU_group ~device ~other_config ~_type = + info "VGPU.create: VM = '%s'; GPU_group = '%s'" (vm_uuid ~__context vM) (gpu_group_uuid ~__context gPU_group); + Local.VGPU.create ~__context ~vM ~gPU_group ~device ~other_config ~_type + + let destroy ~__context ~self = + info "VGPU.destroy: VGPU = '%s'" (vgpu_uuid ~__context self); + Local.VGPU.destroy ~__context ~self + + let atomic_set_resident_on ~__context ~self ~value = + info "VGPU.atomic_set_resident_on: VGPU = '%s'; PGPU = '%s'" + (vgpu_uuid ~__context self) (pgpu_uuid ~__context value); + (* Need to prevent the host chooser being run while these fields are being modified *) + Helpers.with_global_lock + (fun () -> + Db.VGPU.set_resident_on ~__context ~self ~value; + Db.VGPU.set_scheduled_to_be_resident_on ~__context ~self ~value:Ref.null + ) + end - module VGPU_type = struct end - module LVHD = struct end + module VGPU_type = struct end + module LVHD = struct end end diff --git a/ocaml/xapi/monitor_dbcalls.ml b/ocaml/xapi/monitor_dbcalls.ml index 4d07cf67cf4..5981e6b1b0a 100644 --- a/ocaml/xapi/monitor_dbcalls.ml +++ b/ocaml/xapi/monitor_dbcalls.ml @@ -25,144 +25,144 @@ open D (* Helper map functions. 
*) let transfer_map ~source ~target = - Hashtbl.clear target; - Hashtbl.iter (fun k v -> Hashtbl.add target k v) source; - Hashtbl.clear source + Hashtbl.clear target; + Hashtbl.iter (fun k v -> Hashtbl.add target k v) source; + Hashtbl.clear source let get_updates ~before ~after ~f = - Hashtbl.fold (fun k v acc -> - if (try v <> Hashtbl.find before k with Not_found -> true) - then (f k v acc) - else acc - ) after [] + Hashtbl.fold (fun k v acc -> + if (try v <> Hashtbl.find before k with Not_found -> true) + then (f k v acc) + else acc + ) after [] let get_updates_map = get_updates ~f:(fun k v acc -> (k, v)::acc) let get_updates_values = get_updates ~f:(fun _ v acc -> v::acc) let get_host_memory_changes xc = - let physinfo = Xenctrl.physinfo xc in - let bytes_of_pages pages = - let kib = Xenctrl.pages_to_kib (Int64.of_nativeint pages) in - Int64.shift_left kib 10 - in - let free_bytes = bytes_of_pages physinfo.Xenctrl.free_pages in - let total_bytes = bytes_of_pages physinfo.Xenctrl.total_pages in - Mutex.execute host_memory_m (fun _ -> - let host_memory_changed = - !host_memory_free_cached <> free_bytes || - !host_memory_total_cached <> total_bytes - in - host_memory_free_cached := free_bytes; - host_memory_total_cached := total_bytes; - if host_memory_changed then Some (free_bytes, total_bytes) else None - ) + let physinfo = Xenctrl.physinfo xc in + let bytes_of_pages pages = + let kib = Xenctrl.pages_to_kib (Int64.of_nativeint pages) in + Int64.shift_left kib 10 + in + let free_bytes = bytes_of_pages physinfo.Xenctrl.free_pages in + let total_bytes = bytes_of_pages physinfo.Xenctrl.total_pages in + Mutex.execute host_memory_m (fun _ -> + let host_memory_changed = + !host_memory_free_cached <> free_bytes || + !host_memory_total_cached <> total_bytes + in + host_memory_free_cached := free_bytes; + host_memory_total_cached := total_bytes; + if host_memory_changed then Some (free_bytes, total_bytes) else None + ) let get_vm_memory_changes xc = - let domains = 
Xenctrl.domain_getinfolist xc 0 in - let process_vm dom = - let open Xenctrl in - if not dom.dying then - begin - let uuid = Uuid.string_of_uuid (Uuid.uuid_of_int_array dom.handle) in - let kib = Xenctrl.pages_to_kib (Int64.of_nativeint dom.total_memory_pages) in - let memory = Int64.mul kib 1024L in - Hashtbl.add vm_memory_tmp uuid memory - end - in - List.iter process_vm domains; - Mutex.execute vm_memory_cached_m (fun _ -> - let changed_vm_memory = - get_updates_map ~before:vm_memory_cached ~after:vm_memory_tmp in - transfer_map ~source:vm_memory_tmp ~target:vm_memory_cached; - changed_vm_memory - ) + let domains = Xenctrl.domain_getinfolist xc 0 in + let process_vm dom = + let open Xenctrl in + if not dom.dying then + begin + let uuid = Uuid.string_of_uuid (Uuid.uuid_of_int_array dom.handle) in + let kib = Xenctrl.pages_to_kib (Int64.of_nativeint dom.total_memory_pages) in + let memory = Int64.mul kib 1024L in + Hashtbl.add vm_memory_tmp uuid memory + end + in + List.iter process_vm domains; + Mutex.execute vm_memory_cached_m (fun _ -> + let changed_vm_memory = + get_updates_map ~before:vm_memory_cached ~after:vm_memory_tmp in + transfer_map ~source:vm_memory_tmp ~target:vm_memory_cached; + changed_vm_memory + ) let get_pif_and_bond_changes () = - (* Read fresh PIF information from networkd. *) - let open Network_stats in - let stats = read_stats () in - List.iter (fun (dev, stat) -> - if not (String.startswith "vif" dev) then ( - if stat.nb_links > 1 then (* bond *) - Hashtbl.add bonds_links_up_tmp dev stat.links_up; - let pif = { - pif_name = dev; - pif_tx = -1.0; - pif_rx = -1.0; - pif_raw_tx = 0L; - pif_raw_rx = 0L; - pif_carrier = stat.carrier; - pif_speed = stat.speed; - pif_duplex = stat.duplex; - pif_pci_bus_path = stat.pci_bus_path; - pif_vendor_id = stat.vendor_id; - pif_device_id = stat.device_id; - } in - Hashtbl.add pifs_tmp pif.pif_name pif; - ) - ) stats; - (* Check if any of the bonds have changed since our last reading. 
*) - let bond_changes = Mutex.execute bonds_links_up_cached_m (fun _ -> - let changes = - get_updates_map ~before:bonds_links_up_cached ~after:bonds_links_up_tmp in - transfer_map ~source:bonds_links_up_tmp ~target:bonds_links_up_cached; - changes - ) in - (* Check if any of the PIFs have changed since our last reading. *) - let pif_changes = Mutex.execute pifs_cached_m (fun _ -> - let changes = get_updates_values ~before:pifs_cached ~after:pifs_tmp in - transfer_map ~source:pifs_tmp ~target:pifs_cached; - changes - ) in - (* Return lists of changes. *) - pif_changes, bond_changes + (* Read fresh PIF information from networkd. *) + let open Network_stats in + let stats = read_stats () in + List.iter (fun (dev, stat) -> + if not (String.startswith "vif" dev) then ( + if stat.nb_links > 1 then (* bond *) + Hashtbl.add bonds_links_up_tmp dev stat.links_up; + let pif = { + pif_name = dev; + pif_tx = -1.0; + pif_rx = -1.0; + pif_raw_tx = 0L; + pif_raw_rx = 0L; + pif_carrier = stat.carrier; + pif_speed = stat.speed; + pif_duplex = stat.duplex; + pif_pci_bus_path = stat.pci_bus_path; + pif_vendor_id = stat.vendor_id; + pif_device_id = stat.device_id; + } in + Hashtbl.add pifs_tmp pif.pif_name pif; + ) + ) stats; + (* Check if any of the bonds have changed since our last reading. *) + let bond_changes = Mutex.execute bonds_links_up_cached_m (fun _ -> + let changes = + get_updates_map ~before:bonds_links_up_cached ~after:bonds_links_up_tmp in + transfer_map ~source:bonds_links_up_tmp ~target:bonds_links_up_cached; + changes + ) in + (* Check if any of the PIFs have changed since our last reading. *) + let pif_changes = Mutex.execute pifs_cached_m (fun _ -> + let changes = get_updates_values ~before:pifs_cached ~after:pifs_tmp in + transfer_map ~source:pifs_tmp ~target:pifs_cached; + changes + ) in + (* Return lists of changes. 
*) + pif_changes, bond_changes (* This function updates the database for all the slowly changing properties * of host memory, VM memory, PIFs, and bonds. - *) +*) let pifs_and_memory_update_fn xc = - let host_memory_changes = get_host_memory_changes xc in - let vm_memory_changes = get_vm_memory_changes xc in - let pif_changes, bond_changes = get_pif_and_bond_changes () in - Server_helpers.exec_with_new_task "updating VM_metrics.memory_actual fields and PIFs" - (fun __context -> - let host = Helpers.get_localhost ~__context in - List.iter (fun (uuid, memory) -> - let vm = Db.VM.get_by_uuid ~__context ~uuid in - let vmm = Db.VM.get_metrics ~__context ~self:vm in - if (Db.VM.get_resident_on ~__context ~self:vm = - Helpers.get_localhost ~__context) - then Db.VM_metrics.set_memory_actual ~__context ~self:vmm ~value:memory - else clear_cache_for_vm uuid - ) vm_memory_changes; - Monitor_master.update_pifs ~__context host pif_changes; - let localhost = Helpers.get_localhost ~__context in - List.iter (fun (bond, links_up) -> - let my_bond_pifs = Db.PIF.get_records_where ~__context - ~expr:(And (And (Eq (Field "host", Literal (Ref.string_of localhost)), - Not (Eq (Field "bond_master_of", Literal "()"))), - Eq(Field "device", Literal bond))) in - let my_bonds = List.map (fun (_, pif) -> List.hd pif.API.pIF_bond_master_of) my_bond_pifs in - if (List.length my_bonds) <> 1 then - debug "Error: bond %s cannot be found" bond - else - Db.Bond.set_links_up ~__context ~self:(List.hd my_bonds) - ~value:(Int64.of_int links_up) - ) bond_changes; - match host_memory_changes with None -> () | Some (free, total) -> - let metrics = Db.Host.get_metrics ~__context ~self:localhost in - Db.Host_metrics.set_memory_total ~__context ~self:metrics ~value:total; - Db.Host_metrics.set_memory_free ~__context ~self:metrics ~value:free - ) + let host_memory_changes = get_host_memory_changes xc in + let vm_memory_changes = get_vm_memory_changes xc in + let pif_changes, bond_changes = 
get_pif_and_bond_changes () in + Server_helpers.exec_with_new_task "updating VM_metrics.memory_actual fields and PIFs" + (fun __context -> + let host = Helpers.get_localhost ~__context in + List.iter (fun (uuid, memory) -> + let vm = Db.VM.get_by_uuid ~__context ~uuid in + let vmm = Db.VM.get_metrics ~__context ~self:vm in + if (Db.VM.get_resident_on ~__context ~self:vm = + Helpers.get_localhost ~__context) + then Db.VM_metrics.set_memory_actual ~__context ~self:vmm ~value:memory + else clear_cache_for_vm uuid + ) vm_memory_changes; + Monitor_master.update_pifs ~__context host pif_changes; + let localhost = Helpers.get_localhost ~__context in + List.iter (fun (bond, links_up) -> + let my_bond_pifs = Db.PIF.get_records_where ~__context + ~expr:(And (And (Eq (Field "host", Literal (Ref.string_of localhost)), + Not (Eq (Field "bond_master_of", Literal "()"))), + Eq(Field "device", Literal bond))) in + let my_bonds = List.map (fun (_, pif) -> List.hd pif.API.pIF_bond_master_of) my_bond_pifs in + if (List.length my_bonds) <> 1 then + debug "Error: bond %s cannot be found" bond + else + Db.Bond.set_links_up ~__context ~self:(List.hd my_bonds) + ~value:(Int64.of_int links_up) + ) bond_changes; + match host_memory_changes with None -> () | Some (free, total) -> + let metrics = Db.Host.get_metrics ~__context ~self:localhost in + Db.Host_metrics.set_memory_total ~__context ~self:metrics ~value:total; + Db.Host_metrics.set_memory_free ~__context ~self:metrics ~value:free + ) let monitor_dbcall_thread () = - Xenctrl.with_intf (fun xc -> - while true do - try - pifs_and_memory_update_fn xc; - Thread.delay 5. - with e -> - debug "monitor_dbcall_thread would have died from: %s; restarting in 30s." - (ExnHelper.string_of_exn e); - Thread.delay 30. - done - ) + Xenctrl.with_intf (fun xc -> + while true do + try + pifs_and_memory_update_fn xc; + Thread.delay 5. + with e -> + debug "monitor_dbcall_thread would have died from: %s; restarting in 30s." 
+ (ExnHelper.string_of_exn e); + Thread.delay 30. + done + ) diff --git a/ocaml/xapi/monitor_dbcalls.mli b/ocaml/xapi/monitor_dbcalls.mli index 8faa1395cab..9b6099b75ea 100644 --- a/ocaml/xapi/monitor_dbcalls.mli +++ b/ocaml/xapi/monitor_dbcalls.mli @@ -14,7 +14,7 @@ (** Gathering of fresh properties, and detecting property changes. * @group Property Monitoring - *) +*) (** This module triggers updates of xapi's database according to the PIF, bond, * and memory information gathered on a regular interval about the VMs and the @@ -24,7 +24,7 @@ * have no effect; furthermore, the push will not be retried. One can * explicitly clear a part or the whole cache in order to force pushing of * fresh information into the database. - *) +*) (** The function to be executed as a stand-alone thread as xapi starts. This * thread is responsible for continually gathering fresh properties and diff --git a/ocaml/xapi/monitor_dbcalls_cache.ml b/ocaml/xapi/monitor_dbcalls_cache.ml index 7ce395936dd..d3a4f2f8099 100644 --- a/ocaml/xapi/monitor_dbcalls_cache.ml +++ b/ocaml/xapi/monitor_dbcalls_cache.ml @@ -34,24 +34,24 @@ let host_memory_total_cached : Int64.t ref = ref Int64.zero (** [clear_cache_for_pif] removes any current cache for PIF with [pif_name], * which forces fresh properties for the PIF into xapi's database. *) let clear_cache_for_pif ~pif_name = - Mutex.execute pifs_cached_m (fun _ -> Hashtbl.remove pifs_cached pif_name) + Mutex.execute pifs_cached_m (fun _ -> Hashtbl.remove pifs_cached pif_name) (** [clear_cache_for_vm] removes any current cache for VM with [vm_uuid], * which forces fresh properties for the VM into xapi's database. *) let clear_cache_for_vm ~vm_uuid = - Mutex.execute vm_memory_cached_m - (fun _ -> Hashtbl.remove vm_memory_cached vm_uuid) + Mutex.execute vm_memory_cached_m + (fun _ -> Hashtbl.remove vm_memory_cached vm_uuid) (** Clear the whole cache. This forces fresh properties to be written into * xapi's database. 
*) let clear_cache () = - let safe_clear ~cache ~lock = - Mutex.execute lock (fun _ -> Hashtbl.clear cache) in - safe_clear ~cache:pifs_cached ~lock:pifs_cached_m; - safe_clear ~cache:bonds_links_up_cached ~lock:bonds_links_up_cached_m; - safe_clear ~cache:vm_memory_cached ~lock:vm_memory_cached_m; - Mutex.execute host_memory_m (fun _ -> - host_memory_free_cached := Int64.zero; - host_memory_total_cached := Int64.zero; - ) + let safe_clear ~cache ~lock = + Mutex.execute lock (fun _ -> Hashtbl.clear cache) in + safe_clear ~cache:pifs_cached ~lock:pifs_cached_m; + safe_clear ~cache:bonds_links_up_cached ~lock:bonds_links_up_cached_m; + safe_clear ~cache:vm_memory_cached ~lock:vm_memory_cached_m; + Mutex.execute host_memory_m (fun _ -> + host_memory_free_cached := Int64.zero; + host_memory_total_cached := Int64.zero; + ) diff --git a/ocaml/xapi/monitor_fake_plugin.ml b/ocaml/xapi/monitor_fake_plugin.ml index 3a6a78433bf..062dc03b3be 100644 --- a/ocaml/xapi/monitor_fake_plugin.ml +++ b/ocaml/xapi/monitor_fake_plugin.ml @@ -4,71 +4,71 @@ exception UnknownRpc let add_fake_ds fname ds_name ds_type value = - let value = float_of_string value in - let ty = match ds_type with - | "absolute" -> Rrd.Absolute - | "gauge" -> Rrd.Gauge - | "derive" -> Rrd.Derive - | _ -> failwith "Unknown ds type" - in - Unixext.mkdir_rec fake_dir 0o755; - let orig_dss = - try - fake_ds_list_of_rpc (Jsonrpc.of_string (Unixext.string_of_file fname)) - with _ -> [] - in - let new_ds = { f_name=ds_name; f_ty=ty; f_val=value } in - let new_dss = new_ds::(List.filter (fun ds -> ds.f_name <> ds_name) orig_dss) in - Unixext.write_string_to_file fname (Jsonrpc.to_string (rpc_of_fake_ds_list new_dss)) + let value = float_of_string value in + let ty = match ds_type with + | "absolute" -> Rrd.Absolute + | "gauge" -> Rrd.Gauge + | "derive" -> Rrd.Derive + | _ -> failwith "Unknown ds type" + in + Unixext.mkdir_rec fake_dir 0o755; + let orig_dss = + try + fake_ds_list_of_rpc (Jsonrpc.of_string 
(Unixext.string_of_file fname)) + with _ -> [] + in + let new_ds = { f_name=ds_name; f_ty=ty; f_val=value } in + let new_dss = new_ds::(List.filter (fun ds -> ds.f_name <> ds_name) orig_dss) in + Unixext.write_string_to_file fname (Jsonrpc.to_string (rpc_of_fake_ds_list new_dss)) let add_fake_ds_vm uuid = - let fname = Printf.sprintf "%s/%s.fakestats" fake_dir uuid in - add_fake_ds fname + let fname = Printf.sprintf "%s/%s.fakestats" fake_dir uuid in + add_fake_ds fname let add_fake_ds_host = - let fname = Printf.sprintf "%s/host.fakestats" fake_dir in - add_fake_ds fname + let fname = Printf.sprintf "%s/host.fakestats" fake_dir in + add_fake_ds fname let _ = - try - let oc = open_out "/tmp/foo" in - Printf.fprintf oc "%s" Sys.argv.(1); - let call = Xmlrpc.call_of_string Sys.argv.(1) in - let host = Rpc.string_of_rpc (List.hd call.Rpc.params) in - let oc = open_out "/tmp/foo2" in - Printf.fprintf oc "%s" host; - let args = match (List.hd (List.tl call.Rpc.params)) with - | Rpc.Dict args -> - args - | _ -> - failwith "Can't parse args" - in - let oc = open_out "/tmp/foo3" in - List.iter (fun (a,b) -> Printf.fprintf oc "%s: %s" a (Rpc.string_of_rpc b)) args; - let contents = - match call.Rpc.name with - | "add_fake_ds" -> - let uuid = Rpc.string_of_rpc (List.assoc "uuid" args) in - let ds_name = Rpc.string_of_rpc (List.assoc "ds_name" args) in - let ds_type = Rpc.string_of_rpc (List.assoc "ds_type" args) in - let value = Rpc.string_of_rpc (List.assoc "value" args) in - add_fake_ds_vm uuid ds_name ds_type value; - Rpc.rpc_of_string "OK" - | "add_fake_ds_host" -> - let ds_name = Rpc.string_of_rpc (List.assoc "ds_name" args) in - let ds_type = Rpc.string_of_rpc (List.assoc "ds_type" args) in - let value = Rpc.string_of_rpc (List.assoc "value" args) in - add_fake_ds_host ds_name ds_type value; - Rpc.rpc_of_string "OK" - | _ -> - raise UnknownRpc - in - Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=true; contents=contents}); - exit 0 - with - | UnknownRpc -> 
- Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=false; contents=Rpc.rpc_of_string "Unknown RPC"}); - exit 1 - | _ -> - Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=false; contents=Rpc.rpc_of_string "Internal error"}); - exit 1 + try + let oc = open_out "/tmp/foo" in + Printf.fprintf oc "%s" Sys.argv.(1); + let call = Xmlrpc.call_of_string Sys.argv.(1) in + let host = Rpc.string_of_rpc (List.hd call.Rpc.params) in + let oc = open_out "/tmp/foo2" in + Printf.fprintf oc "%s" host; + let args = match (List.hd (List.tl call.Rpc.params)) with + | Rpc.Dict args -> + args + | _ -> + failwith "Can't parse args" + in + let oc = open_out "/tmp/foo3" in + List.iter (fun (a,b) -> Printf.fprintf oc "%s: %s" a (Rpc.string_of_rpc b)) args; + let contents = + match call.Rpc.name with + | "add_fake_ds" -> + let uuid = Rpc.string_of_rpc (List.assoc "uuid" args) in + let ds_name = Rpc.string_of_rpc (List.assoc "ds_name" args) in + let ds_type = Rpc.string_of_rpc (List.assoc "ds_type" args) in + let value = Rpc.string_of_rpc (List.assoc "value" args) in + add_fake_ds_vm uuid ds_name ds_type value; + Rpc.rpc_of_string "OK" + | "add_fake_ds_host" -> + let ds_name = Rpc.string_of_rpc (List.assoc "ds_name" args) in + let ds_type = Rpc.string_of_rpc (List.assoc "ds_type" args) in + let value = Rpc.string_of_rpc (List.assoc "value" args) in + add_fake_ds_host ds_name ds_type value; + Rpc.rpc_of_string "OK" + | _ -> + raise UnknownRpc + in + Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=true; contents=contents}); + exit 0 + with + | UnknownRpc -> + Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=false; contents=Rpc.rpc_of_string "Unknown RPC"}); + exit 1 + | _ -> + Printf.printf "%s" (Xmlrpc.string_of_response {Rpc.success=false; contents=Rpc.rpc_of_string "Internal error"}); + exit 1 diff --git a/ocaml/xapi/monitor_master.ml b/ocaml/xapi/monitor_master.ml index 8494f77f5f2..632292f0afd 100644 --- a/ocaml/xapi/monitor_master.ml +++ 
b/ocaml/xapi/monitor_master.ml @@ -26,271 +26,271 @@ module D = Debug.Make(struct let name = "monitor_master" end) open D let update_configuration_from_master () = - Server_helpers.exec_with_new_task "update_configuration_from_master" (fun __context -> - let oc = Db.Pool.get_other_config ~__context ~self:(Helpers.get_pool ~__context) in - let new_use_min_max = (List.mem_assoc Xapi_globs.create_min_max_in_new_VM_RRDs oc) && - (List.assoc Xapi_globs.create_min_max_in_new_VM_RRDs oc = "true") in - log_and_ignore_exn (fun () -> Rrdd.update_use_min_max ~value:new_use_min_max); - let carrier = (List.mem_assoc Xapi_globs.pass_through_pif_carrier_key oc) && - (List.assoc Xapi_globs.pass_through_pif_carrier_key oc = "true") in - if !Xapi_globs.pass_through_pif_carrier <> carrier - then debug "Updating pass_through_pif_carrier: New value=%b" carrier; - Xapi_globs.pass_through_pif_carrier := carrier - ) + Server_helpers.exec_with_new_task "update_configuration_from_master" (fun __context -> + let oc = Db.Pool.get_other_config ~__context ~self:(Helpers.get_pool ~__context) in + let new_use_min_max = (List.mem_assoc Xapi_globs.create_min_max_in_new_VM_RRDs oc) && + (List.assoc Xapi_globs.create_min_max_in_new_VM_RRDs oc = "true") in + log_and_ignore_exn (fun () -> Rrdd.update_use_min_max ~value:new_use_min_max); + let carrier = (List.mem_assoc Xapi_globs.pass_through_pif_carrier_key oc) && + (List.assoc Xapi_globs.pass_through_pif_carrier_key oc = "true") in + if !Xapi_globs.pass_through_pif_carrier <> carrier + then debug "Updating pass_through_pif_carrier: New value=%b" carrier; + Xapi_globs.pass_through_pif_carrier := carrier + ) let set_vm_metrics ~__context ~vm ~memory ~cpus = - (* If VM metrics don't exist, then create them. 
*) - let metrics = Db.VM.get_metrics ~__context ~self:vm in - if not (Db.is_valid_ref __context metrics) then ( - let ref = Ref.make () in - Db.VM_metrics.create ~__context ~ref - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~memory_actual:0L - ~vCPUs_number:0L - ~vCPUs_utilisation:[] - ~vCPUs_CPU:[] - ~vCPUs_params:[] - ~vCPUs_flags:[] - ~state:[] - ~start_time:Date.never - ~install_time:Date.never - ~last_updated:Date.never - ~other_config:[] - ~hvm:false - ~nested_virt:false - ~nomigrate:false - ; - Db.VM.set_metrics ~__context ~self:vm ~value:ref - ); + (* If VM metrics don't exist, then create them. *) + let metrics = Db.VM.get_metrics ~__context ~self:vm in + if not (Db.is_valid_ref __context metrics) then ( + let ref = Ref.make () in + Db.VM_metrics.create ~__context ~ref + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~memory_actual:0L + ~vCPUs_number:0L + ~vCPUs_utilisation:[] + ~vCPUs_CPU:[] + ~vCPUs_params:[] + ~vCPUs_flags:[] + ~state:[] + ~start_time:Date.never + ~install_time:Date.never + ~last_updated:Date.never + ~other_config:[] + ~hvm:false + ~nested_virt:false + ~nomigrate:false + ; + Db.VM.set_metrics ~__context ~self:vm ~value:ref + ); - let metrics = Db.VM.get_metrics ~__context ~self:vm in - let v = List.mapi (fun i e -> (Int64.of_int i), e) (Array.to_list cpus.vcpu_vcpus) in - Db.VM_metrics.set_VCPUs_utilisation ~__context ~self:metrics ~value:v; - Db.VM_metrics.set_last_updated ~__context ~self:metrics ~value:(Date.of_float (Unix.gettimeofday ())); - match memory with - | Some memory -> - Db.VM_metrics.set_memory_actual ~__context ~self:metrics ~value:memory.memory_mem - | None -> () + let metrics = Db.VM.get_metrics ~__context ~self:vm in + let v = List.mapi (fun i e -> (Int64.of_int i), e) (Array.to_list cpus.vcpu_vcpus) in + Db.VM_metrics.set_VCPUs_utilisation ~__context ~self:metrics ~value:v; + Db.VM_metrics.set_last_updated ~__context ~self:metrics ~value:(Date.of_float (Unix.gettimeofday ())); + match memory with + | Some memory -> + 
Db.VM_metrics.set_memory_actual ~__context ~self:metrics ~value:memory.memory_mem + | None -> () (* Update the VM's vCPU, VIF stats *) let update_vm_stats ~__context uuid cpus vbds vifs memory = - try - let vm = Db.VM.get_by_uuid ~__context ~uuid:uuid in - set_vm_metrics ~__context ~vm ~memory ~cpus; - let vm_vifs = Db.VM.get_VIFs ~__context ~self:vm in - List.iter (fun self -> - let num = int_of_string (Db.VIF.get_device ~__context ~self) in - let io_write,io_read = - try - let vif = List.find (fun vif -> vif.vif_n = num) vifs in - vif.vif_tx, vif.vif_rx - with _ -> 0., 0. - in + try + let vm = Db.VM.get_by_uuid ~__context ~uuid:uuid in + set_vm_metrics ~__context ~vm ~memory ~cpus; + let vm_vifs = Db.VM.get_VIFs ~__context ~self:vm in + List.iter (fun self -> + let num = int_of_string (Db.VIF.get_device ~__context ~self) in + let io_write,io_read = + try + let vif = List.find (fun vif -> vif.vif_n = num) vifs in + vif.vif_tx, vif.vif_rx + with _ -> 0., 0. + in - (* If VIF metrics don't exist, then make them. *) - let metrics = Db.VIF.get_metrics ~__context ~self in - if not (Db.is_valid_ref __context metrics) then begin - let ref = Ref.make () in - Db.VIF_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) ~other_config:[]; - Db.VIF.set_metrics ~__context ~self ~value:ref - end; + (* If VIF metrics don't exist, then make them. *) + let metrics = Db.VIF.get_metrics ~__context ~self in + if not (Db.is_valid_ref __context metrics) then begin + let ref = Ref.make () in + Db.VIF_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) 
~other_config:[]; + Db.VIF.set_metrics ~__context ~self ~value:ref + end; - let metrics = Db.VIF.get_metrics ~__context ~self in begin - Db.VIF_metrics.set_io_write_kbs ~__context ~self:metrics ~value:io_write; - Db.VIF_metrics.set_io_read_kbs ~__context ~self:metrics ~value:io_read; - Db.VIF_metrics.set_last_updated ~__context ~self:metrics - ~value:(Date.of_float (Unix.gettimeofday ())); - end - ) vm_vifs; - let vm_vbds = Db.VM.get_VBDs ~__context ~self:vm in - List.iter (fun self -> - (* NB we only get stats from PV devices *) - let num = try Device_number.to_xenstore_key (Device_number.of_string false (Db.VBD.get_device ~__context ~self)) with _ -> -1 in - let io_write, io_read = - try - let vbd = List.find (fun vbd -> vbd.vbd_device_id = num) vbds in - vbd.vbd_io_write, vbd.vbd_io_read - with _ -> 0., 0. - in + let metrics = Db.VIF.get_metrics ~__context ~self in begin + Db.VIF_metrics.set_io_write_kbs ~__context ~self:metrics ~value:io_write; + Db.VIF_metrics.set_io_read_kbs ~__context ~self:metrics ~value:io_read; + Db.VIF_metrics.set_last_updated ~__context ~self:metrics + ~value:(Date.of_float (Unix.gettimeofday ())); + end + ) vm_vifs; + let vm_vbds = Db.VM.get_VBDs ~__context ~self:vm in + List.iter (fun self -> + (* NB we only get stats from PV devices *) + let num = try Device_number.to_xenstore_key (Device_number.of_string false (Db.VBD.get_device ~__context ~self)) with _ -> -1 in + let io_write, io_read = + try + let vbd = List.find (fun vbd -> vbd.vbd_device_id = num) vbds in + vbd.vbd_io_write, vbd.vbd_io_read + with _ -> 0., 0. + in - (* if vbd metrics don't exist then make one *) - let metrics = Db.VBD.get_metrics ~__context ~self in - if not (Db.is_valid_ref __context metrics) then begin - let ref = Ref.make () in - Db.VBD_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) 
~other_config:[]; - Db.VBD.set_metrics ~__context ~self ~value:ref - end; + (* if vbd metrics don't exist then make one *) + let metrics = Db.VBD.get_metrics ~__context ~self in + if not (Db.is_valid_ref __context metrics) then begin + let ref = Ref.make () in + Db.VBD_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) ~other_config:[]; + Db.VBD.set_metrics ~__context ~self ~value:ref + end; - let metrics = Db.VBD.get_metrics ~__context ~self in - Db.VBD_metrics.set_io_write_kbs ~__context ~self:metrics ~value:io_write; - Db.VBD_metrics.set_io_read_kbs ~__context ~self:metrics ~value:io_read; - Db.VBD_metrics.set_last_updated ~__context ~self:metrics - ~value:(Date.of_float (Unix.gettimeofday ())); - ) vm_vbds - with e -> - error "Caught exception updating stats for vm (uuid: %s) -- %s" uuid (Printexc.to_string e); - Debug.log_backtrace e (Backtrace.get e) + let metrics = Db.VBD.get_metrics ~__context ~self in + Db.VBD_metrics.set_io_write_kbs ~__context ~self:metrics ~value:io_write; + Db.VBD_metrics.set_io_read_kbs ~__context ~self:metrics ~value:io_read; + Db.VBD_metrics.set_last_updated ~__context ~self:metrics + ~value:(Date.of_float (Unix.gettimeofday ())); + ) vm_vbds + with e -> + error "Caught exception updating stats for vm (uuid: %s) -- %s" uuid (Printexc.to_string e); + Debug.log_backtrace e (Backtrace.get e) let update_host_cpu ~__context host cpus' = - let cpus = cpus'.pcpus_usage in - let all = Db.Host.get_host_CPUs ~__context ~self:host in - if Array.length cpus < List.length all then - debug "Monitor update_host_cpu got Array.length cpus = %d; num host_cpus = %d" - (Array.length cpus) (List.length all) - else begin - (* If Host_cpu objects are missing, fill 'em in with temporary random data. 
- This is needed to make sure Rio/Miami migrate succeeds *) - if List.length all < Array.length cpus then begin - let numbers = List.map (fun self -> Int64.to_int (Db.Host_cpu.get_number ~__context ~self)) all in - for i = 0 to Array.length cpus - 1 do - if not (List.mem i numbers) then - let () = Db.Host_cpu.create ~__context ~ref:(Ref.make()) - ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) ~host ~number:(Int64.of_int i) - ~vendor:"unknown" ~speed:0L ~modelname:"unknown" - ~utilisation:cpus.(i) ~flags:"unknown" ~stepping:"unknown" ~model:(-1L) ~family:(-1L) - ~features:"unknown" ~other_config:[] in () - done - end; - let all = Db.Host.get_host_CPUs ~__context ~self:host in - List.iter (fun self -> - let num = Int64.to_int (Db.Host_cpu.get_number ~__context ~self) in - let value = cpus.(num) in - Db.Host_cpu.set_utilisation ~__context ~self ~value - ) all - end + let cpus = cpus'.pcpus_usage in + let all = Db.Host.get_host_CPUs ~__context ~self:host in + if Array.length cpus < List.length all then + debug "Monitor update_host_cpu got Array.length cpus = %d; num host_cpus = %d" + (Array.length cpus) (List.length all) + else begin + (* If Host_cpu objects are missing, fill 'em in with temporary random data. 
+ This is needed to make sure Rio/Miami migrate succeeds *) + if List.length all < Array.length cpus then begin + let numbers = List.map (fun self -> Int64.to_int (Db.Host_cpu.get_number ~__context ~self)) all in + for i = 0 to Array.length cpus - 1 do + if not (List.mem i numbers) then + let () = Db.Host_cpu.create ~__context ~ref:(Ref.make()) + ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) ~host ~number:(Int64.of_int i) + ~vendor:"unknown" ~speed:0L ~modelname:"unknown" + ~utilisation:cpus.(i) ~flags:"unknown" ~stepping:"unknown" ~model:(-1L) ~family:(-1L) + ~features:"unknown" ~other_config:[] in () + done + end; + let all = Db.Host.get_host_CPUs ~__context ~self:host in + List.iter (fun self -> + let num = Int64.to_int (Db.Host_cpu.get_number ~__context ~self) in + let value = cpus.(num) in + Db.Host_cpu.set_utilisation ~__context ~self ~value + ) all + end let update_host_metrics ~__context h = - let bytes_of_kib x = Int64.shift_left x 10 in - Xapi_host_helpers.update_host_metrics ~__context ~host:h.host_ref - ~memory_total:(bytes_of_kib h.total_kib) - ~memory_free:(bytes_of_kib h.free_kib) + let bytes_of_kib x = Int64.shift_left x 10 in + Xapi_host_helpers.update_host_metrics ~__context ~host:h.host_ref + ~memory_total:(bytes_of_kib h.total_kib) + ~memory_free:(bytes_of_kib h.free_kib) let get_pciids vendor device = - (* FIXME : put a lazy cache *) - let v, d = Pciutil.parse vendor device in - (match v with None -> "" | Some x -> x), - (match d with None -> "" | Some x -> x) + (* FIXME : put a lazy cache *) + let v, d = Pciutil.parse vendor device in + (match v with None -> "" | Some x -> x), + (match d with None -> "" | Some x -> x) let set_pif_metrics ~__context ~self ~vendor ~device ~carrier ~speed ~duplex - ~pcibuspath ~io_write ~io_read pmr = - (* don't update & and reread pciids if db already contains same value *) - if pmr.API.pIF_metrics_vendor_id <> vendor - || pmr.API.pIF_metrics_device_id <> device then ( - let vendor_str, device_str = get_pciids 
vendor device in - Db.PIF_metrics.set_vendor_id ~__context ~self ~value:vendor; - Db.PIF_metrics.set_device_id ~__context ~self ~value:device; - Db.PIF_metrics.set_vendor_name ~__context ~self ~value:vendor_str; - Db.PIF_metrics.set_device_name ~__context ~self ~value:device_str; - ); - if pmr.API.pIF_metrics_carrier <> carrier then - Db.PIF_metrics.set_carrier ~__context ~self ~value:carrier; - if pmr.API.pIF_metrics_speed <> speed then - Db.PIF_metrics.set_speed ~__context ~self ~value:speed; - if pmr.API.pIF_metrics_duplex <> duplex then - Db.PIF_metrics.set_duplex ~__context ~self ~value:duplex; - if pmr.API.pIF_metrics_pci_bus_path <> pcibuspath then - Db.PIF_metrics.set_pci_bus_path ~__context ~self ~value:pcibuspath; - if io_write >= 0.0 then - Db.PIF_metrics.set_io_write_kbs ~__context ~self ~value:io_write; - if io_read >= 0.0 then - Db.PIF_metrics.set_io_read_kbs ~__context ~self ~value:io_read; - Db.PIF_metrics.set_last_updated ~__context ~self - ~value:(Date.of_float (Unix.gettimeofday ())) + ~pcibuspath ~io_write ~io_read pmr = + (* don't update & and reread pciids if db already contains same value *) + if pmr.API.pIF_metrics_vendor_id <> vendor + || pmr.API.pIF_metrics_device_id <> device then ( + let vendor_str, device_str = get_pciids vendor device in + Db.PIF_metrics.set_vendor_id ~__context ~self ~value:vendor; + Db.PIF_metrics.set_device_id ~__context ~self ~value:device; + Db.PIF_metrics.set_vendor_name ~__context ~self ~value:vendor_str; + Db.PIF_metrics.set_device_name ~__context ~self ~value:device_str; + ); + if pmr.API.pIF_metrics_carrier <> carrier then + Db.PIF_metrics.set_carrier ~__context ~self ~value:carrier; + if pmr.API.pIF_metrics_speed <> speed then + Db.PIF_metrics.set_speed ~__context ~self ~value:speed; + if pmr.API.pIF_metrics_duplex <> duplex then + Db.PIF_metrics.set_duplex ~__context ~self ~value:duplex; + if pmr.API.pIF_metrics_pci_bus_path <> pcibuspath then + Db.PIF_metrics.set_pci_bus_path ~__context ~self 
~value:pcibuspath; + if io_write >= 0.0 then + Db.PIF_metrics.set_io_write_kbs ~__context ~self ~value:io_write; + if io_read >= 0.0 then + Db.PIF_metrics.set_io_read_kbs ~__context ~self ~value:io_read; + Db.PIF_metrics.set_last_updated ~__context ~self + ~value:(Date.of_float (Unix.gettimeofday ())) (* Note that the following function is actually called on the slave most of the * time now but only when the PIF information changes. *) let update_pifs ~__context host pifs = - match List.length pifs with 0 -> () | _ -> - (* Fetch all physical and bond PIFs from DB. *) - let db_pifs = Db.PIF.get_records_where ~__context - ~expr:(And (Eq (Field "host", Literal (Ref.string_of host)), - Or (Eq (Field "physical", Literal "true"), - Not (Eq (Field "bond_master_of", Literal "()"))))) in - (* Iterate over them, and spot and update changes. *) - List.iter (fun (pifdev, pifrec) -> - begin try - let pif_stats = List.find (fun p -> p.pif_name = pifrec.API.pIF_device) pifs in - let carrier = pif_stats.pif_carrier in - let speed = Int64.of_int pif_stats.pif_speed in - let duplex = match pif_stats.pif_duplex with - | Network_interface.Duplex_full -> true - | Network_interface.Duplex_half -> false - | Network_interface.Duplex_unknown -> false - in - let vendor = pif_stats.pif_vendor_id in - let device = pif_stats.pif_device_id in - let pcibuspath = pif_stats.pif_pci_bus_path in + match List.length pifs with 0 -> () | _ -> + (* Fetch all physical and bond PIFs from DB. *) + let db_pifs = Db.PIF.get_records_where ~__context + ~expr:(And (Eq (Field "host", Literal (Ref.string_of host)), + Or (Eq (Field "physical", Literal "true"), + Not (Eq (Field "bond_master_of", Literal "()"))))) in + (* Iterate over them, and spot and update changes. 
*) + List.iter (fun (pifdev, pifrec) -> + begin try + let pif_stats = List.find (fun p -> p.pif_name = pifrec.API.pIF_device) pifs in + let carrier = pif_stats.pif_carrier in + let speed = Int64.of_int pif_stats.pif_speed in + let duplex = match pif_stats.pif_duplex with + | Network_interface.Duplex_full -> true + | Network_interface.Duplex_half -> false + | Network_interface.Duplex_unknown -> false + in + let vendor = pif_stats.pif_vendor_id in + let device = pif_stats.pif_device_id in + let pcibuspath = pif_stats.pif_pci_bus_path in - (* 1. Update corresponding VIF carrier flags *) - if !Xapi_globs.pass_through_pif_carrier then begin - try - (* Go from physical interface -> bridge -> vif devices. - * Do this for the physical network and any VLANs/tunnels on top of it. *) - let network = pifrec.API.pIF_network in - let vlan_networks = List.map (fun vlan -> - let vlan_master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in - Db.PIF.get_network ~__context ~self:vlan_master - ) pifrec.API.pIF_VLAN_slave_of - in - let tunnel_networks = List.map (fun tunnel -> - let access_pif = Db.Tunnel.get_access_PIF ~__context ~self:tunnel in - Db.PIF.get_network ~__context ~self:access_pif - ) pifrec.API.pIF_tunnel_transport_PIF_of - in - let bridges = List.map (fun network -> Db.Network.get_bridge ~__context ~self:network) - (network :: vlan_networks @ tunnel_networks) in - let dbg = Context.string_of_task __context in - let ifs = List.flatten (List.map (fun bridge -> try Net.Bridge.get_interfaces dbg ~name:bridge with _ -> []) bridges) in - let open Vif_device in - let set_carrier vif = - if vif.pv then ( - let open Xapi_xenops_queue in - let vm_uuid = fst vif.vif in - let queue_name = queue_of_vm ~__context ~self:(Db.VM.get_by_uuid ~__context ~uuid:vm_uuid) in - let module Client = (val make_client queue_name : XENOPS) in - Client.VIF.set_carrier dbg vif.vif carrier |> Xapi_xenops.sync __context queue_name - ) - in List.iter set_carrier (List.filter_map vif_device_of_string 
ifs) - with e -> - debug "Failed to update VIF carrier flags for PIF: %s" (ExnHelper.string_of_exn e) - end; + (* 1. Update corresponding VIF carrier flags *) + if !Xapi_globs.pass_through_pif_carrier then begin + try + (* Go from physical interface -> bridge -> vif devices. + * Do this for the physical network and any VLANs/tunnels on top of it. *) + let network = pifrec.API.pIF_network in + let vlan_networks = List.map (fun vlan -> + let vlan_master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in + Db.PIF.get_network ~__context ~self:vlan_master + ) pifrec.API.pIF_VLAN_slave_of + in + let tunnel_networks = List.map (fun tunnel -> + let access_pif = Db.Tunnel.get_access_PIF ~__context ~self:tunnel in + Db.PIF.get_network ~__context ~self:access_pif + ) pifrec.API.pIF_tunnel_transport_PIF_of + in + let bridges = List.map (fun network -> Db.Network.get_bridge ~__context ~self:network) + (network :: vlan_networks @ tunnel_networks) in + let dbg = Context.string_of_task __context in + let ifs = List.flatten (List.map (fun bridge -> try Net.Bridge.get_interfaces dbg ~name:bridge with _ -> []) bridges) in + let open Vif_device in + let set_carrier vif = + if vif.pv then ( + let open Xapi_xenops_queue in + let vm_uuid = fst vif.vif in + let queue_name = queue_of_vm ~__context ~self:(Db.VM.get_by_uuid ~__context ~uuid:vm_uuid) in + let module Client = (val make_client queue_name : XENOPS) in + Client.VIF.set_carrier dbg vif.vif carrier |> Xapi_xenops.sync __context queue_name + ) + in List.iter set_carrier (List.filter_map vif_device_of_string ifs) + with e -> + debug "Failed to update VIF carrier flags for PIF: %s" (ExnHelper.string_of_exn e) + end; - (* 2. 
Update database *) - let metrics = - (* If PIF metrics don't exist then create them *) - if Db.is_valid_ref __context pifrec.API.pIF_metrics then - pifrec.API.pIF_metrics - else begin - let ref = Ref.make() in - Db.PIF_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~carrier:false - ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" - ~speed:0L ~duplex:false ~pci_bus_path:"" - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) - ~other_config:[]; - Db.PIF.set_metrics ~__context ~self:pifdev ~value:ref; - ref - end - in - let pmr = Db.PIF_metrics.get_record ~__context ~self:metrics in - set_pif_metrics ~__context ~self:metrics ~vendor ~device ~carrier ~speed:speed ~duplex:duplex - ~pcibuspath ~io_write:pif_stats.pif_tx ~io_read:pif_stats.pif_rx pmr; - with Not_found -> () end - ) db_pifs + (* 2. Update database *) + let metrics = + (* If PIF metrics don't exist then create them *) + if Db.is_valid_ref __context pifrec.API.pIF_metrics then + pifrec.API.pIF_metrics + else begin + let ref = Ref.make() in + Db.PIF_metrics.create ~__context ~ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~carrier:false + ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" + ~speed:0L ~duplex:false ~pci_bus_path:"" + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) 
+ ~other_config:[]; + Db.PIF.set_metrics ~__context ~self:pifdev ~value:ref; + ref + end + in + let pmr = Db.PIF_metrics.get_record ~__context ~self:metrics in + set_pif_metrics ~__context ~self:metrics ~vendor ~device ~carrier ~speed:speed ~duplex:duplex + ~pcibuspath ~io_write:pif_stats.pif_tx ~io_read:pif_stats.pif_rx pmr; + with Not_found -> () end + ) db_pifs let update_all ~__context host_stats = - let hostref = host_stats.host_ref in - update_host_metrics ~__context host_stats; - update_pifs ~__context hostref host_stats.pifs; - update_host_cpu ~__context hostref host_stats.pcpus; - List.iter (fun uuid -> - try - let vcpus = List.assoc uuid host_stats.vcpus in - let vifs = List.map snd (List.filter (fun d -> fst d = uuid) host_stats.vifs) in - let vbds = List.map snd (List.filter (fun d -> fst d = uuid) host_stats.vbds) in - let memory = try Some (List.assoc uuid host_stats.mem) with _ -> None in - update_vm_stats ~__context uuid vcpus vbds vifs memory - with e -> - debug "Caught exception: '%s' (uuid=%s)" (Printexc.to_string e) uuid - ) host_stats.registered + let hostref = host_stats.host_ref in + update_host_metrics ~__context host_stats; + update_pifs ~__context hostref host_stats.pifs; + update_host_cpu ~__context hostref host_stats.pcpus; + List.iter (fun uuid -> + try + let vcpus = List.assoc uuid host_stats.vcpus in + let vifs = List.map snd (List.filter (fun d -> fst d = uuid) host_stats.vifs) in + let vbds = List.map snd (List.filter (fun d -> fst d = uuid) host_stats.vbds) in + let memory = try Some (List.assoc uuid host_stats.mem) with _ -> None in + update_vm_stats ~__context uuid vcpus vbds vifs memory + with e -> + debug "Caught exception: '%s' (uuid=%s)" (Printexc.to_string e) uuid + ) host_stats.registered diff --git a/ocaml/xapi/monitor_master.mli b/ocaml/xapi/monitor_master.mli index a5fe42a4142..7d1aed4121f 100644 --- a/ocaml/xapi/monitor_master.mli +++ b/ocaml/xapi/monitor_master.mli @@ -14,7 +14,7 @@ (** Pushing object properties to 
database. * @group Property Monitoring - *) +*) (** This module implements the saving of properties for various objects (e.g. * VIFs, CPUs) into xapi's database. It is also used to regularly update diff --git a/ocaml/xapi/monitor_transfer.ml b/ocaml/xapi/monitor_transfer.ml index 78e0ba55ed7..febc760bc10 100644 --- a/ocaml/xapi/monitor_transfer.ml +++ b/ocaml/xapi/monitor_transfer.ml @@ -13,71 +13,71 @@ *) (** * @group Performance Monitoring - *) - +*) + open Monitor_types let marshall_vifs l = let f x = match x with - (uuid,vif) -> (*(i2,(s,i64_1, i64_2))) ->*) - XMLRPC.To.array - [ - XMLRPC.To.string uuid; - XMLRPC.To.string (string_of_int vif.vif_n); - XMLRPC.To.string vif.vif_name; - XMLRPC.To.string (string_of_float vif.vif_tx); - XMLRPC.To.string (string_of_float vif.vif_rx) - ] in - XMLRPC.To.array (List.map f l) + (uuid,vif) -> (*(i2,(s,i64_1, i64_2))) ->*) + XMLRPC.To.array + [ + XMLRPC.To.string uuid; + XMLRPC.To.string (string_of_int vif.vif_n); + XMLRPC.To.string vif.vif_name; + XMLRPC.To.string (string_of_float vif.vif_tx); + XMLRPC.To.string (string_of_float vif.vif_rx) + ] in + XMLRPC.To.array (List.map f l) let unmarshall_vifs xml = let f xml = match XMLRPC.From.array (fun x->x) xml with - [uuid;i2;s;i64_1;i64_2] -> - (XMLRPC.From.string uuid, - {vif_n=int_of_string (XMLRPC.From.string i2); - vif_name=(XMLRPC.From.string s); - vif_tx=float_of_string (XMLRPC.From.string i64_1); - vif_rx=float_of_string (XMLRPC.From.string i64_2); - vif_raw_tx=0L; - vif_raw_rx=0L; - vif_raw_tx_err=0L; - vif_raw_rx_err=0L}) - | _ -> failwith (Printf.sprintf "unmarshall_vifs unexpected XML: %s" (Xml.to_string xml)) - in - List.map f (XMLRPC.From.array (fun x->x) xml) + [uuid;i2;s;i64_1;i64_2] -> + (XMLRPC.From.string uuid, + {vif_n=int_of_string (XMLRPC.From.string i2); + vif_name=(XMLRPC.From.string s); + vif_tx=float_of_string (XMLRPC.From.string i64_1); + vif_rx=float_of_string (XMLRPC.From.string i64_2); + vif_raw_tx=0L; + vif_raw_rx=0L; + vif_raw_tx_err=0L; + 
vif_raw_rx_err=0L}) + | _ -> failwith (Printf.sprintf "unmarshall_vifs unexpected XML: %s" (Xml.to_string xml)) + in + List.map f (XMLRPC.From.array (fun x->x) xml) let marshall_vbds l = let f x = match x with - (uuid,vbd) -> - XMLRPC.To.array - [ - XMLRPC.To.string uuid; - XMLRPC.To.string (string_of_int vbd.vbd_device_id); - XMLRPC.To.string (string_of_float vbd.vbd_io_read); - XMLRPC.To.string (string_of_float vbd.vbd_io_write); - ] in - XMLRPC.To.array (List.map f l) + (uuid,vbd) -> + XMLRPC.To.array + [ + XMLRPC.To.string uuid; + XMLRPC.To.string (string_of_int vbd.vbd_device_id); + XMLRPC.To.string (string_of_float vbd.vbd_io_read); + XMLRPC.To.string (string_of_float vbd.vbd_io_write); + ] in + XMLRPC.To.array (List.map f l) let unmarshall_vbds xml = let f xml = match XMLRPC.From.array (fun x->x) xml with - [uuid;i2;i64_1;i64_2] -> - (XMLRPC.From.string uuid, - {vbd_device_id=int_of_string (XMLRPC.From.string i2); - vbd_io_read=float_of_string (XMLRPC.From.string i64_1); - vbd_io_write=float_of_string (XMLRPC.From.string i64_2); - vbd_raw_io_read=0L; - vbd_raw_io_write=0L;}) - | _ -> failwith (Printf.sprintf "unmarshall_vbds unexpected XML: %s" (Xml.to_string xml)) - in - List.map f (XMLRPC.From.array (fun x->x) xml) + [uuid;i2;i64_1;i64_2] -> + (XMLRPC.From.string uuid, + {vbd_device_id=int_of_string (XMLRPC.From.string i2); + vbd_io_read=float_of_string (XMLRPC.From.string i64_1); + vbd_io_write=float_of_string (XMLRPC.From.string i64_2); + vbd_raw_io_read=0L; + vbd_raw_io_write=0L;}) + | _ -> failwith (Printf.sprintf "unmarshall_vbds unexpected XML: %s" (Xml.to_string xml)) + in + List.map f (XMLRPC.From.array (fun x->x) xml) let marshall_float_array (a : float array) = let l = Array.to_list a in - XMLRPC.To.array (List.map (fun x -> XMLRPC.To.string (string_of_float x)) l) + XMLRPC.To.array (List.map (fun x -> XMLRPC.To.string (string_of_float x)) l) let unmarshall_float_array xml : float array = Array.of_list (XMLRPC.From.array (fun x -> float_of_string 
(XMLRPC.From.string x)) xml) @@ -86,91 +86,91 @@ let marshall_pcpus pcpus = XMLRPC.To.array [ marshall_float_array pcpus.pcpus_usage ] let unmarshall_pcpus xml = - match XMLRPC.From.array (fun x->x) xml with - [ia] -> {pcpus_usage=unmarshall_float_array ia} - | _ -> failwith (Printf.sprintf "unmarshall_pcpus unexpected XML: %s" (Xml.to_string xml)) + match XMLRPC.From.array (fun x->x) xml with + [ia] -> {pcpus_usage=unmarshall_float_array ia} + | _ -> failwith (Printf.sprintf "unmarshall_pcpus unexpected XML: %s" (Xml.to_string xml)) let marshall_vcpus l = let f x = match x with - (uuid, vcpus) -> - XMLRPC.To.array - [ - XMLRPC.To.string uuid; - XMLRPC.To.string (string_of_float vcpus.vcpu_sumcpus); - marshall_float_array (vcpus.vcpu_vcpus) - ] in - XMLRPC.To.array (List.map f l) + (uuid, vcpus) -> + XMLRPC.To.array + [ + XMLRPC.To.string uuid; + XMLRPC.To.string (string_of_float vcpus.vcpu_sumcpus); + marshall_float_array (vcpus.vcpu_vcpus) + ] in + XMLRPC.To.array (List.map f l) let unmarshall_vcpus xml = let f xml = match XMLRPC.From.array (fun x->x) xml with - [uuid;i;ia] -> - (XMLRPC.From.string uuid, - {vcpu_sumcpus=float_of_string (XMLRPC.From.string i); - vcpu_vcpus=unmarshall_float_array ia; - vcpu_rawvcpus=[| |]; - vcpu_cputime=0L; - }) - | _ -> failwith (Printf.sprintf "unmarshall_vcpus unexpected XML: %s" (Xml.to_string xml)) - in - List.map f (XMLRPC.From.array (fun x->x) xml) + [uuid;i;ia] -> + (XMLRPC.From.string uuid, + {vcpu_sumcpus=float_of_string (XMLRPC.From.string i); + vcpu_vcpus=unmarshall_float_array ia; + vcpu_rawvcpus=[| |]; + vcpu_cputime=0L; + }) + | _ -> failwith (Printf.sprintf "unmarshall_vcpus unexpected XML: %s" (Xml.to_string xml)) + in + List.map f (XMLRPC.From.array (fun x->x) xml) let marshall_memory l = let f x = match x with - (uuid, mem) -> - XMLRPC.To.array - [ - XMLRPC.To.string uuid; - XMLRPC.To.string (Int64.to_string mem.memory_mem) - ] in - XMLRPC.To.array (List.map f l) + (uuid, mem) -> + XMLRPC.To.array + [ + 
XMLRPC.To.string uuid; + XMLRPC.To.string (Int64.to_string mem.memory_mem) + ] in + XMLRPC.To.array (List.map f l) let unmarshall_memory xml = let f xml = match XMLRPC.From.array (fun x->x) xml with - [uuid;i64] -> - (XMLRPC.From.string uuid, - {memory_mem=Int64.of_string (XMLRPC.From.string i64)}) - | _ -> failwith (Printf.sprintf "unmarshall_memory unexpected XML: %s" (Xml.to_string xml)) - in - List.map f (XMLRPC.From.array (fun x->x) xml) + [uuid;i64] -> + (XMLRPC.From.string uuid, + {memory_mem=Int64.of_string (XMLRPC.From.string i64)}) + | _ -> failwith (Printf.sprintf "unmarshall_memory unexpected XML: %s" (Xml.to_string xml)) + in + List.map f (XMLRPC.From.array (fun x->x) xml) let marshall_pifs pifs = - let f x = match x with - | pif -> - XMLRPC.To.array [ - XMLRPC.To.string pif.pif_name; - XMLRPC.To.string (string_of_float pif.pif_tx); - XMLRPC.To.string (string_of_float pif.pif_rx); - XMLRPC.To.string (string_of_bool pif.pif_carrier); - XMLRPC.To.string (string_of_int pif.pif_speed); - XMLRPC.To.string (Network_interface.string_of_duplex pif.pif_duplex); - XMLRPC.To.string pif.pif_pci_bus_path; - XMLRPC.To.string pif.pif_vendor_id; - XMLRPC.To.string pif.pif_device_id; - ] in - XMLRPC.To.array (List.map f pifs) + let f x = match x with + | pif -> + XMLRPC.To.array [ + XMLRPC.To.string pif.pif_name; + XMLRPC.To.string (string_of_float pif.pif_tx); + XMLRPC.To.string (string_of_float pif.pif_rx); + XMLRPC.To.string (string_of_bool pif.pif_carrier); + XMLRPC.To.string (string_of_int pif.pif_speed); + XMLRPC.To.string (Network_interface.string_of_duplex pif.pif_duplex); + XMLRPC.To.string pif.pif_pci_bus_path; + XMLRPC.To.string pif.pif_vendor_id; + XMLRPC.To.string pif.pif_device_id; + ] in + XMLRPC.To.array (List.map f pifs) let unmarshall_pifs xml = - let f xml = match XMLRPC.From.array (fun x -> x) xml with - | [ name; i64_1; i64_2; carrier; speed; duplex; pcibuspath; vendor; device ] -> - {pif_name=XMLRPC.From.string name; - pif_tx=float_of_string 
(XMLRPC.From.string i64_1); - pif_rx=float_of_string (XMLRPC.From.string i64_2); - pif_raw_tx=0L; - pif_raw_rx=0L; (* Ignore these, for RRD only *) - pif_carrier=bool_of_string (XMLRPC.From.string carrier); - pif_speed=int_of_string (XMLRPC.From.string speed); - pif_duplex=Network_interface.duplex_of_string (XMLRPC.From.string duplex); - pif_pci_bus_path=XMLRPC.From.string pcibuspath; - pif_vendor_id=XMLRPC.From.string vendor; - pif_device_id=XMLRPC.From.string device} - | _ -> failwith (Printf.sprintf "unmarshall_pifs unexpected XML: %s" (Xml.to_string xml)) - in - List.map f (XMLRPC.From.array (fun x -> x) xml) + let f xml = match XMLRPC.From.array (fun x -> x) xml with + | [ name; i64_1; i64_2; carrier; speed; duplex; pcibuspath; vendor; device ] -> + {pif_name=XMLRPC.From.string name; + pif_tx=float_of_string (XMLRPC.From.string i64_1); + pif_rx=float_of_string (XMLRPC.From.string i64_2); + pif_raw_tx=0L; + pif_raw_rx=0L; (* Ignore these, for RRD only *) + pif_carrier=bool_of_string (XMLRPC.From.string carrier); + pif_speed=int_of_string (XMLRPC.From.string speed); + pif_duplex=Network_interface.duplex_of_string (XMLRPC.From.string duplex); + pif_pci_bus_path=XMLRPC.From.string pcibuspath; + pif_vendor_id=XMLRPC.From.string vendor; + pif_device_id=XMLRPC.From.string device} + | _ -> failwith (Printf.sprintf "unmarshall_pifs unexpected XML: %s" (Xml.to_string xml)) + in + List.map f (XMLRPC.From.array (fun x -> x) xml) let marshall_uuids uuids = XMLRPC.To.array (List.map XMLRPC.To.string uuids) @@ -192,42 +192,42 @@ let marshall_host_stats hs = marshall_memory hs.mem; marshall_uuids hs.registered ] - + let unmarshall_host_stats xml = match (XMLRPC.From.array (fun x->x) xml) with - [href; i64_1; i64_2; vifs; pifs; vbds; pcpus; vcpus; mem; uuids] -> - {timestamp=0.0; - host_ref=Ref.of_string (XMLRPC.From.string href); - total_kib=Int64.of_string (XMLRPC.From.string i64_1); - free_kib=Int64.of_string (XMLRPC.From.string i64_2); - vifs=unmarshall_vifs vifs; - 
pifs=unmarshall_pifs pifs; - vbds=unmarshall_vbds vbds; - pcpus=unmarshall_pcpus pcpus; - vcpus=unmarshall_vcpus vcpus; - mem=unmarshall_memory mem; - registered=unmarshall_uuids uuids} - | [vifs; pifs; vbds; pcpus; vcpus; mem; hostmetrics; uuids] -> - (* CA-18377: This case supports unmarshalling of data from a Miami host. *) - begin - match (XMLRPC.From.array (fun x->x) hostmetrics) with - [href; i64_1; i64_2] -> - {timestamp=0.0; - host_ref=Ref.of_string (XMLRPC.From.string href); - total_kib=Int64.of_string (XMLRPC.From.string i64_1); - free_kib=Int64.of_string (XMLRPC.From.string i64_2); - vifs=unmarshall_vifs vifs; - pifs=unmarshall_pifs pifs; - vbds=unmarshall_vbds vbds; - pcpus=unmarshall_pcpus pcpus; - vcpus=unmarshall_vcpus vcpus; - mem=unmarshall_memory mem; - registered=unmarshall_uuids uuids} - | _ -> failwith (Printf.sprintf "unmarshall_host_stats unexpected XML: %s" (Xml.to_string xml)) - end - | _ -> failwith (Printf.sprintf "unmarshall_host_stats unexpected XML: %s" (Xml.to_string xml)) - -let marshall hs = + [href; i64_1; i64_2; vifs; pifs; vbds; pcpus; vcpus; mem; uuids] -> + {timestamp=0.0; + host_ref=Ref.of_string (XMLRPC.From.string href); + total_kib=Int64.of_string (XMLRPC.From.string i64_1); + free_kib=Int64.of_string (XMLRPC.From.string i64_2); + vifs=unmarshall_vifs vifs; + pifs=unmarshall_pifs pifs; + vbds=unmarshall_vbds vbds; + pcpus=unmarshall_pcpus pcpus; + vcpus=unmarshall_vcpus vcpus; + mem=unmarshall_memory mem; + registered=unmarshall_uuids uuids} + | [vifs; pifs; vbds; pcpus; vcpus; mem; hostmetrics; uuids] -> + (* CA-18377: This case supports unmarshalling of data from a Miami host. 
*) + begin + match (XMLRPC.From.array (fun x->x) hostmetrics) with + [href; i64_1; i64_2] -> + {timestamp=0.0; + host_ref=Ref.of_string (XMLRPC.From.string href); + total_kib=Int64.of_string (XMLRPC.From.string i64_1); + free_kib=Int64.of_string (XMLRPC.From.string i64_2); + vifs=unmarshall_vifs vifs; + pifs=unmarshall_pifs pifs; + vbds=unmarshall_vbds vbds; + pcpus=unmarshall_pcpus pcpus; + vcpus=unmarshall_vcpus vcpus; + mem=unmarshall_memory mem; + registered=unmarshall_uuids uuids} + | _ -> failwith (Printf.sprintf "unmarshall_host_stats unexpected XML: %s" (Xml.to_string xml)) + end + | _ -> failwith (Printf.sprintf "unmarshall_host_stats unexpected XML: %s" (Xml.to_string xml)) + +let marshall hs = marshall_host_stats hs let unmarshall xml = diff --git a/ocaml/xapi/monitor_types.ml b/ocaml/xapi/monitor_types.ml index 8da712b3a98..a1dd716b75c 100644 --- a/ocaml/xapi/monitor_types.ml +++ b/ocaml/xapi/monitor_types.ml @@ -13,97 +13,97 @@ *) (** Some records for easy passing around of monitor types. 
* @group Performance Monitoring - *) +*) open Stdext open Xstringext type vcpu = { - vcpu_sumcpus: float; - vcpu_vcpus: float array; - vcpu_rawvcpus: Xenctrl.vcpuinfo array; - vcpu_cputime: int64; + vcpu_sumcpus: float; + vcpu_vcpus: float array; + vcpu_rawvcpus: Xenctrl.vcpuinfo array; + vcpu_cputime: int64; } type memory = { - memory_mem: int64; + memory_mem: int64; } type vif = { - vif_n: int; - vif_name: string; - vif_tx: float; - vif_rx: float; - vif_raw_tx: int64; - vif_raw_rx: int64; - vif_raw_tx_err: int64; - vif_raw_rx_err: int64; + vif_n: int; + vif_name: string; + vif_tx: float; + vif_rx: float; + vif_raw_tx: int64; + vif_raw_rx: int64; + vif_raw_tx_err: int64; + vif_raw_rx_err: int64; } type vbd = { - vbd_device_id: int; - vbd_io_read: float; - vbd_io_write: float; - vbd_raw_io_read: int64; - vbd_raw_io_write: int64; + vbd_device_id: int; + vbd_io_read: float; + vbd_io_write: float; + vbd_raw_io_read: int64; + vbd_raw_io_write: int64; } type pcpus = { - pcpus_usage: float array; + pcpus_usage: float array; } type pif = { - pif_name: string; - pif_tx: float; - pif_rx: float; - pif_raw_tx: int64; - pif_raw_rx: int64; - pif_carrier: bool; - pif_speed: int; - pif_duplex: Network_interface.duplex; - pif_pci_bus_path: string; - pif_vendor_id: string; - pif_device_id: string; + pif_name: string; + pif_tx: float; + pif_rx: float; + pif_raw_tx: int64; + pif_raw_rx: int64; + pif_carrier: bool; + pif_speed: int; + pif_duplex: Network_interface.duplex; + pif_pci_bus_path: string; + pif_vendor_id: string; + pif_device_id: string; } type host_stats = { - timestamp: float; - host_ref: [ `host ] Ref.t; - total_kib: int64; - free_kib: int64; + timestamp: float; + host_ref: [ `host ] Ref.t; + total_kib: int64; + free_kib: int64; - pifs : pif list; (* host pif metrics *) - pcpus : pcpus; (* host pcpu usage metrics *) - vbds : (string * vbd) list; (* domain uuid * vbd stats list *) - vifs : (string * vif) list; (* domain uuid * vif stats list *) - vcpus : (string * vcpu) 
list; (* domain uuid to vcpus stats assoc list *) - mem : (string * memory) list; (* domain uuid to memory stats assoc list *) - registered : string list; (* registered domain uuids *) + pifs : pif list; (* host pif metrics *) + pcpus : pcpus; (* host pcpu usage metrics *) + vbds : (string * vbd) list; (* domain uuid * vbd stats list *) + vifs : (string * vif) list; (* domain uuid * vif stats list *) + vcpus : (string * vcpu) list; (* domain uuid to vcpus stats assoc list *) + mem : (string * memory) list; (* domain uuid to memory stats assoc list *) + registered : string list; (* registered domain uuids *) } module Vif_device = struct - type t = { - pv: bool; - vif: Xenops_interface.Vif.id; - domid: int; - devid: int; - } + type t = { + pv: bool; + vif: Xenops_interface.Vif.id; + domid: int; + devid: int; + } end let vif_device_of_string x = - let open Vif_device in - try - let ty = String.sub x 0 3 and params = String.sub_to_end x 3 in - let domid, devid = Scanf.sscanf params "%d.%d" (fun x y -> x,y) in - let di = Xenctrl.with_intf (fun xc -> Xenctrl.domain_getinfo xc domid) in - let uuid = Uuid.uuid_of_int_array di.Xenctrl.handle |> Uuid.to_string in - let vif = (uuid, string_of_int devid) in - match ty with - | "vif" -> Some { pv = true; vif = vif; domid = domid; devid = devid } - | "tap" -> Some { pv = false; vif = vif; domid = domid; devid = devid } - | _ -> failwith "bad device" - with _ -> None + let open Vif_device in + try + let ty = String.sub x 0 3 and params = String.sub_to_end x 3 in + let domid, devid = Scanf.sscanf params "%d.%d" (fun x y -> x,y) in + let di = Xenctrl.with_intf (fun xc -> Xenctrl.domain_getinfo xc domid) in + let uuid = Uuid.uuid_of_int_array di.Xenctrl.handle |> Uuid.to_string in + let vif = (uuid, string_of_int devid) in + match ty with + | "vif" -> Some { pv = true; vif = vif; domid = domid; devid = devid } + | "tap" -> Some { pv = false; vif = vif; domid = domid; devid = devid } + | _ -> failwith "bad device" + with _ -> None 
let string_of_vif_device x = - let open Vif_device in - Printf.sprintf "%s%d.%d" (if x.pv then "vif" else "tap") x.domid x.devid + let open Vif_device in + Printf.sprintf "%s%d.%d" (if x.pv then "vif" else "tap") x.domid x.devid diff --git a/ocaml/xapi/mtc.ml b/ocaml/xapi/mtc.ml index 68e1b733ab8..8bb00bc4d07 100644 --- a/ocaml/xapi/mtc.ml +++ b/ocaml/xapi/mtc.ml @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(* +(* --------------------------------------------------------------------------------- Provides MTC-specific code to integrate with Citrix's XAPI Code @@ -19,7 +19,7 @@ --------------------------------------------------------------------------------- *) -(* +(* * ----------------------------------------------------------------------------- * Include other modules here. * ----------------------------------------------------------------------------- @@ -35,31 +35,31 @@ open DD module Internal = struct -let read_one_line file = - let inchan = open_in file in - try - let result = input_line inchan in - close_in inchan; - result - with exn -> close_in inchan; raise exn + let read_one_line file = + let inchan = open_in file in + try + let result = input_line inchan in + close_in inchan; + result + with exn -> close_in inchan; raise exn end -(* +(* * ----------------------------------------------------------------------------- * Put global constants here. * ----------------------------------------------------------------------------- *) -(* +(* * ----------------------------------------------------------------------------- * Functions related to MTC peer and enabled feature. * ----------------------------------------------------------------------------- *) -(* - * MTC: Newly added value in a VM's other-config field that specifies (when true) - * that this VM is protected. 
By protection we mean high-availability or fault +(* + * MTC: Newly added value in a VM's other-config field that specifies (when true) + * that this VM is protected. By protection we mean high-availability or fault * tolerant protection. *) let vm_protected_key = "vm_protected" @@ -73,13 +73,13 @@ let mtc_vdi_share_key = "mtc_vdi_shareable" * then it returns that value, otherwise, it returns None. *) let get_peer_vm_uuid ~__context ~self = - try Some (List.assoc vm_peer_uuid_key (Db.VM.get_other_config ~__context ~self)) with _ -> None + try Some (List.assoc vm_peer_uuid_key (Db.VM.get_other_config ~__context ~self)) with _ -> None (* * This function looks in the configuration database and examines * the record of the provided VM to see if a peer VM is specified. - * If a peer VM is specified, it returns its VM reference object + * If a peer VM is specified, it returns its VM reference object * representation. Otherwise, it returns a null VM. *) let get_peer_vm ~__context ~self = @@ -89,13 +89,13 @@ let get_peer_vm ~__context ~self = (* If a VM peer was found, then look up in the database the VM's record * using the VM's UUID field as a key. - *) + *) match uuid_str_op with - Some uuid -> - (* debug "VM %s has a peer VM UUID of %s" (Db.VM.get_uuid ~__context ~self) uuid; *) - Db.VM.get_by_uuid ~__context ~uuid - | None -> - Ref.null + Some uuid -> + (* debug "VM %s has a peer VM UUID of %s" (Db.VM.get_uuid ~__context ~self) uuid; *) + Db.VM.get_by_uuid ~__context ~uuid + | None -> + Ref.null (* @@ -104,17 +104,17 @@ let get_peer_vm ~__context ~self = * It will return true if both of these conditions exist. 
*) let is_this_vm_protected ~__context ~self = - try + try let other_config = Db.VM.get_other_config ~__context ~self in let protected = ((List.mem_assoc vm_protected_key other_config) && (List.assoc vm_protected_key other_config)="true") in protected with _ -> false - + (* * This routine is invoke when a request for a migration is received - * at the destination side. It figures out the correct VM configuration - * to be used to instantiate a VM to receive the migrated data. The + * at the destination side. It figures out the correct VM configuration + * to be used to instantiate a VM to receive the migrated data. The * logic says that if the source VM is a protected VM, then we'll * look up its peer VM (the destination) and return that VM to be instantiated. * If it's not protected, then the VM reference returned is that of the @@ -127,8 +127,8 @@ let get_peer_vm_or_self ~__context ~self = if peer_vm <> Ref.null then peer_vm else ( - error "MTC: VM %s was found to be protected but it lacked its peer VM specification" - (Db.VM.get_uuid ~__context ~self); + error "MTC: VM %s was found to be protected but it lacked its peer VM specification" + (Db.VM.get_uuid ~__context ~self); self ) ) @@ -142,11 +142,11 @@ let get_peer_vm_or_self ~__context ~self = * -1 to signal the caller that it should create its own domain. 
*) let use_protected_vm ~__context ~self = - if (is_this_vm_protected ~__context ~self) then - begin + if (is_this_vm_protected ~__context ~self) then + begin let domid = Helpers.domid_of_vm ~__context ~self in debug "This VM (%s) is protected and its currently running in domID = %d" - (Db.VM.get_uuid ~__context ~self) domid; + (Db.VM.get_uuid ~__context ~self) domid; domid; end else @@ -156,35 +156,35 @@ let use_protected_vm ~__context ~self = end -(* +(* * ----------------------------------------------------------------------------- * External Event Related Functions * ----------------------------------------------------------------------------- *) -(* This is the base migration key on which sub-keys will be added to provide +(* This is the base migration key on which sub-keys will be added to provide and receive external events *) -let migration_key = "/migration" -let migration_task_status_key = "/status" -let migration_task_progress_key = "/progress" -let migration_task_error_info_key = "/error_info" -let migration_event_entered_suspend_key = "/entering_fg" -let migration_event_entered_suspend_acked_key = "/entering_fg_acked" +let migration_key = "/migration" +let migration_task_status_key = "/status" +let migration_task_progress_key = "/progress" +let migration_task_error_info_key = "/error_info" +let migration_event_entered_suspend_key = "/entering_fg" +let migration_event_entered_suspend_acked_key = "/entering_fg_acked" let migration_event_abort_req_key = "/abort" (* Converts the Task object's status into a string. 
Any new states that - this code does not recognize will return "unknown" *) + this code does not recognize will return "unknown" *) let string_of_task_status status = - match status with - | `pending -> "pending" - | `success -> "success" - | `failure -> "failure" - | `cancelled -> "cancelled" - | _ -> "unknown" + match status with + | `pending -> "pending" + | `success -> "success" + | `failure -> "failure" + | `cancelled -> "cancelled" + | _ -> "unknown" -(* +(* * ----------------------------------------------------------------------------- * Network Functions * ----------------------------------------------------------------------------- @@ -193,9 +193,9 @@ let string_of_task_status status = * restarted. Returns TRUE if this is a PIF fielding a VIF attached to an MTC- * protected VM and we don't want it marked offline because we have checked here that the * PIF and its bridge are already up. - *) +*) let is_pif_attached_to_mtc_vms_and_should_not_be_offline ~__context ~self = - try + try (* Get the VMs that are hooked up to this PIF *) let network = Db.PIF.get_network ~__context ~self in @@ -203,20 +203,20 @@ let is_pif_attached_to_mtc_vms_and_should_not_be_offline ~__context ~self = (* Figure out the VIFs attached to local MTC VMs and then derive their networks, bridges and PIFs *) - let vms = List.map (fun vif -> - Db.VIF.get_VM ~__context ~self:vif) - vifs in + let vms = List.map (fun vif -> + Db.VIF.get_VM ~__context ~self:vif) + vifs in let localhost = Helpers.get_localhost ~__context in - let resident_vms = List.filter (fun vm -> - localhost = (Db.VM.get_resident_on ~__context ~self:vm)) - vms in - let protected_vms = List.filter (fun vm -> - List.mem_assoc mtc_pvm_key (Db.VM.get_other_config ~__context ~self:vm)) - resident_vms in + let resident_vms = List.filter (fun vm -> + localhost = (Db.VM.get_resident_on ~__context ~self:vm)) + vms in + let protected_vms = List.filter (fun vm -> + List.mem_assoc mtc_pvm_key (Db.VM.get_other_config ~__context 
~self:vm)) + resident_vms in - let protected_vms_uuid = List.map (fun vm -> - Db.VM.get_uuid ~__context ~self:vm) - protected_vms in + let protected_vms_uuid = List.map (fun vm -> + Db.VM.get_uuid ~__context ~self:vm) + protected_vms in (* If we have protected VMs using this PIF, then decide whether it should be marked offline *) @@ -225,10 +225,10 @@ let is_pif_attached_to_mtc_vms_and_should_not_be_offline ~__context ~self = let current = Net.Bridge.get_all dbg () in let bridge = Db.Network.get_bridge ~__context ~self:network in let nic = Db.PIF.get_device ~__context ~self in - debug "The following MTC VMs are using %s for PIF %s: [%s]" - nic - (Db.PIF.get_uuid ~__context ~self) - (String.concat "; " protected_vms_uuid); + debug "The following MTC VMs are using %s for PIF %s: [%s]" + nic + (Db.PIF.get_uuid ~__context ~self) + (String.concat "; " protected_vms_uuid); let nic_device_path = Printf.sprintf "/sys/class/net/%s/operstate" nic in let nic_device_state = Internal.read_one_line nic_device_path in @@ -241,20 +241,20 @@ let is_pif_attached_to_mtc_vms_and_should_not_be_offline ~__context ~self = 2) the bridge link is up and 3) the physical NIC is up and 4) the bridge operational state is up (unknown is also up). - *) - let mark_online = (List.mem bridge current) && - (Net.Interface.is_up dbg ~name:bridge) && - nic_device_state = "up" && - (bridge_device_state = "up" || - bridge_device_state = "unknown") in - - debug "Its current operational state is %s. Therefore we'll be marking it as %s" - nic_device_state (if mark_online then "online" else "offline"); - mark_online + *) + let mark_online = (List.mem bridge current) && + (Net.Interface.is_up dbg ~name:bridge) && + nic_device_state = "up" && + (bridge_device_state = "up" || + bridge_device_state = "unknown") in + + debug "Its current operational state is %s. 
Therefore we'll be marking it as %s" + nic_device_state (if mark_online then "online" else "offline"); + mark_online end else false with _ -> false -(* +(* * ----------------------------------------------------------------------------- * Miscellaneous Functions * ----------------------------------------------------------------------------- @@ -262,7 +262,7 @@ let is_pif_attached_to_mtc_vms_and_should_not_be_offline ~__context ~self = (* * This routine is invoked to update the state of a VM at the end of a migration * receive cycle. For MTC VM's, we may be migrating into a stopped VM and - * we need to then update its state. Normal XenMotion migration does not + * we need to then update its state. Normal XenMotion migration does not * change the VM's state since they expect the source VM (which is the same * as the destination VM) to already be running (otherwise, you couldn't be. * doing a migration to begin with). @@ -286,7 +286,7 @@ let is_vdi_accessed_by_protected_VM ~__context ~vdi = (* Return TRUE if this VDI is attached to a protected VM *) if protected_vdi then begin - debug "VDI %s is attached to a Marathon-protected VM" (Uuid.to_string uuid); - true + debug "VDI %s is attached to a Marathon-protected VM" (Uuid.to_string uuid); + true end else - false + false diff --git a/ocaml/xapi/network.ml b/ocaml/xapi/network.ml index cf978a12ccd..db95b1d360d 100644 --- a/ocaml/xapi/network.ml +++ b/ocaml/xapi/network.ml @@ -23,28 +23,28 @@ module Net = Network_client.Client (* Catch any uncaught networkd exceptions and transform into the most relevant XenAPI error. We do not want a XenAPI client to see a raw network error. 
*) let transform_networkd_exn pif f = - let reraise code params = - error "Re-raising as %s [ %s ]" code (String.concat "; " params); - raise (Api_errors.Server_error(code, params)) in - try - f () - with - | Script_missing script -> - let e = Printf.sprintf "script %s missing" script in - reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] - | Script_error params -> - let e = Printf.sprintf "script error [%s]" (String.concat ", " - (List.map (fun (k, v) -> k ^ " = " ^ v) params)) in - reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] - | Read_error file | Write_error file -> - let e = "failed to access file " ^ file in - reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] - | Not_implemented -> - let e = "networkd function not implemented" in - reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] - | Vlan_in_use (device, vlan) -> - reraise Api_errors.vlan_in_use [device; string_of_int vlan] - | e -> - error "Caught %s while trying to plug a PIF" (ExnHelper.string_of_exn e); - reraise Api_errors.pif_configuration_error [Ref.string_of pif; ""] + let reraise code params = + error "Re-raising as %s [ %s ]" code (String.concat "; " params); + raise (Api_errors.Server_error(code, params)) in + try + f () + with + | Script_missing script -> + let e = Printf.sprintf "script %s missing" script in + reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] + | Script_error params -> + let e = Printf.sprintf "script error [%s]" (String.concat ", " + (List.map (fun (k, v) -> k ^ " = " ^ v) params)) in + reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] + | Read_error file | Write_error file -> + let e = "failed to access file " ^ file in + reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] + | Not_implemented -> + let e = "networkd function not implemented" in + reraise Api_errors.pif_configuration_error [Ref.string_of pif; e] + | Vlan_in_use (device, vlan) -> + reraise 
Api_errors.vlan_in_use [device; string_of_int vlan] + | e -> + error "Caught %s while trying to plug a PIF" (ExnHelper.string_of_exn e); + reraise Api_errors.pif_configuration_error [Ref.string_of pif; ""] diff --git a/ocaml/xapi/network.mli b/ocaml/xapi/network.mli index 147c9f26068..e42f90e12d3 100644 --- a/ocaml/xapi/network.mli +++ b/ocaml/xapi/network.mli @@ -1,146 +1,146 @@ module Net : - sig - val reopen_logs : 'a -> bool - val clear_state : 'a -> unit - val reset_state : 'a -> unit - val set_gateway_interface : - string -> name:Network_interface.iface -> unit - val set_dns_interface : - string -> name:Network_interface.iface -> unit - module Interface : - sig - val get_all : string -> 'a -> string list - val exists : string -> name:Network_interface.iface -> bool - val get_mac : string -> name:Network_interface.iface -> string - val is_up : string -> name:Network_interface.iface -> bool - val get_capabilities: string -> name:Network_interface.iface -> string list - val get_ipv4_addr : - string -> - name:Network_interface.iface -> (Unix.inet_addr * int) list - val set_ipv4_conf : - string -> - name:Network_interface.iface -> - conf:Network_interface.ipv4 -> unit - val get_ipv4_gateway : - string -> name:Network_interface.iface -> Unix.inet_addr option - val set_ipv4_gateway : - string -> - name:Network_interface.iface -> - address:Network_interface.Unix.inet_addr -> unit - val get_ipv6_addr : - string -> - name:Network_interface.iface -> (Unix.inet_addr * int) list - val set_ipv6_conf : - string -> - name:Network_interface.iface -> - conf:Network_interface.ipv6 -> unit - val get_ipv6_gateway : - string -> name:Network_interface.iface -> Unix.inet_addr option - val set_ipv6_gateway : - string -> - name:Network_interface.iface -> - address:Network_interface.Unix.inet_addr -> unit - val set_ipv4_routes : - string -> - name:Network_interface.iface -> - routes:(Network_interface.Unix.inet_addr * int * - Network_interface.Unix.inet_addr) - list -> - unit - val 
get_dns : - string -> - name:Network_interface.iface -> - Unix.inet_addr list * string list - val set_dns : - string -> - name:Network_interface.iface -> - nameservers:Network_interface.Unix.inet_addr list -> - domains:string list -> unit - val get_mtu : string -> name:Network_interface.iface -> int - val set_mtu : - string -> name:Network_interface.iface -> mtu:int -> unit - val set_ethtool_settings : - string -> - name:Network_interface.iface -> - params:(string * string) list -> unit - val set_ethtool_offload : - string -> - name:Network_interface.iface -> - params:(string * string) list -> unit - val is_connected : string -> name:Network_interface.iface -> bool - val is_physical : string -> name:Network_interface.iface -> bool - val has_vlan : string -> name:Network_interface.iface -> vlan:int -> bool - val bring_up : string -> name:Network_interface.iface -> unit - val bring_down : string -> name:Network_interface.iface -> unit - val is_persistent : - string -> name:Network_interface.iface -> bool - val set_persistent : - string -> name:Network_interface.iface -> value:bool -> unit - val make_config : - string -> - ?conservative:bool -> - config:(Network_interface.iface * - Network_interface.interface_config_t) - list -> - 'a -> unit - end - module Bridge : - sig - val get_all : string -> 'a -> string list - val get_bond_links_up : - string -> name:Network_interface.port -> int - val create : - string -> - ?vlan:Network_interface.bridge * int -> - ?mac:string -> - ?other_config:(string * string) list -> - name:Network_interface.bridge -> 'a -> unit - val destroy : - string -> - ?force:bool -> name:Network_interface.bridge -> 'a -> unit - val get_kind : string -> 'a -> Network_interface.kind - val get_ports : - string -> - name:Network_interface.bridge -> (string * string list) list - val get_all_ports : - string -> ?from_cache:bool -> 'a -> (string * string list) list - val get_bonds : - string -> - name:Network_interface.bridge -> (string * string list) list 
- val get_all_bonds : - string -> ?from_cache:bool -> 'a -> (string * string list) list - val is_persistent : - string -> name:Network_interface.bridge -> bool - val set_persistent : - string -> name:Network_interface.bridge -> value:bool -> unit - val get_vlan : - string -> - name:Network_interface.bridge -> (string * int) option - val add_port : - string -> - ?bond_mac:string -> - bridge:Network_interface.bridge -> - name:Network_interface.port -> - interfaces:Network_interface.iface list -> - ?bond_properties:(string * string) list -> 'a -> unit - val remove_port : - string -> - bridge:Network_interface.bridge -> - name:Network_interface.port -> unit - val get_interfaces : - string -> name:Network_interface.bridge -> string list - val get_fail_mode : - string -> - name:Network_interface.bridge -> - Network_interface.fail_mode option - val make_config : - string -> - ?conservative:bool -> - config:(Network_interface.bridge * - Network_interface.bridge_config_t) - list -> - 'a -> unit - end - end +sig + val reopen_logs : 'a -> bool + val clear_state : 'a -> unit + val reset_state : 'a -> unit + val set_gateway_interface : + string -> name:Network_interface.iface -> unit + val set_dns_interface : + string -> name:Network_interface.iface -> unit + module Interface : + sig + val get_all : string -> 'a -> string list + val exists : string -> name:Network_interface.iface -> bool + val get_mac : string -> name:Network_interface.iface -> string + val is_up : string -> name:Network_interface.iface -> bool + val get_capabilities: string -> name:Network_interface.iface -> string list + val get_ipv4_addr : + string -> + name:Network_interface.iface -> (Unix.inet_addr * int) list + val set_ipv4_conf : + string -> + name:Network_interface.iface -> + conf:Network_interface.ipv4 -> unit + val get_ipv4_gateway : + string -> name:Network_interface.iface -> Unix.inet_addr option + val set_ipv4_gateway : + string -> + name:Network_interface.iface -> + 
address:Network_interface.Unix.inet_addr -> unit + val get_ipv6_addr : + string -> + name:Network_interface.iface -> (Unix.inet_addr * int) list + val set_ipv6_conf : + string -> + name:Network_interface.iface -> + conf:Network_interface.ipv6 -> unit + val get_ipv6_gateway : + string -> name:Network_interface.iface -> Unix.inet_addr option + val set_ipv6_gateway : + string -> + name:Network_interface.iface -> + address:Network_interface.Unix.inet_addr -> unit + val set_ipv4_routes : + string -> + name:Network_interface.iface -> + routes:(Network_interface.Unix.inet_addr * int * + Network_interface.Unix.inet_addr) + list -> + unit + val get_dns : + string -> + name:Network_interface.iface -> + Unix.inet_addr list * string list + val set_dns : + string -> + name:Network_interface.iface -> + nameservers:Network_interface.Unix.inet_addr list -> + domains:string list -> unit + val get_mtu : string -> name:Network_interface.iface -> int + val set_mtu : + string -> name:Network_interface.iface -> mtu:int -> unit + val set_ethtool_settings : + string -> + name:Network_interface.iface -> + params:(string * string) list -> unit + val set_ethtool_offload : + string -> + name:Network_interface.iface -> + params:(string * string) list -> unit + val is_connected : string -> name:Network_interface.iface -> bool + val is_physical : string -> name:Network_interface.iface -> bool + val has_vlan : string -> name:Network_interface.iface -> vlan:int -> bool + val bring_up : string -> name:Network_interface.iface -> unit + val bring_down : string -> name:Network_interface.iface -> unit + val is_persistent : + string -> name:Network_interface.iface -> bool + val set_persistent : + string -> name:Network_interface.iface -> value:bool -> unit + val make_config : + string -> + ?conservative:bool -> + config:(Network_interface.iface * + Network_interface.interface_config_t) + list -> + 'a -> unit + end + module Bridge : + sig + val get_all : string -> 'a -> string list + val 
get_bond_links_up : + string -> name:Network_interface.port -> int + val create : + string -> + ?vlan:Network_interface.bridge * int -> + ?mac:string -> + ?other_config:(string * string) list -> + name:Network_interface.bridge -> 'a -> unit + val destroy : + string -> + ?force:bool -> name:Network_interface.bridge -> 'a -> unit + val get_kind : string -> 'a -> Network_interface.kind + val get_ports : + string -> + name:Network_interface.bridge -> (string * string list) list + val get_all_ports : + string -> ?from_cache:bool -> 'a -> (string * string list) list + val get_bonds : + string -> + name:Network_interface.bridge -> (string * string list) list + val get_all_bonds : + string -> ?from_cache:bool -> 'a -> (string * string list) list + val is_persistent : + string -> name:Network_interface.bridge -> bool + val set_persistent : + string -> name:Network_interface.bridge -> value:bool -> unit + val get_vlan : + string -> + name:Network_interface.bridge -> (string * int) option + val add_port : + string -> + ?bond_mac:string -> + bridge:Network_interface.bridge -> + name:Network_interface.port -> + interfaces:Network_interface.iface list -> + ?bond_properties:(string * string) list -> 'a -> unit + val remove_port : + string -> + bridge:Network_interface.bridge -> + name:Network_interface.port -> unit + val get_interfaces : + string -> name:Network_interface.bridge -> string list + val get_fail_mode : + string -> + name:Network_interface.bridge -> + Network_interface.fail_mode option + val make_config : + string -> + ?conservative:bool -> + config:(Network_interface.bridge * + Network_interface.bridge_config_t) + list -> + 'a -> unit + end +end val transform_networkd_exn : [ `PIF ] API.Ref.t -> (unit -> 'a) -> 'a diff --git a/ocaml/xapi/nm.ml b/ocaml/xapi/nm.ml index f1fa3f063c9..19de2541f21 100644 --- a/ocaml/xapi/nm.ml +++ b/ocaml/xapi/nm.ml @@ -29,465 +29,465 @@ let local_m = Mutex.create () let with_local_lock f = Mutex.execute local_m f let is_dom0_interface 
pif_r = - pif_r.API.pIF_ip_configuration_mode <> `None - || pif_r.API.pIF_ipv6_configuration_mode <> `None - || pif_r.API.pIF_physical = true - || pif_r.API.pIF_bond_master_of <> [] + pif_r.API.pIF_ip_configuration_mode <> `None + || pif_r.API.pIF_ipv6_configuration_mode <> `None + || pif_r.API.pIF_physical = true + || pif_r.API.pIF_bond_master_of <> [] let determine_mtu pif_rc net_rc = - let mtu = Int64.to_int net_rc.API.network_MTU in - if List.mem_assoc "mtu" pif_rc.API.pIF_other_config then - let value = List.assoc "mtu" pif_rc.API.pIF_other_config in - try - int_of_string value - with _ -> - debug "Invalid value for mtu = %s" value; - mtu - else - mtu + let mtu = Int64.to_int net_rc.API.network_MTU in + if List.mem_assoc "mtu" pif_rc.API.pIF_other_config then + let value = List.assoc "mtu" pif_rc.API.pIF_other_config in + try + int_of_string value + with _ -> + debug "Invalid value for mtu = %s" value; + mtu + else + mtu let determine_ethtool_settings properties oc = - let proc key = - if List.mem_assoc ("ethtool-" ^ key) oc then - let value = List.assoc ("ethtool-" ^ key) oc in - if value = "true" || value = "on" then - [key, "on"] - else if value = "false" || value = "off" then - [key, "off"] - else begin - debug "Invalid value for ethtool-%s = %s. Must be on|true|off|false." key value; - [] - end - else if List.mem_assoc key properties then - [key, List.assoc key properties] - else - [] - in - let speed = - if List.mem_assoc "ethtool-speed" oc then - let value = List.assoc "ethtool-speed" oc in - if value = "10" || value = "100" || value = "1000" then - ["speed", value] - else begin - debug "Invalid value for ethtool-speed = %s. Must be 10|100|1000." value; - [] - end - else - [] - in - let duplex = - if List.mem_assoc "ethtool-duplex" oc then - let value = List.assoc "ethtool-duplex" oc in - if value = "half" || value = "full" then - ["duplex", value] - else begin - debug "Invalid value for ethtool-duplex = %s. Must be half|full." 
value; - [] - end - else - [] - in - let autoneg = proc "autoneg" in - let settings = speed @ duplex @ autoneg in - let offload = List.flatten (List.map proc ["rx"; "tx"; "sg"; "tso"; "ufo"; "gso"; "gro"; "lro"]) in - settings, offload + let proc key = + if List.mem_assoc ("ethtool-" ^ key) oc then + let value = List.assoc ("ethtool-" ^ key) oc in + if value = "true" || value = "on" then + [key, "on"] + else if value = "false" || value = "off" then + [key, "off"] + else begin + debug "Invalid value for ethtool-%s = %s. Must be on|true|off|false." key value; + [] + end + else if List.mem_assoc key properties then + [key, List.assoc key properties] + else + [] + in + let speed = + if List.mem_assoc "ethtool-speed" oc then + let value = List.assoc "ethtool-speed" oc in + if value = "10" || value = "100" || value = "1000" then + ["speed", value] + else begin + debug "Invalid value for ethtool-speed = %s. Must be 10|100|1000." value; + [] + end + else + [] + in + let duplex = + if List.mem_assoc "ethtool-duplex" oc then + let value = List.assoc "ethtool-duplex" oc in + if value = "half" || value = "full" then + ["duplex", value] + else begin + debug "Invalid value for ethtool-duplex = %s. Must be half|full." 
value; + [] + end + else + [] + in + let autoneg = proc "autoneg" in + let settings = speed @ duplex @ autoneg in + let offload = List.flatten (List.map proc ["rx"; "tx"; "sg"; "tso"; "ufo"; "gso"; "gro"; "lro"]) in + settings, offload let determine_other_config ~__context pif_rc net_rc = - let pif_oc = pif_rc.API.pIF_other_config in - let net_oc = net_rc.API.network_other_config in - let pool_oc = Db.Pool.get_other_config ~__context ~self:(Helpers.get_pool ~__context) in - let additional = ["network-uuids", net_rc.API.network_uuid] in - (pool_oc |> (List.update_assoc net_oc) |> (List.update_assoc pif_oc)) @ additional + let pif_oc = pif_rc.API.pIF_other_config in + let net_oc = net_rc.API.network_other_config in + let pool_oc = Db.Pool.get_other_config ~__context ~self:(Helpers.get_pool ~__context) in + let additional = ["network-uuids", net_rc.API.network_uuid] in + (pool_oc |> (List.update_assoc net_oc) |> (List.update_assoc pif_oc)) @ additional let create_bond ~__context bond mtu persistent = - (* Get all information we need from the DB before doing anything that may drop our - * management connection *) - let master = Db.Bond.get_master ~__context ~self:bond in - let master_rc = Db.PIF.get_record ~__context ~self:master in - let slaves = Db.Bond.get_slaves ~__context ~self:bond in - let slave_devices_bridges_and_config = List.map (fun pif -> - let device = Db.PIF.get_device ~__context ~self:pif in - let bridge = - let network = Db.PIF.get_network ~__context ~self:pif in - Db.Network.get_bridge ~__context ~self:network - in - let other_config = Db.PIF.get_other_config ~__context ~self:pif in - let (ethtool_settings, ethtool_offload) = determine_ethtool_settings master_rc.API.pIF_properties other_config in - let config = {default_interface with mtu; ethtool_settings; ethtool_offload; - persistent_i=persistent} in - device, bridge, config - ) slaves in - let master_net_rc = Db.Network.get_record ~__context ~self:master_rc.API.pIF_network in - let props = 
Db.Bond.get_properties ~__context ~self:bond in - let mode = Db.Bond.get_mode ~__context ~self:bond in - let other_config = determine_other_config ~__context master_rc master_net_rc in - - (* clean up and configure bond slaves *) - let cleanup = List.map (fun (_, bridge, _) -> bridge, true) slave_devices_bridges_and_config in - let interface_config = - List.map (fun (device, _, config) -> device, config) slave_devices_bridges_and_config in - - let port = master_rc.API.pIF_device in - let mac = master_rc.API.pIF_MAC in - - (* set bond properties *) - let props = - let rec get_prop p = - if List.mem_assoc p props - then List.assoc p props - else "" - and get_prop_assoc_if_mode m p = if mode = m - then if List.mem_assoc p props - then [ p, List.assoc p props ] - else [] - else [] - in - - if List.length slaves > 1 then - let hashing_algorithm = get_prop "hashing_algorithm" - and rebalance_interval = if mode = `lacp - then [] - else ["rebalance-interval", "1800000"] - and lacp_timeout = get_prop_assoc_if_mode `lacp "lacp-time" - and lacp_aggregation_key = get_prop_assoc_if_mode `lacp "lacp-aggregation-key" - and lacp_fallback_ab = get_prop_assoc_if_mode `lacp "lacp-fallback-ab" - in - let props = [ - "mode", Record_util.bond_mode_to_string mode; - "miimon", "100"; - "downdelay", "200"; - "updelay", "31000"; - "use_carrier", "1"; - "hashing-algorithm", hashing_algorithm; - ] @ rebalance_interval - @ lacp_timeout - @ lacp_aggregation_key - @ lacp_fallback_ab in - let overrides = List.filter_map (fun (k, v) -> - if String.startswith "bond-" k then - Some ((String.sub_to_end k 5), v) - else - None - ) master_rc.API.pIF_other_config in - (* add defaults for properties that are not overridden *) - (List.filter (fun (k, _) -> not (List.mem_assoc k overrides)) props) @ overrides - else - (* Sometimes a "Bond" is not actually a bond... 
*) - [] - in - - let ports = [port, {interfaces=(List.map (fun (device, _, _) -> device) slave_devices_bridges_and_config); - bond_properties=props; bond_mac=Some mac}] in - cleanup, - [master_net_rc.API.network_bridge, {default_bridge with ports; bridge_mac=(Some mac); other_config; - persistent_b=persistent}], - interface_config + (* Get all information we need from the DB before doing anything that may drop our + * management connection *) + let master = Db.Bond.get_master ~__context ~self:bond in + let master_rc = Db.PIF.get_record ~__context ~self:master in + let slaves = Db.Bond.get_slaves ~__context ~self:bond in + let slave_devices_bridges_and_config = List.map (fun pif -> + let device = Db.PIF.get_device ~__context ~self:pif in + let bridge = + let network = Db.PIF.get_network ~__context ~self:pif in + Db.Network.get_bridge ~__context ~self:network + in + let other_config = Db.PIF.get_other_config ~__context ~self:pif in + let (ethtool_settings, ethtool_offload) = determine_ethtool_settings master_rc.API.pIF_properties other_config in + let config = {default_interface with mtu; ethtool_settings; ethtool_offload; + persistent_i=persistent} in + device, bridge, config + ) slaves in + let master_net_rc = Db.Network.get_record ~__context ~self:master_rc.API.pIF_network in + let props = Db.Bond.get_properties ~__context ~self:bond in + let mode = Db.Bond.get_mode ~__context ~self:bond in + let other_config = determine_other_config ~__context master_rc master_net_rc in + + (* clean up and configure bond slaves *) + let cleanup = List.map (fun (_, bridge, _) -> bridge, true) slave_devices_bridges_and_config in + let interface_config = + List.map (fun (device, _, config) -> device, config) slave_devices_bridges_and_config in + + let port = master_rc.API.pIF_device in + let mac = master_rc.API.pIF_MAC in + + (* set bond properties *) + let props = + let rec get_prop p = + if List.mem_assoc p props + then List.assoc p props + else "" + and get_prop_assoc_if_mode m p 
= if mode = m + then if List.mem_assoc p props + then [ p, List.assoc p props ] + else [] + else [] + in + + if List.length slaves > 1 then + let hashing_algorithm = get_prop "hashing_algorithm" + and rebalance_interval = if mode = `lacp + then [] + else ["rebalance-interval", "1800000"] + and lacp_timeout = get_prop_assoc_if_mode `lacp "lacp-time" + and lacp_aggregation_key = get_prop_assoc_if_mode `lacp "lacp-aggregation-key" + and lacp_fallback_ab = get_prop_assoc_if_mode `lacp "lacp-fallback-ab" + in + let props = [ + "mode", Record_util.bond_mode_to_string mode; + "miimon", "100"; + "downdelay", "200"; + "updelay", "31000"; + "use_carrier", "1"; + "hashing-algorithm", hashing_algorithm; + ] @ rebalance_interval + @ lacp_timeout + @ lacp_aggregation_key + @ lacp_fallback_ab in + let overrides = List.filter_map (fun (k, v) -> + if String.startswith "bond-" k then + Some ((String.sub_to_end k 5), v) + else + None + ) master_rc.API.pIF_other_config in + (* add defaults for properties that are not overridden *) + (List.filter (fun (k, _) -> not (List.mem_assoc k overrides)) props) @ overrides + else + (* Sometimes a "Bond" is not actually a bond... 
*) + [] + in + + let ports = [port, {interfaces=(List.map (fun (device, _, _) -> device) slave_devices_bridges_and_config); + bond_properties=props; bond_mac=Some mac}] in + cleanup, + [master_net_rc.API.network_bridge, {default_bridge with ports; bridge_mac=(Some mac); other_config; + persistent_b=persistent}], + interface_config let destroy_bond ~__context ~force bond = - let master = Db.Bond.get_master ~__context ~self:bond in - let network = Db.PIF.get_network ~__context ~self:master in - [Db.Network.get_bridge ~__context ~self:network, force] + let master = Db.Bond.get_master ~__context ~self:bond in + let network = Db.PIF.get_network ~__context ~self:master in + [Db.Network.get_bridge ~__context ~self:network, force] let create_vlan ~__context vlan persistent = - let master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in - let master_rc = Db.PIF.get_record ~__context ~self:master in - let master_network_rc = Db.Network.get_record ~__context ~self:master_rc.API.pIF_network in + let master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in + let master_rc = Db.PIF.get_record ~__context ~self:master in + let master_network_rc = Db.Network.get_record ~__context ~self:master_rc.API.pIF_network in - let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in - let slave_rc = Db.PIF.get_record ~__context ~self:slave in - let slave_network_rc = Db.Network.get_record ~__context ~self:slave_rc.API.pIF_network in + let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in + let slave_rc = Db.PIF.get_record ~__context ~self:slave in + let slave_network_rc = Db.Network.get_record ~__context ~self:slave_rc.API.pIF_network in - let tag = Int64.to_int (Db.VLAN.get_tag ~__context ~self:vlan) in - let mac = slave_rc.API.pIF_MAC in - let other_config = determine_other_config ~__context master_rc master_network_rc in - let other_config = List.replace_assoc "network-uuids" - (master_network_rc.API.network_uuid ^ ";" ^ slave_network_rc.API.network_uuid) other_config in + 
let tag = Int64.to_int (Db.VLAN.get_tag ~__context ~self:vlan) in + let mac = slave_rc.API.pIF_MAC in + let other_config = determine_other_config ~__context master_rc master_network_rc in + let other_config = List.replace_assoc "network-uuids" + (master_network_rc.API.network_uuid ^ ";" ^ slave_network_rc.API.network_uuid) other_config in - [master_network_rc.API.network_bridge, - {default_bridge with vlan=(Some (slave_network_rc.API.network_bridge, tag)); other_config; - bridge_mac=(Some mac); persistent_b=persistent}] + [master_network_rc.API.network_bridge, + {default_bridge with vlan=(Some (slave_network_rc.API.network_bridge, tag)); other_config; + bridge_mac=(Some mac); persistent_b=persistent}] let destroy_vlan ~__context vlan = - let master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in - let bridge = - let network = Db.PIF.get_network ~__context ~self:master in - Db.Network.get_bridge ~__context ~self:network - in - [bridge, false] + let master = Db.VLAN.get_untagged_PIF ~__context ~self:vlan in + let bridge = + let network = Db.PIF.get_network ~__context ~self:master in + Db.Network.get_bridge ~__context ~self:network + in + [bridge, false] let get_bond pif_rc = - match pif_rc.API.pIF_bond_master_of with - | [] -> None - | bond :: _ -> - Some bond + match pif_rc.API.pIF_bond_master_of with + | [] -> None + | bond :: _ -> + Some bond let get_vlan pif_rc = - if pif_rc.API.pIF_VLAN_master_of = Ref.null then - None - else - Some pif_rc.API.pIF_VLAN_master_of + if pif_rc.API.pIF_VLAN_master_of = Ref.null then + None + else + Some pif_rc.API.pIF_VLAN_master_of let get_tunnel pif_rc = - if pif_rc.API.pIF_tunnel_access_PIF_of = [] then - None - else - Some (List.hd pif_rc.API.pIF_tunnel_access_PIF_of) + if pif_rc.API.pIF_tunnel_access_PIF_of = [] then + None + else + Some (List.hd pif_rc.API.pIF_tunnel_access_PIF_of) let get_pif_type pif_rc = - match get_vlan pif_rc with - | Some vlan -> `vlan_pif vlan - | None -> - match get_bond pif_rc with - | Some bond 
-> `bond_pif bond - | None -> - match get_tunnel pif_rc with - | Some tunnel -> `tunnel_pif tunnel - | None -> `phy_pif + match get_vlan pif_rc with + | Some vlan -> `vlan_pif vlan + | None -> + match get_bond pif_rc with + | Some bond -> `bond_pif bond + | None -> + match get_tunnel pif_rc with + | Some tunnel -> `tunnel_pif tunnel + | None -> `phy_pif let linux_pif_config pif_type pif_rc properties mtu persistent = - (* If we are using linux bridge rather than OVS, then we need to - * configure the "pif" that represents the vlan or bond. - * In OVS there is no such device, so the config entry will be ignored - * by Interface.make_config in xcp-networkd/networkd/network_server.ml *) - let (ethtool_settings, ethtool_offload) = - determine_ethtool_settings properties pif_rc.API.pIF_other_config in - pif_rc.API.pIF_device ^ (match pif_type with - | `bond_pif -> "" - | `vlan_pif -> ("." ^ Int64.to_string pif_rc.API.pIF_VLAN) - ), - {default_interface with mtu; ethtool_settings; ethtool_offload; persistent_i=persistent;} + (* If we are using linux bridge rather than OVS, then we need to + * configure the "pif" that represents the vlan or bond. + * In OVS there is no such device, so the config entry will be ignored + * by Interface.make_config in xcp-networkd/networkd/network_server.ml *) + let (ethtool_settings, ethtool_offload) = + determine_ethtool_settings properties pif_rc.API.pIF_other_config in + pif_rc.API.pIF_device ^ (match pif_type with + | `bond_pif -> "" + | `vlan_pif -> ("." 
^ Int64.to_string pif_rc.API.pIF_VLAN) + ), + {default_interface with mtu; ethtool_settings; ethtool_offload; persistent_i=persistent;} let rec create_bridges ~__context pif_rc net_rc = - let mtu = determine_mtu pif_rc net_rc in - let other_config = determine_other_config ~__context pif_rc net_rc in - let persistent = is_dom0_interface pif_rc in - match get_pif_type pif_rc with - | `tunnel_pif _ -> - [], - [net_rc.API.network_bridge, {default_bridge with bridge_mac=(Some pif_rc.API.pIF_MAC); - other_config; persistent_b=persistent}], - [] - | `vlan_pif vlan -> - let original_pif_rc = pif_rc in - let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in - let pif_rc = Db.PIF.get_record ~__context ~self:slave in - let net_rc = Db.Network.get_record ~__context ~self:pif_rc.API.pIF_network in - let cleanup, bridge_config, interface_config = create_bridges ~__context pif_rc net_rc in - let interface_config = (* Add configuration for the vlan device itself *) - linux_pif_config `vlan_pif original_pif_rc pif_rc.API.pIF_properties mtu persistent - :: interface_config - in - cleanup, - create_vlan ~__context vlan persistent @ bridge_config, - interface_config - | `bond_pif bond -> - let cleanup, bridge_config, interface_config = create_bond ~__context bond mtu persistent in - let interface_config = (* Add configuration for the bond pif itself *) - linux_pif_config `bond_pif pif_rc pif_rc.API.pIF_properties mtu persistent - :: interface_config - in - cleanup, bridge_config, interface_config - | `phy_pif -> - let cleanup = - if pif_rc.API.pIF_bond_slave_of <> Ref.null then - destroy_bond ~__context ~force:true pif_rc.API.pIF_bond_slave_of - else - [] - in - let (ethtool_settings, ethtool_offload) = - determine_ethtool_settings pif_rc.API.pIF_properties pif_rc.API.pIF_other_config in - let ports = [pif_rc.API.pIF_device, {default_port with interfaces=[pif_rc.API.pIF_device]}] in - cleanup, - [net_rc.API.network_bridge, {default_bridge with ports; bridge_mac=(Some 
pif_rc.API.pIF_MAC); - other_config; persistent_b=persistent}], - [pif_rc.API.pIF_device, {default_interface with mtu; ethtool_settings; ethtool_offload; persistent_i=persistent}] + let mtu = determine_mtu pif_rc net_rc in + let other_config = determine_other_config ~__context pif_rc net_rc in + let persistent = is_dom0_interface pif_rc in + match get_pif_type pif_rc with + | `tunnel_pif _ -> + [], + [net_rc.API.network_bridge, {default_bridge with bridge_mac=(Some pif_rc.API.pIF_MAC); + other_config; persistent_b=persistent}], + [] + | `vlan_pif vlan -> + let original_pif_rc = pif_rc in + let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in + let pif_rc = Db.PIF.get_record ~__context ~self:slave in + let net_rc = Db.Network.get_record ~__context ~self:pif_rc.API.pIF_network in + let cleanup, bridge_config, interface_config = create_bridges ~__context pif_rc net_rc in + let interface_config = (* Add configuration for the vlan device itself *) + linux_pif_config `vlan_pif original_pif_rc pif_rc.API.pIF_properties mtu persistent + :: interface_config + in + cleanup, + create_vlan ~__context vlan persistent @ bridge_config, + interface_config + | `bond_pif bond -> + let cleanup, bridge_config, interface_config = create_bond ~__context bond mtu persistent in + let interface_config = (* Add configuration for the bond pif itself *) + linux_pif_config `bond_pif pif_rc pif_rc.API.pIF_properties mtu persistent + :: interface_config + in + cleanup, bridge_config, interface_config + | `phy_pif -> + let cleanup = + if pif_rc.API.pIF_bond_slave_of <> Ref.null then + destroy_bond ~__context ~force:true pif_rc.API.pIF_bond_slave_of + else + [] + in + let (ethtool_settings, ethtool_offload) = + determine_ethtool_settings pif_rc.API.pIF_properties pif_rc.API.pIF_other_config in + let ports = [pif_rc.API.pIF_device, {default_port with interfaces=[pif_rc.API.pIF_device]}] in + cleanup, + [net_rc.API.network_bridge, {default_bridge with ports; bridge_mac=(Some 
pif_rc.API.pIF_MAC); + other_config; persistent_b=persistent}], + [pif_rc.API.pIF_device, {default_interface with mtu; ethtool_settings; ethtool_offload; persistent_i=persistent}] let rec destroy_bridges ~__context ~force pif_rc bridge = - match get_pif_type pif_rc with - | `tunnel_pif _ -> - [bridge, false] - | `vlan_pif vlan -> - let cleanup = destroy_vlan ~__context vlan in - let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in - let rc = Db.PIF.get_record ~__context ~self:slave in - if not rc.API.pIF_currently_attached then - let bridge = Db.Network.get_bridge ~__context ~self:rc.API.pIF_network in - (destroy_bridges ~__context ~force rc bridge) @ cleanup - else - cleanup - | `bond_pif bond -> - destroy_bond ~__context ~force bond - | `phy_pif -> - [bridge, false] + match get_pif_type pif_rc with + | `tunnel_pif _ -> + [bridge, false] + | `vlan_pif vlan -> + let cleanup = destroy_vlan ~__context vlan in + let slave = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in + let rc = Db.PIF.get_record ~__context ~self:slave in + if not rc.API.pIF_currently_attached then + let bridge = Db.Network.get_bridge ~__context ~self:rc.API.pIF_network in + (destroy_bridges ~__context ~force rc bridge) @ cleanup + else + cleanup + | `bond_pif bond -> + destroy_bond ~__context ~force bond + | `phy_pif -> + [bridge, false] let determine_static_routes net_rc = - if List.mem_assoc "static-routes" net_rc.API.network_other_config then - try - let routes = String.split ',' (List.assoc "static-routes" net_rc.API.network_other_config) in - List.map (fun route -> Scanf.sscanf route "%[^/]/%d/%[^/]" (fun a b c -> Unix.inet_addr_of_string a, b, Unix.inet_addr_of_string c)) routes - with _ -> [] - else - [] + if List.mem_assoc "static-routes" net_rc.API.network_other_config then + try + let routes = String.split ',' (List.assoc "static-routes" net_rc.API.network_other_config) in + List.map (fun route -> Scanf.sscanf route "%[^/]/%d/%[^/]" (fun a b c -> Unix.inet_addr_of_string a, b, 
Unix.inet_addr_of_string c)) routes + with _ -> [] + else + [] let bring_pif_up ~__context ?(management_interface=false) (pif: API.ref_PIF) = - with_local_lock (fun () -> - let dbg = Context.string_of_task __context in - let rc = Db.PIF.get_record ~__context ~self:pif in - let net_rc = Db.Network.get_record ~__context ~self:rc.API.pIF_network in - let bridge = net_rc.API.network_bridge in - - (* Call networkd even if currently_attached is false, just to update its state *) - debug "Making sure that PIF %s is up" rc.API.pIF_uuid; - - let old_ip = try Net.Interface.get_ipv4_addr dbg ~name:bridge with _ -> [] in - - (* If the PIF is a bond master, the bond slaves will now go down *) - (* Interface-reconfigure in bridge mode requires us to set currently_attached to false here *) - begin match rc.API.pIF_bond_master_of with - | [] -> () - | bond :: _ -> - let slaves = Db.Bond.get_slaves ~__context ~self:bond in - List.iter (fun self -> Db.PIF.set_currently_attached ~__context ~self ~value:false) slaves - end; - - Network.transform_networkd_exn pif (fun () -> - let persistent = is_dom0_interface rc in - let gateway_if, dns_if = Helpers.determine_gateway_and_dns_ifs ~__context - ?management_interface:(if management_interface then Some pif else None) () in - Opt.iter (fun (_, name) -> Net.set_gateway_interface dbg ~name) gateway_if; - Opt.iter (fun (_, name) -> Net.set_dns_interface dbg ~name) dns_if; - - (* Setup network infrastructure *) - let cleanup, bridge_config, interface_config = create_bridges ~__context rc net_rc in - List.iter (fun (name, force) -> Net.Bridge.destroy dbg ~name ~force ()) cleanup; - Net.Bridge.make_config dbg ~config:bridge_config (); - Net.Interface.make_config dbg ~config:interface_config (); - - (* Configure IPv4 parameters and DNS *) - let ipv4_conf, ipv4_gateway, dns = - match rc.API.pIF_ip_configuration_mode with - | `None -> None4, None, ([], []) - | `DHCP -> DHCP4, None, ([], []) - | `Static -> - let conf = (Static4 [ - 
Unix.inet_addr_of_string rc.API.pIF_IP, - netmask_to_prefixlen rc.API.pIF_netmask]) in - let gateway = - if rc.API.pIF_gateway <> "" then - Some (Unix.inet_addr_of_string rc.API.pIF_gateway) - else - None in - let dns = - if rc.API.pIF_DNS <> "" then begin - let nameservers = List.map Unix.inet_addr_of_string (String.split ',' rc.API.pIF_DNS) in - let domains = - if List.mem_assoc "domain" rc.API.pIF_other_config then - let domains = List.assoc "domain" rc.API.pIF_other_config in - try - String.split ',' domains - with _ -> - warn "Invalid DNS search domains: %s" domains; - [] - else - [] - in - nameservers, domains - end else - [], [] - in - conf, gateway, dns - in - let ipv4_routes = determine_static_routes net_rc in - - (* Configure IPv6 parameters *) - let ipv6_conf, ipv6_gateway = - match rc.API.pIF_ipv6_configuration_mode with - | `None -> Linklocal6, None - | `DHCP -> DHCP6, None - | `Autoconf -> Autoconf6, None - | `Static -> - let addresses = List.filter_map (fun addr_and_prefixlen -> - try - let n = String.index addr_and_prefixlen '/' in - let addr = Unix.inet_addr_of_string (String.sub addr_and_prefixlen 0 n) in - let prefixlen = int_of_string (String.sub_to_end addr_and_prefixlen (n + 1)) in - Some (addr, prefixlen) - with _ -> None - ) rc.API.pIF_IPv6 in - let conf = Static6 addresses in - let gateway = - if rc.API.pIF_ipv6_gateway <> "" then - Some (Unix.inet_addr_of_string rc.API.pIF_ipv6_gateway) - else - None in - conf, gateway - in - - let mtu = determine_mtu rc net_rc in - let (ethtool_settings, ethtool_offload) = - determine_ethtool_settings rc.API.pIF_properties net_rc.API.network_other_config in - let interface_config = [bridge, {ipv4_conf; ipv4_gateway; ipv6_conf; ipv6_gateway; - ipv4_routes; dns; ethtool_settings; ethtool_offload; mtu; persistent_i=persistent}] in - Net.Interface.make_config dbg ~config:interface_config () - ); - - let new_ip = try Net.Interface.get_ipv4_addr dbg ~name:bridge with _ -> [] in - if new_ip <> old_ip then begin 
- warn "An IP address of dom0 was changed"; - warn "About to kill idle client stunnels"; - (* The master_connection would otherwise try to take a broken stunnel from the cache *) - Stunnel_cache.flush (); - warn "About to forcibly reset the master connection"; - Master_connection.force_connection_reset () - end; - - if rc.API.pIF_currently_attached = false || management_interface then begin - if management_interface then begin - warn "About to kill active client stunnels"; - let stunnels = - let all = Locking_helpers.Thread_state.get_all_acquired_resources () in - debug "There are %d allocated resources" (List.length all); - List.filter (function Locking_helpers.Process("stunnel", _) -> true | _ -> false) all in - debug "Of which %d are stunnels" (List.length stunnels); - List.iter Locking_helpers.kill_resource stunnels; - end; - - Db.PIF.set_currently_attached ~__context ~self:pif ~value:true; - - (* If the PIF is a bond slave, the bond master will now be down *) - begin match rc.API.pIF_bond_slave_of with - | bond when bond = Ref.null -> () - | bond -> - let master = Db.Bond.get_master ~__context ~self:bond in - Db.PIF.set_currently_attached ~__context ~self:master ~value:false - end; - - (* sync MTU *) - (try - let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in - Db.PIF.set_MTU ~__context ~self:pif ~value:mtu - with _ -> - debug "could not update MTU field on PIF %s" rc.API.pIF_uuid - ); - - Xapi_mgmt_iface.on_dom0_networking_change ~__context - end - ) + with_local_lock (fun () -> + let dbg = Context.string_of_task __context in + let rc = Db.PIF.get_record ~__context ~self:pif in + let net_rc = Db.Network.get_record ~__context ~self:rc.API.pIF_network in + let bridge = net_rc.API.network_bridge in + + (* Call networkd even if currently_attached is false, just to update its state *) + debug "Making sure that PIF %s is up" rc.API.pIF_uuid; + + let old_ip = try Net.Interface.get_ipv4_addr dbg ~name:bridge with _ -> [] in + + (* If the PIF is a bond 
master, the bond slaves will now go down *) + (* Interface-reconfigure in bridge mode requires us to set currently_attached to false here *) + begin match rc.API.pIF_bond_master_of with + | [] -> () + | bond :: _ -> + let slaves = Db.Bond.get_slaves ~__context ~self:bond in + List.iter (fun self -> Db.PIF.set_currently_attached ~__context ~self ~value:false) slaves + end; + + Network.transform_networkd_exn pif (fun () -> + let persistent = is_dom0_interface rc in + let gateway_if, dns_if = Helpers.determine_gateway_and_dns_ifs ~__context + ?management_interface:(if management_interface then Some pif else None) () in + Opt.iter (fun (_, name) -> Net.set_gateway_interface dbg ~name) gateway_if; + Opt.iter (fun (_, name) -> Net.set_dns_interface dbg ~name) dns_if; + + (* Setup network infrastructure *) + let cleanup, bridge_config, interface_config = create_bridges ~__context rc net_rc in + List.iter (fun (name, force) -> Net.Bridge.destroy dbg ~name ~force ()) cleanup; + Net.Bridge.make_config dbg ~config:bridge_config (); + Net.Interface.make_config dbg ~config:interface_config (); + + (* Configure IPv4 parameters and DNS *) + let ipv4_conf, ipv4_gateway, dns = + match rc.API.pIF_ip_configuration_mode with + | `None -> None4, None, ([], []) + | `DHCP -> DHCP4, None, ([], []) + | `Static -> + let conf = (Static4 [ + Unix.inet_addr_of_string rc.API.pIF_IP, + netmask_to_prefixlen rc.API.pIF_netmask]) in + let gateway = + if rc.API.pIF_gateway <> "" then + Some (Unix.inet_addr_of_string rc.API.pIF_gateway) + else + None in + let dns = + if rc.API.pIF_DNS <> "" then begin + let nameservers = List.map Unix.inet_addr_of_string (String.split ',' rc.API.pIF_DNS) in + let domains = + if List.mem_assoc "domain" rc.API.pIF_other_config then + let domains = List.assoc "domain" rc.API.pIF_other_config in + try + String.split ',' domains + with _ -> + warn "Invalid DNS search domains: %s" domains; + [] + else + [] + in + nameservers, domains + end else + [], [] + in + conf, 
gateway, dns + in + let ipv4_routes = determine_static_routes net_rc in + + (* Configure IPv6 parameters *) + let ipv6_conf, ipv6_gateway = + match rc.API.pIF_ipv6_configuration_mode with + | `None -> Linklocal6, None + | `DHCP -> DHCP6, None + | `Autoconf -> Autoconf6, None + | `Static -> + let addresses = List.filter_map (fun addr_and_prefixlen -> + try + let n = String.index addr_and_prefixlen '/' in + let addr = Unix.inet_addr_of_string (String.sub addr_and_prefixlen 0 n) in + let prefixlen = int_of_string (String.sub_to_end addr_and_prefixlen (n + 1)) in + Some (addr, prefixlen) + with _ -> None + ) rc.API.pIF_IPv6 in + let conf = Static6 addresses in + let gateway = + if rc.API.pIF_ipv6_gateway <> "" then + Some (Unix.inet_addr_of_string rc.API.pIF_ipv6_gateway) + else + None in + conf, gateway + in + + let mtu = determine_mtu rc net_rc in + let (ethtool_settings, ethtool_offload) = + determine_ethtool_settings rc.API.pIF_properties net_rc.API.network_other_config in + let interface_config = [bridge, {ipv4_conf; ipv4_gateway; ipv6_conf; ipv6_gateway; + ipv4_routes; dns; ethtool_settings; ethtool_offload; mtu; persistent_i=persistent}] in + Net.Interface.make_config dbg ~config:interface_config () + ); + + let new_ip = try Net.Interface.get_ipv4_addr dbg ~name:bridge with _ -> [] in + if new_ip <> old_ip then begin + warn "An IP address of dom0 was changed"; + warn "About to kill idle client stunnels"; + (* The master_connection would otherwise try to take a broken stunnel from the cache *) + Stunnel_cache.flush (); + warn "About to forcibly reset the master connection"; + Master_connection.force_connection_reset () + end; + + if rc.API.pIF_currently_attached = false || management_interface then begin + if management_interface then begin + warn "About to kill active client stunnels"; + let stunnels = + let all = Locking_helpers.Thread_state.get_all_acquired_resources () in + debug "There are %d allocated resources" (List.length all); + List.filter (function 
Locking_helpers.Process("stunnel", _) -> true | _ -> false) all in + debug "Of which %d are stunnels" (List.length stunnels); + List.iter Locking_helpers.kill_resource stunnels; + end; + + Db.PIF.set_currently_attached ~__context ~self:pif ~value:true; + + (* If the PIF is a bond slave, the bond master will now be down *) + begin match rc.API.pIF_bond_slave_of with + | bond when bond = Ref.null -> () + | bond -> + let master = Db.Bond.get_master ~__context ~self:bond in + Db.PIF.set_currently_attached ~__context ~self:master ~value:false + end; + + (* sync MTU *) + (try + let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in + Db.PIF.set_MTU ~__context ~self:pif ~value:mtu + with _ -> + debug "could not update MTU field on PIF %s" rc.API.pIF_uuid + ); + + Xapi_mgmt_iface.on_dom0_networking_change ~__context + end + ) let bring_pif_down ~__context ?(force=false) (pif: API.ref_PIF) = - with_local_lock (fun () -> - Network.transform_networkd_exn pif (fun () -> - let dbg = Context.string_of_task __context in - let rc = Db.PIF.get_record ~__context ~self:pif in - debug "Making sure that PIF %s down" rc.API.pIF_uuid; - - let bridge = Db.Network.get_bridge ~__context ~self:rc.API.pIF_network in - let cleanup = destroy_bridges ~__context ~force rc bridge in - List.iter (fun (name, force) -> Net.Bridge.destroy dbg ~name ~force ()) cleanup; - Net.Interface.set_persistent dbg ~name:bridge ~value:false; - - Db.PIF.set_currently_attached ~__context ~self:pif ~value:false - ) - ) + with_local_lock (fun () -> + Network.transform_networkd_exn pif (fun () -> + let dbg = Context.string_of_task __context in + let rc = Db.PIF.get_record ~__context ~self:pif in + debug "Making sure that PIF %s down" rc.API.pIF_uuid; + + let bridge = Db.Network.get_bridge ~__context ~self:rc.API.pIF_network in + let cleanup = destroy_bridges ~__context ~force rc bridge in + List.iter (fun (name, force) -> Net.Bridge.destroy dbg ~name ~force ()) cleanup; + Net.Interface.set_persistent dbg 
~name:bridge ~value:false; + + Db.PIF.set_currently_attached ~__context ~self:pif ~value:false + ) + ) diff --git a/ocaml/xapi/nm.mli b/ocaml/xapi/nm.mli index aafbcbb5382..5664f82bb5a 100644 --- a/ocaml/xapi/nm.mli +++ b/ocaml/xapi/nm.mli @@ -13,13 +13,13 @@ *) (** Helper module to plug and unplug PIFs * @group Networking - *) +*) (** Calls the [interface-reconfigure] script to bring up a PIF on this host. The script will be skipped if * PIF.currently_attached is still marked as [true] {i unless} [management_interface] is set. * The [management_interface] argument determines whether this PIF is {i going} to become the management * interface in the future. - *) +*) val bring_pif_up : __context:Context.t -> ?management_interface:bool -> API.ref_PIF -> unit (** Calls the [interface-reconfigure] script to take down a PIF on this host *) @@ -28,6 +28,6 @@ val bring_pif_down : __context:Context.t -> ?force:bool -> API.ref_PIF -> unit (** Execute a given function under the control of a mutex *) val with_local_lock : (unit -> 'a) -> 'a -(** [is_dom0_interface pif_r] returns true if pif_r is a network interface - which has a dom0 endpoint *) +(** [is_dom0_interface pif_r] returns true if pif_r is a network interface + which has a dom0 endpoint *) val is_dom0_interface : API.pIF_t -> bool diff --git a/ocaml/xapi/parse_db_conf.ml b/ocaml/xapi/parse_db_conf.ml index 313636a7d25..6a73955b6cf 100644 --- a/ocaml/xapi/parse_db_conf.ml +++ b/ocaml/xapi/parse_db_conf.ml @@ -21,15 +21,15 @@ open D type db_connection_mode = Write_limit | No_limit type db_connection = - {path:string; - mode:db_connection_mode; - compress:bool; - write_limit_period:int; - write_limit_write_cycles:int; - is_on_remote_storage:bool; - other_parameters:(string*string) list; - mutable last_generation_count: Generation.t; - } + {path:string; + mode:db_connection_mode; + compress:bool; + write_limit_period:int; + write_limit_write_cycles:int; + is_on_remote_storage:bool; + other_parameters:(string*string) 
list; + mutable last_generation_count: Generation.t; + } let default_write_limit_period = 21600 (* 6 hours *) let default_write_cycles = 10 @@ -50,8 +50,8 @@ let make path = { dummy_conf with path = path } let generation_filename dbconn = dbconn.path ^ Generation.suffix (** Return the generation of a given database 'connection'. Note we normally - expect the database file and the generation file to be present together; - however after upgrade only the database file will be present. *) + expect the database file and the generation file to be present together; + however after upgrade only the database file will be present. *) let generation_read dbconn = let gencount_fname = generation_filename dbconn in try Generation.of_string (Unixext.string_of_file gencount_fname) with _ -> 0L @@ -59,8 +59,8 @@ let generation_read dbconn = (* The db conf used for bootstrap purposes, e.g. mounting the 'real' db on shared storage *) let db_snapshot_dbconn = {dummy_conf with - path=Xapi_globs.snapshot_db -} + path=Xapi_globs.snapshot_db + } let from_mode v = match v with @@ -71,12 +71,12 @@ let from_block r = String.concat "" [ Printf.sprintf - "[%s]\nmode:%s\nformat:xml\ncompress:%b\nis_on_remote_storage:%b\n" - r.path (from_mode r.mode) r.compress - r.is_on_remote_storage; + "[%s]\nmode:%s\nformat:xml\ncompress:%b\nis_on_remote_storage:%b\n" + r.path (from_mode r.mode) r.compress + r.is_on_remote_storage; if r.mode = Write_limit then - Printf.sprintf "write_limit_period:%d\nwrite_limit_write_cycles:%d\n" - r.write_limit_period r.write_limit_write_cycles + Printf.sprintf "write_limit_period:%d\nwrite_limit_write_cycles:%d\n" + r.write_limit_period r.write_limit_write_cycles else ""; String.concat "" (List.map (fun (k,v) -> Printf.sprintf "%s:%s\n" k v) r.other_parameters) ] @@ -111,25 +111,25 @@ let parse_db_conf s = consume_line(); let key_values = ref [] in while (!lines<>[] && (List.hd !lines)<>"") do - let line = List.hd !lines in - key_values := (match (String.split ':' line) 
with - k::vs->(String.lowercase k,String.lowercase (String.concat ":" vs)) - | _ -> failwith (Printf.sprintf "Failed to parse: %s" line) - )::!key_values; - consume_line(); + let line = List.hd !lines in + key_values := (match (String.split ':' line) with + k::vs->(String.lowercase k,String.lowercase (String.concat ":" vs)) + | _ -> failwith (Printf.sprintf "Failed to parse: %s" line) + )::!key_values; + consume_line(); done; (* if the key_name exists then return the value; otherwise return the default. - if the key_name exists we remove the value from the association list -- this is so at the end of - populating the record what we have left are the "other_fields" *) + if the key_name exists we remove the value from the association list -- this is so at the end of + populating the record what we have left are the "other_fields" *) let maybe_put_in key_name default conv_fn = - if List.mem_assoc key_name !key_values then - begin - let value = List.assoc key_name !key_values in - key_values := List.remove_assoc key_name !key_values; - conv_fn value - end - else default in + if List.mem_assoc key_name !key_values then + begin + let value = List.assoc key_name !key_values in + key_values := List.remove_assoc key_name !key_values; + conv_fn value + end + else default in {path=path; mode=maybe_put_in "mode" (* key name *) No_limit (* default if key not present *) to_mode (* fn to conv string->mode type *); compress = maybe_put_in "compress" false bool_of_string; @@ -137,13 +137,13 @@ let parse_db_conf s = write_limit_period=maybe_put_in "write_limit_period" default_write_limit_period int_of_string; write_limit_write_cycles=maybe_put_in "write_limit_write_cycles" default_write_cycles int_of_string; other_parameters = !key_values; (* the things remaining in key_values at this point are the ones we haven't parsed out explicitly above.. 
*) - last_generation_count = Generation.null_generation; + last_generation_count = Generation.null_generation; } in let connections : db_connection list ref = ref [] in while !lines<>[] do let line = List.hd !lines in if String.startswith "[" line then - connections := read_block() :: !connections + connections := read_block() :: !connections else consume_line() done; sanity_check !connections; @@ -158,11 +158,11 @@ let get_db_conf path = else begin warn "No db_conf file. Using default"; [{path="/var/lib/xcp/state.db"; - mode=No_limit; - compress=false; - is_on_remote_storage=false; - write_limit_period=default_write_limit_period; - write_limit_write_cycles=default_write_cycles; - other_parameters=["available_this_boot","true"]; - last_generation_count=Generation.null_generation}] + mode=No_limit; + compress=false; + is_on_remote_storage=false; + write_limit_period=default_write_limit_period; + write_limit_write_cycles=default_write_cycles; + other_parameters=["available_this_boot","true"]; + last_generation_count=Generation.null_generation}] end diff --git a/ocaml/xapi/pciops.ml b/ocaml/xapi/pciops.ml index 42db7314181..fc09dec8f49 100644 --- a/ocaml/xapi/pciops.ml +++ b/ocaml/xapi/pciops.ml @@ -22,9 +22,9 @@ open Threadext let m = Mutex.create () let get_free_functions ~__context pci = - let assignments = List.length (Db.PCI.get_attached_VMs ~__context ~self:pci) in - let functions = Int64.to_int (Db.PCI.get_functions ~__context ~self:pci) in - functions - assignments + let assignments = List.length (Db.PCI.get_attached_VMs ~__context ~self:pci) in + let functions = Int64.to_int (Db.PCI.get_functions ~__context ~self:pci) in + functions - assignments (* http://wiki.xen.org/wiki/Bus:Device.Function_%28BDF%29_Notation *) (* It might be possible to refactor this but attempts so far have failed. 
*) @@ -35,115 +35,115 @@ let bdf_paren_prnt_fmt = format_of_string "(%04x:%02x:%02x.%01x)" let bdf_paren_scan_fmt = format_of_string "(%04x:%02x:%02x.%01x)" let pcidev_of_pci ~__context pci = - let bdf_str = Db.PCI.get_pci_id ~__context ~self:pci in - Scanf.sscanf bdf_str bdf_fmt (fun a b c d -> (a, b, c, d)) + let bdf_str = Db.PCI.get_pci_id ~__context ~self:pci in + Scanf.sscanf bdf_str bdf_fmt (fun a b c d -> (a, b, c, d)) (* Confusion: the n/xxxx:xx:xx.x syntax originally meant PCI device xxxx:xx:xx.x should be plugged into bus number n. HVM guests don't have multiple PCI buses anyway. We reinterpret the 'n' to be a hotplug ordering *) let sort_pcidevs devs = - let ids = List.sort compare (Listext.List.setify (List.map fst devs)) in - List.map (fun id -> - id, (List.map snd (List.filter (fun (x, _) -> x = id) devs)) - ) ids + let ids = List.sort compare (Listext.List.setify (List.map fst devs)) in + List.map (fun id -> + id, (List.map snd (List.filter (fun (x, _) -> x = id) devs)) + ) ids let of_string dev = - Scanf.sscanf dev slash_bdf_scan_fmt (fun id a b c d -> (id, (a, b, c, d))) + Scanf.sscanf dev slash_bdf_scan_fmt (fun id a b c d -> (id, (a, b, c, d))) let to_string (id, (a, b, c, d)) = - Printf.sprintf slash_bdf_prnt_fmt id a b c d + Printf.sprintf slash_bdf_prnt_fmt id a b c d let other_pcidevs_of_vm ~__context other_config = - let devs = - try - let oc = List.assoc "pci" other_config in - String.split ',' oc - with Not_found -> [] - in - List.fold_left (fun acc dev -> - try - of_string dev :: acc - with _ -> acc - ) [] devs + let devs = + try + let oc = List.assoc "pci" other_config in + String.split ',' oc + with Not_found -> [] + in + List.fold_left (fun acc dev -> + try + of_string dev :: acc + with _ -> acc + ) [] devs let pci_hiding_key = "xen-pciback.hide" let pci_hiding_key_eq = pci_hiding_key ^ "=" let get_pci_hidden_raw_value () = - let cmd = !Xapi_globs.xen_cmdline_path ^ " --get-dom0 " ^ pci_hiding_key in - let raw_kv_string = 
Helpers.get_process_output cmd in - (* E.g. "xen-pciback.hide=(0000:00:02.0)(0000:00:02.1)\n" or just "\n" *) - if String.startswith pci_hiding_key_eq raw_kv_string then - let keylen = String.length pci_hiding_key_eq in - (* rtrim to remove trailing newline *) - String.rtrim(String.sub_to_end raw_kv_string keylen) - else - "" + let cmd = !Xapi_globs.xen_cmdline_path ^ " --get-dom0 " ^ pci_hiding_key in + let raw_kv_string = Helpers.get_process_output cmd in + (* E.g. "xen-pciback.hide=(0000:00:02.0)(0000:00:02.1)\n" or just "\n" *) + if String.startswith pci_hiding_key_eq raw_kv_string then + let keylen = String.length pci_hiding_key_eq in + (* rtrim to remove trailing newline *) + String.rtrim(String.sub_to_end raw_kv_string keylen) + else + "" let get_hidden_pcidevs () = - let paren_len = String.length "(0000:00:00.0)" in - let rec read_dev devs raw = - match raw with - | "" -> devs - | _ -> ( - let dev = Scanf.sscanf - raw bdf_paren_scan_fmt (fun a b c d -> (a, b, c, d)) in - read_dev (dev::devs) (String.sub_to_end raw paren_len) - ) - in - read_dev [] (get_pci_hidden_raw_value ()) + let paren_len = String.length "(0000:00:00.0)" in + let rec read_dev devs raw = + match raw with + | "" -> devs + | _ -> ( + let dev = Scanf.sscanf + raw bdf_paren_scan_fmt (fun a b c d -> (a, b, c, d)) in + read_dev (dev::devs) (String.sub_to_end raw paren_len) + ) + in + read_dev [] (get_pci_hidden_raw_value ()) let _is_pci_hidden ~__context pci = - let pcidev = pcidev_of_pci ~__context pci in - List.mem pcidev (get_hidden_pcidevs ()) + let pcidev = pcidev_of_pci ~__context pci in + List.mem pcidev (get_hidden_pcidevs ()) (** Check whether a PCI device will be hidden from the dom0 kernel on boot. 
*) let is_pci_hidden ~__context pci = - Mutex.execute m (fun () -> - _is_pci_hidden ~__context pci - ) + Mutex.execute m (fun () -> + _is_pci_hidden ~__context pci + ) let _hide_pci ~__context pci = - if not (_is_pci_hidden ~__context pci) then ( - let paren_of (a, b, c, d) = ( - Printf.sprintf bdf_paren_prnt_fmt a b c d - ) in - let p = pcidev_of_pci ~__context pci in - let devs = p::(get_hidden_pcidevs ()) in - let valstr = List.fold_left (fun acc d -> acc ^ (paren_of d)) "" devs in - let cmd = Printf.sprintf "%s --set-dom0 %s%s" - !Xapi_globs.xen_cmdline_path pci_hiding_key_eq valstr in - let _ = Helpers.get_process_output cmd in - () - ) + if not (_is_pci_hidden ~__context pci) then ( + let paren_of (a, b, c, d) = ( + Printf.sprintf bdf_paren_prnt_fmt a b c d + ) in + let p = pcidev_of_pci ~__context pci in + let devs = p::(get_hidden_pcidevs ()) in + let valstr = List.fold_left (fun acc d -> acc ^ (paren_of d)) "" devs in + let cmd = Printf.sprintf "%s --set-dom0 %s%s" + !Xapi_globs.xen_cmdline_path pci_hiding_key_eq valstr in + let _ = Helpers.get_process_output cmd in + () + ) (** Hide a PCI device from the dom0 kernel. (Takes effect after next boot.) 
*) let hide_pci ~__context pci = - Mutex.execute m (fun () -> - _hide_pci ~__context pci - ) + Mutex.execute m (fun () -> + _hide_pci ~__context pci + ) let _unhide_pci ~__context pci = - if (_is_pci_hidden ~__context pci) then ( - let raw_value = get_pci_hidden_raw_value () in - let bdf_paren = Printf.sprintf "(%s)" - (Db.PCI.get_pci_id ~__context ~self:pci) in - let new_value = String.replace bdf_paren "" raw_value in - let cmd = match new_value with - | "" -> Printf.sprintf "%s --delete-dom0 %s" - !Xapi_globs.xen_cmdline_path pci_hiding_key - | _ -> Printf.sprintf "%s --set-dom0 %s%s" - !Xapi_globs.xen_cmdline_path pci_hiding_key_eq new_value - in - let _ = Helpers.get_process_output cmd in - () - ) + if (_is_pci_hidden ~__context pci) then ( + let raw_value = get_pci_hidden_raw_value () in + let bdf_paren = Printf.sprintf "(%s)" + (Db.PCI.get_pci_id ~__context ~self:pci) in + let new_value = String.replace bdf_paren "" raw_value in + let cmd = match new_value with + | "" -> Printf.sprintf "%s --delete-dom0 %s" + !Xapi_globs.xen_cmdline_path pci_hiding_key + | _ -> Printf.sprintf "%s --set-dom0 %s%s" + !Xapi_globs.xen_cmdline_path pci_hiding_key_eq new_value + in + let _ = Helpers.get_process_output cmd in + () + ) (** Unhide a PCI device from the dom0 kernel. (Takes effect after next boot.) *) let unhide_pci ~__context pci = - Mutex.execute m (fun () -> - _unhide_pci ~__context pci - ) + Mutex.execute m (fun () -> + _unhide_pci ~__context pci + ) (** Return the id of a PCI device *) let id_of (id, (domain, bus, dev, fn)) = id diff --git a/ocaml/xapi/pciops.mli b/ocaml/xapi/pciops.mli index 03a5d3211d7..e3a904603b0 100644 --- a/ocaml/xapi/pciops.mli +++ b/ocaml/xapi/pciops.mli @@ -13,7 +13,7 @@ *) (** Module that handles assigning PCI devices to VMs. * @group Virtual-Machine Management - *) +*) (** Check if a given PCI device is free. 
*) val get_free_functions : __context:Context.t -> [ `PCI ] Ref.t -> int diff --git a/ocaml/xapi/pool_db_backup.ml b/ocaml/xapi/pool_db_backup.ml index c261f8b911c..5356b9cb513 100644 --- a/ocaml/xapi/pool_db_backup.ml +++ b/ocaml/xapi/pool_db_backup.ml @@ -13,7 +13,7 @@ *) (** Synchronises a copy of the master database amongst the pool's hosts * @group Pool Management - *) +*) open Stdext open Threadext @@ -28,145 +28,145 @@ let octet_stream = Http.Hdr.content_type ^": application/octet-stream" (* CA-18377: The smallest database that is compatible with the Miami database schema. *) let minimally_compliant_miami_database = - "
" + "
" (** Write the database dump out to a file/socket *) -let write_database (s: Unix.file_descr) ~__context = - if (Helpers.rolling_upgrade_in_progress ~__context) then - (* CA-18377: If we're in the middle of a rolling upgrade from Miami *) - (* to Orlando, then only send a minimally-compliant Miami database. *) - (* Orlando hosts will ignore this database and carry on. *) - let len = String.length minimally_compliant_miami_database in - ignore (Unix.write s minimally_compliant_miami_database 0 len) - else - Db_xml.To.fd s (Db_ref.get_database (Context.database_of __context)) +let write_database (s: Unix.file_descr) ~__context = + if (Helpers.rolling_upgrade_in_progress ~__context) then + (* CA-18377: If we're in the middle of a rolling upgrade from Miami *) + (* to Orlando, then only send a minimally-compliant Miami database. *) + (* Orlando hosts will ignore this database and carry on. *) + let len = String.length minimally_compliant_miami_database in + ignore (Unix.write s minimally_compliant_miami_database 0 len) + else + Db_xml.To.fd s (Db_ref.get_database (Context.database_of __context)) (** Make sure the backup database version is compatible *) let version_check db = - let major, minor = Manifest.schema (Database.manifest db) in - if major <> Datamodel.schema_major_vsn || minor <> Datamodel.schema_minor_vsn then begin - error "Pool backup file was created with incompatible product version"; - raise (Api_errors.Server_error(Api_errors.restore_incompatible_version, [])) - end + let major, minor = Manifest.schema (Database.manifest db) in + if major <> Datamodel.schema_major_vsn || minor <> Datamodel.schema_minor_vsn then begin + error "Pool backup file was created with incompatible product version"; + raise (Api_errors.Server_error(Api_errors.restore_incompatible_version, [])) + end (** Makes a new database suitable for xapi by rewriting some configuration from the current - database. 
*) -let prepare_database_for_restore ~old_context ~new_context = - - (* To prevent duplicate installation_uuids or duplicate IP address confusing the - "no other masters" check we remove all hosts from the backup except the master. *) - - (* Look up the pool master: *) - let master = Helpers.get_master ~__context:new_context in - - (* Remove all slaves from the database *) - List.iter (fun self -> - if self <> master then begin - List.iter (fun self -> Db.PIF.destroy ~__context:new_context ~self) - (Db.Host.get_PIFs ~__context:new_context ~self); - Db.Host.destroy ~__context:new_context ~self - end) - (Db.Host.get_all ~__context:new_context); - - (* Set the master's uuid to ours *) - let my_installation_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in - Db.Host.set_uuid ~__context:new_context ~self:master ~value:my_installation_uuid; - - (* Set the master's dom0 to ours *) - let my_control_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in - let dom0 = Db.Host.get_control_domain ~__context:new_context ~self:master in - Db.VM.set_uuid ~__context:new_context ~self:dom0 ~value:my_control_uuid; - - (* Rewrite this host's PIFs' MAC addresses based on device name. *) - - (* First inspect the current machine's configuration and build up a table of - device name -> PIF reference. *) - let all_pifs = Db.Host.get_PIFs ~__context:old_context ~self:(Helpers.get_localhost ~__context:old_context) in - - let device_to_ref = - let physical = List.filter (fun self -> Db.PIF.get_physical ~__context:old_context ~self) all_pifs in - List.map (fun self -> Db.PIF.get_device ~__context:old_context ~self, self) physical in - - (* Since it's difficult for us to change the @INVENTORY@ and the ifcfg- - files, we /preserve/ the current management PIF across the restore. NB this interface - might be a bond or a vlan. 
*) - let mgmt_dev = - match List.filter (fun self -> Db.PIF.get_management ~__context:old_context ~self) all_pifs with - | [ dev ] -> Some (Db.PIF.get_device ~__context:old_context ~self:dev) - | _ -> None (* no management interface configured *) in - - (* The PIFs of the master host in the backup need their MAC addresses adjusting - to match the current machine. For safety the new machine needs to have at least - the same number and same device names as the backup being restored. (Note that - any excess interfaces will be forgotten and need to be manually reintroduced) - - Additionally we require the currently configured management interface device name - is found in the backup so we can re-use the existing ifcfg- files in /etc/. - We need this because the interface-reconfigure --force-up relies on the existing - config files. Ideally a master startup (such as that in the restore db code) would - actively regenerate the config files but this is too invasive a change for CA-15164. - - PIFs whose device name are not recognised or those belonging to (now dead) - slaves are forgotten. *) - - let found_mgmt_if = ref false in - let ifs_in_backup = ref [] in - List.iter - (fun self -> - let device = Db.PIF.get_device ~__context:new_context ~self in - ifs_in_backup := device :: !ifs_in_backup; - - let uuid = Db.PIF.get_uuid ~__context:new_context ~self in - let physical = Db.PIF.get_physical ~__context:new_context ~self in - let is_mgmt = Some device = mgmt_dev in - Db.PIF.set_management ~__context:new_context ~self ~value:is_mgmt; - if is_mgmt then found_mgmt_if := true; - - (* We only need to rewrite the MAC addresses of physical PIFs *) - if physical then begin - (* If this is a physical PIF but we can't find the device name - on the restore target, bail out. 
*) - if not(List.mem_assoc device device_to_ref) - then raise (Api_errors.Server_error(Api_errors.restore_target_missing_device, [ device ])); - (* Otherwise rewrite the MAC address to match the current machine - and set the management flag accordingly *) - let existing_pif = List.assoc device device_to_ref in - Db.PIF.set_MAC ~__context:new_context ~self ~value:(Db.PIF.get_MAC ~__context:old_context ~self:existing_pif) - end; - debug "Rewriting PIF uuid %s device %s management %b MAC %s" - uuid device is_mgmt (Db.PIF.get_MAC ~__context:new_context ~self); - ) (Db.Host.get_PIFs ~__context:new_context ~self:master); - - (* Check that management interface was synced up *) - if not(!found_mgmt_if) && mgmt_dev <> None - then raise (Api_errors.Server_error(Api_errors.restore_target_mgmt_if_not_in_backup, !ifs_in_backup)) + database. *) +let prepare_database_for_restore ~old_context ~new_context = + + (* To prevent duplicate installation_uuids or duplicate IP address confusing the + "no other masters" check we remove all hosts from the backup except the master. 
*) + + (* Look up the pool master: *) + let master = Helpers.get_master ~__context:new_context in + + (* Remove all slaves from the database *) + List.iter (fun self -> + if self <> master then begin + List.iter (fun self -> Db.PIF.destroy ~__context:new_context ~self) + (Db.Host.get_PIFs ~__context:new_context ~self); + Db.Host.destroy ~__context:new_context ~self + end) + (Db.Host.get_all ~__context:new_context); + + (* Set the master's uuid to ours *) + let my_installation_uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in + Db.Host.set_uuid ~__context:new_context ~self:master ~value:my_installation_uuid; + + (* Set the master's dom0 to ours *) + let my_control_uuid = Xapi_inventory.lookup Xapi_inventory._control_domain_uuid in + let dom0 = Db.Host.get_control_domain ~__context:new_context ~self:master in + Db.VM.set_uuid ~__context:new_context ~self:dom0 ~value:my_control_uuid; + + (* Rewrite this host's PIFs' MAC addresses based on device name. *) + + (* First inspect the current machine's configuration and build up a table of + device name -> PIF reference. *) + let all_pifs = Db.Host.get_PIFs ~__context:old_context ~self:(Helpers.get_localhost ~__context:old_context) in + + let device_to_ref = + let physical = List.filter (fun self -> Db.PIF.get_physical ~__context:old_context ~self) all_pifs in + List.map (fun self -> Db.PIF.get_device ~__context:old_context ~self, self) physical in + + (* Since it's difficult for us to change the @INVENTORY@ and the ifcfg- + files, we /preserve/ the current management PIF across the restore. NB this interface + might be a bond or a vlan. *) + let mgmt_dev = + match List.filter (fun self -> Db.PIF.get_management ~__context:old_context ~self) all_pifs with + | [ dev ] -> Some (Db.PIF.get_device ~__context:old_context ~self:dev) + | _ -> None (* no management interface configured *) in + + (* The PIFs of the master host in the backup need their MAC addresses adjusting + to match the current machine. 
For safety the new machine needs to have at least + the same number and same device names as the backup being restored. (Note that + any excess interfaces will be forgotten and need to be manually reintroduced) + + Additionally we require the currently configured management interface device name + is found in the backup so we can re-use the existing ifcfg- files in /etc/. + We need this because the interface-reconfigure --force-up relies on the existing + config files. Ideally a master startup (such as that in the restore db code) would + actively regenerate the config files but this is too invasive a change for CA-15164. + + PIFs whose device name are not recognised or those belonging to (now dead) + slaves are forgotten. *) + + let found_mgmt_if = ref false in + let ifs_in_backup = ref [] in + List.iter + (fun self -> + let device = Db.PIF.get_device ~__context:new_context ~self in + ifs_in_backup := device :: !ifs_in_backup; + + let uuid = Db.PIF.get_uuid ~__context:new_context ~self in + let physical = Db.PIF.get_physical ~__context:new_context ~self in + let is_mgmt = Some device = mgmt_dev in + Db.PIF.set_management ~__context:new_context ~self ~value:is_mgmt; + if is_mgmt then found_mgmt_if := true; + + (* We only need to rewrite the MAC addresses of physical PIFs *) + if physical then begin + (* If this is a physical PIF but we can't find the device name + on the restore target, bail out. 
*) + if not(List.mem_assoc device device_to_ref) + then raise (Api_errors.Server_error(Api_errors.restore_target_missing_device, [ device ])); + (* Otherwise rewrite the MAC address to match the current machine + and set the management flag accordingly *) + let existing_pif = List.assoc device device_to_ref in + Db.PIF.set_MAC ~__context:new_context ~self ~value:(Db.PIF.get_MAC ~__context:old_context ~self:existing_pif) + end; + debug "Rewriting PIF uuid %s device %s management %b MAC %s" + uuid device is_mgmt (Db.PIF.get_MAC ~__context:new_context ~self); + ) (Db.Host.get_PIFs ~__context:new_context ~self:master); + + (* Check that management interface was synced up *) + if not(!found_mgmt_if) && mgmt_dev <> None + then raise (Api_errors.Server_error(Api_errors.restore_target_mgmt_if_not_in_backup, !ifs_in_backup)) (** Restore all of our state from an XML backup. This includes the pool config, token etc *) -let restore_from_xml __context dry_run (xml_filename: string) = - debug "attempting to restore database from %s" xml_filename; - let db = Db_upgrade.generic_database_upgrade (Db_xml.From.file (Datamodel_schema.of_datamodel ()) xml_filename) in - version_check db; - - let db_ref = Db_ref.in_memory (ref (ref db)) in - let new_context = Context.make ~database:db_ref "restore_db" in - - prepare_database_for_restore ~old_context:__context ~new_context; - (* write manifest and unmarshalled db directly to db_temporary_restore_path, so its ready for us on restart *) - if not(dry_run) - then Db_xml.To.file Xapi_globs.db_temporary_restore_path (Db_ref.get_database (Context.database_of new_context)) - +let restore_from_xml __context dry_run (xml_filename: string) = + debug "attempting to restore database from %s" xml_filename; + let db = Db_upgrade.generic_database_upgrade (Db_xml.From.file (Datamodel_schema.of_datamodel ()) xml_filename) in + version_check db; + + let db_ref = Db_ref.in_memory (ref (ref db)) in + let new_context = Context.make ~database:db_ref 
"restore_db" in + + prepare_database_for_restore ~old_context:__context ~new_context; + (* write manifest and unmarshalled db directly to db_temporary_restore_path, so its ready for us on restart *) + if not(dry_run) + then Db_xml.To.file Xapi_globs.db_temporary_restore_path (Db_ref.get_database (Context.database_of new_context)) + (** Called when a CLI user downloads a backup of the database *) let pull_database_backup_handler (req: Http.Request.t) s _ = debug "received request to write out db as xml"; req.Http.Request.close <- true; Xapi_http.with_context "Dumping database as XML" req s (fun __context -> - debug "sending headers"; - Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); - debug "writing database xml"; - write_database s ~__context; - debug "finished writing database xml" + debug "sending headers"; + Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); + debug "writing database xml"; + write_database s ~__context; + debug "finished writing database xml" ) (** Invoked only by the explicit database restore code *) @@ -174,49 +174,49 @@ let push_database_restore_handler (req: Http.Request.t) s _ = debug "received request to restore db from xml dump"; Xapi_http.with_context "Reading database as XML" req s (fun __context -> - debug "sending headers"; - Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); - debug "sent headers"; - (* XXX: write to temp file *) - let tmp_xml_file = Filename.temp_file "" "xml_file" in - let xml_file_fd = Unix.openfile tmp_xml_file [ Unix.O_WRONLY ] 0o600 in - let () = Pervasiveext.finally - (fun ()->ignore (Unixext.copy_file s xml_file_fd)) - (fun ()->Unix.close xml_file_fd) in - - let dry_run = List.mem_assoc "dry_run" req.Http.Request.query && (List.assoc "dry_run" req.Http.Request.query = "true") in - if dry_run - then debug "performing dry-run database restore" - else debug "performing full restore and restart"; - Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; - restore_from_xml __context 
dry_run tmp_xml_file; - Unixext.unlink_safe tmp_xml_file; - if not(dry_run) then begin - (* We will restart as a master *) - Pool_role.set_role Pool_role.Master; - - (* now restart *) - debug "xapi has received new database via xml; will reboot and use that db..."; - info "Rebooting to use restored database after delay of: %f" !Xapi_globs.db_restore_fuse_time; - Xapi_fuse.light_fuse_and_reboot ~fuse_length:!Xapi_globs.db_restore_fuse_time (); - end - ) + debug "sending headers"; + Http_svr.headers s (Http.http_200_ok ~keep_alive:false ()); + debug "sent headers"; + (* XXX: write to temp file *) + let tmp_xml_file = Filename.temp_file "" "xml_file" in + let xml_file_fd = Unix.openfile tmp_xml_file [ Unix.O_WRONLY ] 0o600 in + let () = Pervasiveext.finally + (fun ()->ignore (Unixext.copy_file s xml_file_fd)) + (fun ()->Unix.close xml_file_fd) in + + let dry_run = List.mem_assoc "dry_run" req.Http.Request.query && (List.assoc "dry_run" req.Http.Request.query = "true") in + if dry_run + then debug "performing dry-run database restore" + else debug "performing full restore and restart"; + Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; + restore_from_xml __context dry_run tmp_xml_file; + Unixext.unlink_safe tmp_xml_file; + if not(dry_run) then begin + (* We will restart as a master *) + Pool_role.set_role Pool_role.Master; + + (* now restart *) + debug "xapi has received new database via xml; will reboot and use that db..."; + info "Rebooting to use restored database after delay of: %f" !Xapi_globs.db_restore_fuse_time; + Xapi_fuse.light_fuse_and_reboot ~fuse_length:!Xapi_globs.db_restore_fuse_time (); + end + ) let http_fetch_db ~master_address ~pool_secret = - let request = Xapi_http.http_request ~cookie:[ "pool_secret", pool_secret ] - Http.Get Constants.pool_xml_db_sync in - let open Xmlrpc_client in - let transport = SSL(SSL.make (), master_address, !Xapi_globs.https_port) in - with_transport transport - (with_http request - (fun (response, fd) -> - (* no 
content length since it's streaming *) - let inchan = Unix.in_channel_of_descr fd in (* never read from fd again! *) - let db = Db_xml.From.channel (Datamodel_schema.of_datamodel ()) inchan in - version_check db; - db - ) - ) + let request = Xapi_http.http_request ~cookie:[ "pool_secret", pool_secret ] + Http.Get Constants.pool_xml_db_sync in + let open Xmlrpc_client in + let transport = SSL(SSL.make (), master_address, !Xapi_globs.https_port) in + with_transport transport + (with_http request + (fun (response, fd) -> + (* no content length since it's streaming *) + let inchan = Unix.in_channel_of_descr fd in (* never read from fd again! *) + let db = Db_xml.From.channel (Datamodel_schema.of_datamodel ()) inchan in + version_check db; + db + ) + ) (* When we eject from a pool, a slave deletes its backup files. This mutex is used to ensure that we're not trying to delete these backup files concurrently with making more! *) @@ -233,42 +233,42 @@ let fetch_database_backup ~master_address ~pool_secret ~force = let db = http_fetch_db ~master_address ~pool_secret in (* flush backup to each of our database connections *) List.iter - (fun dbconn -> - Threadext.Mutex.execute slave_backup_m - (fun () -> - Db_connections.flush dbconn db; - Slave_backup.notify_write dbconn (* update writes_this_period for this connection *) - ) - ) - connections + (fun dbconn -> + Threadext.Mutex.execute slave_backup_m + (fun () -> + Db_connections.flush dbconn db; + Slave_backup.notify_write dbconn (* update writes_this_period for this connection *) + ) + ) + connections end else debug "Not requesting backup from master, no candidate db connections to backup to" (* Master sync thread *) let pool_db_backup_thread () = Debug.with_thread_named "pool_db_backup_thread" (fun () -> - Server_helpers.exec_with_new_task "Pool DB sync" (fun __context -> - while (true) do - try - begin - let hosts = Db.Host.get_all ~__context in - let hosts = List.filter (fun hostref -> hostref <> 
!Xapi_globs.localhost_ref) hosts in - let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Context.database_of __context)))) in - let dohost host = - try - Thread.delay !Xapi_globs.pool_db_sync_interval; - debug "Starting DB synchronise with host %s" (Ref.string_of host); - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Host.request_backup rpc session_id host generation false); - debug "Finished DB synchronise"; - with - e -> - error "Failed to synchronise DB with host %s: %s" (Ref.string_of host) (Printexc.to_string e) in - - (* since thread.delay is inside dohost fn make sure we don't spin if hosts=[]: *) - if hosts=[] then Thread.delay !Xapi_globs.pool_db_sync_interval - else List.iter dohost hosts; - end - with e -> debug "Exception in DB synchronise thread: %s" (ExnHelper.string_of_exn e) - done) + Server_helpers.exec_with_new_task "Pool DB sync" (fun __context -> + while (true) do + try + begin + let hosts = Db.Host.get_all ~__context in + let hosts = List.filter (fun hostref -> hostref <> !Xapi_globs.localhost_ref) hosts in + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Context.database_of __context)))) in + let dohost host = + try + Thread.delay !Xapi_globs.pool_db_sync_interval; + debug "Starting DB synchronise with host %s" (Ref.string_of host); + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Host.request_backup rpc session_id host generation false); + debug "Finished DB synchronise"; + with + e -> + error "Failed to synchronise DB with host %s: %s" (Ref.string_of host) (Printexc.to_string e) in + + (* since thread.delay is inside dohost fn make sure we don't spin if hosts=[]: *) + if hosts=[] then Thread.delay !Xapi_globs.pool_db_sync_interval + else List.iter dohost hosts; + end + with e -> debug "Exception in DB synchronise thread: %s" (ExnHelper.string_of_exn e) + done) ) () diff --git 
a/ocaml/xapi/pool_features.ml b/ocaml/xapi/pool_features.ml index db0722dfd75..97ccbd16434 100644 --- a/ocaml/xapi/pool_features.ml +++ b/ocaml/xapi/pool_features.ml @@ -9,7 +9,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. - *) +*) open Features module D = Debug.Make(struct let name="pool_features" end) @@ -28,81 +28,81 @@ open D let all_flags = List.map (fun (k, v) -> k) (to_assoc_list all_features) let get_pool_features ~__context = - let pool = Helpers.get_pool ~__context in - of_assoc_list (Db.Pool.get_restrictions ~__context ~self:pool) + let pool = Helpers.get_pool ~__context in + of_assoc_list (Db.Pool.get_restrictions ~__context ~self:pool) let is_enabled ~__context f = - let pool_features = get_pool_features ~__context in - List.mem f pool_features + let pool_features = get_pool_features ~__context in + List.mem f pool_features let assert_enabled ~__context ~f = - if not (is_enabled ~__context f) then - raise (Api_errors.Server_error(Api_errors.license_restriction, [name_of_feature f])) + if not (is_enabled ~__context f) then + raise (Api_errors.Server_error(Api_errors.license_restriction, [name_of_feature f])) (* The set of core restrictions of a pool is the intersection of the sets of features of the individual hosts. *) let compute_core_features all_host_params = - List.map of_assoc_list all_host_params - |> List.fold_left Stdext.Listext.List.intersect all_features + List.map of_assoc_list all_host_params + |> List.fold_left Stdext.Listext.List.intersect all_features (* Find the feature flags in the given license params that are not represented in the feature type. These are additional flags given to us by v6d. Assume that their names always start with "restrict_". 
*) let find_additional_flags params = - let kvs = List.filter (fun (k, v) -> - try String.sub k 0 9 = "restrict_" && not (List.mem k all_flags) - with Invalid_argument _ -> false - ) params in - List.map fst kvs + let kvs = List.filter (fun (k, v) -> + try String.sub k 0 9 = "restrict_" && not (List.mem k all_flags) + with Invalid_argument _ -> false + ) params in + List.map fst kvs (* Determine the set of additional features. For each restrict_ flag, looks for matching flags on all hosts; if one of them is restricted ("true") or absent, then the feature on the pool level is marked as restricted. *) let rec compute_additional_restrictions all_host_params = function - | [] -> [] - | flag :: rest -> - let switches = - List.map - (function params -> - if List.mem_assoc flag params - then bool_of_string (List.assoc flag params) - else true) - all_host_params - in - (flag, string_of_bool (List.fold_left (||) false switches)) :: - compute_additional_restrictions all_host_params rest + | [] -> [] + | flag :: rest -> + let switches = + List.map + (function params -> + if List.mem_assoc flag params + then bool_of_string (List.assoc flag params) + else true) + all_host_params + in + (flag, string_of_bool (List.fold_left (||) false switches)) :: + compute_additional_restrictions all_host_params rest (* Combine the host-level feature restrictions into pool-level ones, and write the result to the database. 
*) let update_pool_features ~__context = - (* Get information from the database *) - let pool = Helpers.get_pool ~__context in - let old_restrictions = Db.Pool.get_restrictions ~__context ~self:pool in - let all_host_params = List.map - (fun (_, host_r) -> host_r.API.host_license_params) - (Db.Host.get_all_records ~__context) in - let master_params = - let master_ref = Db.Pool.get_master ~__context ~self:pool in - Db.Host.get_license_params ~__context ~self:master_ref - in + (* Get information from the database *) + let pool = Helpers.get_pool ~__context in + let old_restrictions = Db.Pool.get_restrictions ~__context ~self:pool in + let all_host_params = List.map + (fun (_, host_r) -> host_r.API.host_license_params) + (Db.Host.get_all_records ~__context) in + let master_params = + let master_ref = Db.Pool.get_master ~__context ~self:pool in + Db.Host.get_license_params ~__context ~self:master_ref + in - (* Determine the set of core restrictions *) - let new_core_features = compute_core_features all_host_params in - let new_core_restrictions = to_assoc_list new_core_features in + (* Determine the set of core restrictions *) + let new_core_features = compute_core_features all_host_params in + let new_core_restrictions = to_assoc_list new_core_features in - (* Determine the set of additional restrictions *) - let additional_flags = find_additional_flags master_params in - let new_additional_restrictions = compute_additional_restrictions all_host_params additional_flags in + (* Determine the set of additional restrictions *) + let additional_flags = find_additional_flags master_params in + let new_additional_restrictions = compute_additional_restrictions all_host_params additional_flags in - (* The complete set of restrictions is formed by the core feature plus the additional features *) - let new_restrictions = new_additional_restrictions @ new_core_restrictions in + (* The complete set of restrictions is formed by the core feature plus the additional features *) + 
let new_restrictions = new_additional_restrictions @ new_core_restrictions in - (* Update the DB if the restrictions have changed *) - if new_restrictions <> old_restrictions then begin - let old_core_features = of_assoc_list old_restrictions in - info "Old pool features enabled: %s" (to_compact_string old_core_features); - info "New pool features enabled: %s" (to_compact_string new_core_features); - Db.Pool.set_restrictions ~__context ~self:pool ~value:new_restrictions; - Xapi_pool_helpers.apply_guest_agent_config ~__context - end + (* Update the DB if the restrictions have changed *) + if new_restrictions <> old_restrictions then begin + let old_core_features = of_assoc_list old_restrictions in + info "Old pool features enabled: %s" (to_compact_string old_core_features); + info "New pool features enabled: %s" (to_compact_string new_core_features); + Db.Pool.set_restrictions ~__context ~self:pool ~value:new_restrictions; + Xapi_pool_helpers.apply_guest_agent_config ~__context + end diff --git a/ocaml/xapi/pool_features.mli b/ocaml/xapi/pool_features.mli index 517af8eaaac..6e8f6d8e536 100644 --- a/ocaml/xapi/pool_features.mli +++ b/ocaml/xapi/pool_features.mli @@ -13,7 +13,7 @@ *) (** Module that controls feature restriction. * @group Licensing - *) +*) (** Check whether a given feature is currently enabled on the pool. 
*) val is_enabled : __context:Context.t -> Features.feature -> bool diff --git a/ocaml/xapi/pool_role.ml b/ocaml/xapi/pool_role.ml index dc8a01ade0e..ac3348db9ce 100644 --- a/ocaml/xapi/pool_role.ml +++ b/ocaml/xapi/pool_role.ml @@ -13,7 +13,7 @@ *) (** * @group Pool Management - *) +*) open Stdext open Xstringext @@ -24,59 +24,59 @@ open D (** The role of this node *) type t = - | Master - | Slave of string (* IP address *) - | Broken + | Master + | Slave of string (* IP address *) + | Broken let role = ref None let role_unit_tests = ref false let role_m = Mutex.create () let set_pool_role_for_test () = - Mutex.execute role_m (fun _ -> role := Some Master; - role_unit_tests := true) + Mutex.execute role_m (fun _ -> role := Some Master; + role_unit_tests := true) let is_unit_test () = - Mutex.execute role_m (fun _ -> !role_unit_tests) + Mutex.execute role_m (fun _ -> !role_unit_tests) let string_of = function - | Master -> "master" - | Slave x -> "slave:" ^ x - | Broken -> "broken" + | Master -> "master" + | Slave x -> "slave:" ^ x + | Broken -> "broken" let read_pool_role () = - try - let s = String.strip String.isspace - (Unixext.string_of_file !Xapi_globs.pool_config_file) in - match String.split ~limit:2 ':' s with - | [ "master" ] -> Master - | [ "slave"; m_ip ] -> Slave m_ip - | [ "broken" ] -> Broken - | _ -> failwith "cannot parse pool_role from pool config file" - with _ -> - (* If exec name is suite.opt, we're running as unit tests *) - if "xapi" <> Filename.basename Sys.executable_name - then (debug "Executable name is not 'xapi', so we must be running \ - in unit-test mode; setting pool-role to 'Master'"; - Master) - else (error "Failed to read pool role from %s" !Xapi_globs.pool_config_file; - Broken) + try + let s = String.strip String.isspace + (Unixext.string_of_file !Xapi_globs.pool_config_file) in + match String.split ~limit:2 ':' s with + | [ "master" ] -> Master + | [ "slave"; m_ip ] -> Slave m_ip + | [ "broken" ] -> Broken + | _ -> failwith 
"cannot parse pool_role from pool config file" + with _ -> + (* If exec name is suite.opt, we're running as unit tests *) + if "xapi" <> Filename.basename Sys.executable_name + then (debug "Executable name is not 'xapi', so we must be running \ + in unit-test mode; setting pool-role to 'Master'"; + Master) + else (error "Failed to read pool role from %s" !Xapi_globs.pool_config_file; + Broken) let get_role () = - Mutex.execute role_m (fun _ -> - match !role with - | Some x -> x - | None -> - let r = read_pool_role () in - role := Some r; - r - ) + Mutex.execute role_m (fun _ -> + match !role with + | Some x -> x + | None -> + let r = read_pool_role () in + role := Some r; + r + ) let is_master () = get_role () = Master let is_slave () = match get_role () with - | Slave _ -> true - | _ -> false + | Slave _ -> true + | _ -> false let is_broken () = get_role () = Broken @@ -84,9 +84,9 @@ exception This_host_is_a_master exception This_host_is_broken let get_master_address () = match get_role () with - | Slave ip -> ip - | Master -> raise This_host_is_a_master - | Broken -> raise This_host_is_broken + | Slave ip -> ip + | Master -> raise This_host_is_a_master + | Broken -> raise This_host_is_broken let set_role r = let old_role = get_role () in diff --git a/ocaml/xapi/pool_role.mli b/ocaml/xapi/pool_role.mli index 932c89cb000..e162e2dda9f 100644 --- a/ocaml/xapi/pool_role.mli +++ b/ocaml/xapi/pool_role.mli @@ -12,8 +12,8 @@ * GNU Lesser General Public License for more details. 
*) -type t = - | Master +type t = + | Master | Slave of string (** IP address *) | Broken diff --git a/ocaml/xapi/quicktest.ml b/ocaml/xapi/quicktest.ml index 511f177ac42..896e7a2688e 100644 --- a/ocaml/xapi/quicktest.ml +++ b/ocaml/xapi/quicktest.ml @@ -23,10 +23,10 @@ open Quicktest_common let username = ref "" let password = ref "" -let export_filename = "/tmp/quicktest-export" +let export_filename = "/tmp/quicktest-export" (* CA-11402 *) -let event_next_unblocking_test () = +let event_next_unblocking_test () = let test = make_test "Event.next unblocking test" 0 in start test; (* Need to create a temporary session ID *) @@ -35,15 +35,15 @@ let event_next_unblocking_test () = let m = Mutex.create () in let unblocked = ref false in let (_: Thread.t) = Thread.create - (fun () -> - begin - try ignore(Client.Event.next !rpc session_id) - with e -> - debug test (Printf.sprintf "background thread caught: %s (an exception is expected)" (Printexc.to_string e)) - end; - Mutex.execute m (fun () -> unblocked := true) - ) () in - (* Background thread is started but it cannot simultaneously block and signal us to + (fun () -> + begin + try ignore(Client.Event.next !rpc session_id) + with e -> + debug test (Printf.sprintf "background thread caught: %s (an exception is expected)" (Printexc.to_string e)) + end; + Mutex.execute m (fun () -> unblocked := true) + ) () in + (* Background thread is started but it cannot simultaneously block and signal us to logout so a little pause in here is probably the best we can do *) Thread.delay 2.; (* Logout which should cause the background thread to unblock *) @@ -56,222 +56,222 @@ let event_next_unblocking_test () = else success test let event_next_test session_id = - let test = make_test "Event.next test" 0 in - start test; - let () = Client.Event.register !rpc session_id [ "pool" ] in - let m = Mutex.create () in - let finished = ref false in - let pool = Client.Pool.get_all !rpc session_id |> List.hd in - let key = "event_next_test" in 
- begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; - let (_: Thread.t) = Thread.create - (fun () -> - while not (Mutex.execute m (fun () -> !finished)) do - ignore (Client.Event.next !rpc session_id); - let oc = Client.Pool.get_other_config !rpc session_id pool in - if List.mem_assoc key oc && (List.assoc key oc) = "1" - then Mutex.execute m (fun () -> - debug test "got expected event"; - finished := true; - ) - done - ) () in - Thread.delay 1.; - Client.Pool.add_to_other_config !rpc session_id pool key "1"; - Thread.delay 1.; - if not(Mutex.execute m (fun () -> !finished)) - then failed test "failed to see pool.other_config change" - else success test + let test = make_test "Event.next test" 0 in + start test; + let () = Client.Event.register !rpc session_id [ "pool" ] in + let m = Mutex.create () in + let finished = ref false in + let pool = Client.Pool.get_all !rpc session_id |> List.hd in + let key = "event_next_test" in + begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; + let (_: Thread.t) = Thread.create + (fun () -> + while not (Mutex.execute m (fun () -> !finished)) do + ignore (Client.Event.next !rpc session_id); + let oc = Client.Pool.get_other_config !rpc session_id pool in + if List.mem_assoc key oc && (List.assoc key oc) = "1" + then Mutex.execute m (fun () -> + debug test "got expected event"; + finished := true; + ) + done + ) () in + Thread.delay 1.; + Client.Pool.add_to_other_config !rpc session_id pool key "1"; + Thread.delay 1.; + if not(Mutex.execute m (fun () -> !finished)) + then failed test "failed to see pool.other_config change" + else success test let wait_for_pool_key test session_id key = - let token = ref "" in - let finished = ref false in - let pool = Client.Pool.get_all !rpc session_id |> List.hd in - while not !finished do - let events = Client.Event.from !rpc session_id [ "pool" ] (!token) 10. 
|> event_from_of_rpc in - token := events.token; - let oc = Client.Pool.get_other_config !rpc session_id pool in - if List.mem_assoc key oc && (List.assoc key oc) = "1" then finished := true; - done + let token = ref "" in + let finished = ref false in + let pool = Client.Pool.get_all !rpc session_id |> List.hd in + while not !finished do + let events = Client.Event.from !rpc session_id [ "pool" ] (!token) 10. |> event_from_of_rpc in + token := events.token; + let oc = Client.Pool.get_other_config !rpc session_id pool in + if List.mem_assoc key oc && (List.assoc key oc) = "1" then finished := true; + done let event_from_test session_id = - let test = make_test "Event.from test" 0 in - start test; - let m = Mutex.create () in - let finished = ref false in - let pool = Client.Pool.get_all !rpc session_id |> List.hd in - let key = "event_next_test" in - begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; - let (_: Thread.t) = Thread.create - (fun () -> - wait_for_pool_key test session_id key; - Mutex.execute m (fun () -> finished := true) - ) () in - Thread.delay 1.; - Client.Pool.add_to_other_config !rpc session_id pool key "1"; - Thread.delay 1.; - if not(Mutex.execute m (fun () -> !finished)) - then failed test "failed to see pool.other_config change" - else success test + let test = make_test "Event.from test" 0 in + start test; + let m = Mutex.create () in + let finished = ref false in + let pool = Client.Pool.get_all !rpc session_id |> List.hd in + let key = "event_next_test" in + begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; + let (_: Thread.t) = Thread.create + (fun () -> + wait_for_pool_key test session_id key; + Mutex.execute m (fun () -> finished := true) + ) () in + Thread.delay 1.; + Client.Pool.add_to_other_config !rpc session_id pool key "1"; + Thread.delay 1.; + if not(Mutex.execute m (fun () -> !finished)) + then failed test "failed to see pool.other_config change" + 
else success test let event_from_parallel_test session_id = - let test = make_test "Event.from parallel test" 0 in - start test; - let pool = Client.Pool.get_all !rpc session_id |> List.hd in - let key = "event_next_test" in - begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; - let ok = ref true in - let (i_should_succeed: Thread.t) = Thread.create - (fun () -> - try - let _ = Client.Event.from !rpc session_id [] "" 10. in - () (* good *) - with e -> - debug test (ExnHelper.string_of_exn e); - ok := false; - ) () in - let (interfering_thread: Thread.t) = Thread.create - (fun () -> - wait_for_pool_key test session_id key - ) () in - Thread.delay 1.; (* wait for both threads to block in Event.from *) - Client.Pool.add_to_other_config !rpc session_id pool key "1"; - Thread.join interfering_thread; - Thread.join i_should_succeed; - if not !ok - then failed test "Event.from got cancelled by mistake" - else success test + let test = make_test "Event.from parallel test" 0 in + start test; + let pool = Client.Pool.get_all !rpc session_id |> List.hd in + let key = "event_next_test" in + begin try Client.Pool.remove_from_other_config !rpc session_id pool key with _ -> () end; + let ok = ref true in + let (i_should_succeed: Thread.t) = Thread.create + (fun () -> + try + let _ = Client.Event.from !rpc session_id [] "" 10. 
in + () (* good *) + with e -> + debug test (ExnHelper.string_of_exn e); + ok := false; + ) () in + let (interfering_thread: Thread.t) = Thread.create + (fun () -> + wait_for_pool_key test session_id key + ) () in + Thread.delay 1.; (* wait for both threads to block in Event.from *) + Client.Pool.add_to_other_config !rpc session_id pool key "1"; + Thread.join interfering_thread; + Thread.join i_should_succeed; + if not !ok + then failed test "Event.from got cancelled by mistake" + else success test let object_level_event_test session_id = - let test = make_test "Event.from object-level test" 0 in - start test; - let m = Mutex.create () in - let finished = ref false in - let reported_failure = ref false in - (* Let's play with templates *) - let vms = Client.VM.get_all !rpc session_id in - if List.length vms < 2 then failwith "Test needs 2 VMs"; - let vm_a = List.hd vms in - let vm_b = List.hd (List.tl vms) in - debug test (Printf.sprintf "watching %s" (Ref.string_of vm_a)); - debug test (Printf.sprintf "ignoring %s" (Ref.string_of vm_b)); - let key = "object_level_event_next" in - begin try Client.VM.remove_from_other_config !rpc session_id vm_a key with _ -> () end; - begin try Client.VM.remove_from_other_config !rpc session_id vm_b key with _ -> () end; - - let (_: Thread.t) = Thread.create - (fun () -> - let token = ref "" in - while not (Mutex.execute m (fun () -> !finished)) do - let events = Client.Event.from !rpc session_id [ Printf.sprintf "vm/%s" (Ref.string_of vm_a) ] (!token) 10. 
|> event_from_of_rpc in - List.iter - (fun event -> - if event.reference <> Ref.string_of vm_a then begin - debug test (Printf.sprintf "event on %s which we aren't watching" event.reference); - Mutex.execute m - (fun () -> - reported_failure := true; - failed test (Printf.sprintf "got unexpected event (new token = %s)" !token); - finished := true; - ) - end - ) events.events; - token := events.token; - let oc = Client.VM.get_other_config !rpc session_id vm_a in - if List.mem_assoc key oc && (List.assoc key oc) = "1" - then Mutex.execute m (fun () -> - debug test (Printf.sprintf "got expected event (new token = %s)" !token); - finished := true; - ); - done - ) () in - Thread.delay 1.; - Client.VM.add_to_other_config !rpc session_id vm_b key "1"; - Thread.delay 1.; - Client.VM.remove_from_other_config !rpc session_id vm_b key; - Client.VM.add_to_other_config !rpc session_id vm_a key "1"; - Thread.delay 1.; - Mutex.execute m - (fun () -> - if not (!reported_failure) then begin - if !finished - then success test - else failed test "failed to see object-level event change" - end - ) + let test = make_test "Event.from object-level test" 0 in + start test; + let m = Mutex.create () in + let finished = ref false in + let reported_failure = ref false in + (* Let's play with templates *) + let vms = Client.VM.get_all !rpc session_id in + if List.length vms < 2 then failwith "Test needs 2 VMs"; + let vm_a = List.hd vms in + let vm_b = List.hd (List.tl vms) in + debug test (Printf.sprintf "watching %s" (Ref.string_of vm_a)); + debug test (Printf.sprintf "ignoring %s" (Ref.string_of vm_b)); + let key = "object_level_event_next" in + begin try Client.VM.remove_from_other_config !rpc session_id vm_a key with _ -> () end; + begin try Client.VM.remove_from_other_config !rpc session_id vm_b key with _ -> () end; + + let (_: Thread.t) = Thread.create + (fun () -> + let token = ref "" in + while not (Mutex.execute m (fun () -> !finished)) do + let events = Client.Event.from !rpc 
session_id [ Printf.sprintf "vm/%s" (Ref.string_of vm_a) ] (!token) 10. |> event_from_of_rpc in + List.iter + (fun event -> + if event.reference <> Ref.string_of vm_a then begin + debug test (Printf.sprintf "event on %s which we aren't watching" event.reference); + Mutex.execute m + (fun () -> + reported_failure := true; + failed test (Printf.sprintf "got unexpected event (new token = %s)" !token); + finished := true; + ) + end + ) events.events; + token := events.token; + let oc = Client.VM.get_other_config !rpc session_id vm_a in + if List.mem_assoc key oc && (List.assoc key oc) = "1" + then Mutex.execute m (fun () -> + debug test (Printf.sprintf "got expected event (new token = %s)" !token); + finished := true; + ); + done + ) () in + Thread.delay 1.; + Client.VM.add_to_other_config !rpc session_id vm_b key "1"; + Thread.delay 1.; + Client.VM.remove_from_other_config !rpc session_id vm_b key; + Client.VM.add_to_other_config !rpc session_id vm_a key "1"; + Thread.delay 1.; + Mutex.execute m + (fun () -> + if not (!reported_failure) then begin + if !finished + then success test + else failed test "failed to see object-level event change" + end + ) let event_message_test session_id = - let test = make_test "Message creation event test" 1 in - start test; - let events = Client.Event.from !rpc session_id [ "message" ] "" 1.0 |> event_from_of_rpc in - let token = events.token in - let pool = List.hd (Client.Pool.get_all !rpc session_id) in - let obj_uuid = Client.Pool.get_uuid !rpc session_id pool in - debug test "Creating message"; - let cls = `Pool in - let message = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls - ~obj_uuid ~body:"Hello" in - debug test (Printf.sprintf "Created message: %s" (Ref.string_of message)); - let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc - in - debug test (Printf.sprintf "Got some events: %d %s" (List.length events.events) (String.concat "," (List.map (fun ev -> 
ev.reference) events.events))); - let token = events.token in - if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op = `add) events.events - then success test - else failed test "Failed to receive an event with the message"; - - let test = make_test "Message deletion event test" 1 in - start test; - debug test "Destroying message"; - Client.Message.destroy !rpc session_id message; - let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in - debug test "Got some events"; - if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op = `del) events.events - then success test - else failed test "Failed to receive a delete event"; - - let test = make_test "Message deletion from cache test" 1 in - start test; - let events = Client.Event.from !rpc session_id [ "message" ] "" 1.0 |> event_from_of_rpc in - debug test "Got lots of events"; - if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op <> `del) events.events - then failed test "Got told about a deleted message" - else success test; - - let test = make_test "Multi message test" 1 in - start test; - let message1 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls - ~obj_uuid ~body:"Hello" in - let message2 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls - ~obj_uuid ~body:"Hello" in - let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in - let token = events.token in - let message3 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls - ~obj_uuid ~body:"Hello" in - let events2 = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in - debug test (Printf.sprintf "message1=%s" (Ref.string_of message1)); - debug test (Printf.sprintf "message2=%s" (Ref.string_of message2)); - debug test (Printf.sprintf "message3=%s" (Ref.string_of message3)); - List.iter (fun ev -> debug test 
(Printf.sprintf "events1: ev.ref=%s" ev.reference)) events.events; - List.iter (fun ev -> debug test (Printf.sprintf "events2: ev.ref=%s" ev.reference)) events2.events; - let ok1 = - List.exists (fun ev -> ev.reference = (Ref.string_of message1) && ev.op = `add) events.events && - List.exists (fun ev -> ev.reference = (Ref.string_of message2) && ev.op = `add) events.events in - let ok2 = - List.exists (fun ev -> ev.reference = (Ref.string_of message3) && ev.op = `add) events2.events in - let ok3 = - not (List.exists (fun ev -> ev.reference = (Ref.string_of message1) && ev.op = `add) events2.events) && - not (List.exists (fun ev -> ev.reference = (Ref.string_of message2) && ev.op = `add) events2.events) - in - if ok1 && ok2 && ok3 then success test else failed test (Printf.sprintf "ok1=%b ok2=%b ok3=%b" ok1 ok2 ok3); - - let test = make_test "Object messages test" 1 in - start test; - debug test (Printf.sprintf "Finding messages for object: %s" (Client.Pool.get_uuid !rpc session_id pool)); - let messages = Client.Message.get ~rpc:!rpc ~session_id ~cls ~obj_uuid ~since:(Date.never) in - let has_msg m = List.exists (fun (r,_) -> r=m) messages in - let ok = has_msg message1 && has_msg message2 && has_msg message3 in - if ok then success test else failed test "Failed to get messages for object" + let test = make_test "Message creation event test" 1 in + start test; + let events = Client.Event.from !rpc session_id [ "message" ] "" 1.0 |> event_from_of_rpc in + let token = events.token in + let pool = List.hd (Client.Pool.get_all !rpc session_id) in + let obj_uuid = Client.Pool.get_uuid !rpc session_id pool in + debug test "Creating message"; + let cls = `Pool in + let message = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls + ~obj_uuid ~body:"Hello" in + debug test (Printf.sprintf "Created message: %s" (Ref.string_of message)); + let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc + in + debug test 
(Printf.sprintf "Got some events: %d %s" (List.length events.events) (String.concat "," (List.map (fun ev -> ev.reference) events.events))); + let token = events.token in + if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op = `add) events.events + then success test + else failed test "Failed to receive an event with the message"; + + let test = make_test "Message deletion event test" 1 in + start test; + debug test "Destroying message"; + Client.Message.destroy !rpc session_id message; + let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in + debug test "Got some events"; + if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op = `del) events.events + then success test + else failed test "Failed to receive a delete event"; + + let test = make_test "Message deletion from cache test" 1 in + start test; + let events = Client.Event.from !rpc session_id [ "message" ] "" 1.0 |> event_from_of_rpc in + debug test "Got lots of events"; + if List.exists (fun ev -> ev.reference = (Ref.string_of message) && ev.op <> `del) events.events + then failed test "Got told about a deleted message" + else success test; + + let test = make_test "Multi message test" 1 in + start test; + let message1 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls + ~obj_uuid ~body:"Hello" in + let message2 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls + ~obj_uuid ~body:"Hello" in + let events = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in + let token = events.token in + let message3 = Client.Message.create ~rpc:!rpc ~session_id ~name:"test" ~priority:1L ~cls + ~obj_uuid ~body:"Hello" in + let events2 = Client.Event.from !rpc session_id [ "message" ] token 1.0 |> event_from_of_rpc in + debug test (Printf.sprintf "message1=%s" (Ref.string_of message1)); + debug test (Printf.sprintf "message2=%s" (Ref.string_of message2)); + debug 
test (Printf.sprintf "message3=%s" (Ref.string_of message3)); + List.iter (fun ev -> debug test (Printf.sprintf "events1: ev.ref=%s" ev.reference)) events.events; + List.iter (fun ev -> debug test (Printf.sprintf "events2: ev.ref=%s" ev.reference)) events2.events; + let ok1 = + List.exists (fun ev -> ev.reference = (Ref.string_of message1) && ev.op = `add) events.events && + List.exists (fun ev -> ev.reference = (Ref.string_of message2) && ev.op = `add) events.events in + let ok2 = + List.exists (fun ev -> ev.reference = (Ref.string_of message3) && ev.op = `add) events2.events in + let ok3 = + not (List.exists (fun ev -> ev.reference = (Ref.string_of message1) && ev.op = `add) events2.events) && + not (List.exists (fun ev -> ev.reference = (Ref.string_of message2) && ev.op = `add) events2.events) + in + if ok1 && ok2 && ok3 then success test else failed test (Printf.sprintf "ok1=%b ok2=%b ok3=%b" ok1 ok2 ok3); + + let test = make_test "Object messages test" 1 in + start test; + debug test (Printf.sprintf "Finding messages for object: %s" (Client.Pool.get_uuid !rpc session_id pool)); + let messages = Client.Message.get ~rpc:!rpc ~session_id ~cls ~obj_uuid ~since:(Date.never) in + let has_msg m = List.exists (fun (r,_) -> r=m) messages in + let ok = has_msg message1 && has_msg message2 && has_msg message3 in + if ok then success test else failed test "Failed to get messages for object" let event_inject_test session_id = let test = make_test "Event.inject test" 0 in @@ -281,19 +281,19 @@ let event_inject_test session_id = let pool = List.hd (Client.Pool.get_all !rpc session_id) in let starttime = Unix.gettimeofday () in let (x: Thread.t) = Thread.create - (fun () -> - let _ = Client.Event.from !rpc session_id [ "pool" ] token 5.0 in - () - ) () in + (fun () -> + let _ = Client.Event.from !rpc session_id [ "pool" ] token 5.0 in + () + ) () in ignore(Client.Event.inject ~rpc:!rpc ~session_id ~_class:"pool" ~_ref:(Ref.string_of pool)); Thread.join x; let endtime = 
Unix.gettimeofday () in - if endtime -. starttime > 4.5 + if endtime -. starttime > 4.5 then failed test "Failed to see injected event" else success test -let all_srs_with_vdi_create session_id = +let all_srs_with_vdi_create session_id = Quicktest_storage.list_srs session_id (* Filter out those which support the vdi_create capability *) |> List.filter (fun sr -> List.mem Quicktest_storage.vdi_create (Quicktest_storage.sm_caps_of_sr session_id sr)) @@ -303,7 +303,7 @@ let all_srs_with_vdi_create session_id = |> List.filter (fun sr -> Client.SR.get_content_type !rpc session_id sr <> "iso") (** Create a small VM with a selection of CDs, empty drives, "iso" Disks etc *) -let setup_export_test_vm session_id = +let setup_export_test_vm session_id = let test = make_test "Setting up test VM" 1 in start test; let t = find_template session_id other in @@ -324,9 +324,9 @@ let setup_export_test_vm session_id = let smallest : int64 option list = List.map (fun sr -> Quicktest_storage.find_smallest_disk_size session_id sr) all_srs in let sr_names = List.map (Quicktest_storage.name_of_sr session_id) all_srs in List.iter (function - | sr, Some size -> debug test (Printf.sprintf "SR %s has minimum disk size: %Ld" sr size) - | sr, None -> debug test (Printf.sprintf "SR %s has no minimum disk size!" sr) - ) (List.combine sr_names smallest); + | sr, Some size -> debug test (Printf.sprintf "SR %s has minimum disk size: %Ld" sr size) + | sr, None -> debug test (Printf.sprintf "SR %s has no minimum disk size!" 
sr) + ) (List.combine sr_names smallest); let minimum = List.fold_left min (1L ** gib) (List.map (fun x -> Opt.default (1L ** gib) x) smallest) in let sr = match List.filter (fun (_, size) -> size = Some minimum) (List.combine all_srs smallest) with @@ -337,16 +337,16 @@ let setup_export_test_vm session_id = in debug test (Printf.sprintf "Using a disk size of: %Ld on SR: %s" minimum (Quicktest_storage.name_of_sr session_id sr)); let vdi = Client.VDI.create !rpc session_id "small" - "description" sr 4194304L `user false false [] [] [] [] in + "description" sr 4194304L `user false false [] [] [] [] in ignore(Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:cd ~userdevice:"0" ~bootable:false - ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); + ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); ignore(Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:cd ~userdevice:"1" ~bootable:false - ~mode:`RO ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); + ~mode:`RO ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); ignore(Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:cd ~userdevice:"2" ~bootable:false - ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:true ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); + ~mode:`RO ~_type:`CD ~unpluggable:true ~empty:true ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[]); ignore(Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:vdi ~userdevice:"3" ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[Xapi_globs.owner_key,""] - ~qos_algorithm_type:"" ~qos_algorithm_params:[]); + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[Xapi_globs.owner_key,""] + ~qos_algorithm_type:"" 
~qos_algorithm_params:[]); success test; vm @@ -355,7 +355,7 @@ let all_non_iso_srs_with_vdi_create session_id = (fun sr -> "iso" <> Client.SR.get_content_type !rpc session_id sr) (all_srs_with_vdi_create session_id) -let import_export_test session_id = +let import_export_test session_id = let test = make_test "VM import/export test" 0 in start test; let vm = setup_export_test_vm session_id in @@ -369,28 +369,28 @@ let import_export_test session_id = debug test (Printf.sprintf "Attempting import to SR: %s" (Quicktest_storage.name_of_sr session_id sr)); let vm' = List.hd (vm_import ~sr test session_id export_filename) in let vbds = Client.VM.get_VBDs !rpc session_id vm' in - + if List.length vbds <> (List.length by_device) then failed test "Wrong number of VBDs after import"; - List.iter (fun vbd -> - let all = Client.VBD.get_record !rpc session_id vbd in - let orig_vbd = List.assoc all.API.vBD_userdevice by_device in - let orig_vbd = Client.VBD.get_record !rpc session_id orig_vbd in - - (* type, empty should match *) - if all.API.vBD_type <> orig_vbd.API.vBD_type - then failed test (Printf.sprintf "Device %s varies in type" all.API.vBD_userdevice); - if all.API.vBD_empty <> orig_vbd.API.vBD_empty - then failed test (Printf.sprintf "Device %s varies in emptiness" all.API.vBD_userdevice); - match all.API.vBD_userdevice with - | "0" | "1" | "2" -> - (* VDI should be the same *) - if all.API.vBD_VDI <> orig_vbd.API.vBD_VDI - then failed test (Printf.sprintf "Device %s varies in VDIness (original = %s; new = %s)" all.API.vBD_userdevice (Client.VDI.get_uuid !rpc session_id orig_vbd.API.vBD_VDI) (Client.VDI.get_uuid !rpc session_id all.API.vBD_VDI)); - | "3" -> - (* VDI should be different *) - if all.API.vBD_VDI = orig_vbd.API.vBD_VDI - then failed test (Printf.sprintf "Device %s should not vary in VDIness" all.API.vBD_userdevice) - | _ -> failed test (Printf.sprintf "Unhandled device number: %s" all.API.vBD_userdevice)) vbds; + List.iter (fun vbd -> + let all = 
Client.VBD.get_record !rpc session_id vbd in + let orig_vbd = List.assoc all.API.vBD_userdevice by_device in + let orig_vbd = Client.VBD.get_record !rpc session_id orig_vbd in + + (* type, empty should match *) + if all.API.vBD_type <> orig_vbd.API.vBD_type + then failed test (Printf.sprintf "Device %s varies in type" all.API.vBD_userdevice); + if all.API.vBD_empty <> orig_vbd.API.vBD_empty + then failed test (Printf.sprintf "Device %s varies in emptiness" all.API.vBD_userdevice); + match all.API.vBD_userdevice with + | "0" | "1" | "2" -> + (* VDI should be the same *) + if all.API.vBD_VDI <> orig_vbd.API.vBD_VDI + then failed test (Printf.sprintf "Device %s varies in VDIness (original = %s; new = %s)" all.API.vBD_userdevice (Client.VDI.get_uuid !rpc session_id orig_vbd.API.vBD_VDI) (Client.VDI.get_uuid !rpc session_id all.API.vBD_VDI)); + | "3" -> + (* VDI should be different *) + if all.API.vBD_VDI = orig_vbd.API.vBD_VDI + then failed test (Printf.sprintf "Device %s should not vary in VDIness" all.API.vBD_userdevice) + | _ -> failed test (Printf.sprintf "Unhandled device number: %s" all.API.vBD_userdevice)) vbds; vm_uninstall test session_id vm' ) all_srs; vm_uninstall test session_id vm; @@ -398,123 +398,123 @@ let import_export_test session_id = success test (* Expect that two VMs have identical looking VIFs, mapped to the same Networks *) -let compare_vifs session_id test one two = +let compare_vifs session_id test one two = let one_vifs = Client.VM.get_VIFs !rpc session_id one in let two_vifs = Client.VM.get_VIFs !rpc session_id two in if List.length one_vifs <> (List.length two_vifs) then begin - failed test (Printf.sprintf "Original VM had %d VIFs; clone has %d VIFs" - (List.length one_vifs) (List.length two_vifs)); + failed test (Printf.sprintf "Original VM had %d VIFs; clone has %d VIFs" + (List.length one_vifs) (List.length two_vifs)); failwith "powercycle_test" end; let one_vifs = List.filter (fun vif -> Client.VIF.get_currently_attached !rpc session_id 
vif) one_vifs in let two_vifs = List.filter (fun vif -> Client.VIF.get_currently_attached !rpc session_id vif) two_vifs in if List.length one_vifs <> (List.length two_vifs) then begin - failed test (Printf.sprintf "Original VM had %d currently_attached VIFs; clone has %d currently_attached VIFs" - (List.length one_vifs) (List.length two_vifs)); + failed test (Printf.sprintf "Original VM had %d currently_attached VIFs; clone has %d currently_attached VIFs" + (List.length one_vifs) (List.length two_vifs)); failwith "powercycle_test" end; (* look up two's VIFs by their device name *) let by_device = List.map (fun vif -> Client.VIF.get_device !rpc session_id vif, vif) two_vifs in List.iter (fun vif -> - let dev = Client.VIF.get_device !rpc session_id vif in - if not(List.mem_assoc dev by_device) then begin - failed test (Printf.sprintf "Original VM has attached VIF device %s; clone has no" dev); - failwith "powercycle_test" - end; - let vif' = List.assoc dev by_device in - let one_net = Client.VIF.get_network !rpc session_id vif - and two_net = Client.VIF.get_network !rpc session_id vif' in - if one_net <> two_net then begin - failed test (Printf.sprintf "Original VM has attached VIF device %s plugged into Network %s; clone has Network %s" dev (Client.Network.get_uuid !rpc session_id one_net) (Client.Network.get_uuid !rpc session_id two_net)); - failwith "powercycle_test" - end) one_vifs + let dev = Client.VIF.get_device !rpc session_id vif in + if not(List.mem_assoc dev by_device) then begin + failed test (Printf.sprintf "Original VM has attached VIF device %s; clone has no" dev); + failwith "powercycle_test" + end; + let vif' = List.assoc dev by_device in + let one_net = Client.VIF.get_network !rpc session_id vif + and two_net = Client.VIF.get_network !rpc session_id vif' in + if one_net <> two_net then begin + failed test (Printf.sprintf "Original VM has attached VIF device %s plugged into Network %s; clone has Network %s" dev (Client.Network.get_uuid !rpc 
session_id one_net) (Client.Network.get_uuid !rpc session_id two_net)); + failwith "powercycle_test" + end) one_vifs (* Expect that two VMs have identical looking VBDs, mapped to the same VDIs *) -let compare_vbds session_id test one two = +let compare_vbds session_id test one two = let one_vbds = Client.VM.get_VBDs !rpc session_id one in let two_vbds = Client.VM.get_VBDs !rpc session_id two in if List.length one_vbds <> (List.length two_vbds) then begin - failed test (Printf.sprintf "Original VM had %d VBDs; clone has %d VBDs" - (List.length one_vbds) (List.length two_vbds)); + failed test (Printf.sprintf "Original VM had %d VBDs; clone has %d VBDs" + (List.length one_vbds) (List.length two_vbds)); failwith "powercycle_test" end; let one_vbds = List.filter (fun vbd -> Client.VBD.get_currently_attached !rpc session_id vbd) one_vbds in let two_vbds = List.filter (fun vbd -> Client.VBD.get_currently_attached !rpc session_id vbd) two_vbds in if List.length one_vbds <> (List.length two_vbds) then begin - failed test (Printf.sprintf "Original VM had %d currently_attached VBDs; clone has %d currently_attached VBDs" - (List.length one_vbds) (List.length two_vbds)); + failed test (Printf.sprintf "Original VM had %d currently_attached VBDs; clone has %d currently_attached VBDs" + (List.length one_vbds) (List.length two_vbds)); failwith "powercycle_test" end; (* look up two's VBDs by their device name *) let by_device = List.map (fun vbd -> Client.VBD.get_userdevice !rpc session_id vbd, vbd) two_vbds in List.iter (fun vbd -> - let dev = Client.VBD.get_userdevice !rpc session_id vbd in - if not(List.mem_assoc dev by_device) then begin - failed test (Printf.sprintf "Original VM has attached VBD device %s; clone has no" dev); - failwith "powercycle_test" - end; - let vbd' = List.assoc dev by_device in - let one_vdi = Client.VBD.get_VDI !rpc session_id vbd - and two_vdi = Client.VBD.get_VDI !rpc session_id vbd' in - if one_vdi <> two_vdi then begin - failed test (Printf.sprintf 
"Original VM has attached VBD device %s plugged into VDI %s; clone has VDI %s" dev (Client.VDI.get_uuid !rpc session_id one_vdi) (Client.VDI.get_uuid !rpc session_id two_vdi)); - failwith "powercycle_test" - end) one_vbds - -let compare_vms session_id test one two = + let dev = Client.VBD.get_userdevice !rpc session_id vbd in + if not(List.mem_assoc dev by_device) then begin + failed test (Printf.sprintf "Original VM has attached VBD device %s; clone has no" dev); + failwith "powercycle_test" + end; + let vbd' = List.assoc dev by_device in + let one_vdi = Client.VBD.get_VDI !rpc session_id vbd + and two_vdi = Client.VBD.get_VDI !rpc session_id vbd' in + if one_vdi <> two_vdi then begin + failed test (Printf.sprintf "Original VM has attached VBD device %s plugged into VDI %s; clone has VDI %s" dev (Client.VDI.get_uuid !rpc session_id one_vdi) (Client.VDI.get_uuid !rpc session_id two_vdi)); + failwith "powercycle_test" + end) one_vbds + +let compare_vms session_id test one two = let one_r = Client.VM.get_record !rpc session_id one and two_r = Client.VM.get_record !rpc session_id two in - (* check the power-state field *) - if one_r.API.vM_power_state <> two_r.API.vM_power_state then begin - failed test (Printf.sprintf "Original VM powerstate = %s; copy has %s" - (Record_util.power_to_string one_r.API.vM_power_state) - (Record_util.power_to_string two_r.API.vM_power_state)); - failwith "powercycle_test"; - end; - - (* Check one 'normal' field and one 'last_boot_record' field *) - if one_r.API.vM_HVM_shadow_multiplier <> two_r.API.vM_HVM_shadow_multiplier then begin - failed test (Printf.sprintf "Original VM has shadow_multiplier = %f; copy has %f" - one_r.API.vM_HVM_shadow_multiplier two_r.API.vM_HVM_shadow_multiplier); - failwith "powercycle_test" - end; - - if one_r.API.vM_power_state <> `Halted then begin - let one_b = Client.VM.get_boot_record !rpc session_id one - and two_b = Client.VM.get_boot_record !rpc session_id two in - if one_b.API.vM_HVM_shadow_multiplier 
<> two_b.API.vM_HVM_shadow_multiplier then begin - failed test (Printf.sprintf "Original VM has live shadow_multiplier = %f; copy has %f" - one_b.API.vM_HVM_shadow_multiplier two_b.API.vM_HVM_shadow_multiplier); - failwith "powercycle_test" - end - end; - - (* check snapshot fields *) - if one_r.API.vM_is_a_snapshot <> two_r.API.vM_is_a_snapshot || - one_r.API.vM_is_a_template <> two_r.API.vM_is_a_template || - one_r.API.vM_snapshot_time <> two_r.API.vM_snapshot_time then begin - failed test (Printf.sprintf - "Original VM has snapshot metadata: is-a-snapshot:%b, is-a-template:%b, snapshot-time:%s; copy has is-a-snapshot:%b, is-a-template:%b, snapshot-time:%s" - one_r.API.vM_is_a_snapshot one_r.API.vM_is_a_template (Date.to_string one_r.API.vM_snapshot_time) - two_r.API.vM_is_a_snapshot two_r.API.vM_is_a_template (Date.to_string two_r.API.vM_snapshot_time)); - failwith "powercycle_test"; - end + (* check the power-state field *) + if one_r.API.vM_power_state <> two_r.API.vM_power_state then begin + failed test (Printf.sprintf "Original VM powerstate = %s; copy has %s" + (Record_util.power_to_string one_r.API.vM_power_state) + (Record_util.power_to_string two_r.API.vM_power_state)); + failwith "powercycle_test"; + end; + + (* Check one 'normal' field and one 'last_boot_record' field *) + if one_r.API.vM_HVM_shadow_multiplier <> two_r.API.vM_HVM_shadow_multiplier then begin + failed test (Printf.sprintf "Original VM has shadow_multiplier = %f; copy has %f" + one_r.API.vM_HVM_shadow_multiplier two_r.API.vM_HVM_shadow_multiplier); + failwith "powercycle_test" + end; + + if one_r.API.vM_power_state <> `Halted then begin + let one_b = Client.VM.get_boot_record !rpc session_id one + and two_b = Client.VM.get_boot_record !rpc session_id two in + if one_b.API.vM_HVM_shadow_multiplier <> two_b.API.vM_HVM_shadow_multiplier then begin + failed test (Printf.sprintf "Original VM has live shadow_multiplier = %f; copy has %f" + one_b.API.vM_HVM_shadow_multiplier 
two_b.API.vM_HVM_shadow_multiplier); + failwith "powercycle_test" + end + end; + + (* check snapshot fields *) + if one_r.API.vM_is_a_snapshot <> two_r.API.vM_is_a_snapshot || + one_r.API.vM_is_a_template <> two_r.API.vM_is_a_template || + one_r.API.vM_snapshot_time <> two_r.API.vM_snapshot_time then begin + failed test (Printf.sprintf + "Original VM has snapshot metadata: is-a-snapshot:%b, is-a-template:%b, snapshot-time:%s; copy has is-a-snapshot:%b, is-a-template:%b, snapshot-time:%s" + one_r.API.vM_is_a_snapshot one_r.API.vM_is_a_template (Date.to_string one_r.API.vM_snapshot_time) + two_r.API.vM_is_a_snapshot two_r.API.vM_is_a_template (Date.to_string two_r.API.vM_snapshot_time)); + failwith "powercycle_test"; + end let compare_snapshots session_id test one two = - let get_snapshots x = Client.VM.get_snapshots !rpc session_id x in - let sort l = - let lt = List.map (fun s -> s, Client.VM.get_snapshot_time !rpc session_id s) l in - let lt_sorted = List.sort (fun (s1, t1) (s2, t2) -> compare t1 t2) lt in - let l_sorted, _ = List.split lt_sorted in - l_sorted in - let one_s = sort (get_snapshots one) in - let two_s = sort (get_snapshots two) in - let compare_all x y = - compare_vifs session_id test x y; - compare_vbds session_id test x y; - compare_vms session_id test x y in - List.iter2 compare_all one_s two_s + let get_snapshots x = Client.VM.get_snapshots !rpc session_id x in + let sort l = + let lt = List.map (fun s -> s, Client.VM.get_snapshot_time !rpc session_id s) l in + let lt_sorted = List.sort (fun (s1, t1) (s2, t2) -> compare t1 t2) lt in + let l_sorted, _ = List.split lt_sorted in + l_sorted in + let one_s = sort (get_snapshots one) in + let two_s = sort (get_snapshots two) in + let compare_all x y = + compare_vifs session_id test x y; + compare_vbds session_id test x y; + compare_vms session_id test x y in + List.iter2 compare_all one_s two_s let read_sys path = Xstringext.String.strip Xstringext.String.isspace (Unixext.string_of_file path) @@ 
-527,7 +527,7 @@ let verify_network_connectivity session_id test vm = let device = Printf.sprintf "vif%Ld.%s" (Client.VM.get_domid !rpc session_id vm) (Client.VIF.get_device !rpc session_id vif) in let devices = Netdev.network.Netdev.intf_list bridge in let other_config = Client.VIF.get_other_config !rpc session_id vif in - if not(List.mem device devices) + if not(List.mem device devices) then failed test (Printf.sprintf "Failed to find device %s on bridge %s (found [ %s ])" device bridge (String.concat ", " devices)) else debug test (Printf.sprintf "Device %s is on bridge %s" device bridge); @@ -537,7 +537,7 @@ let verify_network_connectivity session_id test vm = then begin let promisc = List.mem_assoc "promiscuous" other_config && (let x = List.assoc "promiscuous" other_config in x = "true" || x = "on") in let promisc' = read_sys sysfs_promisc = "1" in - if promisc <> promisc' + if promisc <> promisc' then failed test (Printf.sprintf "VIF.other_config says promiscuous mode is %b while dom0 /sys says %b" promisc promisc') else debug test (Printf.sprintf "VIF.other_config and dom0 /sys agree that promiscuous mode is %b" promisc); end else @@ -548,12 +548,12 @@ let verify_network_connectivity session_id test vm = let mtu' = if List.mem_assoc "mtu" other_config then Int64.of_string(List.assoc "mtu" other_config) else mtu in let mtu'' = Int64.of_string (read_sys (Printf.sprintf "/sys/class/net/%s/mtu" device)) in - if mtu' <> mtu'' + if mtu' <> mtu'' then failed test (Printf.sprintf "VIF.MTU is %Ld but /sys says %Ld" mtu' mtu'') else debug test (Printf.sprintf "VIF.MTU is %Ld and /sys says %Ld" mtu' mtu''); ) vifs -let rec wait_for_task_complete session_id task = +let rec wait_for_task_complete session_id task = Thread.delay 1.; match Client.Task.get_status !rpc session_id task with | `pending | `cancelling -> wait_for_task_complete session_id task @@ -561,149 +561,149 @@ let rec wait_for_task_complete session_id task = (* CP-831 *) let test_vhd_locking_hook 
session_id vm = - let test = make_test "test vhd locking hook" 2 in - start test; - Client.VM.start !rpc session_id vm false false; - (* Add a new VDI whose VBD is unplugged (so 2 plugged, 1 unplugged *) - - let all_srs = all_srs_with_vdi_create session_id in - let sr = List.hd all_srs in - - let new_vdi = Client.VDI.create !rpc session_id "lvhd_testvdi" - "description" sr 4194304L `user false false [] [] [] [] in - let new_vbd = Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:new_vdi ~userdevice:"9" ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[Xapi_globs.owner_key,""] - ~qos_algorithm_type:"" ~qos_algorithm_params:[] in - - (* In a background thread plug/unplug the new VBD to cause some transient locking failures *) - let start = Unix.gettimeofday () in - debug test "Starting up conflicting thread in the background"; - let total_bg_ops = ref 0 in - let t = Thread.create - (fun () -> - while Unix.gettimeofday () -. start < 30. do - (* We throw away exceptions because unplugs can fail (if the guest isn't ready) and this causes the - next plug to fail. 
We use asynchronous operations because we are sharing a single HTTP connection to the - master and we genuinely want the operations to (attempt to) execute in parallel *) - let task = Client.Async.VBD.plug !rpc session_id new_vbd in - incr total_bg_ops; - wait_for_task_complete session_id task; - let task = Client.Async.VBD.unplug !rpc session_id new_vbd in - incr total_bg_ops; - wait_for_task_complete session_id task - done) () in - (* Give the background thread a chance to start *) - Thread.delay 1.5; - (* Verify that the function 'test' can be called in the script *) - - Thread.join t; - debug test (Printf.sprintf "Meanwhile background thread executed %d conflicting operations" !total_bg_ops); - success test - -let powercycle_test session_id vm = - let test = make_test "Powercycling VM" 1 in - start test; - (* avoid the race whereby reboot requests are ignored if too early *) - let delay () = - debug test "Pausing for 10s"; - Thread.delay 10. in - debug test (Printf.sprintf "Trying to enable VM.clone for suspended VMs pool-wide"); - let pool = get_pool session_id in - let enabled_csvm = - try Client.Pool.add_to_other_config !rpc session_id pool "allow_clone_suspended_vm" "true"; true - with _ -> false in - finally - (fun () -> - (* We play with three VMs: - 1. a clean install of a VM (vm) - 2. a suspended clone of (1) (vm') - 3. 
a metadata import of the metadata export of (2) (vm'') - *) - debug test "Starting VM"; - Client.VM.start !rpc session_id vm false false; - (* Check that all VBDs are plugged in correctly *) - List.iter - (fun vbd -> - let currently_attached = Client.VBD.get_currently_attached !rpc session_id vbd in - if not currently_attached then failwith "after VM.start not currently_attached"; - ) (Client.VM.get_VBDs !rpc session_id vm); - delay (); - debug test "Rebooting VM"; - Client.VM.clean_reboot !rpc session_id vm; - delay (); - debug test "Shutting down VM"; - Client.VM.clean_shutdown !rpc session_id vm; - debug test "Starting VM again"; - Client.VM.start !rpc session_id vm false false; - verify_network_connectivity session_id test vm; - delay (); - debug test "Setting shadow-multiplier live to 10."; - Client.VM.set_shadow_multiplier_live !rpc session_id vm 10.; - delay (); - debug test "Suspending VM"; - Client.VM.suspend !rpc session_id vm; - debug test "Cloning suspended VM"; - let vm' = Client.VM.clone !rpc session_id vm "clone-suspended-test" in - debug test "Snapshoting the VM twice"; - ignore(Client.VM.snapshot !rpc session_id vm' "snap1"); - ignore(Client.VM.snapshot !rpc session_id vm' "snap2"); - - debug test "Comparing original, clone VIF configuration"; - compare_vifs session_id test vm vm'; - debug test "Comparing original, clone VM configuration"; - compare_vms session_id test vm vm'; - - debug test "Importing metadata export of cloned suspended VM"; - Unixext.unlink_safe export_filename; - vm_export ~metadata_only:true test session_id vm' export_filename; - let vms = vm_import ~metadata_only:true test session_id export_filename in - let vm'' = List.find (fun vm -> Client.VM.get_name_label !rpc session_id vm = "clone-suspended-test") vms in - debug test "Comparing clone, import VIF configuration"; - compare_vifs session_id test vm' vm''; - debug test "Comparing clone, import VBD configuration"; - compare_vbds session_id test vm' vm''; - debug test 
"Comparing clone, import VM configuration"; - compare_vms session_id test vm' vm''; - debug test "Comparing clone, import snapshot configuration"; - compare_snapshots session_id test vm' vm''; - debug test "Comparing original, import VIF configuration"; - compare_vifs session_id test vm vm''; - debug test "Comparing original, import VM configuration"; - compare_vms session_id test vm vm''; - - debug test "Resuming original VM"; - Client.VM.resume !rpc session_id vm false false; - verify_network_connectivity session_id test vm; - let host = Client.VM.get_resident_on !rpc session_id vm in - debug test "Performing localhost migrate of original VM"; - Client.VM.pool_migrate !rpc session_id vm host []; - verify_network_connectivity session_id test vm; - debug test "Shutting down original VM"; - Client.VM.clean_shutdown !rpc session_id vm; - debug test "Resuming imported VM"; - Client.VM.resume !rpc session_id vm'' false false; - verify_network_connectivity session_id test vm''; - debug test "Shutting down imported VMs"; - List.iter (fun vm -> if Client.VM.get_power_state !rpc session_id vm <> `Halted then Client.VM.hard_shutdown !rpc session_id vm) vms; - - (* Keep the imported VM and chuck away the clone *) - (* NB cannot do this earlier because the suspend VDI would be destroyed - and prevent the other VM being resumed *) - Client.VM.hard_shutdown !rpc session_id vm'; - vm_uninstall test session_id vm'; - - debug test "Uninstalling imported VMs"; - List.iter (vm_uninstall test session_id) vms; - success test; - ) (fun () -> - if enabled_csvm then begin - debug test (Printf.sprintf "Disabling VM.clone for suspended VMs pool-wide"); - Client.Pool.remove_from_other_config !rpc session_id pool "allow_clone_suspended_vm" - end) + let test = make_test "test vhd locking hook" 2 in + start test; + Client.VM.start !rpc session_id vm false false; + (* Add a new VDI whose VBD is unplugged (so 2 plugged, 1 unplugged *) + + let all_srs = all_srs_with_vdi_create session_id in + let 
sr = List.hd all_srs in + + let new_vdi = Client.VDI.create !rpc session_id "lvhd_testvdi" + "description" sr 4194304L `user false false [] [] [] [] in + let new_vbd = Client.VBD.create ~rpc:!rpc ~session_id ~vM:vm ~vDI:new_vdi ~userdevice:"9" ~bootable:false + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[Xapi_globs.owner_key,""] + ~qos_algorithm_type:"" ~qos_algorithm_params:[] in + + (* In a background thread plug/unplug the new VBD to cause some transient locking failures *) + let start = Unix.gettimeofday () in + debug test "Starting up conflicting thread in the background"; + let total_bg_ops = ref 0 in + let t = Thread.create + (fun () -> + while Unix.gettimeofday () -. start < 30. do + (* We throw away exceptions because unplugs can fail (if the guest isn't ready) and this causes the + next plug to fail. We use asynchronous operations because we are sharing a single HTTP connection to the + master and we genuinely want the operations to (attempt to) execute in parallel *) + let task = Client.Async.VBD.plug !rpc session_id new_vbd in + incr total_bg_ops; + wait_for_task_complete session_id task; + let task = Client.Async.VBD.unplug !rpc session_id new_vbd in + incr total_bg_ops; + wait_for_task_complete session_id task + done) () in + (* Give the background thread a chance to start *) + Thread.delay 1.5; + (* Verify that the function 'test' can be called in the script *) + + Thread.join t; + debug test (Printf.sprintf "Meanwhile background thread executed %d conflicting operations" !total_bg_ops); + success test + +let powercycle_test session_id vm = + let test = make_test "Powercycling VM" 1 in + start test; + (* avoid the race whereby reboot requests are ignored if too early *) + let delay () = + debug test "Pausing for 10s"; + Thread.delay 10. 
in + debug test (Printf.sprintf "Trying to enable VM.clone for suspended VMs pool-wide"); + let pool = get_pool session_id in + let enabled_csvm = + try Client.Pool.add_to_other_config !rpc session_id pool "allow_clone_suspended_vm" "true"; true + with _ -> false in + finally + (fun () -> + (* We play with three VMs: + 1. a clean install of a VM (vm) + 2. a suspended clone of (1) (vm') + 3. a metadata import of the metadata export of (2) (vm'') + *) + debug test "Starting VM"; + Client.VM.start !rpc session_id vm false false; + (* Check that all VBDs are plugged in correctly *) + List.iter + (fun vbd -> + let currently_attached = Client.VBD.get_currently_attached !rpc session_id vbd in + if not currently_attached then failwith "after VM.start not currently_attached"; + ) (Client.VM.get_VBDs !rpc session_id vm); + delay (); + debug test "Rebooting VM"; + Client.VM.clean_reboot !rpc session_id vm; + delay (); + debug test "Shutting down VM"; + Client.VM.clean_shutdown !rpc session_id vm; + debug test "Starting VM again"; + Client.VM.start !rpc session_id vm false false; + verify_network_connectivity session_id test vm; + delay (); + debug test "Setting shadow-multiplier live to 10."; + Client.VM.set_shadow_multiplier_live !rpc session_id vm 10.; + delay (); + debug test "Suspending VM"; + Client.VM.suspend !rpc session_id vm; + debug test "Cloning suspended VM"; + let vm' = Client.VM.clone !rpc session_id vm "clone-suspended-test" in + debug test "Snapshoting the VM twice"; + ignore(Client.VM.snapshot !rpc session_id vm' "snap1"); + ignore(Client.VM.snapshot !rpc session_id vm' "snap2"); + + debug test "Comparing original, clone VIF configuration"; + compare_vifs session_id test vm vm'; + debug test "Comparing original, clone VM configuration"; + compare_vms session_id test vm vm'; + + debug test "Importing metadata export of cloned suspended VM"; + Unixext.unlink_safe export_filename; + vm_export ~metadata_only:true test session_id vm' export_filename; + let vms = 
vm_import ~metadata_only:true test session_id export_filename in + let vm'' = List.find (fun vm -> Client.VM.get_name_label !rpc session_id vm = "clone-suspended-test") vms in + debug test "Comparing clone, import VIF configuration"; + compare_vifs session_id test vm' vm''; + debug test "Comparing clone, import VBD configuration"; + compare_vbds session_id test vm' vm''; + debug test "Comparing clone, import VM configuration"; + compare_vms session_id test vm' vm''; + debug test "Comparing clone, import snapshot configuration"; + compare_snapshots session_id test vm' vm''; + debug test "Comparing original, import VIF configuration"; + compare_vifs session_id test vm vm''; + debug test "Comparing original, import VM configuration"; + compare_vms session_id test vm vm''; + + debug test "Resuming original VM"; + Client.VM.resume !rpc session_id vm false false; + verify_network_connectivity session_id test vm; + let host = Client.VM.get_resident_on !rpc session_id vm in + debug test "Performing localhost migrate of original VM"; + Client.VM.pool_migrate !rpc session_id vm host []; + verify_network_connectivity session_id test vm; + debug test "Shutting down original VM"; + Client.VM.clean_shutdown !rpc session_id vm; + debug test "Resuming imported VM"; + Client.VM.resume !rpc session_id vm'' false false; + verify_network_connectivity session_id test vm''; + debug test "Shutting down imported VMs"; + List.iter (fun vm -> if Client.VM.get_power_state !rpc session_id vm <> `Halted then Client.VM.hard_shutdown !rpc session_id vm) vms; + + (* Keep the imported VM and chuck away the clone *) + (* NB cannot do this earlier because the suspend VDI would be destroyed + and prevent the other VM being resumed *) + Client.VM.hard_shutdown !rpc session_id vm'; + vm_uninstall test session_id vm'; + + debug test "Uninstalling imported VMs"; + List.iter (vm_uninstall test session_id) vms; + success test; + ) (fun () -> + if enabled_csvm then begin + debug test (Printf.sprintf 
"Disabling VM.clone for suspended VMs pool-wide"); + Client.Pool.remove_from_other_config !rpc session_id pool "allow_clone_suspended_vm" + end) (* Make a VDI, find a host to put it on, create a VBD to dom0 on that host, * Attach, Unattach, destroy VBD, destroy VDI *) - + let vdi_test session_id = let test = make_test "VDI.create/copy/destroy test" 0 in start test; @@ -713,7 +713,7 @@ let vdi_test session_id = let sr = List.hd all_srs in let t = Unix.gettimeofday () in let newvdi = Client.VDI.create !rpc session_id "testvdi" - "description" sr 4194304L `user false false [] [] [] [] in + "description" sr 4194304L `user false false [] [] [] [] in let createtime = Unix.gettimeofday () -. t in debug test (Printf.sprintf "Time to create: %f%!" createtime); let pbd = List.hd (Client.SR.get_PBDs !rpc session_id sr) in @@ -722,7 +722,7 @@ let vdi_test session_id = let device = List.hd (Client.VM.get_allowed_VBD_devices !rpc session_id dom0) in debug test (Printf.sprintf "Creating a VBD connecting the VDI to localhost%!"); let vbd = Client.VBD.create ~rpc:!rpc ~session_id ~vM:dom0 ~vDI:newvdi ~userdevice:device ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] in + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] in let t = Unix.gettimeofday () in debug test (Printf.sprintf "Attempting to copy the VDI%!"); let newvdi2 = Client.VDI.copy !rpc session_id newvdi sr Ref.null Ref.null in @@ -734,7 +734,7 @@ let vdi_test session_id = debug test (Printf.sprintf "Destroying copied VDI%!"); Client.VDI.destroy !rpc session_id newvdi2; success test - + (* Test a couple of async calls - VDIs are good for this, again! 
*) let async_test session_id = let test = make_test "Async.VDI.copy" 0 in @@ -742,28 +742,28 @@ let async_test session_id = let all_srs = all_srs_with_vdi_create session_id in let sr = List.hd all_srs in let newvdi = Client.VDI.create !rpc session_id "testvdi" - "description" sr 4194304L `user false false [] [] [] [] in + "description" sr 4194304L `user false false [] [] [] [] in let pbd = List.hd (Client.SR.get_PBDs !rpc session_id sr) in let host = Client.PBD.get_host !rpc session_id pbd in let dom0 = dom0_of_host session_id host in let device = List.hd (Client.VM.get_allowed_VBD_devices !rpc session_id dom0) in let vbd = Client.VBD.create ~rpc:!rpc ~session_id ~vM:dom0 ~vDI:newvdi ~userdevice:device ~bootable:false - ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] in + ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] ~qos_algorithm_type:"" ~qos_algorithm_params:[] in let vdis = Client.VDI.get_all !rpc session_id in let task = Client.Async.VDI.copy !rpc session_id newvdi sr Ref.null Ref.null in wait_for_task_complete session_id task; debug test (Printf.sprintf "Task completed!%!"); let status = Client.Task.get_status !rpc session_id task in debug test (Printf.sprintf "Status: %s result: %s%!" - (match status with - | `pending -> "pending" - | `success -> "success" - | `failure -> "failure" - | `cancelling -> "cancelling" - | `cancelled -> "cancelled") - (Client.Task.get_result !rpc session_id task)); - if status=`failure then - begin + (match status with + | `pending -> "pending" + | `success -> "success" + | `failure -> "failure" + | `cancelling -> "cancelling" + | `cancelled -> "cancelled") + (Client.Task.get_result !rpc session_id task)); + if status=`failure then + begin failed test (Printf.sprintf "Failure of VDI copy! error_info: %s%!" 
(String.concat "," (Client.Task.get_error_info !rpc session_id task))); failwith "Async VDI copy failed" end; @@ -771,31 +771,31 @@ let async_test session_id = let newvdis = List.filter (fun vdi -> try Client.VDI.get_SR !rpc session_id vdi = sr with _ -> false) newvdis in let newvdis2 = List.filter (fun vdi -> not (List.mem vdi vdis)) newvdis in match newvdis2 with - | [newvdi2] -> - debug test (Printf.sprintf "New vdi: %s%!" (Ref.string_of newvdi2)); - Client.VBD.destroy !rpc session_id vbd; - Client.VDI.destroy !rpc session_id newvdi; - Client.VDI.destroy !rpc session_id newvdi2; - success test - | _ -> failwith "Expecting 1 new disk!" - -let make_vif ~session_id ~vM ~network ~device = - Client.VIF.create ~rpc:!rpc ~session_id ~vM ~network ~mTU:0L ~mAC:"" ~device ~other_config:["promiscuous", "on"; "mtu", "1400"] ~qos_algorithm_type:"" ~qos_algorithm_params:[] - -let with_vm s f = + | [newvdi2] -> + debug test (Printf.sprintf "New vdi: %s%!" (Ref.string_of newvdi2)); + Client.VBD.destroy !rpc session_id vbd; + Client.VDI.destroy !rpc session_id newvdi; + Client.VDI.destroy !rpc session_id newvdi2; + success test + | _ -> failwith "Expecting 1 new disk!" 
+ +let make_vif ~session_id ~vM ~network ~device = + Client.VIF.create ~rpc:!rpc ~session_id ~vM ~network ~mTU:0L ~mAC:"" ~device ~other_config:["promiscuous", "on"; "mtu", "1400"] ~qos_algorithm_type:"" ~qos_algorithm_params:[] + +let with_vm s f = try let (_: API.ref_VM) = find_template s vm_template in let test = make_test "Setting up test VM" 0 in start test; let vm = install_vm test s in - f s vm; - vm_uninstall test s vm; - success test + f s vm; + vm_uninstall test s vm; + success test with Unable_to_find_suitable_vm_template -> (* SKIP *) () -let vm_powercycle_test s vm = +let vm_powercycle_test s vm = let test = make_test "VM powercycle test" 1 in start test; (* Try to add some VIFs *) @@ -803,12 +803,12 @@ let vm_powercycle_test s vm = debug test (Printf.sprintf "Adding VIF to guest installer network (%s)" (Client.Network.get_uuid !rpc s guest_installer_network)); let (_: API.ref_VIF) = make_vif ~session_id:s ~vM:vm ~network:guest_installer_network ~device:"0" ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[] in begin match Client.PIF.get_all !rpc s with - | pif :: _ -> - let net = Client.PIF.get_network !rpc s pif in - debug test (Printf.sprintf "Adding VIF to physical network (%s)" (Client.Network.get_uuid !rpc s net)); - let (_: API.ref_VIF) = make_vif ~session_id:s ~vM:vm ~network:net ~device:"1" ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[] in - () - | _ -> () + | pif :: _ -> + let net = Client.PIF.get_network !rpc s pif in + debug test (Printf.sprintf "Adding VIF to physical network (%s)" (Client.Network.get_uuid !rpc s net)); + let (_: API.ref_VIF) = make_vif ~session_id:s ~vM:vm ~network:net ~device:"1" ~locking_mode:`network_default ~ipv4_allowed:[] ~ipv6_allowed:[] in + () + | _ -> () end; powercycle_test s vm; success test @@ -816,72 +816,72 @@ let vm_powercycle_test s vm = let _ = - let all_tests = [ - "storage"; - "vm-placement"; - "vm-memory-constraints"; - "encodings"; - "http"; - "event"; - "vdi"; - 
"async"; - "import"; - "powercycle"; - "lifecycle"; - "vhd"; - "copy"; - ] in - let default_tests = List.filter (fun x -> not(List.mem x [ "lifecycle"; "vhd" ])) all_tests in - - let tests_to_run = ref default_tests in (* default is everything *) - Arg.parse [ - "-xe-path", Arg.String (fun x -> Quicktest_common.xe_path := x), "Path to xe command line executable"; - "-iso-sr-path", Arg.String (fun x -> Quicktest_storage.iso_path := x), "Path to ISO SR"; - "-single", Arg.String (fun x -> tests_to_run := [ x ]), Printf.sprintf "Only run one test (possibilities are %s)" (String.concat ", " all_tests) ; - "-all", Arg.Unit (fun () -> tests_to_run := all_tests), Printf.sprintf "Run all tests (%s)" (String.concat ", " all_tests); - "-nocolour", Arg.Clear Quicktest_common.use_colour, "Don't use colour in the output" ] - (fun x -> match !host, !username, !password with - | "", _, _ -> host := x; rpc := rpc_remote; using_unix_domain_socket := false; - | _, "", _ -> username := x - | _, _, "" -> password := x - | _, _, _ -> Printf.fprintf stderr "Skipping unrecognised argument: %s" x) - "Perform some quick functional tests. The default is to test localhost over a Unix socket. 
For remote server supply and arguments."; - if !host = "" then host := "localhost"; - if !username = "" then username := "root"; - - let maybe_run_test name f = - assert (List.mem name all_tests); - if List.mem name !tests_to_run then f () in - - Stunnel.set_good_ciphersuites "!EXPORT:RSA+AES128-SHA256"; - let s = init_session !username !password in - let all_srs = all_srs_with_vdi_create s in - let sr = List.hd all_srs in - finally - (fun () -> - (try - maybe_run_test "encodings" Quicktest_encodings.run_from_within_quicktest; - maybe_run_test "vm-memory-constraints" Quicktest_vm_memory_constraints.run_from_within_quicktest; - maybe_run_test "vm-placement" Quicktest_vm_placement.run_from_within_quicktest; - maybe_run_test "storage" (fun () -> Quicktest_storage.go s); - if not !using_unix_domain_socket then maybe_run_test "http" Quicktest_http.run_from_within_quicktest; - maybe_run_test "event" event_next_unblocking_test; - maybe_run_test "event" (fun () -> event_next_test s); - maybe_run_test "event" (fun () -> event_from_test s); - maybe_run_test "event" (fun () -> event_from_parallel_test s); -(* maybe_run_test "event" (fun () -> object_level_event_test s);*) - maybe_run_test "event" (fun () -> event_message_test s); - maybe_run_test "event" (fun () -> event_inject_test s); - maybe_run_test "vdi" (fun () -> vdi_test s); - maybe_run_test "async" (fun () -> async_test s); - maybe_run_test "import" (fun () -> import_export_test s); - maybe_run_test "vhd" (fun () -> with_vm s test_vhd_locking_hook); - maybe_run_test "powercycle" (fun () -> with_vm s vm_powercycle_test); - maybe_run_test "lifecycle" (fun () -> with_vm s Quicktest_lifecycle.test); - maybe_run_test "copy" (fun () -> Quicktest_vdi_copy.start s sr); - with - | Api_errors.Server_error (a,b) -> - output_string stderr (Printf.sprintf "%s: %s" a (String.concat "," b)); - | e -> - output_string stderr (Printexc.to_string e)) - ) (fun () -> summarise ()) + let all_tests = [ + "storage"; + "vm-placement"; + 
"vm-memory-constraints"; + "encodings"; + "http"; + "event"; + "vdi"; + "async"; + "import"; + "powercycle"; + "lifecycle"; + "vhd"; + "copy"; + ] in + let default_tests = List.filter (fun x -> not(List.mem x [ "lifecycle"; "vhd" ])) all_tests in + + let tests_to_run = ref default_tests in (* default is everything *) + Arg.parse [ + "-xe-path", Arg.String (fun x -> Quicktest_common.xe_path := x), "Path to xe command line executable"; + "-iso-sr-path", Arg.String (fun x -> Quicktest_storage.iso_path := x), "Path to ISO SR"; + "-single", Arg.String (fun x -> tests_to_run := [ x ]), Printf.sprintf "Only run one test (possibilities are %s)" (String.concat ", " all_tests) ; + "-all", Arg.Unit (fun () -> tests_to_run := all_tests), Printf.sprintf "Run all tests (%s)" (String.concat ", " all_tests); + "-nocolour", Arg.Clear Quicktest_common.use_colour, "Don't use colour in the output" ] + (fun x -> match !host, !username, !password with + | "", _, _ -> host := x; rpc := rpc_remote; using_unix_domain_socket := false; + | _, "", _ -> username := x + | _, _, "" -> password := x + | _, _, _ -> Printf.fprintf stderr "Skipping unrecognised argument: %s" x) + "Perform some quick functional tests. The default is to test localhost over a Unix socket. 
For remote server supply and arguments."; + if !host = "" then host := "localhost"; + if !username = "" then username := "root"; + + let maybe_run_test name f = + assert (List.mem name all_tests); + if List.mem name !tests_to_run then f () in + + Stunnel.set_good_ciphersuites "!EXPORT:RSA+AES128-SHA256"; + let s = init_session !username !password in + let all_srs = all_srs_with_vdi_create s in + let sr = List.hd all_srs in + finally + (fun () -> + (try + maybe_run_test "encodings" Quicktest_encodings.run_from_within_quicktest; + maybe_run_test "vm-memory-constraints" Quicktest_vm_memory_constraints.run_from_within_quicktest; + maybe_run_test "vm-placement" Quicktest_vm_placement.run_from_within_quicktest; + maybe_run_test "storage" (fun () -> Quicktest_storage.go s); + if not !using_unix_domain_socket then maybe_run_test "http" Quicktest_http.run_from_within_quicktest; + maybe_run_test "event" event_next_unblocking_test; + maybe_run_test "event" (fun () -> event_next_test s); + maybe_run_test "event" (fun () -> event_from_test s); + maybe_run_test "event" (fun () -> event_from_parallel_test s); + (* maybe_run_test "event" (fun () -> object_level_event_test s);*) + maybe_run_test "event" (fun () -> event_message_test s); + maybe_run_test "event" (fun () -> event_inject_test s); + maybe_run_test "vdi" (fun () -> vdi_test s); + maybe_run_test "async" (fun () -> async_test s); + maybe_run_test "import" (fun () -> import_export_test s); + maybe_run_test "vhd" (fun () -> with_vm s test_vhd_locking_hook); + maybe_run_test "powercycle" (fun () -> with_vm s vm_powercycle_test); + maybe_run_test "lifecycle" (fun () -> with_vm s Quicktest_lifecycle.test); + maybe_run_test "copy" (fun () -> Quicktest_vdi_copy.start s sr); + with + | Api_errors.Server_error (a,b) -> + output_string stderr (Printf.sprintf "%s: %s" a (String.concat "," b)); + | e -> + output_string stderr (Printexc.to_string e)) + ) (fun () -> summarise ()) diff --git a/ocaml/xapi/quicktest_common.ml 
b/ocaml/xapi/quicktest_common.ml index 6b34c50c946..37807879063 100644 --- a/ocaml/xapi/quicktest_common.ml +++ b/ocaml/xapi/quicktest_common.ml @@ -30,7 +30,7 @@ let total_passed = ref 0 type status = Pending | Success | Failed type vt100 = Control of string | Data of string -let length_of_vt100 sequence = +let length_of_vt100 sequence = let length = function | Control _ -> 0 | Data x -> String.length x in List.fold_left (+) 0 (List.map length sequence) @@ -38,7 +38,7 @@ let flatten_vt100 sequence = List.fold_left (^) "" (List.map (function Control x let escape = String.make 1 (char_of_int 0x1b) let set_attribute attrs = Control(Printf.sprintf "%s[%sm" escape (String.concat ";" (List.map string_of_int attrs))) -let reset = 0 +let reset = 0 let bright = 1 let dim = 2 let red = 31 @@ -52,17 +52,17 @@ let basic_string_of_status = function let coloured_string_of_status = function | Pending -> [ Data " " ] | Success -> - [ Data "[ "; - set_attribute [ bright; green ]; - Data "Success"; - set_attribute [ reset ]; - Data " ]" ] + [ Data "[ "; + set_attribute [ bright; green ]; + Data "Success"; + set_attribute [ reset ]; + Data " ]" ] | Failed -> - [ Data "[ "; - set_attribute [ bright; red ]; - Data "Failed "; - set_attribute [ reset ]; - Data " ]" ] + [ Data "[ "; + set_attribute [ bright; red ]; + Data "Failed "; + set_attribute [ reset ]; + Data " ]" ] let xe_path = ref "/opt/xensource/bin/xe" let use_colour = ref true @@ -94,7 +94,7 @@ let all_tests = Hashtbl.create 10 let make_test name indent = { name = name; indent = indent; status = Pending } -let rec failed (test: test_description) msg = +let rec failed (test: test_description) msg = if not (Hashtbl.mem all_tests test.name) then failwith (Printf.sprintf "Test not started: %s" test.name); if Hashtbl.mem all_tests test.name then Hashtbl.remove all_tests test.name; @@ -102,8 +102,8 @@ let rec failed (test: test_description) msg = debug test msg; Printf.printf "%s\n" (nice_status_output test.indent "" Failed); 
flush stdout - -and start (test: test_description) = + +and start (test: test_description) = incr total_started; Hashtbl.add all_tests test.name test; Printf.printf "%s\n" (nice_status_output test.indent test.name Pending); @@ -112,7 +112,7 @@ and start (test: test_description) = and debug (test: test_description) msg = (* Might need to divide into multiple lines *) let max_length = cols - length_of_vt100 (coloured_string_of_status test.status) - test.indent - 1 in - let rec loop start_offset = + let rec loop start_offset = if start_offset < String.length msg then begin let length = min (String.length msg - start_offset) max_length in let submsg = String.sub msg start_offset length in @@ -122,7 +122,7 @@ and debug (test: test_description) msg = loop 0; flush stdout -let success (test: test_description) = +let success (test: test_description) = if not (Hashtbl.mem all_tests test.name) then failwith (Printf.sprintf "Test not started: %s" test.name); @@ -134,19 +134,19 @@ let success (test: test_description) = Printf.printf "%s\n" (nice_status_output test.indent "" test.status); flush stdout -let summarise () = +let summarise () = Printf.printf "\n\nTotal tests started: %d; total passed: %d (pass rate %.2f%c)\n" !total_started !total_passed (float_of_int !total_passed /. (float_of_int !total_started) *. 100.) 
'%'; flush stdout; Hashtbl.iter (fun name test -> Printf.printf "Test neither succeeded nor failed: %s\n" name) all_tests; - if !total_passed <> !total_started then begin + if !total_passed <> !total_started then begin Printf.printf "*** Some tests failed ***\n"; flush stdout; exit 1; end - -let host = ref "" + +let host = ref "" open Xmlrpc_client let http = xmlrpc ~version:"1.1" "/" @@ -170,55 +170,55 @@ let other = "Other install media" exception Unable_to_find_suitable_vm_template -let find_template session_id startswith = +let find_template session_id startswith = let vms = Client.VM.get_all !rpc session_id in match List.filter (fun self -> - (String.startswith startswith (Client.VM.get_name_label !rpc session_id self)) - && (Client.VM.get_is_a_template !rpc session_id self) - ) vms with + (String.startswith startswith (Client.VM.get_name_label !rpc session_id self)) + && (Client.VM.get_is_a_template !rpc session_id self) + ) vms with | [] -> raise Unable_to_find_suitable_vm_template | x :: _ -> - (* Printf.printf "Choosing template with name: %s\n" (Client.VM.get_name_label !rpc session_id x); *) - x + (* Printf.printf "Choosing template with name: %s\n" (Client.VM.get_name_label !rpc session_id x); *) + x -let cli_cmd test args = +let cli_cmd test args = debug test (String.concat " " ("$ xe" :: args)); try let output = String.rtrim (fst(Forkhelpers.execute_command_get_output !xe_path args)) in debug test output; output - with + with | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED n) -> - failed test (Printf.sprintf "Exit code: %d" n); - failed test (Printf.sprintf "output=[%s] log=[%s]" output log); - failwith "CLI failed" + failed test (Printf.sprintf "Exit code: %d" n); + failed test (Printf.sprintf "output=[%s] log=[%s]" output log); + failwith "CLI failed" | Forkhelpers.Spawn_internal_error(log, output, _) -> - failed test "Exit code unknown"; - failed test (Printf.sprintf "output=[%s] log=[%s]" output log); - failwith "CLI failed" + failed 
test "Exit code unknown"; + failed test (Printf.sprintf "output=[%s] log=[%s]" output log); + failwith "CLI failed" | e -> - failed test (Printexc.to_string e); - failwith "CLI failed" + failed test (Printexc.to_string e); + failwith "CLI failed" -let vm_install test session_id template name = +let vm_install test session_id template name = let newvm_uuid = cli_cmd test [ "vm-install"; "template-uuid=" ^ template; "new-name-label=" ^ name ] in - Client.VM.get_by_uuid !rpc session_id newvm_uuid + Client.VM.get_by_uuid !rpc session_id newvm_uuid let template_uninstall test session_id vm = - let uuid = Client.VM.get_uuid !rpc session_id vm in - ignore(cli_cmd test [ "template-uninstall"; "template-uuid=" ^ uuid; "--force" ]) + let uuid = Client.VM.get_uuid !rpc session_id vm in + ignore(cli_cmd test [ "template-uninstall"; "template-uuid=" ^ uuid; "--force" ]) let vm_uninstall test session_id vm = let uuid = Client.VM.get_uuid !rpc session_id vm in ignore(cli_cmd test [ "vm-uninstall"; "uuid=" ^ uuid; "--force" ]) -let vm_export ?(metadata_only=false) test session_id vm filename = +let vm_export ?(metadata_only=false) test session_id vm filename = let uuid = Client.VM.get_uuid !rpc session_id vm in let args = [ "vm-export"; "vm=" ^ uuid; "filename=" ^ filename ] in let args = if metadata_only then args @ [ "metadata=true" ] else args in ignore(cli_cmd test args) -let vm_import ?(metadata_only=false) ?(preserve=false) ?sr test session_id filename = +let vm_import ?(metadata_only=false) ?(preserve=false) ?sr test session_id filename = let sr_uuid = Opt.map (fun sr -> Client.SR.get_uuid !rpc session_id sr) sr in let args = [ "vm-import"; "filename=" ^ filename ] in let args = args @ (Opt.default [] (Opt.map (fun x -> [ "sr-uuid=" ^ x ]) sr_uuid)) in @@ -227,7 +227,7 @@ let vm_import ?(metadata_only=false) ?(preserve=false) ?sr test session_id filen let newvm_uuids = String.split ',' (cli_cmd test args) in List.map (fun uuid -> Client.VM.get_by_uuid !rpc session_id uuid) 
newvm_uuids -let install_vm test session_id = +let install_vm test session_id = let t = find_template session_id vm_template in let uuid = Client.VM.get_uuid !rpc session_id t in debug test (Printf.sprintf "Template has uuid: %s%!" uuid); @@ -236,7 +236,7 @@ let install_vm test session_id = Client.VM.set_PV_args !rpc session_id vm "noninteractive"; vm -let find_guest_installer_network session_id = +let find_guest_installer_network session_id = let all = Client.Network.get_all_records !rpc session_id in match List.filter (fun (_, r) -> List.mem_assoc Xapi_globs.is_guest_installer_network r.API.network_other_config) all with | (rf, _) :: _ -> rf diff --git a/ocaml/xapi/quicktest_encodings.ml b/ocaml/xapi/quicktest_encodings.ml index 16e357ac89c..42024cf166d 100644 --- a/ocaml/xapi/quicktest_encodings.ml +++ b/ocaml/xapi/quicktest_encodings.ml @@ -29,7 +29,7 @@ module type WIDTH_GENERATOR = sig val next : unit -> int end (** A validator that always succeeds. *) module Lenient_UCS_validator : UCS_VALIDATOR = struct - let validate value = () + let validate value = () end (* === Mock character decoders ============================================= *) @@ -37,44 +37,44 @@ end (** A character decoder that logs every index it is called with. *) module Logged_character_decoder (W : WIDTH_GENERATOR) = struct - (** The indices already supplied to the decoder. *) - let indices = ref ([] : int list) + (** The indices already supplied to the decoder. *) + let indices = ref ([] : int list) - (** Clears the list of indices. *) - let reset () = indices := [] + (** Clears the list of indices. *) + let reset () = indices := [] - (** Records the given index in the list of indices. *) - let decode_character string index = - let width = W.next () in - for index = index to index + width - 1 do - ignore (string.[index]) - done; - indices := (index :: !indices); - 0l, width + (** Records the given index in the list of indices. 
*) + let decode_character string index = + let width = W.next () in + for index = index to index + width - 1 do + ignore (string.[index]) + done; + indices := (index :: !indices); + 0l, width end module Logged_1_byte_character_decoder = Logged_character_decoder - (struct let next () = 1 end) + (struct let next () = 1 end) module Logged_2_byte_character_decoder = Logged_character_decoder - (struct let next () = 2 end) + (struct let next () = 2 end) module Logged_n_byte_character_decoder = Logged_character_decoder - (struct let last = ref 0 let next () = incr last; !last end) + (struct let last = ref 0 let next () = incr last; !last end) (** A decoder that succeeds for all characters. *) module Universal_character_decoder = struct - let decode_character string index = (0l, 1) + let decode_character string index = (0l, 1) end (** A decoder that fails for all characters. *) module Failing_character_decoder = struct - let decode_character string index = raise Decode_error + let decode_character string index = raise Decode_error end (** A decoder that succeeds for all characters except the letter 'F'. 
*) module Selective_character_decoder = struct - let decode_character string index = - if string.[index] = 'F' then raise Decode_error else (0l, 1) + let decode_character string index = + if string.[index] = 'F' then raise Decode_error else (0l, 1) end (* === Mock codecs ========================================================= *) @@ -84,497 +84,497 @@ module Lenient_UTF8_codec = UTF8_CODEC (Lenient_UCS_validator) (* === Mock string validators ============================================== *) module Logged_1_byte_character_string_validator = String_validator - (Logged_1_byte_character_decoder) + (Logged_1_byte_character_decoder) module Logged_2_byte_character_string_validator = String_validator - (Logged_2_byte_character_decoder) + (Logged_2_byte_character_decoder) module Logged_n_byte_character_string_validator = String_validator - (Logged_n_byte_character_decoder) + (Logged_n_byte_character_decoder) (** A validator that accepts all strings. *) module Universal_string_validator = String_validator - (Universal_character_decoder) + (Universal_character_decoder) (** A validator that rejects all strings. *) module Failing_string_validator = String_validator - (Failing_character_decoder) + (Failing_character_decoder) (** A validator that rejects strings containing the character 'F'. *) module Selective_string_validator = String_validator - (Selective_character_decoder) + (Selective_character_decoder) (* === Tests =============================================================== *) module String_validator = struct - let test_is_valid = make_test_case "is_valid" - "Tests the is_valid function." 
- begin fun () -> - assert_true (Universal_string_validator.is_valid "" ); - assert_true (Universal_string_validator.is_valid "123456789"); - assert_true (Selective_string_validator.is_valid "" ); - assert_true (Selective_string_validator.is_valid "123456789"); - assert_false (Selective_string_validator.is_valid "F23456789"); - assert_false (Selective_string_validator.is_valid "1234F6789"); - assert_false (Selective_string_validator.is_valid "12345678F"); - assert_false (Selective_string_validator.is_valid "FFFFFFFFF") - end - - let test_longest_valid_prefix = make_test_case "longest_valid_prefix" - "Tests the longest_valid_prefix function." - begin fun () -> - assert_equal (Universal_string_validator.longest_valid_prefix "" ) "" ; - assert_equal (Universal_string_validator.longest_valid_prefix "123456789") "123456789"; - assert_equal (Selective_string_validator.longest_valid_prefix "" ) "" ; - assert_equal (Selective_string_validator.longest_valid_prefix "123456789") "123456789"; - assert_equal (Selective_string_validator.longest_valid_prefix "F23456789") "" ; - assert_equal (Selective_string_validator.longest_valid_prefix "1234F6789") "1234" ; - assert_equal (Selective_string_validator.longest_valid_prefix "12345678F") "12345678" ; - assert_equal (Selective_string_validator.longest_valid_prefix "FFFFFFFFF") "" - end - - let test_validate_with_1_byte_characters = make_test_case "validate_with_1_byte_characters" - "Tests validation with 1-byte-wide characters." - begin fun () -> - Logged_1_byte_character_decoder.reset (); - Logged_1_byte_character_string_validator.validate "0123456789"; - assert_equal !Logged_1_byte_character_decoder.indices [9;8;7;6;5;4;3;2;1;0] - end - - let test_validate_with_2_byte_characters = make_test_case "validate_with_2_byte_characters" - "Tests validation with 2-byte-wide characters." 
- begin fun () -> - Logged_2_byte_character_decoder.reset (); - Logged_2_byte_character_string_validator.validate "0123456789"; - assert_equal !Logged_2_byte_character_decoder.indices [8;6;4;2;0] - end - - let test_validate_with_n_byte_characters = make_test_case "validate_with_n_byte_characters" - "Tests validation with characters of multiple widths." - begin fun () -> - Logged_n_byte_character_decoder.reset (); - Logged_n_byte_character_string_validator.validate "0123456789"; - assert_equal !Logged_n_byte_character_decoder.indices [6;3;1;0] - end - - let test_validate_with_empty_string = make_test_case "validate_with_empty_string" - "Tests that validation does not fail for an empty string." - begin fun () -> - Logged_1_byte_character_decoder.reset (); - Logged_1_byte_character_string_validator.validate ""; - assert_equal !Logged_1_byte_character_decoder.indices [] - end - - let test_validate_with_incomplete_string = make_test_case "validate_with_incomplete_string" - "Tests that validation fails correctly for an incomplete string." - begin fun () -> - Logged_2_byte_character_decoder.reset (); - assert_raises String_incomplete - (fun () -> Logged_2_byte_character_string_validator.validate "0") - end - - let test_validate_with_failing_decoders = make_test_case "validate_with_failing_decoders" - "Tests that validation fails correctly in the presence of failing decoders." 
- begin fun () -> - Failing_string_validator.validate ""; - assert_raises_match - (function Validation_error (0, Decode_error) -> true | _ -> false) - (fun () -> Selective_string_validator.validate "F"); - assert_raises_match - (function Validation_error (0, Decode_error) -> true | _ -> false) - (fun () -> Selective_string_validator.validate "F12345678"); - assert_raises_match - (function Validation_error (4, Decode_error) -> true | _ -> false) - (fun () -> Selective_string_validator.validate "0123F5678"); - assert_raises_match - (function Validation_error (8, Decode_error) -> true | _ -> false) - (fun () -> Selective_string_validator.validate "01234567F"); - assert_raises_match - (function Validation_error (0, Decode_error) -> true | _ -> false) - (fun () -> Selective_string_validator.validate "FFFFFFFFF") - end - - let tests = make_module_test_suite "String_validator" - [ - test_is_valid; - test_longest_valid_prefix; - test_validate_with_1_byte_characters; - test_validate_with_2_byte_characters; - test_validate_with_n_byte_characters; - test_validate_with_empty_string; - test_validate_with_incomplete_string; - test_validate_with_failing_decoders; - ] + let test_is_valid = make_test_case "is_valid" + "Tests the is_valid function." + begin fun () -> + assert_true (Universal_string_validator.is_valid "" ); + assert_true (Universal_string_validator.is_valid "123456789"); + assert_true (Selective_string_validator.is_valid "" ); + assert_true (Selective_string_validator.is_valid "123456789"); + assert_false (Selective_string_validator.is_valid "F23456789"); + assert_false (Selective_string_validator.is_valid "1234F6789"); + assert_false (Selective_string_validator.is_valid "12345678F"); + assert_false (Selective_string_validator.is_valid "FFFFFFFFF") + end + + let test_longest_valid_prefix = make_test_case "longest_valid_prefix" + "Tests the longest_valid_prefix function." 
+ begin fun () -> + assert_equal (Universal_string_validator.longest_valid_prefix "" ) "" ; + assert_equal (Universal_string_validator.longest_valid_prefix "123456789") "123456789"; + assert_equal (Selective_string_validator.longest_valid_prefix "" ) "" ; + assert_equal (Selective_string_validator.longest_valid_prefix "123456789") "123456789"; + assert_equal (Selective_string_validator.longest_valid_prefix "F23456789") "" ; + assert_equal (Selective_string_validator.longest_valid_prefix "1234F6789") "1234" ; + assert_equal (Selective_string_validator.longest_valid_prefix "12345678F") "12345678" ; + assert_equal (Selective_string_validator.longest_valid_prefix "FFFFFFFFF") "" + end + + let test_validate_with_1_byte_characters = make_test_case "validate_with_1_byte_characters" + "Tests validation with 1-byte-wide characters." + begin fun () -> + Logged_1_byte_character_decoder.reset (); + Logged_1_byte_character_string_validator.validate "0123456789"; + assert_equal !Logged_1_byte_character_decoder.indices [9;8;7;6;5;4;3;2;1;0] + end + + let test_validate_with_2_byte_characters = make_test_case "validate_with_2_byte_characters" + "Tests validation with 2-byte-wide characters." + begin fun () -> + Logged_2_byte_character_decoder.reset (); + Logged_2_byte_character_string_validator.validate "0123456789"; + assert_equal !Logged_2_byte_character_decoder.indices [8;6;4;2;0] + end + + let test_validate_with_n_byte_characters = make_test_case "validate_with_n_byte_characters" + "Tests validation with characters of multiple widths." + begin fun () -> + Logged_n_byte_character_decoder.reset (); + Logged_n_byte_character_string_validator.validate "0123456789"; + assert_equal !Logged_n_byte_character_decoder.indices [6;3;1;0] + end + + let test_validate_with_empty_string = make_test_case "validate_with_empty_string" + "Tests that validation does not fail for an empty string." 
+ begin fun () -> + Logged_1_byte_character_decoder.reset (); + Logged_1_byte_character_string_validator.validate ""; + assert_equal !Logged_1_byte_character_decoder.indices [] + end + + let test_validate_with_incomplete_string = make_test_case "validate_with_incomplete_string" + "Tests that validation fails correctly for an incomplete string." + begin fun () -> + Logged_2_byte_character_decoder.reset (); + assert_raises String_incomplete + (fun () -> Logged_2_byte_character_string_validator.validate "0") + end + + let test_validate_with_failing_decoders = make_test_case "validate_with_failing_decoders" + "Tests that validation fails correctly in the presence of failing decoders." + begin fun () -> + Failing_string_validator.validate ""; + assert_raises_match + (function Validation_error (0, Decode_error) -> true | _ -> false) + (fun () -> Selective_string_validator.validate "F"); + assert_raises_match + (function Validation_error (0, Decode_error) -> true | _ -> false) + (fun () -> Selective_string_validator.validate "F12345678"); + assert_raises_match + (function Validation_error (4, Decode_error) -> true | _ -> false) + (fun () -> Selective_string_validator.validate "0123F5678"); + assert_raises_match + (function Validation_error (8, Decode_error) -> true | _ -> false) + (fun () -> Selective_string_validator.validate "01234567F"); + assert_raises_match + (function Validation_error (0, Decode_error) -> true | _ -> false) + (fun () -> Selective_string_validator.validate "FFFFFFFFF") + end + + let tests = make_module_test_suite "String_validator" + [ + test_is_valid; + test_longest_valid_prefix; + test_validate_with_1_byte_characters; + test_validate_with_2_byte_characters; + test_validate_with_n_byte_characters; + test_validate_with_empty_string; + test_validate_with_incomplete_string; + test_validate_with_failing_decoders; + ] end module UCS = struct include UCS - (** A list of UCS non-characters values, including: *) - (** a. 
non-characters within the basic multilingual plane; *) - (** b. non-characters at the end of the basic multilingual plane; *) - (** c. non-characters at the end of the private use area. *) - let non_characters = [ - 0x00fdd0l; 0x00fdefl; (* case a. *) - 0x00fffel; 0x00ffffl; (* case b. *) - 0x1ffffel; 0x1fffffl; (* case c. *) - ] - - (** A list of UCS character values located immediately before or *) - (** after UCS non-character values, including: *) - (** a. non-characters within the basic multilingual plane; *) - (** b. non-characters at the end of the basic multilingual plane; *) - (** c. non-characters at the end of the private use area. *) - let valid_characters_next_to_non_characters = [ - 0x00fdcfl; 0x00fdf0l; (* case a. *) - 0x00fffdl; 0x010000l; (* case b. *) - 0x1ffffdl; 0x200000l; (* case c. *) - ] - - let test_is_non_character = make_test_case "is_non_character" - "Tests the non-character indicator function." - begin fun () -> - List.iter (fun value -> assert_true (is_non_character (value))) - non_characters; - List.iter (fun value -> assert_false (is_non_character (value))) - valid_characters_next_to_non_characters - end - - let test_is_out_of_range = make_test_case "is_out_of_range" - "Tests the out-of-range indicator function." - begin fun () -> - assert_true (is_out_of_range (min_value --- 1l)); - assert_false (is_out_of_range (min_value)); - assert_false (is_out_of_range (max_value)); - assert_true (is_out_of_range (max_value +++ 1l)) - end - - let test_is_surrogate = make_test_case "is_surrogate" - "Tests the surrogate indicator function." - begin fun () -> - assert_false (is_surrogate (0xd7ffl)); - assert_true (is_surrogate (0xd800l)); - assert_true (is_surrogate (0xdfffl)); - assert_false (is_surrogate (0xe000l)) - end - - let tests = make_module_test_suite "UCS" - [ - test_is_non_character; - test_is_out_of_range; - test_is_surrogate; - ] + (** A list of UCS non-characters values, including: *) + (** a. 
non-characters within the basic multilingual plane; *) + (** b. non-characters at the end of the basic multilingual plane; *) + (** c. non-characters at the end of the private use area. *) + let non_characters = [ + 0x00fdd0l; 0x00fdefl; (* case a. *) + 0x00fffel; 0x00ffffl; (* case b. *) + 0x1ffffel; 0x1fffffl; (* case c. *) + ] + + (** A list of UCS character values located immediately before or *) + (** after UCS non-character values, including: *) + (** a. non-characters within the basic multilingual plane; *) + (** b. non-characters at the end of the basic multilingual plane; *) + (** c. non-characters at the end of the private use area. *) + let valid_characters_next_to_non_characters = [ + 0x00fdcfl; 0x00fdf0l; (* case a. *) + 0x00fffdl; 0x010000l; (* case b. *) + 0x1ffffdl; 0x200000l; (* case c. *) + ] + + let test_is_non_character = make_test_case "is_non_character" + "Tests the non-character indicator function." + begin fun () -> + List.iter (fun value -> assert_true (is_non_character (value))) + non_characters; + List.iter (fun value -> assert_false (is_non_character (value))) + valid_characters_next_to_non_characters + end + + let test_is_out_of_range = make_test_case "is_out_of_range" + "Tests the out-of-range indicator function." + begin fun () -> + assert_true (is_out_of_range (min_value --- 1l)); + assert_false (is_out_of_range (min_value)); + assert_false (is_out_of_range (max_value)); + assert_true (is_out_of_range (max_value +++ 1l)) + end + + let test_is_surrogate = make_test_case "is_surrogate" + "Tests the surrogate indicator function." 
+ begin fun () -> + assert_false (is_surrogate (0xd7ffl)); + assert_true (is_surrogate (0xd800l)); + assert_true (is_surrogate (0xdfffl)); + assert_false (is_surrogate (0xe000l)) + end + + let tests = make_module_test_suite "UCS" + [ + test_is_non_character; + test_is_out_of_range; + test_is_surrogate; + ] end module XML = struct include XML - let test_is_forbidden_control_character = make_test_case "is_forbidden_control_character" - "Tests the forbidden-control-character indicator function." - begin fun () -> - assert_true (is_forbidden_control_character (0x00l)); - assert_true (is_forbidden_control_character (0x19l)); - assert_false (is_forbidden_control_character (0x09l)); - assert_false (is_forbidden_control_character (0x0al)); - assert_false (is_forbidden_control_character (0x0dl)); - assert_false (is_forbidden_control_character (0x20l)) - end - - let tests = make_module_test_suite "XML" - [ - test_is_forbidden_control_character; - ] + let test_is_forbidden_control_character = make_test_case "is_forbidden_control_character" + "Tests the forbidden-control-character indicator function." + begin fun () -> + assert_true (is_forbidden_control_character (0x00l)); + assert_true (is_forbidden_control_character (0x19l)); + assert_false (is_forbidden_control_character (0x09l)); + assert_false (is_forbidden_control_character (0x0al)); + assert_false (is_forbidden_control_character (0x0dl)); + assert_false (is_forbidden_control_character (0x20l)) + end + + let tests = make_module_test_suite "XML" + [ + test_is_forbidden_control_character; + ] end module UTF8_UCS_validator = struct include UTF8_UCS_validator - let test_validate = make_test_case "validate" - "Tests the UTF-8 UCS validation function." 
- begin fun () -> - let value = ref (UCS.min_value --- 1l) in - while !value <= (UCS.max_value +++ 1l) do - if UCS.is_out_of_range !value - then assert_raises UCS_value_out_of_range - (fun () -> validate !value) - else - if UCS.is_non_character !value - || UCS.is_surrogate !value - then assert_raises UCS_value_prohibited_in_UTF8 - (fun () -> validate !value) - else - validate !value; - value := !value +++ 1l - done - end - - let tests = make_module_test_suite "UTF8_UCS_validator" - [ - test_validate; - ] + let test_validate = make_test_case "validate" + "Tests the UTF-8 UCS validation function." + begin fun () -> + let value = ref (UCS.min_value --- 1l) in + while !value <= (UCS.max_value +++ 1l) do + if UCS.is_out_of_range !value + then assert_raises UCS_value_out_of_range + (fun () -> validate !value) + else + if UCS.is_non_character !value + || UCS.is_surrogate !value + then assert_raises UCS_value_prohibited_in_UTF8 + (fun () -> validate !value) + else + validate !value; + value := !value +++ 1l + done + end + + let tests = make_module_test_suite "UTF8_UCS_validator" + [ + test_validate; + ] end module XML_UTF8_UCS_validator = struct include XML_UTF8_UCS_validator - let test_validate = make_test_case "validate" - "Tests the XML-specific UTF-8 UCS validation function." 
- begin fun () -> - let value = ref (UCS.min_value --- 1l) in - while !value <= (UCS.max_value +++ 1l) do - if UCS.is_out_of_range !value - then assert_raises UCS_value_out_of_range - (fun () -> validate !value) - else - if UCS.is_non_character !value - || UCS.is_surrogate !value - then assert_raises UCS_value_prohibited_in_UTF8 - (fun () -> validate !value) - else - if XML.is_forbidden_control_character !value - then assert_raises UCS_value_prohibited_in_XML - (fun () -> validate !value) - else - validate !value; - value := !value +++ 1l - done - end - - let tests = make_module_test_suite "XML_UTF8_UCS_validator" - [ - test_validate; - ] + let test_validate = make_test_case "validate" + "Tests the XML-specific UTF-8 UCS validation function." + begin fun () -> + let value = ref (UCS.min_value --- 1l) in + while !value <= (UCS.max_value +++ 1l) do + if UCS.is_out_of_range !value + then assert_raises UCS_value_out_of_range + (fun () -> validate !value) + else + if UCS.is_non_character !value + || UCS.is_surrogate !value + then assert_raises UCS_value_prohibited_in_UTF8 + (fun () -> validate !value) + else + if XML.is_forbidden_control_character !value + then assert_raises UCS_value_prohibited_in_XML + (fun () -> validate !value) + else + validate !value; + value := !value +++ 1l + done + end + + let tests = make_module_test_suite "XML_UTF8_UCS_validator" + [ + test_validate; + ] end module UTF8_codec = struct include UTF8_codec - (** A list of canonical encoding widths of UCS values, *) - (** represented by tuples of the form (v, w), where: *) - (** v = the UCS character value to be encoded; and *) - (** w = the width of the encoded character, in bytes. 
*) - let valid_ucs_value_widths = - [ - (1l , 1); ((1l <<< 7) --- 1l, 1); - (1l <<< 7, 2); ((1l <<< 11) --- 1l, 2); - (1l <<< 11, 3); ((1l <<< 16) --- 1l, 3); - (1l <<< 16, 4); ((1l <<< 21) --- 1l, 4); - ] - - let test_width_required_for_ucs_value = make_test_case "width_required_for_ucs_value" - "Tests the width-required-for-UCS-value function." - begin fun () -> - List.iter - (fun (value, width) -> - assert_equal (width_required_for_ucs_value value) width) - valid_ucs_value_widths - end - - (** A list of valid header byte decodings, represented by *) - (** tuples of the form (b, (v, w)), where: *) - (** b = a valid header byte; *) - (** v = the (partial) value contained within the byte; and *) - (** w = the total width of the encoded character, in bytes. *) - let valid_header_byte_decodings = - [ - (0b00000000, (0b00000000, 1)); - (0b00000001, (0b00000001, 1)); - (0b01111111, (0b01111111, 1)); - (0b11000000, (0b00000000, 2)); - (0b11000001, (0b00000001, 2)); - (0b11011111, (0b00011111, 2)); - (0b11100000, (0b00000000, 3)); - (0b11100001, (0b00000001, 3)); - (0b11101111, (0b00001111, 3)); - (0b11110000, (0b00000000, 4)); - (0b11110001, (0b00000001, 4)); - (0b11110111, (0b00000111, 4)); - ] - - (** A list of invalid header bytes that should not be decodable. *) - let invalid_header_bytes = - [ - 0b10000000; 0b10111111; - 0b11111000; 0b11111011; - 0b11111100; 0b11111101; - 0b11111110; 0b11111111; - ] - - let test_decode_header_byte_when_valid = make_test_case "decode_header_byte_when_valid" - "Tests decoding with valid header bytes." - begin fun () -> - List.iter - (fun (b, (v, w)) -> - assert_equal (decode_header_byte b) (v, w)) - valid_header_byte_decodings - end - - let test_decode_header_byte_when_invalid = make_test_case "decode_header_byte_when_invalid" - "Tests decoding with invalid header bytes." 
- begin fun () -> - List.iter - (fun b -> - assert_raises UTF8_header_byte_invalid - (fun () -> decode_header_byte b)) - invalid_header_bytes - end - - (** A list of valid continuation byte decodings, represented *) - (** by tuples of the form (b, v), where: *) - (** b = a valid continuation byte; and *) - (** v = the partial value contained within the byte. *) - let valid_continuation_byte_decodings = - [ - (0b10000000, 0b00000000); - (0b10000001, 0b00000001); - (0b10111110, 0b00111110); - (0b10111111, 0b00111111); - ] - - (** A list of invalid continuation bytes that should not be decodable. *) - let invalid_continuation_bytes = - [ - 0b00000000; 0b01111111; - 0b11000000; 0b11011111; - 0b11100000; 0b11101111; - 0b11110000; 0b11110111; - 0b11111000; 0b11111011; - 0b11111100; 0b11111101; - 0b11111111; 0b11111110; - ] - - let test_decode_continuation_byte_when_valid = make_test_case "decode_continuation_byte_when_valid" - "Tests decoding with valid continuation bytes." - begin fun () -> - List.iter - (fun (byte, value) -> - assert_equal (decode_continuation_byte byte) value) - valid_continuation_byte_decodings - end - - let test_decode_continuation_byte_when_invalid = make_test_case "decode_continuation_byte_when_invalid" - "Tests decoding with invalid continuation bytes." - begin fun () -> - List.iter - (fun byte -> - assert_raises UTF8_continuation_byte_invalid - (fun () -> decode_continuation_byte byte)) - invalid_continuation_bytes - end - - (** A list of valid character decodings represented by *) - (** tuples of the form (s, (v, w)), where: *) - (** *) - (** s = a validly-encoded UTF-8 string; *) - (** v = the UCS value represented by the string; *) - (** (which may or may not be valid in its own right) *) - (** w = the width of the encoded string, in bytes. *) - (** *) - (** For each byte length b in [1...4], the list contains *) - (** decodings for: *) - (** *) - (** v_min = the smallest UCS value encodable in b bytes. 
*) - (** v_max = the greatest UCS value encodable in b bytes. *) - (** *) - let valid_character_decodings = [ - (* 7654321 *) - (* 0b0xxxxxxx *) (* 00000000000000xxxxxxx *) - "\x00" (* 0b00000000 *), (0b000000000000000000000l, 1); - "\x7f" (* 0b01111111 *), (0b000000000000001111111l, 1); - (* 10987654321 *) - (* 0b110xxxsx 0b10xxxxxx *) (* 0000000000xxxsxxxxxxx *) - "\xc2\x80" (* 0b11000010 0b10000000 *), (0b000000000000010000000l, 2); - "\xdf\xbf" (* 0b11011111 0b10111111 *), (0b000000000011111111111l, 2); - (* 6543210987654321 *) - (* 0b1110xxxx 0b10sxxxxx 0b10xxxxxx *) (* xxxxsxxxxxxxxxxx *) - "\xe0\xa0\x80" (* 0b11100000 0b10100000 0b10000000 *), (0b000000000100000000000l, 3); - "\xef\xbf\xbf" (* 0b11101111 0b10111111 0b10111111 *), (0b000001111111111111111l, 3); - (* 109876543210987654321 *) - (* 0b11110xxx 0b10xsxxxx 0b10xxxxxx 0b10xxxxxx *) (* xxxxsxxxxxxxxxxxxxxxx *) - "\xf0\x90\x80\x80" (* 0b11110000 0b10010000 0b10000000 0b10000000 *), (0b000010000000000000000l, 4); - "\xf7\xbf\xbf\xbf" (* 0b11110111 0b10111111 0b10111111 0b10111111 *), (0b111111111111111111111l, 4); - ] - - let test_decode_character_when_valid = make_test_case "decode_character_when_valid" - "Tests decoding with valid characters." - begin fun () -> - List.iter - (fun (string, (value, width)) -> - assert_equal - (Lenient_UTF8_codec.decode_character string 0) - (value, width)) - valid_character_decodings - end - - (** A list of strings containing overlong character encodings. *) - (** For each byte length b in [2...4], this list contains the *) - (** overlong encoding e (v), where v is the UCS value one less *) - (** than the smallest UCS value validly-encodable in b bytes. 
*) - let overlong_character_encodings = - [ - "\xc1\xbf" (* 0b11000001 0b10111111 *); - "\xe0\x9f\xbf" (* 0b11100000 0b10011111 0b10111111 *); - "\xf0\x8f\xbf\xbf" (* 0b11110000 0b10001111 0b10111111 0b10111111 *); - ] - - let test_decode_character_when_overlong = make_test_case "decode_character_when_overlong" - "Tests decoding with overlong characters." - begin fun () -> - List.iter - (fun string -> - assert_raises UTF8_encoding_not_canonical - (fun () -> Lenient_UTF8_codec.decode_character string 0)) - overlong_character_encodings - end - - (** Encodes a valid UCS value and then decodes it again, testing: *) - (** a. that the encoded width is canonical for the given value. *) - (** b. that the decoded value is identical to the original value. *) - let test_encode_decode_cycle_for_value value = - let string = Lenient_UTF8_codec.encode_character value in - let decoded_value, decoded_width = - Lenient_UTF8_codec.decode_character string 0 in - let width = UTF8_codec.width_required_for_ucs_value value in - if (value <> decoded_value) then fail - (Printf.sprintf - "expected value %06lx but decoded value %06lx\n" - value decoded_value); - if (width <> decoded_width) then fail - (Printf.sprintf - "expected width %i but decoded width %i\n" - width decoded_width) - - let test_encode_decode_cycle = make_test_case "encode_decode_cycle" - "Performs an encode-decode cycle for every valid UCS character." 
- begin fun () -> - let value = ref UCS.min_value in - while !value <= UCS.max_value do - test_encode_decode_cycle_for_value !value; - value := Int32.add !value 1l; - done - end - - let tests = make_module_test_suite "UTF8_codec" - [ - test_width_required_for_ucs_value; - test_decode_header_byte_when_valid; - test_decode_header_byte_when_invalid; - test_decode_continuation_byte_when_valid; - test_decode_continuation_byte_when_invalid; - test_decode_character_when_valid; - test_decode_character_when_overlong; - test_encode_decode_cycle; - ] + (** A list of canonical encoding widths of UCS values, *) + (** represented by tuples of the form (v, w), where: *) + (** v = the UCS character value to be encoded; and *) + (** w = the width of the encoded character, in bytes. *) + let valid_ucs_value_widths = + [ + (1l , 1); ((1l <<< 7) --- 1l, 1); + (1l <<< 7, 2); ((1l <<< 11) --- 1l, 2); + (1l <<< 11, 3); ((1l <<< 16) --- 1l, 3); + (1l <<< 16, 4); ((1l <<< 21) --- 1l, 4); + ] + + let test_width_required_for_ucs_value = make_test_case "width_required_for_ucs_value" + "Tests the width-required-for-UCS-value function." + begin fun () -> + List.iter + (fun (value, width) -> + assert_equal (width_required_for_ucs_value value) width) + valid_ucs_value_widths + end + + (** A list of valid header byte decodings, represented by *) + (** tuples of the form (b, (v, w)), where: *) + (** b = a valid header byte; *) + (** v = the (partial) value contained within the byte; and *) + (** w = the total width of the encoded character, in bytes. 
*) + let valid_header_byte_decodings = + [ + (0b00000000, (0b00000000, 1)); + (0b00000001, (0b00000001, 1)); + (0b01111111, (0b01111111, 1)); + (0b11000000, (0b00000000, 2)); + (0b11000001, (0b00000001, 2)); + (0b11011111, (0b00011111, 2)); + (0b11100000, (0b00000000, 3)); + (0b11100001, (0b00000001, 3)); + (0b11101111, (0b00001111, 3)); + (0b11110000, (0b00000000, 4)); + (0b11110001, (0b00000001, 4)); + (0b11110111, (0b00000111, 4)); + ] + + (** A list of invalid header bytes that should not be decodable. *) + let invalid_header_bytes = + [ + 0b10000000; 0b10111111; + 0b11111000; 0b11111011; + 0b11111100; 0b11111101; + 0b11111110; 0b11111111; + ] + + let test_decode_header_byte_when_valid = make_test_case "decode_header_byte_when_valid" + "Tests decoding with valid header bytes." + begin fun () -> + List.iter + (fun (b, (v, w)) -> + assert_equal (decode_header_byte b) (v, w)) + valid_header_byte_decodings + end + + let test_decode_header_byte_when_invalid = make_test_case "decode_header_byte_when_invalid" + "Tests decoding with invalid header bytes." + begin fun () -> + List.iter + (fun b -> + assert_raises UTF8_header_byte_invalid + (fun () -> decode_header_byte b)) + invalid_header_bytes + end + + (** A list of valid continuation byte decodings, represented *) + (** by tuples of the form (b, v), where: *) + (** b = a valid continuation byte; and *) + (** v = the partial value contained within the byte. *) + let valid_continuation_byte_decodings = + [ + (0b10000000, 0b00000000); + (0b10000001, 0b00000001); + (0b10111110, 0b00111110); + (0b10111111, 0b00111111); + ] + + (** A list of invalid continuation bytes that should not be decodable. 
*) + let invalid_continuation_bytes = + [ + 0b00000000; 0b01111111; + 0b11000000; 0b11011111; + 0b11100000; 0b11101111; + 0b11110000; 0b11110111; + 0b11111000; 0b11111011; + 0b11111100; 0b11111101; + 0b11111111; 0b11111110; + ] + + let test_decode_continuation_byte_when_valid = make_test_case "decode_continuation_byte_when_valid" + "Tests decoding with valid continuation bytes." + begin fun () -> + List.iter + (fun (byte, value) -> + assert_equal (decode_continuation_byte byte) value) + valid_continuation_byte_decodings + end + + let test_decode_continuation_byte_when_invalid = make_test_case "decode_continuation_byte_when_invalid" + "Tests decoding with invalid continuation bytes." + begin fun () -> + List.iter + (fun byte -> + assert_raises UTF8_continuation_byte_invalid + (fun () -> decode_continuation_byte byte)) + invalid_continuation_bytes + end + + (** A list of valid character decodings represented by *) + (** tuples of the form (s, (v, w)), where: *) + (** *) + (** s = a validly-encoded UTF-8 string; *) + (** v = the UCS value represented by the string; *) + (** (which may or may not be valid in its own right) *) + (** w = the width of the encoded string, in bytes. *) + (** *) + (** For each byte length b in [1...4], the list contains *) + (** decodings for: *) + (** *) + (** v_min = the smallest UCS value encodable in b bytes. *) + (** v_max = the greatest UCS value encodable in b bytes. 
*) + (** *) + let valid_character_decodings = [ + (* 7654321 *) + (* 0b0xxxxxxx *) (* 00000000000000xxxxxxx *) + "\x00" (* 0b00000000 *), (0b000000000000000000000l, 1); + "\x7f" (* 0b01111111 *), (0b000000000000001111111l, 1); + (* 10987654321 *) + (* 0b110xxxsx 0b10xxxxxx *) (* 0000000000xxxsxxxxxxx *) + "\xc2\x80" (* 0b11000010 0b10000000 *), (0b000000000000010000000l, 2); + "\xdf\xbf" (* 0b11011111 0b10111111 *), (0b000000000011111111111l, 2); + (* 6543210987654321 *) + (* 0b1110xxxx 0b10sxxxxx 0b10xxxxxx *) (* xxxxsxxxxxxxxxxx *) + "\xe0\xa0\x80" (* 0b11100000 0b10100000 0b10000000 *), (0b000000000100000000000l, 3); + "\xef\xbf\xbf" (* 0b11101111 0b10111111 0b10111111 *), (0b000001111111111111111l, 3); + (* 109876543210987654321 *) + (* 0b11110xxx 0b10xsxxxx 0b10xxxxxx 0b10xxxxxx *) (* xxxxsxxxxxxxxxxxxxxxx *) + "\xf0\x90\x80\x80" (* 0b11110000 0b10010000 0b10000000 0b10000000 *), (0b000010000000000000000l, 4); + "\xf7\xbf\xbf\xbf" (* 0b11110111 0b10111111 0b10111111 0b10111111 *), (0b111111111111111111111l, 4); + ] + + let test_decode_character_when_valid = make_test_case "decode_character_when_valid" + "Tests decoding with valid characters." + begin fun () -> + List.iter + (fun (string, (value, width)) -> + assert_equal + (Lenient_UTF8_codec.decode_character string 0) + (value, width)) + valid_character_decodings + end + + (** A list of strings containing overlong character encodings. *) + (** For each byte length b in [2...4], this list contains the *) + (** overlong encoding e (v), where v is the UCS value one less *) + (** than the smallest UCS value validly-encodable in b bytes. *) + let overlong_character_encodings = + [ + "\xc1\xbf" (* 0b11000001 0b10111111 *); + "\xe0\x9f\xbf" (* 0b11100000 0b10011111 0b10111111 *); + "\xf0\x8f\xbf\xbf" (* 0b11110000 0b10001111 0b10111111 0b10111111 *); + ] + + let test_decode_character_when_overlong = make_test_case "decode_character_when_overlong" + "Tests decoding with overlong characters." 
+ begin fun () -> + List.iter + (fun string -> + assert_raises UTF8_encoding_not_canonical + (fun () -> Lenient_UTF8_codec.decode_character string 0)) + overlong_character_encodings + end + + (** Encodes a valid UCS value and then decodes it again, testing: *) + (** a. that the encoded width is canonical for the given value. *) + (** b. that the decoded value is identical to the original value. *) + let test_encode_decode_cycle_for_value value = + let string = Lenient_UTF8_codec.encode_character value in + let decoded_value, decoded_width = + Lenient_UTF8_codec.decode_character string 0 in + let width = UTF8_codec.width_required_for_ucs_value value in + if (value <> decoded_value) then fail + (Printf.sprintf + "expected value %06lx but decoded value %06lx\n" + value decoded_value); + if (width <> decoded_width) then fail + (Printf.sprintf + "expected width %i but decoded width %i\n" + width decoded_width) + + let test_encode_decode_cycle = make_test_case "encode_decode_cycle" + "Performs an encode-decode cycle for every valid UCS character." 
+ begin fun () -> + let value = ref UCS.min_value in + while !value <= UCS.max_value do + test_encode_decode_cycle_for_value !value; + value := Int32.add !value 1l; + done + end + + let tests = make_module_test_suite "UTF8_codec" + [ + test_width_required_for_ucs_value; + test_decode_header_byte_when_valid; + test_decode_header_byte_when_invalid; + test_decode_continuation_byte_when_valid; + test_decode_continuation_byte_when_invalid; + test_decode_character_when_valid; + test_decode_character_when_overlong; + test_encode_decode_cycle; + ] end let tests = make_module_test_suite "Encodings" -[ - UCS .tests; - XML .tests; - String_validator .tests; - UTF8_UCS_validator .tests; - XML_UTF8_UCS_validator.tests; - UTF8_codec .tests; -] + [ + UCS .tests; + XML .tests; + String_validator .tests; + UTF8_UCS_validator .tests; + XML_UTF8_UCS_validator.tests; + UTF8_codec .tests; + ] let run_from_within_quicktest () = run_from_within_quicktest tests diff --git a/ocaml/xapi/quicktest_http.ml b/ocaml/xapi/quicktest_http.ml index d6118008deb..d9e4f184be6 100644 --- a/ocaml/xapi/quicktest_http.ml +++ b/ocaml/xapi/quicktest_http.ml @@ -23,124 +23,124 @@ open Ocamltest module Uds = struct (* {{{1 *) - module D = Debug.Make(struct let name = "quicktest_http:Uds" end) - open D - - exception Parse_error of string - - let with_unix_channels filename func = - let fd = Unixext.open_connection_unix_fd filename in - let ic, oc = (Unix.in_channel_of_descr fd, Unix.out_channel_of_descr fd) in - finally (fun () -> func ic oc) (fun () -> Unix.close fd) - - let http_response_code d = - match Xstringext.String.split ' ' d with - | _ :: code :: _ -> int_of_string code - | _ -> raise (Parse_error "Failed to parse HTTP reponse code") - - let rec read_header ic acc = - let line = input_line ic in - if line = "\r" - then List.rev (line :: acc) - else read_header ic (line :: acc) - - let rec read_body ic acc = - try - let line = input_line ic in - read_body ic (line :: acc) - with - End_of_file -> 
List.rev acc - - let http_command filename cmd = with_unix_channels filename (fun ic oc -> - Printf.fprintf oc "%s" cmd; - flush oc; - let result_line = input_line ic in - let response_code = http_response_code result_line in - let header = read_header ic [] in - let body = read_body ic [] in - (response_code, result_line, header, body)) + module D = Debug.Make(struct let name = "quicktest_http:Uds" end) + open D + + exception Parse_error of string + + let with_unix_channels filename func = + let fd = Unixext.open_connection_unix_fd filename in + let ic, oc = (Unix.in_channel_of_descr fd, Unix.out_channel_of_descr fd) in + finally (fun () -> func ic oc) (fun () -> Unix.close fd) + + let http_response_code d = + match Xstringext.String.split ' ' d with + | _ :: code :: _ -> int_of_string code + | _ -> raise (Parse_error "Failed to parse HTTP reponse code") + + let rec read_header ic acc = + let line = input_line ic in + if line = "\r" + then List.rev (line :: acc) + else read_header ic (line :: acc) + + let rec read_body ic acc = + try + let line = input_line ic in + read_body ic (line :: acc) + with + End_of_file -> List.rev acc + + let http_command filename cmd = with_unix_channels filename (fun ic oc -> + Printf.fprintf oc "%s" cmd; + flush oc; + let result_line = input_line ic in + let response_code = http_response_code result_line in + let header = read_header ic [] in + let body = read_body ic [] in + (response_code, result_line, header, body)) end module Secret_Auth_fails = struct (* {{{1 *) - let http request f = - let open Xmlrpc_client in - let transport = - if !using_unix_domain_socket - then Unix Xapi_globs.unix_domain_socket - else SSL(SSL.make ~use_fork_exec_helper:false (), !host, 443) in - with_transport transport (with_http request f) - - let invalid_pool_secret = - Http.Request.make ~version:"1.0" ~cookie:["pool_secret", "whatever"] - ~user_agent:"quicktest" - Http.Get "/sync_config_files" - - let invalid_basicauth = - Http.Request.make 
~version:"1.0" - ~user_agent:"quicktest" - ~headers:["Authorization", "Basic cm9vdDpiYXI="] (* root:bar *) - Http.Get "/rss" - - let test_invalid_pool_secret = make_test_case "invalid_pool_secret" - "Tests that invalid pool secrets are rejected." - begin fun () -> - assert_raises_match - (function Http_client.Http_error _ -> true | _ -> false) - (fun () -> http invalid_pool_secret (fun _ -> ())) - end - - let test_invalid_basicauth = make_test_case "invalid_basicauth" - "Tests that invalid basic authentication fails." - begin fun () -> - assert_raises_match - (function Http_client.Http_error _ -> true | Http_client.Http_request_rejected _ -> true | _ -> false) - (fun () -> http invalid_basicauth (fun _ -> ())) - end - - let tests = make_module_test_suite "Secret_Auth" - [ test_invalid_pool_secret - ; test_invalid_basicauth - ] + let http request f = + let open Xmlrpc_client in + let transport = + if !using_unix_domain_socket + then Unix Xapi_globs.unix_domain_socket + else SSL(SSL.make ~use_fork_exec_helper:false (), !host, 443) in + with_transport transport (with_http request f) + + let invalid_pool_secret = + Http.Request.make ~version:"1.0" ~cookie:["pool_secret", "whatever"] + ~user_agent:"quicktest" + Http.Get "/sync_config_files" + + let invalid_basicauth = + Http.Request.make ~version:"1.0" + ~user_agent:"quicktest" + ~headers:["Authorization", "Basic cm9vdDpiYXI="] (* root:bar *) + Http.Get "/rss" + + let test_invalid_pool_secret = make_test_case "invalid_pool_secret" + "Tests that invalid pool secrets are rejected." + begin fun () -> + assert_raises_match + (function Http_client.Http_error _ -> true | _ -> false) + (fun () -> http invalid_pool_secret (fun _ -> ())) + end + + let test_invalid_basicauth = make_test_case "invalid_basicauth" + "Tests that invalid basic authentication fails." 
+ begin fun () -> + assert_raises_match + (function Http_client.Http_error _ -> true | Http_client.Http_request_rejected _ -> true | _ -> false) + (fun () -> http invalid_basicauth (fun _ -> ())) + end + + let tests = make_module_test_suite "Secret_Auth" + [ test_invalid_pool_secret + ; test_invalid_basicauth + ] end module HTML_Escaping = struct (* {{{1 *) - module D = Debug.Make(struct let name = "quicktest_http:HTML_Escaping" end) - open D - - let non_resource_cmd = "GET /foo<>'\"& HTTP/1.0\r\n\r\n" - let non_resource_exp = "<>'"&" - let bad_resource_cmd = "GET /%foo<>'\"& HTTP/1.0\r\n\r\n" - let bad_resource_exp = "<>'"&" - let bad_command_cmd = "FOO<>'\"& /foo HTTP/1.0\r\n\r\n" - let bad_command_exp = "<>'\\"&" - - let html_escaping expected cmd = - let check_result b = String.has_substr b expected in - let _, _, _, body = Uds.http_command Xapi_globs.unix_domain_socket cmd in - Printf.printf "expected = [%s]; received = [%s]\n%!" expected (List.hd body); - check_result (List.hd body) - - let test_html_escaping_non_resource = make_test_case "html_escaping_non_resouce" - "Tests that the data returned when asking for a non-existing resource is properly escaped." - (fun () -> assert_true (html_escaping non_resource_exp non_resource_cmd)) - - let test_html_escaping_bad_resource = make_test_case "html_escaping_bad_resouce" - "Tests that the data returned when asking for a badly named resource is properly escaped." 
- (fun () -> assert_true (html_escaping bad_resource_exp bad_resource_cmd)) - - let tests = make_module_test_suite "HTML_Escaping" - [ test_html_escaping_non_resource - ; test_html_escaping_bad_resource - ] + module D = Debug.Make(struct let name = "quicktest_http:HTML_Escaping" end) + open D + + let non_resource_cmd = "GET /foo<>'\"& HTTP/1.0\r\n\r\n" + let non_resource_exp = "<>'"&" + let bad_resource_cmd = "GET /%foo<>'\"& HTTP/1.0\r\n\r\n" + let bad_resource_exp = "<>'"&" + let bad_command_cmd = "FOO<>'\"& /foo HTTP/1.0\r\n\r\n" + let bad_command_exp = "<>'\\"&" + + let html_escaping expected cmd = + let check_result b = String.has_substr b expected in + let _, _, _, body = Uds.http_command Xapi_globs.unix_domain_socket cmd in + Printf.printf "expected = [%s]; received = [%s]\n%!" expected (List.hd body); + check_result (List.hd body) + + let test_html_escaping_non_resource = make_test_case "html_escaping_non_resouce" + "Tests that the data returned when asking for a non-existing resource is properly escaped." + (fun () -> assert_true (html_escaping non_resource_exp non_resource_cmd)) + + let test_html_escaping_bad_resource = make_test_case "html_escaping_bad_resouce" + "Tests that the data returned when asking for a badly named resource is properly escaped." 
+ (fun () -> assert_true (html_escaping bad_resource_exp bad_resource_cmd)) + + let tests = make_module_test_suite "HTML_Escaping" + [ test_html_escaping_non_resource + ; test_html_escaping_bad_resource + ] end (* Test suite and definition of test function {{{1 *) let tests = make_module_test_suite "Rejects" - [ Secret_Auth_fails.tests - ; HTML_Escaping.tests - ] + [ Secret_Auth_fails.tests + ; HTML_Escaping.tests + ] let run_from_within_quicktest () = run_from_within_quicktest tests diff --git a/ocaml/xapi/quicktest_lifecycle.ml b/ocaml/xapi/quicktest_lifecycle.ml index f065ad0a3d1..c2c8445bed4 100644 --- a/ocaml/xapi/quicktest_lifecycle.ml +++ b/ocaml/xapi/quicktest_lifecycle.ml @@ -1,27 +1,27 @@ -type 'a api_call = +type 'a api_call = | Shutdown of 'a | Reboot of 'a -type api_mode = +type api_mode = | Clean | Hard type api = api_mode api_call -type parallel_op = +type parallel_op = | Internal_reboot | Internal_halt | Internal_suspend | Internal_crash -type code_path = +type code_path = | Sync | Event | Both -type result = +type result = | Rebooted | Halted @@ -29,10 +29,10 @@ let final_guest_state = function | Shutdown _ -> Halted | Reboot _ -> Rebooted -type test = { - api: api option; - parallel_op: parallel_op option; - code_path: code_path; +type test = { + api: api option; + parallel_op: parallel_op option; + code_path: code_path; } let string_of_result = function @@ -46,58 +46,58 @@ let expected_result = function | { api = Some (Reboot _); parallel_op = None; code_path = (Sync|Event|Both) } -> Some Rebooted | { parallel_op = Some (Internal_halt | Internal_crash); code_path = Event } -> Some Halted | { parallel_op = Some Internal_reboot; code_path = Event } -> Some Rebooted - + | _ -> None (* invalid test *) -let string_of_test x = +let string_of_test x = let string_of_api = function - | Shutdown Clean -> "clean_shutdown" - | Shutdown Hard -> "hard_shutdown " - | Reboot Clean -> "clean_reboot " - | Reboot Hard -> "hard_reboot " in + | Shutdown Clean -> 
"clean_shutdown" + | Shutdown Hard -> "hard_shutdown " + | Reboot Clean -> "clean_reboot " + | Reboot Hard -> "hard_reboot " in let string_of_parallel_op = function - | Internal_reboot -> "reboot " - | Internal_halt -> "halt " - | Internal_suspend -> "suspend " - | Internal_crash -> "crash " in + | Internal_reboot -> "reboot " + | Internal_halt -> "halt " + | Internal_suspend -> "suspend " + | Internal_crash -> "crash " in let string_of_code_path = function - | Sync -> "synch " - | Event -> "event " - | Both -> "both " in - let dm f x = match x with - | None -> "Nothing " - | Some x -> f x in - Printf.sprintf "%s %s %s -> %s" - (dm string_of_api x.api) (dm string_of_parallel_op x.parallel_op) (string_of_code_path x.code_path) - (match expected_result x with None -> "invalid" | Some y -> string_of_result y) + | Sync -> "synch " + | Event -> "event " + | Both -> "both " in + let dm f x = match x with + | None -> "Nothing " + | Some x -> f x in + Printf.sprintf "%s %s %s -> %s" + (dm string_of_api x.api) (dm string_of_parallel_op x.parallel_op) (string_of_code_path x.code_path) + (match expected_result x with None -> "invalid" | Some y -> string_of_result y) open List let all_possible_tests = - let all_api_variants x = - [ { x with api = None }; - { x with api = Some (Shutdown Clean) }; - { x with api = Some (Shutdown Hard) }; - { x with api = Some (Reboot Clean) }; - { x with api = Some (Reboot Hard) } ] in - let all_parallel_op_variants x = - [ { x with parallel_op = None }; - { x with parallel_op = Some Internal_reboot }; - { x with parallel_op = Some Internal_halt }; - { x with parallel_op = Some Internal_suspend }; - { x with parallel_op = Some Internal_crash } ] in - let all_code_path_variants x = - [ { x with code_path = Sync }; - { x with code_path = Event }; - { x with code_path = Both } ] in + let all_api_variants x = + [ { x with api = None }; + { x with api = Some (Shutdown Clean) }; + { x with api = Some (Shutdown Hard) }; + { x with api = Some (Reboot 
Clean) }; + { x with api = Some (Reboot Hard) } ] in + let all_parallel_op_variants x = + [ { x with parallel_op = None }; + { x with parallel_op = Some Internal_reboot }; + { x with parallel_op = Some Internal_halt }; + { x with parallel_op = Some Internal_suspend }; + { x with parallel_op = Some Internal_crash } ] in + let all_code_path_variants x = + [ { x with code_path = Sync }; + { x with code_path = Event }; + { x with code_path = Both } ] in let xs = [ { api = None; parallel_op = None; code_path = Sync } ] in concat (map all_code_path_variants (concat (map all_parallel_op_variants (concat (map all_api_variants xs))))) - + let all_valid_tests = List.filter (fun t -> expected_result t <> None) all_possible_tests - (* -let _ = + (* +let _ = List.iter print_endline (map string_of_test all_valid_tests); Printf.printf "In total there are %d tests.\n" (List.length all_valid_tests) *) @@ -105,95 +105,95 @@ let _ = open Quicktest_common open Client -let one s vm test = - let open Stdext in - let t = make_test (string_of_test test) 1 in - start t; - let event = "/tmp/fist_disable_event_lifecycle_path" in - let sync = "/tmp/fist_disable_sync_lifecycle_path" in - let simulate = "/tmp/fist_simulate_internal_shutdown" in - let delay = "/tmp/fist_disable_reboot_delay" in - - Pervasiveext.finally - (fun () -> - try - begin - Unixext.unlink_safe simulate; - Unixext.touch_file delay; - match test.code_path with - | Sync -> - Unixext.unlink_safe sync; - Unixext.touch_file event - | Event -> - Unixext.unlink_safe event; - Unixext.touch_file sync - | Both -> - Unixext.unlink_safe sync; - Unixext.unlink_safe event - end; - if Client.VM.get_power_state !rpc s vm = `Halted - then Client.VM.start !rpc s vm false false; - (* wait for the guest to actually start up *) - Thread.delay 15.; - - let call_api = function - | Shutdown Clean -> Client.VM.clean_shutdown !rpc s vm - | Shutdown Hard -> Client.VM.hard_shutdown !rpc s vm - | Reboot Clean -> Client.VM.clean_reboot !rpc s vm - | 
Reboot Hard -> Client.VM.hard_reboot !rpc s vm in - - let domid = Client.VM.get_domid !rpc s vm in - begin match test with - | { api = None; parallel_op = Some x } -> - let reason = match x with - | Internal_reboot -> Xenctrl.Reboot - | Internal_halt -> Xenctrl.Poweroff - | Internal_crash -> Xenctrl.Crash - | Internal_suspend -> Xenctrl.Suspend in - begin - try - Xenctrl.with_intf (fun xc -> Xenctrl.domain_shutdown xc (Int64.to_int domid) reason) - with e -> - debug t (Printf.sprintf "Ignoring exception: %s" (Printexc.to_string e)) - end - | { api = Some x; parallel_op = Some y } -> - let reason = match y with - | Internal_reboot -> "reboot" - | Internal_halt -> "halt" - | Internal_crash -> "crash" - | Internal_suspend -> "suspend" in - Unixext.write_string_to_file simulate reason; - call_api x - | { api = Some x; parallel_op = None } -> - call_api x - | t -> failwith (Printf.sprintf "Invalid test: %s" (string_of_test t)) - end; - - let wait_for_domid p = - let start = Unix.gettimeofday () in - let finished = ref false in - while Unix.gettimeofday () -. start < 300. && (not !finished) do - finished := p (Client.VM.get_domid !rpc s vm); - if not !finished then Thread.delay 1. 
- done; - if not !finished then failwith "timeout" - in - - begin match expected_result test with - | None -> failwith (Printf.sprintf "Invalid test: %s" (string_of_test test)) - | Some Rebooted -> - wait_for_domid (fun domid' -> domid <> domid') - | Some Halted -> - wait_for_domid (fun domid' -> domid' = -1L) - end - with e -> failed t (Printexc.to_string e) - ) - (fun () -> - Unixext.unlink_safe sync; - Unixext.unlink_safe event; - Unixext.unlink_safe delay - ); - success t - -let test s vm = +let one s vm test = + let open Stdext in + let t = make_test (string_of_test test) 1 in + start t; + let event = "/tmp/fist_disable_event_lifecycle_path" in + let sync = "/tmp/fist_disable_sync_lifecycle_path" in + let simulate = "/tmp/fist_simulate_internal_shutdown" in + let delay = "/tmp/fist_disable_reboot_delay" in + + Pervasiveext.finally + (fun () -> + try + begin + Unixext.unlink_safe simulate; + Unixext.touch_file delay; + match test.code_path with + | Sync -> + Unixext.unlink_safe sync; + Unixext.touch_file event + | Event -> + Unixext.unlink_safe event; + Unixext.touch_file sync + | Both -> + Unixext.unlink_safe sync; + Unixext.unlink_safe event + end; + if Client.VM.get_power_state !rpc s vm = `Halted + then Client.VM.start !rpc s vm false false; + (* wait for the guest to actually start up *) + Thread.delay 15.; + + let call_api = function + | Shutdown Clean -> Client.VM.clean_shutdown !rpc s vm + | Shutdown Hard -> Client.VM.hard_shutdown !rpc s vm + | Reboot Clean -> Client.VM.clean_reboot !rpc s vm + | Reboot Hard -> Client.VM.hard_reboot !rpc s vm in + + let domid = Client.VM.get_domid !rpc s vm in + begin match test with + | { api = None; parallel_op = Some x } -> + let reason = match x with + | Internal_reboot -> Xenctrl.Reboot + | Internal_halt -> Xenctrl.Poweroff + | Internal_crash -> Xenctrl.Crash + | Internal_suspend -> Xenctrl.Suspend in + begin + try + Xenctrl.with_intf (fun xc -> Xenctrl.domain_shutdown xc (Int64.to_int domid) reason) + with e -> + 
debug t (Printf.sprintf "Ignoring exception: %s" (Printexc.to_string e)) + end + | { api = Some x; parallel_op = Some y } -> + let reason = match y with + | Internal_reboot -> "reboot" + | Internal_halt -> "halt" + | Internal_crash -> "crash" + | Internal_suspend -> "suspend" in + Unixext.write_string_to_file simulate reason; + call_api x + | { api = Some x; parallel_op = None } -> + call_api x + | t -> failwith (Printf.sprintf "Invalid test: %s" (string_of_test t)) + end; + + let wait_for_domid p = + let start = Unix.gettimeofday () in + let finished = ref false in + while Unix.gettimeofday () -. start < 300. && (not !finished) do + finished := p (Client.VM.get_domid !rpc s vm); + if not !finished then Thread.delay 1. + done; + if not !finished then failwith "timeout" + in + + begin match expected_result test with + | None -> failwith (Printf.sprintf "Invalid test: %s" (string_of_test test)) + | Some Rebooted -> + wait_for_domid (fun domid' -> domid <> domid') + | Some Halted -> + wait_for_domid (fun domid' -> domid' = -1L) + end + with e -> failed t (Printexc.to_string e) + ) + (fun () -> + Unixext.unlink_safe sync; + Unixext.unlink_safe event; + Unixext.unlink_safe delay + ); + success t + +let test s vm = List.iter (one s vm) all_valid_tests diff --git a/ocaml/xapi/quicktest_ocamltest.ml b/ocaml/xapi/quicktest_ocamltest.ml index e08ce7cd6b5..3061524605b 100644 --- a/ocaml/xapi/quicktest_ocamltest.ml +++ b/ocaml/xapi/quicktest_ocamltest.ml @@ -18,50 +18,50 @@ open Printf open Quicktest_common type test_result = passed * failed - and passed = int - and failed = int +and passed = int +and failed = int let add_result (passed, failed) (passed', failed') = - (passed + passed', failed + failed') + (passed + passed', failed + failed') let run_from_within_quicktest (test : test) = - let rec run (test : test) (level : int) = - match test with - | Case (name, description, fn) -> - run_case (name, description, fn) level - | Suite (name, description, tests) -> - run_suite 
(name, description, tests) level + let rec run (test : test) (level : int) = + match test with + | Case (name, description, fn) -> + run_case (name, description, fn) level + | Suite (name, description, tests) -> + run_suite (name, description, tests) level - and run_case (name, description, fn) level = - let test = make_test (sprintf "Testing '%s'" name) level in - start test; - try - fn (); - success test; - (1, 0) - with failure -> - debug test (Backtrace.(to_string_hum (get failure))); - failed test (sprintf "Failed with %s" (Printexc.to_string failure)); - (0, 1) + and run_case (name, description, fn) level = + let test = make_test (sprintf "Testing '%s'" name) level in + start test; + try + fn (); + success test; + (1, 0) + with failure -> + debug test (Backtrace.(to_string_hum (get failure))); + failed test (sprintf "Failed with %s" (Printexc.to_string failure)); + (0, 1) - and run_suite (name, description, tests) level = - let test = make_test (sprintf "Testing '%s'" name) level in - start test; - print_endline ""; - let result = List.fold_left ( - fun accumulating_result test -> - add_result accumulating_result (run test (level + 4)) - ) (0, 0) tests in - debug test (sprintf "Finished testing '%s'" name); - begin match result with - | (_, 0) -> - success test - | (_, failure_count) -> - let failure_description = sprintf "Detected %i failure%s" - failure_count (if failure_count = 1 then "" else "s") in - failed test failure_description - end; - result + and run_suite (name, description, tests) level = + let test = make_test (sprintf "Testing '%s'" name) level in + start test; + print_endline ""; + let result = List.fold_left ( + fun accumulating_result test -> + add_result accumulating_result (run test (level + 4)) + ) (0, 0) tests in + debug test (sprintf "Finished testing '%s'" name); + begin match result with + | (_, 0) -> + success test + | (_, failure_count) -> + let failure_description = sprintf "Detected %i failure%s" + failure_count (if failure_count 
= 1 then "" else "s") in + failed test failure_description + end; + result - in let (_: int*int) = run test 0 in () + in let (_: int*int) = run test 0 in () diff --git a/ocaml/xapi/quicktest_storage.ml b/ocaml/xapi/quicktest_storage.ml index e703079cc5e..f5433363e6d 100644 --- a/ocaml/xapi/quicktest_storage.ml +++ b/ocaml/xapi/quicktest_storage.ml @@ -34,81 +34,81 @@ let iso_path = ref "/opt/xensource/packages/iso" (** Return a list of all SRs which have at least one plugged-in PBD ie those which we can use for stuff *) -let list_srs session_id = +let list_srs session_id = let all = Client.SR.get_all !rpc session_id in List.filter (fun sr -> - let pbds = Client.SR.get_PBDs !rpc session_id sr in - List.fold_left (||) false - (List.map (fun pbd -> Client.PBD.get_currently_attached !rpc session_id pbd) pbds)) all + let pbds = Client.SR.get_PBDs !rpc session_id sr in + List.fold_left (||) false + (List.map (fun pbd -> Client.PBD.get_currently_attached !rpc session_id pbd) pbds)) all -let name_of_sr session_id sr = +let name_of_sr session_id sr = let name_label = Client.SR.get_name_label !rpc session_id sr in let ty = Client.SR.get_type !rpc session_id sr in - Printf.sprintf "%s/%s" name_label ty + Printf.sprintf "%s/%s" name_label ty (* Helper function to make a disk *) -let vdi_create_helper ~session_id ?(name_label="quicktest") ?(virtual_size=4L ** mib) ~sr () : API.ref_VDI = +let vdi_create_helper ~session_id ?(name_label="quicktest") ?(virtual_size=4L ** mib) ~sr () : API.ref_VDI = Client.VDI.create ~rpc:!rpc ~session_id ~name_label ~name_description:"" - ~sR:sr ~virtual_size ~_type:`user ~sharable:false ~read_only:false + ~sR:sr ~virtual_size ~_type:`user ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[] (** Return the size of the smallest disk we can create in each SR. 
This wouldn't be necessary except the Netapp SR breaks with convention and returns errors rather than rounding up for small disks *) -let find_smallest_disk_size session_id sr = +let find_smallest_disk_size session_id sr = let sizes = [ 0L; 1L; 1L ** mib; 2L ** mib; 4L ** mib ] in - let try_one size = + let try_one size = try let vdi = vdi_create_helper ~session_id ~virtual_size:size ~sr () in Client.VDI.destroy !rpc session_id vdi; Some size with _ -> None in - let find_smallest = List.fold_left - (fun state size -> if state = None then try_one size else state) None sizes in + let find_smallest = List.fold_left + (fun state size -> if state = None then try_one size else state) None sizes in find_smallest (** For an SR which may be shared, return one plugged in PBD *) -let choose_active_pbd session_id sr = +let choose_active_pbd session_id sr = let pbds = Client.SR.get_PBDs !rpc session_id sr in match List.filter (fun pbd -> Client.PBD.get_currently_attached !rpc session_id pbd) pbds with | [] -> failwith (Printf.sprintf "SR %s has no attached PBDs" (Client.SR.get_uuid !rpc session_id sr)) | x :: _ -> x (** Scan an SR and return the number of VDIs contained within *) -let count_vdis session_id sr = +let count_vdis session_id sr = Client.SR.scan !rpc session_id sr; let vdis = Client.SR.get_VDIs !rpc session_id sr in (* NB vhd backends may delete records beneath us *) let managed_vdis = List.filter (fun vdi -> try Client.VDI.get_managed !rpc session_id vdi with Api_errors.Server_error(_ (* handle_invalid *), _) -> false) vdis in List.length managed_vdis -(** Common code for VDI.{create,clone,snapshot} which checks to see that a new VDI +(** Common code for VDI.{create,clone,snapshot} which checks to see that a new VDI is successfully created and destroyed by the backend *) -let vdi_create_clone_snapshot test session_id sr make_fn = - let before = count_vdis session_id sr in - let vdi = make_fn () in - let vdi_r = Client.VDI.get_record !rpc session_id vdi in - debug 
test (Printf.sprintf "Created VDI has uuid: %s (size = %Ld)" vdi_r.API.vDI_uuid vdi_r.API.vDI_virtual_size); - let during = count_vdis session_id sr in - if during <= before then begin - debug test (Printf.sprintf "SR has %d VDIs before the test" before); - debug test (Printf.sprintf "SR has %d VDIs during the test" during); - failed test (Printf.sprintf "Before VDI was created there were %d VDIs. After there were %d VDIs." before during); - failwith "vdi_create_clone_snapshot" - end; - Client.VDI.destroy !rpc session_id vdi +let vdi_create_clone_snapshot test session_id sr make_fn = + let before = count_vdis session_id sr in + let vdi = make_fn () in + let vdi_r = Client.VDI.get_record !rpc session_id vdi in + debug test (Printf.sprintf "Created VDI has uuid: %s (size = %Ld)" vdi_r.API.vDI_uuid vdi_r.API.vDI_virtual_size); + let during = count_vdis session_id sr in + if during <= before then begin + debug test (Printf.sprintf "SR has %d VDIs before the test" before); + debug test (Printf.sprintf "SR has %d VDIs during the test" during); + failed test (Printf.sprintf "Before VDI was created there were %d VDIs. After there were %d VDIs." 
before during); + failwith "vdi_create_clone_snapshot" + end; + Client.VDI.destroy !rpc session_id vdi (* Helper function to make a VBD *) -let vbd_create_helper ~session_id ~vM ~vDI ?(userdevice="autodetect") () : API.ref_VBD = +let vbd_create_helper ~session_id ~vM ~vDI ?(userdevice="autodetect") () : API.ref_VBD = Client.VBD.create ~rpc:!rpc ~session_id ~vM ~vDI ~userdevice ~bootable:false ~mode:`RW ~_type:`Disk ~unpluggable:true ~empty:false ~other_config:[] - ~qos_algorithm_type:"" ~qos_algorithm_params:[] + ~qos_algorithm_type:"" ~qos_algorithm_params:[] (** If VDI_CREATE and VDI_DELETE are present then make sure VDIs appear and disappear correctly *) -let vdi_create_destroy caps session_id sr = - if true - && (List.mem vdi_create caps) - && (List.mem vdi_delete caps) +let vdi_create_destroy caps session_id sr = + if true + && (List.mem vdi_create caps) + && (List.mem vdi_delete caps) then begin let test = make_test "VDI_CREATE should make a fresh disk; VDI_DELETE should remove it" 2 in start test; @@ -118,24 +118,24 @@ let vdi_create_destroy caps session_id sr = List.iter (fun virtual_size -> - vdi_create_clone_snapshot test session_id sr - (fun () -> - let vdi = vdi_create_helper ~session_id ~name_label:"quicktest" ~virtual_size ~sr () in - let actual_size = Client.VDI.get_virtual_size !rpc session_id vdi in - if actual_size < virtual_size then begin - debug test (Printf.sprintf "VDI requested size of %Ld but was given only %Ld" virtual_size actual_size); - failed test "VDI.create created too small a VDI" - end; - new_uuid := Some (Client.VDI.get_uuid !rpc session_id vdi); - vdi); - (* check that the new disk has gone already (after only one SR.scan) *) - maybe (fun uuid -> - try - let vdi = Client.VDI.get_by_uuid !rpc session_id uuid in - debug test "VDI still exists: checking to see whether it is marked as managed"; - if Client.VDI.get_managed !rpc session_id vdi - then failed test "VDI was not destroyed (or marked as unmanaged) properly after one 
SR.scan" - with _ -> ()) !new_uuid + vdi_create_clone_snapshot test session_id sr + (fun () -> + let vdi = vdi_create_helper ~session_id ~name_label:"quicktest" ~virtual_size ~sr () in + let actual_size = Client.VDI.get_virtual_size !rpc session_id vdi in + if actual_size < virtual_size then begin + debug test (Printf.sprintf "VDI requested size of %Ld but was given only %Ld" virtual_size actual_size); + failed test "VDI.create created too small a VDI" + end; + new_uuid := Some (Client.VDI.get_uuid !rpc session_id vdi); + vdi); + (* check that the new disk has gone already (after only one SR.scan) *) + maybe (fun uuid -> + try + let vdi = Client.VDI.get_by_uuid !rpc session_id uuid in + debug test "VDI still exists: checking to see whether it is marked as managed"; + if Client.VDI.get_managed !rpc session_id vdi + then failed test "VDI was not destroyed (or marked as unmanaged) properly after one SR.scan" + with _ -> ()) !new_uuid ) sizes_to_try; success test @@ -144,7 +144,7 @@ let vdi_create_destroy caps session_id sr = exception Not_this_host (* Query /sys to find the actual size of the plugged in device *) -let size_of_dom0_vbd session_id vbd = +let size_of_dom0_vbd session_id vbd = let device = Client.VBD.get_device !rpc session_id vbd in let path = Printf.sprintf "/sys/block/%s/size" device in try @@ -157,11 +157,11 @@ let size_of_dom0_vbd session_id vbd = (** Make sure that VDI_CREATE; plug; VDI_DESTROY; VDI_CREATE; plug results in a device of the correct size in dom0 *) -let vdi_create_destroy_plug_checksize caps session_id sr = - if true - && (List.mem vdi_create caps) - && (List.mem vdi_delete caps) - && (List.mem vdi_attach caps) (* DummySR can't even do this *) +let vdi_create_destroy_plug_checksize caps session_id sr = + if true + && (List.mem vdi_create caps) + && (List.mem vdi_delete caps) + && (List.mem vdi_attach caps) (* DummySR can't even do this *) (* && (List.mem `vdi_create allowed_ops) (* The Tools SR is where these two concepts diverge *) && 
(List.mem `vdi_destroy allowed_ops) @@ -173,27 +173,27 @@ let vdi_create_destroy_plug_checksize caps session_id sr = let host = Client.PBD.get_host !rpc session_id pbd in debug test (Printf.sprintf "Will plug into host %s" (Client.Host.get_name_label !rpc session_id host)); - let plug_in_check_size session_id host vdi = + let plug_in_check_size session_id host vdi = let size_should_be = Client.VDI.get_virtual_size !rpc session_id vdi in let dom0 = dom0_of_host session_id host in let vbd = vbd_create_helper ~session_id ~vM:dom0 ~vDI:vdi () in Client.VBD.plug !rpc session_id vbd; finally - (fun () -> - try - let size_dom0 = size_of_dom0_vbd session_id vbd in - debug test (Printf.sprintf "XenAPI reports size: %Ld; dom0 reports size: %Ld" size_should_be size_dom0); - if size_should_be <> size_dom0 then begin - failed test (Printf.sprintf "Size should have been: %Ld" size_should_be); - failwith "vdi_create_destroy_plug_checksize" - end - with Not_this_host -> - debug test "Skipping size check: disk is plugged into another host" - ) - (fun () -> - Client.VBD.unplug !rpc session_id vbd; - Client.VBD.destroy !rpc session_id vbd - ) in + (fun () -> + try + let size_dom0 = size_of_dom0_vbd session_id vbd in + debug test (Printf.sprintf "XenAPI reports size: %Ld; dom0 reports size: %Ld" size_should_be size_dom0); + if size_should_be <> size_dom0 then begin + failed test (Printf.sprintf "Size should have been: %Ld" size_should_be); + failwith "vdi_create_destroy_plug_checksize" + end + with Not_this_host -> + debug test "Skipping size check: disk is plugged into another host" + ) + (fun () -> + Client.VBD.unplug !rpc session_id vbd; + Client.VBD.destroy !rpc session_id vbd + ) in let small_size = 4L ** mib and large_size = 1L ** gib in @@ -219,12 +219,12 @@ let vdi_create_destroy_plug_checksize caps session_id sr = (** If VDI_CREATE is supported this will create a fresh VDI, otherwise it will pass an existing one for the test function 'f' *) -let with_arbitrary_vdi caps 
session_id sr f = +let with_arbitrary_vdi caps session_id sr f = let initial_vdis = count_vdis session_id sr in if List.mem vdi_create caps then begin let vdi = Client.VDI.create ~rpc:!rpc ~session_id ~name_label:"quicktest" ~name_description:"" - ~sR:sr ~virtual_size:4194304L ~_type:`user ~sharable:false ~read_only:false - ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[] in + ~sR:sr ~virtual_size:4194304L ~_type:`user ~sharable:false ~read_only:false + ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[] in finally (fun () -> f caps session_id sr vdi) (fun () -> Client.VDI.destroy !rpc session_id vdi) @@ -232,8 +232,8 @@ let with_arbitrary_vdi caps session_id sr f = Client.SR.scan !rpc session_id sr; match Client.SR.get_VDIs !rpc session_id sr with | [] -> () - | vdi::_ -> - f caps session_id sr vdi + | vdi::_ -> + f caps session_id sr vdi end; (* If everything is supposedly ok then: *) let test = make_test "Checking for VDI leak" 2 in @@ -242,62 +242,62 @@ let with_arbitrary_vdi caps session_id sr f = if current <> initial_vdis then begin failed test (Printf.sprintf "Initally there were %d VDIs; now there are %d VDIs" initial_vdis current); failwith "vdi_leak" - end else success test + end else success test (* When cloning/snapshotting perform field by field comparisons to look for problems *) -let check_fields test list = +let check_fields test list = let check (comparison, field, a, b) = match comparison with | `Same -> - if a <> b then failed test (Printf.sprintf "%s field differs: %s <> %s" field a b) + if a <> b then failed test (Printf.sprintf "%s field differs: %s <> %s" field a b) | `Different -> - if a = b then failed test (Printf.sprintf "%s field unchanged: %s = %s" field a b) in + if a = b then failed test (Printf.sprintf "%s field unchanged: %s = %s" field a b) in List.iter check list (* Clones and snapshots should have some identical fields and some different fields: *) -let clone_snapshot_fields a b = - [ `Same, "virtual_size", - 
Int64.to_string a.API.vDI_virtual_size, +let clone_snapshot_fields a b = + [ `Same, "virtual_size", + Int64.to_string a.API.vDI_virtual_size, Int64.to_string b.API.vDI_virtual_size; `Different, "location", a.API.vDI_location, b.API.vDI_location; - ] + ] (** If VDI_CLONE and VDI_DELETE are present then make sure VDIs appear and disappear correctly *) -let vdi_clone_destroy caps session_id sr vdi = +let vdi_clone_destroy caps session_id sr vdi = if List.mem vdi_clone caps then begin let test = make_test "VDI_CLONE should make a new VDI and VDI_DELETE should remove it" 2 in start test; vdi_create_clone_snapshot test session_id sr - (fun () -> - let vdi' = Client.VDI.clone ~rpc:!rpc ~session_id ~vdi ~driver_params:[] in - (* Check these look like clones *) - let a = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi in - let b = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi' in - check_fields test (clone_snapshot_fields a b); - vdi'); + (fun () -> + let vdi' = Client.VDI.clone ~rpc:!rpc ~session_id ~vdi ~driver_params:[] in + (* Check these look like clones *) + let a = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi in + let b = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi' in + check_fields test (clone_snapshot_fields a b); + vdi'); success test; Client.SR.scan !rpc session_id sr; end (** If VDI_SNAPSHOT and VDI_DELETE are present then make sure VDIs appear and disappear correctly *) -let vdi_snapshot_destroy caps session_id sr vdi = +let vdi_snapshot_destroy caps session_id sr vdi = if List.mem vdi_snapshot caps then begin let test = make_test "VDI_SNAPSHOT should make a new VDI and VDI_DELETE should remove it" 2 in start test; vdi_create_clone_snapshot test session_id sr - (fun () -> - let vdi' = Client.VDI.snapshot ~rpc:!rpc ~session_id ~vdi ~driver_params:[] in - (* Check these look like clones *) - let a = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi in - let b = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi' in - 
check_fields test (clone_snapshot_fields a b); - vdi'); + (fun () -> + let vdi' = Client.VDI.snapshot ~rpc:!rpc ~session_id ~vdi ~driver_params:[] in + (* Check these look like clones *) + let a = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi in + let b = Client.VDI.get_record ~rpc:!rpc ~session_id ~self:vdi' in + check_fields test (clone_snapshot_fields a b); + vdi'); success test end (** If VDI_RESIZE is present then try it out *) -let vdi_resize_test caps session_id sr vdi = +let vdi_resize_test caps session_id sr vdi = if List.mem vdi_resize caps then begin let test = make_test "VDI_RESIZE should be able to resize a VDI" 2 in start test; @@ -317,7 +317,7 @@ let vdi_resize_test caps session_id sr vdi = end (** If VDI_UPDATE is present then try it out *) -let vdi_update_test caps session_id sr vdi = +let vdi_update_test caps session_id sr vdi = if List.mem vdi_update caps then begin let test = make_test "VDI_UPDATE should not fail" 2 in start test; @@ -326,7 +326,7 @@ let vdi_update_test caps session_id sr vdi = end (** If VDI_GENERATE_CONFIG is present then try it out *) -let vdi_generate_config_test caps session_id sr vdi = +let vdi_generate_config_test caps session_id sr vdi = if List.mem vdi_generate_config caps then begin let test = make_test "VDI_GENERATE_CONFIG should not fail" 2 in let pbd = choose_active_pbd session_id sr in @@ -337,7 +337,7 @@ let vdi_generate_config_test caps session_id sr vdi = end (** If SR_UPDATE is present then try it out *) -let sr_update_test caps session_id sr = +let sr_update_test caps session_id sr = if List.mem sr_update caps then begin let test = make_test "SR_UPDATE should not fail" 2 in start test; @@ -346,57 +346,57 @@ let sr_update_test caps session_id sr = end (** Make sure that I can't call VDI.db_forget *) -let vdi_db_forget caps session_id sr vdi = +let vdi_db_forget caps session_id sr vdi = let test = make_test "VDI.db_forget should always fail without authorisation" 2 in start test; try Client.VDI.db_forget 
!rpc session_id vdi; failed test "Call succeeded but it shouldn't have"; failwith "db_forget" - with + with | Api_errors.Server_error(code, _) when code = Api_errors.permission_denied -> - debug test "Caught PERMISSION_DENIED"; - success test + debug test "Caught PERMISSION_DENIED"; + success test | e -> - failed test (Printf.sprintf "Caught wrong error: %s" (Printexc.to_string e)) + failed test (Printf.sprintf "Caught wrong error: %s" (Printexc.to_string e)) (** If VDI_INTRODUCE is present then attempt to introduce a VDI with a duplicate location and another with a bad UUID to make sure that is reported as an error *) -let vdi_bad_introduce caps session_id sr vdi = +let vdi_bad_introduce caps session_id sr vdi = if List.mem vdi_introduce caps then begin let test = make_test "VDI_INTRODUCE should fail when given bad locations or uuids" 2 in start test; let vdir = Client.VDI.get_record !rpc session_id vdi in begin try - debug test (Printf.sprintf "Introducing a VDI with a duplicate UUID (%s)" vdir.API.vDI_uuid); - let (_: API.ref_VDI) = Client.VDI.introduce ~rpc:!rpc ~session_id - ~uuid:vdir.API.vDI_uuid ~name_label:"bad uuid" ~name_description:"" - ~sR:vdir.API.vDI_SR ~_type:vdir.API.vDI_type ~sharable:false ~read_only:false ~other_config:[] - ~location:(Ref.string_of (Ref.make ())) ~xenstore_data:[] ~sm_config:[] - ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null - in - failed test "A bad VDI with a duplicate UUID was introduced"; - failwith "vdi_bad_introduce" - with Api_errors.Server_error(_, _) -> - debug test "API error caught as expected"; + debug test (Printf.sprintf "Introducing a VDI with a duplicate UUID (%s)" vdir.API.vDI_uuid); + let (_: API.ref_VDI) = Client.VDI.introduce ~rpc:!rpc ~session_id + ~uuid:vdir.API.vDI_uuid ~name_label:"bad uuid" ~name_description:"" + ~sR:vdir.API.vDI_SR ~_type:vdir.API.vDI_type ~sharable:false ~read_only:false 
~other_config:[] + ~location:(Ref.string_of (Ref.make ())) ~xenstore_data:[] ~sm_config:[] + ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null + ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null + in + failed test "A bad VDI with a duplicate UUID was introduced"; + failwith "vdi_bad_introduce" + with Api_errors.Server_error(_, _) -> + debug test "API error caught as expected"; end; begin try - debug test (Printf.sprintf "Introducing a VDI with a duplicate location (%s)" vdir.API.vDI_location); - let (_: API.ref_VDI) = Client.VDI.introduce ~rpc:!rpc ~session_id - ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) - ~name_label:"bad location" ~name_description:"" - ~sR:vdir.API.vDI_SR ~_type:vdir.API.vDI_type ~sharable:false ~read_only:false ~other_config:[] - ~location:vdir.API.vDI_location ~xenstore_data:[] ~sm_config:[] - ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null -in - failed test "A bad VDI with a duplicate location was introduced"; - failwith "vdi_bad_introduce" + debug test (Printf.sprintf "Introducing a VDI with a duplicate location (%s)" vdir.API.vDI_location); + let (_: API.ref_VDI) = Client.VDI.introduce ~rpc:!rpc ~session_id + ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) + ~name_label:"bad location" ~name_description:"" + ~sR:vdir.API.vDI_SR ~_type:vdir.API.vDI_type ~sharable:false ~read_only:false ~other_config:[] + ~location:vdir.API.vDI_location ~xenstore_data:[] ~sm_config:[] + ~managed:true ~virtual_size:0L ~physical_utilisation:0L ~metadata_of_pool:Ref.null + ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null + in + failed test "A bad VDI with a duplicate location was introduced"; + failwith "vdi_bad_introduce" with Api_errors.Server_error(_, _) -> - debug test "API error caught as expected"; + debug test "API error caught as expected"; end; success test end @@ -405,59 
+405,59 @@ in (** Basic support for parsing the SR probe result *) type sr_probe_sr = { uuid: string } -let parse_sr_probe_xml (xml: string) : sr_probe_sr list = +let parse_sr_probe_xml (xml: string) : sr_probe_sr list = match Xml.parse_string xml with | Xml.Element("SRlist", _, children) -> - let parse_sr = function - | Xml.Element("SR", _, children) -> - let parse_kv = function - | Xml.Element(key, _, [ Xml.PCData v ]) -> - key, String.strip String.isspace v (* remove whitespace at both ends *) - | _ -> failwith "Malformed key/value pair" in - let all = List.map parse_kv children in - { uuid = List.assoc "UUID" all } - | _ -> failwith "Malformed or missing " in - List.map parse_sr children + let parse_sr = function + | Xml.Element("SR", _, children) -> + let parse_kv = function + | Xml.Element(key, _, [ Xml.PCData v ]) -> + key, String.strip String.isspace v (* remove whitespace at both ends *) + | _ -> failwith "Malformed key/value pair" in + let all = List.map parse_kv children in + { uuid = List.assoc "UUID" all } + | _ -> failwith "Malformed or missing " in + List.map parse_sr children | _ -> failwith "Missing element" - + (** If SR_PROBE is present then probe for an existing plugged in SR and make sure it can be found. 
*) -let sr_probe_test caps session_id sr = +let sr_probe_test caps session_id sr = if List.mem sr_probe caps then begin let test = make_test "SR_PROBE should be able to probe a working SR" 2 in start test; (* Acquire device config parameters from an attached PBD *) let all_pbds = Client.SR.get_PBDs !rpc session_id sr in match List.filter (fun pbd -> Client.PBD.get_currently_attached !rpc session_id pbd) all_pbds with - | [] -> - failed test "Couldn't find an attached PBD"; - failwith "sr_probe_test" + | [] -> + failed test "Couldn't find an attached PBD"; + failwith "sr_probe_test" | pbd :: _ -> - let srr = Client.SR.get_record !rpc session_id sr in - let pbdr = Client.PBD.get_record !rpc session_id pbd in - Client.PBD.unplug !rpc session_id pbd; - let xml = Client.SR.probe ~rpc:!rpc ~session_id - ~host:pbdr.API.pBD_host - ~device_config:pbdr.API.pBD_device_config - ~sm_config:srr.API.sR_sm_config - ~_type:srr.API.sR_type in - Client.PBD.plug !rpc session_id pbd; - let srs = parse_sr_probe_xml xml in - List.iter (fun sr -> debug test (Printf.sprintf "Probe found SR: %s" sr.uuid)) srs; - if List.length srs = 0 then begin - failed test "Probe failed to find an SR, even though one is plugged in"; - failwith "sr_probe_test" - end; - let all_uuids = List.map (fun sr -> sr.uuid) srs in - if not(List.mem srr.API.sR_uuid all_uuids) then begin - failed test (Printf.sprintf "Probe failed to find SR %s even though it is plugged in" srr.API.sR_uuid); - failwith "sr_probe_test" - end; - success test + let srr = Client.SR.get_record !rpc session_id sr in + let pbdr = Client.PBD.get_record !rpc session_id pbd in + Client.PBD.unplug !rpc session_id pbd; + let xml = Client.SR.probe ~rpc:!rpc ~session_id + ~host:pbdr.API.pBD_host + ~device_config:pbdr.API.pBD_device_config + ~sm_config:srr.API.sR_sm_config + ~_type:srr.API.sR_type in + Client.PBD.plug !rpc session_id pbd; + let srs = parse_sr_probe_xml xml in + List.iter (fun sr -> debug test (Printf.sprintf "Probe found SR: %s" 
sr.uuid)) srs; + if List.length srs = 0 then begin + failed test "Probe failed to find an SR, even though one is plugged in"; + failwith "sr_probe_test" + end; + let all_uuids = List.map (fun sr -> sr.uuid) srs in + if not(List.mem srr.API.sR_uuid all_uuids) then begin + failed test (Printf.sprintf "Probe failed to find SR %s even though it is plugged in" srr.API.sR_uuid); + failwith "sr_probe_test" + end; + success test end (** Make sure sr_scan doesn't throw an exception *) -let sr_scan_test caps session_id sr = +let sr_scan_test caps session_id sr = let test = make_test "SR_SCAN should be able to scan a working SR" 2 in start test; Client.SR.scan !rpc session_id sr; @@ -468,69 +468,69 @@ let packages_iso_test session_id = start test; let host = List.hd (Client.Host.get_all !rpc session_id) in debug test (Printf.sprintf "Will plug into host %s" (Client.Host.get_name_label !rpc session_id host)); - let sr = Client.SR.introduce ~rpc:!rpc ~session_id ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) - ~name_label:"test tools SR" ~name_description:"" ~_type:"iso" ~content_type:"iso" - ~shared:true ~sm_config:[] in + let sr = Client.SR.introduce ~rpc:!rpc ~session_id ~uuid:(Uuid.string_of_uuid (Uuid.make_uuid ())) + ~name_label:"test tools SR" ~name_description:"" ~_type:"iso" ~content_type:"iso" + ~shared:true ~sm_config:[] in finally (fun () -> let device_config = [ "location", !iso_path; - "legacy_mode", "true" ] in + "legacy_mode", "true" ] in let pbd = Client.PBD.create ~rpc:!rpc ~session_id ~sR:sr ~host ~device_config ~other_config:[] in finally - (fun () -> - debug test "Plugging PBD"; - Client.PBD.plug !rpc session_id pbd; - Client.SR.scan !rpc session_id sr; - let is_iso x = String.endswith ".iso" (String.lowercase x) in - let files = List.filter is_iso (Array.to_list (Sys.readdir !iso_path)) in - let vdis = Client.SR.get_VDIs !rpc session_id sr in - debug test (Printf.sprintf "SR.scan found %d files (directory has %d .isos)" (List.length vdis) (List.length 
files)); - if List.length files <> List.length vdis then begin - failed test (Printf.sprintf "%s has %d files; SR has %d VDIs" !iso_path (List.length files) (List.length vdis)); - failwith "packages_iso_test" - end; - let locations = List.map (fun vdi -> Client.VDI.get_location !rpc session_id vdi) vdis in - (* Check each file has a VDI.location *) - List.iter (fun file -> - if not(List.mem file locations) then begin - failed test (Printf.sprintf "ISO %s has no corresponding VDI" file); - failwith "packages_iso_test" - end) files; - (* Check each VDI is read-only *) - List.iter (fun vdi -> - let vdir = Client.VDI.get_record !rpc session_id vdi in - if not(vdir.API.vDI_read_only) then begin - failed test (Printf.sprintf "ISO VDI has read_only set to false (%s)" vdir.API.vDI_name_label); - failwith "packages_iso_test" - end; - debug test (Printf.sprintf "ISO VDI %s looks ok" vdir.API.vDI_name_label); - ) vdis; - success test - ) (fun () -> - Client.PBD.unplug !rpc session_id pbd; - Client.PBD.destroy !rpc session_id pbd) + (fun () -> + debug test "Plugging PBD"; + Client.PBD.plug !rpc session_id pbd; + Client.SR.scan !rpc session_id sr; + let is_iso x = String.endswith ".iso" (String.lowercase x) in + let files = List.filter is_iso (Array.to_list (Sys.readdir !iso_path)) in + let vdis = Client.SR.get_VDIs !rpc session_id sr in + debug test (Printf.sprintf "SR.scan found %d files (directory has %d .isos)" (List.length vdis) (List.length files)); + if List.length files <> List.length vdis then begin + failed test (Printf.sprintf "%s has %d files; SR has %d VDIs" !iso_path (List.length files) (List.length vdis)); + failwith "packages_iso_test" + end; + let locations = List.map (fun vdi -> Client.VDI.get_location !rpc session_id vdi) vdis in + (* Check each file has a VDI.location *) + List.iter (fun file -> + if not(List.mem file locations) then begin + failed test (Printf.sprintf "ISO %s has no corresponding VDI" file); + failwith "packages_iso_test" + end) files; + (* 
Check each VDI is read-only *) + List.iter (fun vdi -> + let vdir = Client.VDI.get_record !rpc session_id vdi in + if not(vdir.API.vDI_read_only) then begin + failed test (Printf.sprintf "ISO VDI has read_only set to false (%s)" vdir.API.vDI_name_label); + failwith "packages_iso_test" + end; + debug test (Printf.sprintf "ISO VDI %s looks ok" vdir.API.vDI_name_label); + ) vdis; + success test + ) (fun () -> + Client.PBD.unplug !rpc session_id pbd; + Client.PBD.destroy !rpc session_id pbd) ) (fun () -> Client.SR.forget ~rpc:!rpc ~session_id ~sr) -let sm_caps_of_sr session_id sr = +let sm_caps_of_sr session_id sr = let ty = Client.SR.get_type !rpc session_id sr in let sm = Client.SM.get_all_records !rpc session_id in match List.filter (fun (_, r) -> r.API.sM_type = ty) sm with | [ _, plugin ] -> - plugin.API.sM_capabilities + plugin.API.sM_capabilities | _ -> - failwith (Printf.sprintf "Failed to query SM plugin type = %s" ty) + failwith (Printf.sprintf "Failed to query SM plugin type = %s" ty) (* Even though the SM backend may expose a VDI_CREATE capability attempts to actually create a VDI will fail in (eg) the tools SR and any that happen to be R/O NFS exports *) -let avoid_vdi_create session_id sr = +let avoid_vdi_create session_id sr = let other_config = Client.SR.get_other_config !rpc session_id sr in let is_tools_sr = Client.SR.get_is_tools_sr !rpc session_id sr in let special_key = "quicktest-no-VDI_CREATE" in let is_marked = List.mem_assoc special_key other_config && List.assoc special_key other_config = "true" in is_tools_sr || is_marked -let foreach_sr session_id sr = +let foreach_sr session_id sr = let ty = Client.SR.get_type !rpc session_id sr in let name = Client.SR.get_name_label !rpc session_id sr in let test = make_test (Printf.sprintf "Querying capabilities of SR type %s (name %s)" ty name) 1 in @@ -538,43 +538,43 @@ let foreach_sr session_id sr = let sm = Client.SM.get_all_records !rpc session_id in match List.filter (fun (_, r) -> r.API.sM_type = 
ty) sm with | [] -> - failed test "Failed to query SM plugin" + failed test "Failed to query SM plugin" | [ _, plugin ] -> - let caps = plugin.API.sM_capabilities in - debug test (Printf.sprintf "Capabilities reported: [ %s ]" (String.concat " " caps)); - let oc = Client.SR.get_other_config !rpc session_id sr in - debug test (Printf.sprintf "SR.other_config = [ %s ]" (String.concat "; " (List.map (fun (k, v) -> k ^ ":" ^ v) oc))); - let avoid_vdi_create = avoid_vdi_create session_id sr in - debug test (Printf.sprintf "avoid_vdi_create = %b" avoid_vdi_create); - (* Mirror the special handling for the XenServer Tools SR; the - create and delete capabilities are forbidden in that special case. - See Xapi_sr.valid_operations. *) - let caps = - if avoid_vdi_create then - List.filter - (fun cap -> not (List.mem cap [ vdi_create; vdi_delete ])) caps - else - caps - in - debug test (Printf.sprintf "Capabilities filtered to: [ %s ]" (String.concat " " caps)); - success test; - - sr_scan_test caps session_id sr; - sr_probe_test caps session_id sr; - sr_update_test caps session_id sr; - vdi_create_destroy caps session_id sr; - vdi_create_destroy_plug_checksize caps session_id sr; - with_arbitrary_vdi caps session_id sr vdi_bad_introduce; - with_arbitrary_vdi caps session_id sr vdi_db_forget; - with_arbitrary_vdi caps session_id sr vdi_clone_destroy; - with_arbitrary_vdi caps session_id sr vdi_snapshot_destroy; - with_arbitrary_vdi caps session_id sr vdi_resize_test; - with_arbitrary_vdi caps session_id sr vdi_update_test; - with_arbitrary_vdi caps session_id sr vdi_generate_config_test; + let caps = plugin.API.sM_capabilities in + debug test (Printf.sprintf "Capabilities reported: [ %s ]" (String.concat " " caps)); + let oc = Client.SR.get_other_config !rpc session_id sr in + debug test (Printf.sprintf "SR.other_config = [ %s ]" (String.concat "; " (List.map (fun (k, v) -> k ^ ":" ^ v) oc))); + let avoid_vdi_create = avoid_vdi_create session_id sr in + debug test 
(Printf.sprintf "avoid_vdi_create = %b" avoid_vdi_create); + (* Mirror the special handling for the XenServer Tools SR; the + create and delete capabilities are forbidden in that special case. + See Xapi_sr.valid_operations. *) + let caps = + if avoid_vdi_create then + List.filter + (fun cap -> not (List.mem cap [ vdi_create; vdi_delete ])) caps + else + caps + in + debug test (Printf.sprintf "Capabilities filtered to: [ %s ]" (String.concat " " caps)); + success test; + + sr_scan_test caps session_id sr; + sr_probe_test caps session_id sr; + sr_update_test caps session_id sr; + vdi_create_destroy caps session_id sr; + vdi_create_destroy_plug_checksize caps session_id sr; + with_arbitrary_vdi caps session_id sr vdi_bad_introduce; + with_arbitrary_vdi caps session_id sr vdi_db_forget; + with_arbitrary_vdi caps session_id sr vdi_clone_destroy; + with_arbitrary_vdi caps session_id sr vdi_snapshot_destroy; + with_arbitrary_vdi caps session_id sr vdi_resize_test; + with_arbitrary_vdi caps session_id sr vdi_update_test; + with_arbitrary_vdi caps session_id sr vdi_generate_config_test; | _ -> - failed test "Multiple plugins with the same type detected" + failed test "Multiple plugins with the same type detected" -let go s = +let go s = let test = make_test "Listing available Storage Repositories" 0 in start test; let srs = list_srs s in diff --git a/ocaml/xapi/quicktest_vdi_copy.ml b/ocaml/xapi/quicktest_vdi_copy.ml index a9d937b94b3..7a060c1c54f 100644 --- a/ocaml/xapi/quicktest_vdi_copy.ml +++ b/ocaml/xapi/quicktest_vdi_copy.ml @@ -28,19 +28,19 @@ let http request f = let write_to_vdi ~session_id ~vdi f = let task_id = Client.Task.create ~rpc:!rpc ~session_id - ~label:"quicktest importing VDI contents" - ~description:"" in + ~label:"quicktest importing VDI contents" + ~description:"" in (*let uri = Printf.sprintf "/import_raw_vdi?session_id=%s&vdi=%s&task_id=%si&chunked=true" (Ref.string_of session_id) (Ref.string_of vdi) (Ref.string_of task_id) in *) let req = 
Http.Request.make ~version:"1.0" - ~user_agent:"quicktest" - ~query:[ - "session_id", Ref.string_of session_id; - "vdi", Ref.string_of vdi; - "task_id", Ref.string_of task_id; - "chunked", "true" - ] - Http.Put "/import_raw_vdi" in + ~user_agent:"quicktest" + ~query:[ + "session_id", Ref.string_of session_id; + "vdi", Ref.string_of vdi; + "task_id", Ref.string_of task_id; + "chunked", "true" + ] + Http.Put "/import_raw_vdi" in http req (fun (_, fd) -> f fd); while Client.Task.get_status ~rpc:!rpc ~session_id ~self:task_id = `pending do Unix.sleep 1 @@ -52,10 +52,10 @@ let write_to_vdi ~session_id ~vdi f = let read_from_vdi ~session_id ~vdi f = let uri = Printf.sprintf "/export_raw_vdi?session_id=%s&vdi=%s" - (Ref.string_of session_id) (Ref.string_of vdi) in + (Ref.string_of session_id) (Ref.string_of vdi) in let req = Http.Request.make ~version:"1.0" - ~user_agent:"quicktest" - Http.Get uri in + ~user_agent:"quicktest" + Http.Get uri in http req (fun (_, fd) -> f fd) let start session_id sr = @@ -65,10 +65,10 @@ let start session_id sr = (* Create a 4 MiB disk on src_sr *) let original = Client.VDI.create ~rpc:!rpc ~session_id ~name_label:"quicktest original" - ~name_description:"Used by the VDI.copy test" - ~sR:sr ~virtual_size:Int64.(mul (mul 4L 1024L) 1024L) - ~_type:`user ~sharable:false ~read_only:false - ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[] in + ~name_description:"Used by the VDI.copy test" + ~sR:sr ~virtual_size:Int64.(mul (mul 4L 1024L) 1024L) + ~_type:`user ~sharable:false ~read_only:false + ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[] in debug t "Created a 4MiB test disk"; @@ -76,11 +76,11 @@ let start session_id sr = half of a 2 MiB block *) write_to_vdi ~session_id ~vdi:original (fun fd -> - let data = String.make (1024 * 1024) 'a' in - let chunk = { Chunk.start = 0L; data } in - Chunk.marshal fd chunk; - let final = { Chunk.start = 0L; data = "" } in - Chunk.marshal fd final; + let data = String.make (1024 * 1024) 'a' 
in + let chunk = { Chunk.start = 0L; data } in + Chunk.marshal fd chunk; + let final = { Chunk.start = 0L; data = "" } in + Chunk.marshal fd final; ); debug t "Uploaded 1MiB of 'a's"; @@ -95,11 +95,11 @@ let start session_id sr = be represented as a block with an almost-empty bitmap. *) write_to_vdi ~session_id ~vdi:original (fun fd -> - let data = String.make 512 'b' in - let chunk = { Chunk.start = 0L; data } in - Chunk.marshal fd chunk; - let final = { Chunk.start = 0L; data = "" } in - Chunk.marshal fd final; + let data = String.make 512 'b' in + let chunk = { Chunk.start = 0L; data } in + Chunk.marshal fd chunk; + let final = { Chunk.start = 0L; data = "" } in + Chunk.marshal fd final; ); debug t "Uploaded 1 sector of 'b's"; @@ -130,24 +130,24 @@ let start session_id sr = we've written to the original *) read_from_vdi ~session_id ~vdi:snapshot_backup (fun fd -> - let a = Stdext.Unixext.really_read_string fd 512 in - for i = 0 to String.length a - 1 do - if a.[i] <> 'b' then begin - let msg = Printf.sprintf "VDI offset %d has %c: expected %c" i a.[i] 'b' in - failed t msg; - failwith msg; - end - done; - debug t "First sector is full of 'b's"; - let b = Stdext.Unixext.really_read_string fd (1024 * 1024 - 512) in - for i = 0 to String.length b - 1 do - if b.[i] <> 'a' then begin - let msg = Printf.sprintf "VDI offset %d has %c: expected %c" i b.[i] 'a' in - failed t msg; - failwith msg - end; - done; - debug t "1MiB - 1 sector is full of 'a's"; + let a = Stdext.Unixext.really_read_string fd 512 in + for i = 0 to String.length a - 1 do + if a.[i] <> 'b' then begin + let msg = Printf.sprintf "VDI offset %d has %c: expected %c" i a.[i] 'b' in + failed t msg; + failwith msg; + end + done; + debug t "First sector is full of 'b's"; + let b = Stdext.Unixext.really_read_string fd (1024 * 1024 - 512) in + for i = 0 to String.length b - 1 do + if b.[i] <> 'a' then begin + let msg = Printf.sprintf "VDI offset %d has %c: expected %c" i b.[i] 'a' in + failed t msg; + 
failwith msg + end; + done; + debug t "1MiB - 1 sector is full of 'a's"; ); success t diff --git a/ocaml/xapi/quicktest_vm_memory_constraints.ml b/ocaml/xapi/quicktest_vm_memory_constraints.ml index 509971a18b8..fe53cd11c63 100644 --- a/ocaml/xapi/quicktest_vm_memory_constraints.ml +++ b/ocaml/xapi/quicktest_vm_memory_constraints.ml @@ -22,95 +22,95 @@ let ( ** ) = Int64.mul let ( // ) = Int64.div (** Creates a memory constraints record (with values in bytes) from the given -memory constraints tuple (with values in MiB). *) + memory constraints tuple (with values in MiB). *) let create (static_min, dynamic_min, target, dynamic_max, static_max) = - let scale value = (Int64.of_int value) ** 1024L ** 1024L in - { - static_min = scale static_min ; - dynamic_min = scale dynamic_min; - target = scale target ; - dynamic_max = scale dynamic_max; - static_max = scale static_max ; - } + let scale value = (Int64.of_int value) ** 1024L ** 1024L in + { + static_min = scale static_min ; + dynamic_min = scale dynamic_min; + target = scale target ; + dynamic_max = scale dynamic_max; + static_max = scale static_max ; + } let constraints_pinned = [ - (0,0,0,0,5); (0,1,1,1,5); - (0,2,2,2,5); (0,3,3,3,5); - (0,4,4,4,5); (0,5,5,5,5); - ] + (0,0,0,0,5); (0,1,1,1,5); + (0,2,2,2,5); (0,3,3,3,5); + (0,4,4,4,5); (0,5,5,5,5); +] let constraints_unpinned = [ - (0,0,0,1,5); (0,1,1,2,5); - (0,2,2,3,5); (0,2,3,3,5); - (0,3,4,4,5); (0,4,5,5,5); - ] + (0,0,0,1,5); (0,1,1,2,5); + (0,2,2,3,5); (0,2,3,3,5); + (0,3,4,4,5); (0,4,5,5,5); +] let constraints_valid = [ - (0,1,2,3,4); (1,2,3,4,5); - (1,1,2,3,4); (1,2,3,4,4); - (2,2,2,3,4); (1,2,3,3,3); - (3,3,3,3,4); (1,2,2,2,2); - (4,4,4,4,4); (1,1,1,1,1); - ] + (0,1,2,3,4); (1,2,3,4,5); + (1,1,2,3,4); (1,2,3,4,4); + (2,2,2,3,4); (1,2,3,3,3); + (3,3,3,3,4); (1,2,2,2,2); + (4,4,4,4,4); (1,1,1,1,1); +] let constraints_invalid = [ - (2,1,2,3,4); (4,1,2,3,4); - (5,1,2,3,4); (0,4,2,3,4); - (0,5,2,3,4); (0,1,2,5,4); - ] + (2,1,2,3,4); (4,1,2,3,4); + (5,1,2,3,4); 
(0,4,2,3,4); + (0,5,2,3,4); (0,1,2,5,4); +] let constraints_pinned_at_static_max = [ - (0,0,0,0,0); (0,1,1,1,1); - (0,2,2,2,2); (1,2,2,2,2); - ] + (0,0,0,0,0); (0,1,1,1,1); + (0,2,2,2,2); (1,2,2,2,2); +] (** Tests that [fn i] evaluates to [output] for all values [i] in [inputs]. *) let test_indicator_function fn fn_name output output_name inputs = - make_test_case - (sprintf "%s_%s" fn_name output_name) - (sprintf "Tests that function %s returns %s" fn_name output_name) - (fun () -> - List.iter - (fun i -> assert_equal (fn ~constraints:(create i)) output) - (inputs)) + make_test_case + (sprintf "%s_%s" fn_name output_name) + (sprintf "Tests that function %s returns %s" fn_name output_name) + (fun () -> + List.iter + (fun i -> assert_equal (fn ~constraints:(create i)) output) + (inputs)) let test_are_pinned_true = test_indicator_function - are_pinned "are_pinned" true "true" constraints_pinned + are_pinned "are_pinned" true "true" constraints_pinned let test_are_pinned_false = test_indicator_function - are_pinned "are_pinned" false "false" constraints_unpinned + are_pinned "are_pinned" false "false" constraints_unpinned let test_are_valid_true = test_indicator_function - are_valid "are_valid" true "true" constraints_valid + are_valid "are_valid" true "true" constraints_valid let test_are_valid_false = test_indicator_function - are_valid "are_valid" false "false" constraints_invalid + are_valid "are_valid" false "false" constraints_invalid let test_are_valid_and_pinned_at_static_max_true = test_indicator_function - are_valid_and_pinned_at_static_max "are_valid_and_pinned_at_static_max" - true "true" constraints_pinned_at_static_max + are_valid_and_pinned_at_static_max "are_valid_and_pinned_at_static_max" + true "true" constraints_pinned_at_static_max let test_are_valid_and_pinned_at_static_max_false = test_indicator_function - are_valid_and_pinned_at_static_max "are_valid_and_pinned_at_static_max" - false "false" (constraints_invalid @ constraints_unpinned) + 
are_valid_and_pinned_at_static_max "are_valid_and_pinned_at_static_max" + false "false" (constraints_invalid @ constraints_unpinned) let test_reset_to_safe_defaults = make_function_test_case - "reset_to_safe_defaults" - (fun () -> - List.iter - (fun (input, output) -> - let reset constraints = reset_to_safe_defaults ~constraints in - assert_equal (reset (create input)) (create output)) - [ - ( 256, 512,1024,2048,4096), ( 256,4096,4096,4096,4096); - (4096,2048,1024, 512, 256), ( 256, 256, 256, 256, 256); - (1024,1024,1024,1024,1024), (1024,1024,1024,1024,1024); - ]) + "reset_to_safe_defaults" + (fun () -> + List.iter + (fun (input, output) -> + let reset constraints = reset_to_safe_defaults ~constraints in + assert_equal (reset (create input)) (create output)) + [ + ( 256, 512,1024,2048,4096), ( 256,4096,4096,4096,4096); + (4096,2048,1024, 512, 256), ( 256, 256, 256, 256, 256); + (1024,1024,1024,1024,1024), (1024,1024,1024,1024,1024); + ]) let tests = make_module_test_suite "VM_memory_constraints" -[ - test_are_pinned_true; - test_are_pinned_false; - test_are_valid_true; - test_are_valid_false; - test_are_valid_and_pinned_at_static_max_true; - test_are_valid_and_pinned_at_static_max_false; - test_reset_to_safe_defaults; -] + [ + test_are_pinned_true; + test_are_pinned_false; + test_are_valid_true; + test_are_valid_false; + test_are_valid_and_pinned_at_static_max_true; + test_are_valid_and_pinned_at_static_max_false; + test_reset_to_safe_defaults; + ] let run_from_within_quicktest () = run_from_within_quicktest tests diff --git a/ocaml/xapi/quicktest_vm_placement.ml b/ocaml/xapi/quicktest_vm_placement.ml index c7ed5b000b1..c704d05f428 100644 --- a/ocaml/xapi/quicktest_vm_placement.ml +++ b/ocaml/xapi/quicktest_vm_placement.ml @@ -17,332 +17,332 @@ open Vm_placement module Utility = struct - let assert_invalid_argument = assert_raises_match - (function Invalid_argument _ -> true | _ -> false) - - let test_drop_valid = make_test_case "drop_valid" - "Tests the drop 
function with valid arguments." - (fun () -> - List.iter - (fun (n, xs, xs') -> assert_equal (drop n xs) xs') - [ - 0, [ ], [ ]; - 0, [0 ], [0 ]; - 0, [0; 1 ], [0; 1 ]; - 0, [0; 1; 2], [0; 1; 2]; - 1, [0 ], [ ]; - 1, [0; 1 ], [ 1 ]; - 1, [0; 1; 2], [ 1; 2]; - 2, [0; 1 ], [ ]; - 2, [0; 1; 2], [ 2]; - 3, [0; 1; 2], [ ]; - ] - ) - - let test_drop_invalid = make_test_case "drop_invalid" - "Tests the drop function with invalid arguments." - (fun () -> - List.iter - (fun (n, xs) -> - assert_invalid_argument (fun () -> ignore (drop n xs))) - [ - -1, [ ]; - -1, [0 ]; - -1, [0; 1 ]; - -1, [0; 1; 2]; - 1, [ ]; - 2, [0 ]; - 3, [0; 1 ]; - 4, [0; 1; 2]; - ] - ) - - let test_take_valid = make_test_case "take_valid" - "Tests the take function with valid arguments." - (fun () -> - List.iter - (fun (n, xs, xs') -> assert_equal (take n xs) xs') - [ - 0, [ ], [ ]; - 0, [0 ], [ ]; - 0, [0; 1 ], [ ]; - 0, [0; 1; 2], [ ]; - 1, [0 ], [0; ]; - 1, [0; 1 ], [0; ]; - 1, [0; 1; 2], [0; ]; - 2, [0; 1 ], [0; 1 ]; - 2, [0; 1; 2], [0; 1 ]; - 3, [0; 1; 2], [0; 1; 2]; - ] - ) - - let test_take_invalid = make_test_case "take_invalid" - "Tests the take function with invalid arguments." - (fun () -> - List.iter - (fun (n, xs) -> - assert_invalid_argument (fun () -> ignore (take n xs))) - [ - -1, [ ]; - -1, [0 ]; - -1, [0; 1 ]; - -1, [0; 1; 2]; - 1, [ ]; - 2, [0 ]; - 3, [0; 1 ]; - 4, [0; 1; 2]; - ] - ) - - let test_take_nth_valid = make_test_case "take_nth_valid" - "Tests the take_nth function with valid arguments." - (fun () -> - List.iter - (fun (n, xs, x, xs') -> - assert_equal (take_nth n xs) (x, xs')) - [ - 0, [0 ], 0, [ ]; - 0, [0; 1 ], 0, [ 1 ]; - 0, [0; 1; 2], 0, [ 1; 2]; - 1, [0; 1 ], 1, [0; ]; - 1, [0; 1; 2], 1, [0; 2]; - 2, [0; 1; 2], 2, [0; 1 ]; - ] - ) - - let test_take_nth_invalid = make_test_case "take_nth_invalid" - "Tests the take_nth function with invalid arguments." 
- (fun () -> - List.iter - (fun (n, xs) -> - assert_invalid_argument (fun () -> ignore (take_nth n xs))) - [ - -1, [ ]; - -1, [0 ]; - -1, [0; 1 ]; - -1, [0; 1; 2]; - 1, [ ]; - 2, [0 ]; - 3, [0; 1 ]; - 4, [0; 1; 2]; - ] - ) - - let test_generate_list_index_valid = make_test_case - "generate_list_index_valid" - "Tests the generate_list_index function with valid arguments." - (fun () -> - List.iter - (fun (v, xs, i) -> - assert_equal (generate_list_index (fun () -> v) xs) i) - [ - 0.00, [0 ], 0; - 0.00, [0; 1 ], 0; - 0.49, [0; 1 ], 0; - 0.50, [0; 1 ], 1; - 0.99, [0; 1 ], 1; - 0.00, [0; 1; 2; 3], 0; - 0.24, [0; 1; 2; 3], 0; - 0.25, [0; 1; 2; 3], 1; - 0.49, [0; 1; 2; 3], 1; - 0.50, [0; 1; 2; 3], 2; - 0.74, [0; 1; 2; 3], 2; - 0.75, [0; 1; 2; 3], 3; - 0.99, [0; 1; 2; 3], 3; - ] - ) - - let test_generate_list_index_invalid = make_test_case - "generate_list_index_invalid" - "Tests the generate_list_index function with invalid arguments." - (fun () -> - List.iter - (fun (v, xs) -> - assert_invalid_argument - (fun () -> - ignore (generate_list_index (fun () -> v) xs))) - [ - 0.50, [ ]; - -0.01, [0; 1; 2; 3]; - 1.00, [0; 1; 2; 3]; - ] - ) - - let test_take_random_element_from_list_valid = make_test_case - "take_random_element_from_list_valid" - "Tests the take_random_element_from_list function with valid arguments." 
- (fun () -> - List.iter - (fun (v, xs, x, xs') -> - assert_equal (x, xs') - (take_random_element_from_list (fun () -> v) xs)) - [ - 0.00, [0 ], 0, [ ]; - 0.00, [0; 1 ], 0, [ 1 ]; - 0.49, [0; 1 ], 0, [ 1 ]; - 0.50, [0; 1 ], 1, [0 ]; - 0.99, [0; 1 ], 1, [0 ]; - 0.00, [0; 1; 2; 3], 0, [ 1; 2; 3]; - 0.24, [0; 1; 2; 3], 0, [ 1; 2; 3]; - 0.25, [0; 1; 2; 3], 1, [0; 2; 3]; - 0.49, [0; 1; 2; 3], 1, [0; 2; 3]; - 0.50, [0; 1; 2; 3], 2, [0; 1; 3]; - 0.74, [0; 1; 2; 3], 2, [0; 1; 3]; - 0.75, [0; 1; 2; 3], 3, [0; 1; 2; ]; - 0.99, [0; 1; 2; 3], 3, [0; 1; 2; ]; - ] - ) - - let test_take_random_element_from_list_invalid = make_test_case - "take_random_element_from_list_invalid" - "Tests the take_random_element_from_list function with invalid arguments." - (fun () -> - List.iter - (fun (v, xs) -> - assert_invalid_argument - (fun () -> - ignore (take_random_element_from_list - (fun () -> v) xs))) - [ - 0.50, [ ]; - -0.01, [0; 1; 2; 3]; - 1.00, [0; 1; 2; 3]; - ] - ) - - type dummy = N2 | N1 | Z | P1 | P2 - - let evaluate_dummy = function - | N2 -> -2 | N1 -> -1 | Z -> 0 | P1 -> 1 | P2 -> 2 - - let test_evaluate_sort_partition = make_test_case - "evaluate_sort_partition" - "Tests the evaluate_sort_partition function." - (fun () -> - (* Comparator for ascending order. *) - let forward = compare in - (* Comparator for descending order. *) - let reverse = (fun x y -> compare y x) in - (* Filter for positive values. *) - let positive = ((<) 0) in - (* Filter for negative values. 
*) - let negative = ((>) 0) in - List.iter - (fun (input, sort, partition, out_selected, out_unselected) -> - assert_equal - (evaluate_sort_partition - evaluate_dummy sort partition input) - (out_selected, out_unselected)) - [ - [ ], forward, positive, [ ], [ ]; - [ Z ], forward, positive, [ ], [ Z]; - [ P1;Z ], forward, positive, [P1 ], [ Z]; - [P2;P1;Z ], forward, positive, [P1;P2], [ Z]; - [ Z;N1 ], forward, positive, [ ], [ N1;Z]; - [ Z;N1;N2], forward, positive, [ ], [N2;N1;Z]; - [P2;P1;Z;N1;N2], forward, positive, [P1;P2], [N2;N1;Z]; - - [ ], forward, negative, [ ], [ ]; - [ Z ], forward, negative, [ ], [Z ]; - [ P1;Z ], forward, negative, [ ], [Z;P1 ]; - [P2;P1;Z ], forward, negative, [ ], [Z;P1;P2]; - [ Z;N1 ], forward, negative, [ N1], [Z ]; - [ Z;N1;N2], forward, negative, [N2;N1], [Z ]; - [P2;P1;Z;N1;N2], forward, negative, [N2;N1], [Z;P1;P2]; - - [ ], reverse, positive, [ ], [ ]; - [ Z ], reverse, positive, [ ], [Z ]; - [ P1;Z ], reverse, positive, [ P1], [Z ]; - [P2;P1;Z ], reverse, positive, [P2;P1], [Z ]; - [ Z;N1 ], reverse, positive, [ ], [Z;N1 ]; - [ Z;N1;N2], reverse, positive, [ ], [Z;N1;N2]; - [P2;P1;Z;N1;N2], reverse, positive, [P2;P1], [Z;N1;N2]; - - [ ], reverse, negative, [ ], [ ]; - [ Z ], reverse, negative, [ ], [ Z]; - [ P1;Z ], reverse, negative, [ ], [ P1;Z]; - [P2;P1;Z ], reverse, negative, [ ], [P2;P1;Z]; - [ Z;N1 ], reverse, negative, [N1 ], [ Z]; - [ Z;N1;N2], reverse, negative, [N1;N2], [Z ]; - [P2;P1;Z;N1;N2], reverse, negative, [N1;N2], [P2;P1;Z]; - ] - ) - - let tests = make_test_suite "Utility" - "Generic utility functions." 
- [ - test_drop_valid; - test_drop_invalid; - test_take_valid; - test_take_invalid; - test_take_nth_valid; - test_take_nth_invalid; - test_generate_list_index_valid; - test_generate_list_index_invalid; - test_take_random_element_from_list_valid; - test_take_random_element_from_list_invalid; - test_evaluate_sort_partition; - ] + let assert_invalid_argument = assert_raises_match + (function Invalid_argument _ -> true | _ -> false) + + let test_drop_valid = make_test_case "drop_valid" + "Tests the drop function with valid arguments." + (fun () -> + List.iter + (fun (n, xs, xs') -> assert_equal (drop n xs) xs') + [ + 0, [ ], [ ]; + 0, [0 ], [0 ]; + 0, [0; 1 ], [0; 1 ]; + 0, [0; 1; 2], [0; 1; 2]; + 1, [0 ], [ ]; + 1, [0; 1 ], [ 1 ]; + 1, [0; 1; 2], [ 1; 2]; + 2, [0; 1 ], [ ]; + 2, [0; 1; 2], [ 2]; + 3, [0; 1; 2], [ ]; + ] + ) + + let test_drop_invalid = make_test_case "drop_invalid" + "Tests the drop function with invalid arguments." + (fun () -> + List.iter + (fun (n, xs) -> + assert_invalid_argument (fun () -> ignore (drop n xs))) + [ + -1, [ ]; + -1, [0 ]; + -1, [0; 1 ]; + -1, [0; 1; 2]; + 1, [ ]; + 2, [0 ]; + 3, [0; 1 ]; + 4, [0; 1; 2]; + ] + ) + + let test_take_valid = make_test_case "take_valid" + "Tests the take function with valid arguments." + (fun () -> + List.iter + (fun (n, xs, xs') -> assert_equal (take n xs) xs') + [ + 0, [ ], [ ]; + 0, [0 ], [ ]; + 0, [0; 1 ], [ ]; + 0, [0; 1; 2], [ ]; + 1, [0 ], [0; ]; + 1, [0; 1 ], [0; ]; + 1, [0; 1; 2], [0; ]; + 2, [0; 1 ], [0; 1 ]; + 2, [0; 1; 2], [0; 1 ]; + 3, [0; 1; 2], [0; 1; 2]; + ] + ) + + let test_take_invalid = make_test_case "take_invalid" + "Tests the take function with invalid arguments." 
+ (fun () -> + List.iter + (fun (n, xs) -> + assert_invalid_argument (fun () -> ignore (take n xs))) + [ + -1, [ ]; + -1, [0 ]; + -1, [0; 1 ]; + -1, [0; 1; 2]; + 1, [ ]; + 2, [0 ]; + 3, [0; 1 ]; + 4, [0; 1; 2]; + ] + ) + + let test_take_nth_valid = make_test_case "take_nth_valid" + "Tests the take_nth function with valid arguments." + (fun () -> + List.iter + (fun (n, xs, x, xs') -> + assert_equal (take_nth n xs) (x, xs')) + [ + 0, [0 ], 0, [ ]; + 0, [0; 1 ], 0, [ 1 ]; + 0, [0; 1; 2], 0, [ 1; 2]; + 1, [0; 1 ], 1, [0; ]; + 1, [0; 1; 2], 1, [0; 2]; + 2, [0; 1; 2], 2, [0; 1 ]; + ] + ) + + let test_take_nth_invalid = make_test_case "take_nth_invalid" + "Tests the take_nth function with invalid arguments." + (fun () -> + List.iter + (fun (n, xs) -> + assert_invalid_argument (fun () -> ignore (take_nth n xs))) + [ + -1, [ ]; + -1, [0 ]; + -1, [0; 1 ]; + -1, [0; 1; 2]; + 1, [ ]; + 2, [0 ]; + 3, [0; 1 ]; + 4, [0; 1; 2]; + ] + ) + + let test_generate_list_index_valid = make_test_case + "generate_list_index_valid" + "Tests the generate_list_index function with valid arguments." + (fun () -> + List.iter + (fun (v, xs, i) -> + assert_equal (generate_list_index (fun () -> v) xs) i) + [ + 0.00, [0 ], 0; + 0.00, [0; 1 ], 0; + 0.49, [0; 1 ], 0; + 0.50, [0; 1 ], 1; + 0.99, [0; 1 ], 1; + 0.00, [0; 1; 2; 3], 0; + 0.24, [0; 1; 2; 3], 0; + 0.25, [0; 1; 2; 3], 1; + 0.49, [0; 1; 2; 3], 1; + 0.50, [0; 1; 2; 3], 2; + 0.74, [0; 1; 2; 3], 2; + 0.75, [0; 1; 2; 3], 3; + 0.99, [0; 1; 2; 3], 3; + ] + ) + + let test_generate_list_index_invalid = make_test_case + "generate_list_index_invalid" + "Tests the generate_list_index function with invalid arguments." 
+ (fun () -> + List.iter + (fun (v, xs) -> + assert_invalid_argument + (fun () -> + ignore (generate_list_index (fun () -> v) xs))) + [ + 0.50, [ ]; + -0.01, [0; 1; 2; 3]; + 1.00, [0; 1; 2; 3]; + ] + ) + + let test_take_random_element_from_list_valid = make_test_case + "take_random_element_from_list_valid" + "Tests the take_random_element_from_list function with valid arguments." + (fun () -> + List.iter + (fun (v, xs, x, xs') -> + assert_equal (x, xs') + (take_random_element_from_list (fun () -> v) xs)) + [ + 0.00, [0 ], 0, [ ]; + 0.00, [0; 1 ], 0, [ 1 ]; + 0.49, [0; 1 ], 0, [ 1 ]; + 0.50, [0; 1 ], 1, [0 ]; + 0.99, [0; 1 ], 1, [0 ]; + 0.00, [0; 1; 2; 3], 0, [ 1; 2; 3]; + 0.24, [0; 1; 2; 3], 0, [ 1; 2; 3]; + 0.25, [0; 1; 2; 3], 1, [0; 2; 3]; + 0.49, [0; 1; 2; 3], 1, [0; 2; 3]; + 0.50, [0; 1; 2; 3], 2, [0; 1; 3]; + 0.74, [0; 1; 2; 3], 2, [0; 1; 3]; + 0.75, [0; 1; 2; 3], 3, [0; 1; 2; ]; + 0.99, [0; 1; 2; 3], 3, [0; 1; 2; ]; + ] + ) + + let test_take_random_element_from_list_invalid = make_test_case + "take_random_element_from_list_invalid" + "Tests the take_random_element_from_list function with invalid arguments." + (fun () -> + List.iter + (fun (v, xs) -> + assert_invalid_argument + (fun () -> + ignore (take_random_element_from_list + (fun () -> v) xs))) + [ + 0.50, [ ]; + -0.01, [0; 1; 2; 3]; + 1.00, [0; 1; 2; 3]; + ] + ) + + type dummy = N2 | N1 | Z | P1 | P2 + + let evaluate_dummy = function + | N2 -> -2 | N1 -> -1 | Z -> 0 | P1 -> 1 | P2 -> 2 + + let test_evaluate_sort_partition = make_test_case + "evaluate_sort_partition" + "Tests the evaluate_sort_partition function." + (fun () -> + (* Comparator for ascending order. *) + let forward = compare in + (* Comparator for descending order. *) + let reverse = (fun x y -> compare y x) in + (* Filter for positive values. *) + let positive = ((<) 0) in + (* Filter for negative values. 
*) + let negative = ((>) 0) in + List.iter + (fun (input, sort, partition, out_selected, out_unselected) -> + assert_equal + (evaluate_sort_partition + evaluate_dummy sort partition input) + (out_selected, out_unselected)) + [ + [ ], forward, positive, [ ], [ ]; + [ Z ], forward, positive, [ ], [ Z]; + [ P1;Z ], forward, positive, [P1 ], [ Z]; + [P2;P1;Z ], forward, positive, [P1;P2], [ Z]; + [ Z;N1 ], forward, positive, [ ], [ N1;Z]; + [ Z;N1;N2], forward, positive, [ ], [N2;N1;Z]; + [P2;P1;Z;N1;N2], forward, positive, [P1;P2], [N2;N1;Z]; + + [ ], forward, negative, [ ], [ ]; + [ Z ], forward, negative, [ ], [Z ]; + [ P1;Z ], forward, negative, [ ], [Z;P1 ]; + [P2;P1;Z ], forward, negative, [ ], [Z;P1;P2]; + [ Z;N1 ], forward, negative, [ N1], [Z ]; + [ Z;N1;N2], forward, negative, [N2;N1], [Z ]; + [P2;P1;Z;N1;N2], forward, negative, [N2;N1], [Z;P1;P2]; + + [ ], reverse, positive, [ ], [ ]; + [ Z ], reverse, positive, [ ], [Z ]; + [ P1;Z ], reverse, positive, [ P1], [Z ]; + [P2;P1;Z ], reverse, positive, [P2;P1], [Z ]; + [ Z;N1 ], reverse, positive, [ ], [Z;N1 ]; + [ Z;N1;N2], reverse, positive, [ ], [Z;N1;N2]; + [P2;P1;Z;N1;N2], reverse, positive, [P2;P1], [Z;N1;N2]; + + [ ], reverse, negative, [ ], [ ]; + [ Z ], reverse, negative, [ ], [ Z]; + [ P1;Z ], reverse, negative, [ ], [ P1;Z]; + [P2;P1;Z ], reverse, negative, [ ], [P2;P1;Z]; + [ Z;N1 ], reverse, negative, [N1 ], [ Z]; + [ Z;N1;N2], reverse, negative, [N1;N2], [Z ]; + [P2;P1;Z;N1;N2], reverse, negative, [N1;N2], [P2;P1;Z]; + ] + ) + + let tests = make_test_suite "Utility" + "Generic utility functions." 
+ [ + test_drop_valid; + test_drop_invalid; + test_take_valid; + test_take_invalid; + test_take_nth_valid; + test_take_nth_invalid; + test_generate_list_index_valid; + test_generate_list_index_invalid; + test_take_random_element_from_list_valid; + test_take_random_element_from_list_invalid; + test_evaluate_sort_partition; + ] end module Construction = struct - let guest_snapshot id - memory_overhead - memory_static_min - memory_dynamic_min - memory_dynamic_max - memory_static_max - = - { GS.id = id - ; GS.memory_overhead = Int64.of_int memory_overhead - ; GS.memory_static_min = Int64.of_int memory_static_min - ; GS.memory_dynamic_min = Int64.of_int memory_dynamic_min - ; GS.memory_dynamic_max = Int64.of_int memory_dynamic_max - ; GS.memory_static_max = Int64.of_int memory_static_max - } - - let host_snapshot id - is_pool_master - guests_resident - guests_scheduled - memory_overhead - memory_total - = - { HS.id = id - ; HS.is_pool_master = is_pool_master - ; HS.guests_resident = guests_resident - ; HS.guests_scheduled = guests_scheduled - ; HS.memory_overhead = Int64.of_int memory_overhead - ; HS.memory_total = Int64.of_int memory_total - } - - let host_snapshot_summary id - is_pool_master - memory_available_sum - memory_static_min_sum - memory_dynamic_min_sum - memory_dynamic_max_sum - memory_static_max_sum - = - { HSS.id = id - ; HSS.is_pool_master = is_pool_master - ; HSS.memory_available_sum = Int64.of_int memory_available_sum - ; HSS.memory_static_min_sum = Int64.of_int memory_static_min_sum - ; HSS.memory_dynamic_min_sum = Int64.of_int memory_dynamic_min_sum - ; HSS.memory_dynamic_max_sum = Int64.of_int memory_dynamic_max_sum - ; HSS.memory_static_max_sum = Int64.of_int memory_static_max_sum - } + let guest_snapshot id + memory_overhead + memory_static_min + memory_dynamic_min + memory_dynamic_max + memory_static_max + = + { GS.id = id + ; GS.memory_overhead = Int64.of_int memory_overhead + ; GS.memory_static_min = Int64.of_int memory_static_min + ; 
GS.memory_dynamic_min = Int64.of_int memory_dynamic_min + ; GS.memory_dynamic_max = Int64.of_int memory_dynamic_max + ; GS.memory_static_max = Int64.of_int memory_static_max + } + + let host_snapshot id + is_pool_master + guests_resident + guests_scheduled + memory_overhead + memory_total + = + { HS.id = id + ; HS.is_pool_master = is_pool_master + ; HS.guests_resident = guests_resident + ; HS.guests_scheduled = guests_scheduled + ; HS.memory_overhead = Int64.of_int memory_overhead + ; HS.memory_total = Int64.of_int memory_total + } + + let host_snapshot_summary id + is_pool_master + memory_available_sum + memory_static_min_sum + memory_dynamic_min_sum + memory_dynamic_max_sum + memory_static_max_sum + = + { HSS.id = id + ; HSS.is_pool_master = is_pool_master + ; HSS.memory_available_sum = Int64.of_int memory_available_sum + ; HSS.memory_static_min_sum = Int64.of_int memory_static_min_sum + ; HSS.memory_dynamic_min_sum = Int64.of_int memory_dynamic_min_sum + ; HSS.memory_dynamic_max_sum = Int64.of_int memory_dynamic_max_sum + ; HSS.memory_static_max_sum = Int64.of_int memory_static_max_sum + } end module Summarisation = struct - open Construction + open Construction - (** Raw input and output data for the summarise_host_snapshot function. *) - let rec summarise_host_snapshot_input_output_data = [ (* + (** Raw input and output data for the summarise_host_snapshot function. 
*) + let rec summarise_host_snapshot_input_output_data = [ (* (---------------------------------------------------), (-------------) ( INPUT: ), ( OUTPUT: ) (---------------------------------------------------), (-------------) @@ -352,328 +352,328 @@ module Summarisation = struct ([ resident ],[ scheduled ],[ extra ] ), ( ) ([-------------],[-------------],[-------------] ), (-------------) ([ xpqrs; xpqrs],[ xpqrs; xpqrs],[ xpqrs; xpqrs],x,t), (A, P, Q, R, S)*) - ([ ],[ ],[ ],0,0), (0, 0, 0, 0, 0) ; - ([ ],[ ],[ ],0,8), (8, 0, 0, 0, 0) ; - ([ ],[ ],[ ],1,8), (7, 0, 0, 0, 0) ; - ([ ],[ ],[ _11234],1,8), (6, 1, 2, 3, 4) ; - ([ ],[ _11234],[ ],1,8), (6, 1, 2, 3, 4) ; - ([ _11234],[ ],[ ],1,8), (6, 1, 2, 3, 4) ; - ([ ],[ ],[_11234;_11234],1,8), (5, 2, 4, 6, 8) ; - ([ ],[_11234;_11234],[ ],1,8), (5, 2, 4, 6, 8) ; - ([_11234;_11234],[ ],[ ],1,8), (5, 2, 4, 6, 8) ; - ([ ],[ _11234],[ _11234],1,8), (5, 2, 4, 6, 8) ; - ([ _11234],[ ],[ _11234],1,8), (5, 2, 4, 6, 8) ; - ([ _11234],[ _11234],[ ],1,8), (5, 2, 4, 6, 8) ; - ([ ],[ _11234],[_11234;_11234],1,8), (4, 3, 6, 9,12) ; - ([ _11234],[_11234;_11234],[ ],1,8), (4, 3, 6, 9,12) ; - ([_11234;_11234],[ ],[ _11234],1,8), (4, 3, 6, 9,12) ; - ([ _11234],[ _11234],[ _11234],1,8), (4, 3, 6, 9,12) ; - ([ ],[_11234;_11234],[_11234;_11234],1,8), (3, 4, 8,12,16) ; - ([_11234;_11234],[_11234;_11234],[ ],1,8), (3, 4, 8,12,16) ; - ([_11234;_11234],[ ],[_11234;_11234],1,8), (3, 4, 8,12,16) ; - ([ _11234],[ _11234],[_11234;_11234],1,8), (3, 4, 8,12,16) ; - ([ _11234],[_11234;_11234],[ _11234],1,8), (3, 4, 8,12,16) ; - ([_11234;_11234],[ _11234],[ _11234],1,8), (3, 4, 8,12,16) ; - ([ _11234],[_11234;_11234],[_11234;_11234],1,8), (2, 5,10,15,20) ; - ([_11234;_11234],[_11234;_11234],[ _11234],1,8), (2, 5,10,15,20) ; - ([_11234;_11234],[ _11234],[_11234;_11234],1,8), (2, 5,10,15,20) ; - ([_11234;_11234],[_11234;_11234],[_11234;_11234],1,8), (1, 6,12,18,24) ] - (*-------------------------+----------------------------*) - (* INPUT KEY: | OUTPUT 
KEY: *) - (*-------------------------+----------------------------*) - (* t = memory_total | A = memory_total *) - (* x = memory_overhead | - Σ memory_overhead *) - (*-------------------------+----------------------------*) - (* p = memory_static_min | P = Σ memory_static_min *) - (* q = memory_dynamic_min | Q = Σ memory_dynamic_min *) - (* r = memory_dynamic_max | R = Σ memory_dynamic_max *) - (* s = memory_static_max | S = Σ memory_static_max *) - (*-------------------------+----------------------------*) - and _11234 = (1, 1, 2, 3, 4) - - (** A list of (input, output) for the summarise_host_snapshot function. *) - let summarise_host_snapshot_input_output_list = - let make_guest (x, p, q, r, s) = - guest_snapshot "" x p q r s in - let make_guests guests = - List.map make_guest guests in - let make_input (resident, scheduled, extra, x, t) = - (host_snapshot "" false - (make_guests resident) - (make_guests scheduled) x t) - , - (make_guests extra) in - let make_output (a, p, q, r, s) = - host_snapshot_summary "" false a p q r s in - List.map - (fun (input, output) -> (make_input input, make_output output)) - summarise_host_snapshot_input_output_data - - let test_summarise_host_snapshot = make_test_case - "summarise_host_snapshot" - "Tests the summarise_host_snapshot function." - (fun () -> - List.iter - (fun ((host_snapshot, extra_guests), host_snapshot_summary) -> - assert_equal - (summarise_host_snapshot extra_guests host_snapshot) - (host_snapshot_summary)) - summarise_host_snapshot_input_output_list - ) - - let tests = make_test_suite "Summarisation" - "Tests relating to pool, host and guest snapshot summarisation." 
- [ - test_summarise_host_snapshot; - ] + ([ ],[ ],[ ],0,0), (0, 0, 0, 0, 0) ; + ([ ],[ ],[ ],0,8), (8, 0, 0, 0, 0) ; + ([ ],[ ],[ ],1,8), (7, 0, 0, 0, 0) ; + ([ ],[ ],[ _11234],1,8), (6, 1, 2, 3, 4) ; + ([ ],[ _11234],[ ],1,8), (6, 1, 2, 3, 4) ; + ([ _11234],[ ],[ ],1,8), (6, 1, 2, 3, 4) ; + ([ ],[ ],[_11234;_11234],1,8), (5, 2, 4, 6, 8) ; + ([ ],[_11234;_11234],[ ],1,8), (5, 2, 4, 6, 8) ; + ([_11234;_11234],[ ],[ ],1,8), (5, 2, 4, 6, 8) ; + ([ ],[ _11234],[ _11234],1,8), (5, 2, 4, 6, 8) ; + ([ _11234],[ ],[ _11234],1,8), (5, 2, 4, 6, 8) ; + ([ _11234],[ _11234],[ ],1,8), (5, 2, 4, 6, 8) ; + ([ ],[ _11234],[_11234;_11234],1,8), (4, 3, 6, 9,12) ; + ([ _11234],[_11234;_11234],[ ],1,8), (4, 3, 6, 9,12) ; + ([_11234;_11234],[ ],[ _11234],1,8), (4, 3, 6, 9,12) ; + ([ _11234],[ _11234],[ _11234],1,8), (4, 3, 6, 9,12) ; + ([ ],[_11234;_11234],[_11234;_11234],1,8), (3, 4, 8,12,16) ; + ([_11234;_11234],[_11234;_11234],[ ],1,8), (3, 4, 8,12,16) ; + ([_11234;_11234],[ ],[_11234;_11234],1,8), (3, 4, 8,12,16) ; + ([ _11234],[ _11234],[_11234;_11234],1,8), (3, 4, 8,12,16) ; + ([ _11234],[_11234;_11234],[ _11234],1,8), (3, 4, 8,12,16) ; + ([_11234;_11234],[ _11234],[ _11234],1,8), (3, 4, 8,12,16) ; + ([ _11234],[_11234;_11234],[_11234;_11234],1,8), (2, 5,10,15,20) ; + ([_11234;_11234],[_11234;_11234],[ _11234],1,8), (2, 5,10,15,20) ; + ([_11234;_11234],[ _11234],[_11234;_11234],1,8), (2, 5,10,15,20) ; + ([_11234;_11234],[_11234;_11234],[_11234;_11234],1,8), (1, 6,12,18,24) ] + (*-------------------------+----------------------------*) + (* INPUT KEY: | OUTPUT KEY: *) + (*-------------------------+----------------------------*) + (* t = memory_total | A = memory_total *) + (* x = memory_overhead | - Σ memory_overhead *) + (*-------------------------+----------------------------*) + (* p = memory_static_min | P = Σ memory_static_min *) + (* q = memory_dynamic_min | Q = Σ memory_dynamic_min *) + (* r = memory_dynamic_max | R = Σ memory_dynamic_max *) + (* s = memory_static_max | S 
= Σ memory_static_max *) + (*-------------------------+----------------------------*) + and _11234 = (1, 1, 2, 3, 4) + + (** A list of (input, output) for the summarise_host_snapshot function. *) + let summarise_host_snapshot_input_output_list = + let make_guest (x, p, q, r, s) = + guest_snapshot "" x p q r s in + let make_guests guests = + List.map make_guest guests in + let make_input (resident, scheduled, extra, x, t) = + (host_snapshot "" false + (make_guests resident) + (make_guests scheduled) x t) + , + (make_guests extra) in + let make_output (a, p, q, r, s) = + host_snapshot_summary "" false a p q r s in + List.map + (fun (input, output) -> (make_input input, make_output output)) + summarise_host_snapshot_input_output_data + + let test_summarise_host_snapshot = make_test_case + "summarise_host_snapshot" + "Tests the summarise_host_snapshot function." + (fun () -> + List.iter + (fun ((host_snapshot, extra_guests), host_snapshot_summary) -> + assert_equal + (summarise_host_snapshot extra_guests host_snapshot) + (host_snapshot_summary)) + summarise_host_snapshot_input_output_list + ) + + let tests = make_test_suite "Summarisation" + "Tests relating to pool, host and guest snapshot summarisation." + [ + test_summarise_host_snapshot; + ] end module Categorisation = struct - open Construction - - let mock_slave = host_snapshot_summary "id" false - let mock_master = host_snapshot_summary "id" true - - (* Wildcard value to aid readability. *) - let x = 0 - - let test_definite_host_category_slave = make_test_case - "definite_host_category_slave" - "Tests the definite_host_category function with slaves." 
- (fun () -> - List.iter - (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> - assert_equal - (Int64.of_int expected_result) - (definite_host_category - (mock_slave a s_min d_min d_max s_max))) - [ - (* Varying these parameters SHOULD vary the result: *) - (* Σ available, Σ static_max *) - (0, x, x, x, 0), 0; - (0, x, x, x, 1), -1; - (0, x, x, x, 4), -4; - (1, x, x, x, 0), 1; - (1, x, x, x, 1), 0; - (1, x, x, x, 4), -3; - (4, x, x, x, 0), 4; - (4, x, x, x, 1), 3; - (4, x, x, x, 4), 0; - - (* Varying these parameters should NOT vary the result: *) - (* Σ static_min, Σ dynamic_min, Σ dynamic_max *) - (x, 0, 0, 0, x), 0; - (x, 1, 0, 0, x), 0; - (x, 0, 1, 0, x), 0; - (x, 0, 0, 1, x), 0; - ] - ) - - let test_definite_host_category_master = make_test_case - "definite_host_category_master" - "Tests the definite_host_category function with masters." - (fun () -> - List.iter - (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> - assert_equal - (Int64.of_int expected_result) - (definite_host_category - (mock_master a s_min d_min d_max s_max))) - [ - (* Varying these parameters SHOULD vary the result: *) - (* Σ available, Σ static_max *) - (0, x, x, x, 0), 0; - (0, x, x, x, 1), -1; - (0, x, x, x, 4), -2; - (1, x, x, x, 0), 0; - (1, x, x, x, 1), 0; - (1, x, x, x, 4), -2; - (4, x, x, x, 0), 1; - (4, x, x, x, 1), 1; - (4, x, x, x, 4), 0; - - (* Varying these parameters should NOT vary the result: *) - (* Σ static_min, Σ dynamic_min, Σ dynamic_max *) - (x, 0, 0, 0, x), 0; - (x, 1, 0, 0, x), 0; - (x, 0, 1, 0, x), 0; - (x, 0, 0, 1, x), 0; - ] - ) - - let test_probable_host_category_slave = make_test_case - "probable_host_category_slave" - "Tests the probable_host_category function with slaves." 
- (fun () -> - List.iter - (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> - assert_equal - (Int64.of_int expected_result) - (probable_host_category - (mock_slave a s_min d_min d_max s_max))) - [ - (* Varying these parameters SHOULD vary the result: *) - (* Σ available, Σ dynamic_max *) - (0, x, x, 0, x), 0; - (0, x, x, 1, x), -1; - (0, x, x, 4, x), -4; - (1, x, x, 0, x), 1; - (1, x, x, 1, x), 0; - (1, x, x, 4, x), -3; - (4, x, x, 0, x), 4; - (4, x, x, 1, x), 3; - (4, x, x, 4, x), 0; - - (* Varying these parameters should NOT vary the result: *) - (* Σ static_min, Σ dynamic_min, Σ static_max *) - (x, 0, 0, x, 0), 0; - (x, 1, 0, x, 0), 0; - (x, 0, 1, x, 0), 0; - (x, 0, 0, x, 1), 0; - ] - ) - - let test_probable_host_category_master = make_test_case - "probable_host_category_master" - "Tests the probable_host_category function with masters." - (fun () -> - List.iter - (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> - assert_equal - (Int64.of_int expected_result) - (probable_host_category - (mock_master a s_min d_min d_max s_max))) - [ - (* Varying these parameters SHOULD vary the result: *) - (* Σ available, Σ dynamic_max *) - (0, x, x, 0, x), 0; - (0, x, x, 1, x), -1; - (0, x, x, 4, x), -2; - (1, x, x, 0, x), 0; - (1, x, x, 1, x), 0; - (1, x, x, 4, x), -2; - (4, x, x, 0, x), 1; - (4, x, x, 1, x), 1; - (4, x, x, 4, x), 0; - - (* Varying these parameters should NOT vary the result: *) - (* Σ static_max, Σ static_min, Σ dynamic_min *) - (x, 0, 0, x, 0), 0; - (x, 1, 0, x, 0), 0; - (x, 0, 1, x, 0), 0; - (x, 0, 0, x, 1), 0; - ] - ) - - let test_compression_host_category (category_fn : host_category) mock_host = - let ceiling = compression_ratio_resolution in - List.iter - (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> - assert_equal - (expected_result) - (category_fn - (mock_host a s_min d_min d_max s_max))) - [ - (* Varying these parameters SHOULD vary the result: *) - (* Σ available, Σ dynamic_mix, Σ dynamic_max *) - - (* Vary (Σ 
available) while (Σ dynamic_min = Σ dynamic_max) *) - (0, x, 1, 1, x), -1L; - (1, x, 1, 1, x), ceiling; - (2, x, 1, 1, x), ceiling; - - (* Vary (Σ available) while (Σ dynamic_min ≠ Σ dynamic_max) *) - (-1, x, 0, 4, x), -1L; - ( 0, x, 0, 4, x), 0L; - ( 1, x, 0, 4, x), ceiling ** 1L // 4L; - ( 2, x, 0, 4, x), ceiling ** 2L // 4L; - ( 3, x, 0, 4, x), ceiling ** 3L // 4L; - ( 4, x, 0, 4, x), ceiling; - ( 5, x, 0, 4, x), ceiling; - - (* Varying these parameters should NOT vary the result: *) - (* Σ static_min, Σ static_max *) - (x, 0, x, x, 0), ceiling; - (x, 1, x, x, 0), ceiling; - (x, 0, x, x, 1), ceiling; - ] - - let test_possible_host_category = make_test_case - "possible_host_category" - "Tests the possible_host_category function." - (fun () -> - test_compression_host_category possible_host_category mock_master; - test_compression_host_category possible_host_category mock_slave; - ) - - let test_affinity_host_category = make_test_case - "affinity_host_category" - "Tests the affinity_host_category function." - (fun () -> - (* The affinity host category excludes all non-affinity hosts. *) - let non_matching_category = affinity_host_category ["??"] in - assert_equal (-1L) (non_matching_category (mock_master 0 0 0 0 0)); - assert_equal (-1L) (non_matching_category (mock_slave 0 0 0 0 0)); - - (* The affinity-host-category function values affinity hosts *) - (* identically to the possible-host-category function. *) - let matching_category = affinity_host_category ["id"] in - test_compression_host_category matching_category mock_master; - test_compression_host_category matching_category mock_slave; - ) - - let tests = make_test_suite "Categorisation" - "Functions relating to host categorisation." 
- [ - test_definite_host_category_slave; - test_definite_host_category_master; - test_probable_host_category_slave; - test_probable_host_category_master; - test_possible_host_category; - test_affinity_host_category; - ] + open Construction + + let mock_slave = host_snapshot_summary "id" false + let mock_master = host_snapshot_summary "id" true + + (* Wildcard value to aid readability. *) + let x = 0 + + let test_definite_host_category_slave = make_test_case + "definite_host_category_slave" + "Tests the definite_host_category function with slaves." + (fun () -> + List.iter + (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> + assert_equal + (Int64.of_int expected_result) + (definite_host_category + (mock_slave a s_min d_min d_max s_max))) + [ + (* Varying these parameters SHOULD vary the result: *) + (* Σ available, Σ static_max *) + (0, x, x, x, 0), 0; + (0, x, x, x, 1), -1; + (0, x, x, x, 4), -4; + (1, x, x, x, 0), 1; + (1, x, x, x, 1), 0; + (1, x, x, x, 4), -3; + (4, x, x, x, 0), 4; + (4, x, x, x, 1), 3; + (4, x, x, x, 4), 0; + + (* Varying these parameters should NOT vary the result: *) + (* Σ static_min, Σ dynamic_min, Σ dynamic_max *) + (x, 0, 0, 0, x), 0; + (x, 1, 0, 0, x), 0; + (x, 0, 1, 0, x), 0; + (x, 0, 0, 1, x), 0; + ] + ) + + let test_definite_host_category_master = make_test_case + "definite_host_category_master" + "Tests the definite_host_category function with masters." 
+ (fun () -> + List.iter + (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> + assert_equal + (Int64.of_int expected_result) + (definite_host_category + (mock_master a s_min d_min d_max s_max))) + [ + (* Varying these parameters SHOULD vary the result: *) + (* Σ available, Σ static_max *) + (0, x, x, x, 0), 0; + (0, x, x, x, 1), -1; + (0, x, x, x, 4), -2; + (1, x, x, x, 0), 0; + (1, x, x, x, 1), 0; + (1, x, x, x, 4), -2; + (4, x, x, x, 0), 1; + (4, x, x, x, 1), 1; + (4, x, x, x, 4), 0; + + (* Varying these parameters should NOT vary the result: *) + (* Σ static_min, Σ dynamic_min, Σ dynamic_max *) + (x, 0, 0, 0, x), 0; + (x, 1, 0, 0, x), 0; + (x, 0, 1, 0, x), 0; + (x, 0, 0, 1, x), 0; + ] + ) + + let test_probable_host_category_slave = make_test_case + "probable_host_category_slave" + "Tests the probable_host_category function with slaves." + (fun () -> + List.iter + (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> + assert_equal + (Int64.of_int expected_result) + (probable_host_category + (mock_slave a s_min d_min d_max s_max))) + [ + (* Varying these parameters SHOULD vary the result: *) + (* Σ available, Σ dynamic_max *) + (0, x, x, 0, x), 0; + (0, x, x, 1, x), -1; + (0, x, x, 4, x), -4; + (1, x, x, 0, x), 1; + (1, x, x, 1, x), 0; + (1, x, x, 4, x), -3; + (4, x, x, 0, x), 4; + (4, x, x, 1, x), 3; + (4, x, x, 4, x), 0; + + (* Varying these parameters should NOT vary the result: *) + (* Σ static_min, Σ dynamic_min, Σ static_max *) + (x, 0, 0, x, 0), 0; + (x, 1, 0, x, 0), 0; + (x, 0, 1, x, 0), 0; + (x, 0, 0, x, 1), 0; + ] + ) + + let test_probable_host_category_master = make_test_case + "probable_host_category_master" + "Tests the probable_host_category function with masters." 
+ (fun () -> + List.iter + (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> + assert_equal + (Int64.of_int expected_result) + (probable_host_category + (mock_master a s_min d_min d_max s_max))) + [ + (* Varying these parameters SHOULD vary the result: *) + (* Σ available, Σ dynamic_max *) + (0, x, x, 0, x), 0; + (0, x, x, 1, x), -1; + (0, x, x, 4, x), -2; + (1, x, x, 0, x), 0; + (1, x, x, 1, x), 0; + (1, x, x, 4, x), -2; + (4, x, x, 0, x), 1; + (4, x, x, 1, x), 1; + (4, x, x, 4, x), 0; + + (* Varying these parameters should NOT vary the result: *) + (* Σ static_max, Σ static_min, Σ dynamic_min *) + (x, 0, 0, x, 0), 0; + (x, 1, 0, x, 0), 0; + (x, 0, 1, x, 0), 0; + (x, 0, 0, x, 1), 0; + ] + ) + + let test_compression_host_category (category_fn : host_category) mock_host = + let ceiling = compression_ratio_resolution in + List.iter + (fun ((a, s_min, d_min, d_max, s_max), expected_result) -> + assert_equal + (expected_result) + (category_fn + (mock_host a s_min d_min d_max s_max))) + [ + (* Varying these parameters SHOULD vary the result: *) + (* Σ available, Σ dynamic_mix, Σ dynamic_max *) + + (* Vary (Σ available) while (Σ dynamic_min = Σ dynamic_max) *) + (0, x, 1, 1, x), -1L; + (1, x, 1, 1, x), ceiling; + (2, x, 1, 1, x), ceiling; + + (* Vary (Σ available) while (Σ dynamic_min ≠ Σ dynamic_max) *) + (-1, x, 0, 4, x), -1L; + ( 0, x, 0, 4, x), 0L; + ( 1, x, 0, 4, x), ceiling ** 1L // 4L; + ( 2, x, 0, 4, x), ceiling ** 2L // 4L; + ( 3, x, 0, 4, x), ceiling ** 3L // 4L; + ( 4, x, 0, 4, x), ceiling; + ( 5, x, 0, 4, x), ceiling; + + (* Varying these parameters should NOT vary the result: *) + (* Σ static_min, Σ static_max *) + (x, 0, x, x, 0), ceiling; + (x, 1, x, x, 0), ceiling; + (x, 0, x, x, 1), ceiling; + ] + + let test_possible_host_category = make_test_case + "possible_host_category" + "Tests the possible_host_category function." 
+ (fun () -> + test_compression_host_category possible_host_category mock_master; + test_compression_host_category possible_host_category mock_slave; + ) + + let test_affinity_host_category = make_test_case + "affinity_host_category" + "Tests the affinity_host_category function." + (fun () -> + (* The affinity host category excludes all non-affinity hosts. *) + let non_matching_category = affinity_host_category ["??"] in + assert_equal (-1L) (non_matching_category (mock_master 0 0 0 0 0)); + assert_equal (-1L) (non_matching_category (mock_slave 0 0 0 0 0)); + + (* The affinity-host-category function values affinity hosts *) + (* identically to the possible-host-category function. *) + let matching_category = affinity_host_category ["id"] in + test_compression_host_category matching_category mock_master; + test_compression_host_category matching_category mock_slave; + ) + + let tests = make_test_suite "Categorisation" + "Functions relating to host categorisation." + [ + test_definite_host_category_slave; + test_definite_host_category_master; + test_probable_host_category_slave; + test_probable_host_category_master; + test_possible_host_category; + test_affinity_host_category; + ] end module Selection = struct - open Construction - - let match_no_hosts = fun host -> -1L - let match_all_hosts = fun host -> 1L - let validate_all_hosts = fun host -> true - let select_first_host () = 0.0 - - let test_select_host_from_category = make_test_case - "select_host_from_category" - "Tests the select_host_from_category function." - (fun () -> - assert_equal - (select_host_from_category - match_no_hosts [] validate_all_hosts select_first_host) - (None, []); - assert_equal - (select_host_from_category - match_all_hosts [] validate_all_hosts select_first_host) - (None, []); - ) - - let tests = make_test_suite "Selection" - "Functions relating to host selection." 
- [ - test_select_host_from_category; - ] + open Construction + + let match_no_hosts = fun host -> -1L + let match_all_hosts = fun host -> 1L + let validate_all_hosts = fun host -> true + let select_first_host () = 0.0 + + let test_select_host_from_category = make_test_case + "select_host_from_category" + "Tests the select_host_from_category function." + (fun () -> + assert_equal + (select_host_from_category + match_no_hosts [] validate_all_hosts select_first_host) + (None, []); + assert_equal + (select_host_from_category + match_all_hosts [] validate_all_hosts select_first_host) + (None, []); + ) + + let tests = make_test_suite "Selection" + "Functions relating to host selection." + [ + test_select_host_from_category; + ] end let tests = make_module_test_suite "Xapi_vm_placement" -[ - Utility.tests; - Summarisation.tests; - Categorisation.tests; - Selection.tests; -] + [ + Utility.tests; + Summarisation.tests; + Categorisation.tests; + Selection.tests; + ] let run_from_within_quicktest () = run_from_within_quicktest tests diff --git a/ocaml/xapi/redo_log_alert.ml b/ocaml/xapi/redo_log_alert.ml index 2ce84141000..b019450e192 100644 --- a/ocaml/xapi/redo_log_alert.ml +++ b/ocaml/xapi/redo_log_alert.ml @@ -16,30 +16,30 @@ module R = Debug.Make(struct let name = "redo_log" end) open R let raise_system_alert (name, priority) body = - (* This code may block indefinitely while attempting to look up the pool UUID and send the alert, so do it in a separate thread *) - ignore (Thread.create (fun () -> - debug "Processing redo log event: %s" name; - let __context = Context.make "context" in - let pool = Helpers.get_pool ~__context in - let obj_uuid = Db.Pool.get_uuid ~__context ~self:pool in - let other_config = Db.Pool.get_other_config ~__context ~self:pool in - if List.mem_assoc Xapi_globs.redo_log_alert_key other_config && (List.assoc Xapi_globs.redo_log_alert_key other_config = "true") then begin - debug "Raising alert for pool UUID %s" obj_uuid; - (try ignore 
(Xapi_message.create ~__context ~name ~priority ~cls:`Pool ~obj_uuid ~body) with _ -> ()); - debug "Alert raised" - end else debug "Not raising alert because Pool.other_config:%s <> true" Xapi_globs.redo_log_alert_key; - ) ()) + (* This code may block indefinitely while attempting to look up the pool UUID and send the alert, so do it in a separate thread *) + ignore (Thread.create (fun () -> + debug "Processing redo log event: %s" name; + let __context = Context.make "context" in + let pool = Helpers.get_pool ~__context in + let obj_uuid = Db.Pool.get_uuid ~__context ~self:pool in + let other_config = Db.Pool.get_other_config ~__context ~self:pool in + if List.mem_assoc Xapi_globs.redo_log_alert_key other_config && (List.assoc Xapi_globs.redo_log_alert_key other_config = "true") then begin + debug "Raising alert for pool UUID %s" obj_uuid; + (try ignore (Xapi_message.create ~__context ~name ~priority ~cls:`Pool ~obj_uuid ~body) with _ -> ()); + debug "Alert raised" + end else debug "Not raising alert because Pool.other_config:%s <> true" Xapi_globs.redo_log_alert_key; + ) ()) (* Listen for redo_log events, and raise alerts when they occur. 
*) let loop () = Debug.with_thread_named "Metadata VDI monitor" (fun () -> - while true do - let (name, accessible) = Event.sync (Event.receive Redo_log.redo_log_events) in - let alert_body = Printf.sprintf "Redo log [%s]" name in - if accessible then begin - info "Raising system alert that redo log [%s] is now healthy" name; - raise_system_alert Api_messages.redo_log_healthy alert_body - end else begin - info "Raising system alert to say that we can't access redo log [%s]" name; - raise_system_alert Api_messages.redo_log_broken alert_body - end - done) () + while true do + let (name, accessible) = Event.sync (Event.receive Redo_log.redo_log_events) in + let alert_body = Printf.sprintf "Redo log [%s]" name in + if accessible then begin + info "Raising system alert that redo log [%s] is now healthy" name; + raise_system_alert Api_messages.redo_log_healthy alert_body + end else begin + info "Raising system alert to say that we can't access redo log [%s]" name; + raise_system_alert Api_messages.redo_log_broken alert_body + end + done) () diff --git a/ocaml/xapi/redo_log_alert.mli b/ocaml/xapi/redo_log_alert.mli index be8315a4e84..778cafd1a2d 100644 --- a/ocaml/xapi/redo_log_alert.mli +++ b/ocaml/xapi/redo_log_alert.mli @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Redo-log - *) +*) (** Runs forever waiting for the redo log's status to change i.e. 
for - it to fail or to recover, generating alerts on transitions if + it to fail or to recover, generating alerts on transitions if Pool.other_config:metadata_lun_alerts is set to "true" *) val loop: unit -> unit diff --git a/ocaml/xapi/redo_log_usage.ml b/ocaml/xapi/redo_log_usage.ml index ffc8f02b40e..ed432c6dd74 100644 --- a/ocaml/xapi/redo_log_usage.ml +++ b/ocaml/xapi/redo_log_usage.ml @@ -29,31 +29,31 @@ let read_from_redo_log log staging_path db_ref = let read_db gen_count fd expected_length latest_response_time = (* Read the database from the fd into a file *) let temp_file = Filename.temp_file "from-vdi" ".db" in - Stdext.Pervasiveext.finally + Stdext.Pervasiveext.finally (fun () -> - let outfd = Unix.openfile temp_file [Unix.O_CREAT; Unix.O_WRONLY; Unix.O_TRUNC] 0o755 in - (* ideally, the reading would also respect the latest_response_time *) - let total_read = Stdext.Unixext.read_data_in_chunks (fun str length -> Stdext.Unixext.time_limited_write outfd length str latest_response_time) fd in - R.debug "Reading database from fd into file %s" temp_file; + let outfd = Unix.openfile temp_file [Unix.O_CREAT; Unix.O_WRONLY; Unix.O_TRUNC] 0o755 in + (* ideally, the reading would also respect the latest_response_time *) + let total_read = Stdext.Unixext.read_data_in_chunks (fun str length -> Stdext.Unixext.time_limited_write outfd length str latest_response_time) fd in + R.debug "Reading database from fd into file %s" temp_file; - (* Check that we read the expected amount of data *) - R.debug "We read %d bytes and were told to expect %d bytes" total_read expected_length; - if total_read <> expected_length then raise (DatabaseWrongSize (expected_length, total_read)); + (* Check that we read the expected amount of data *) + R.debug "We read %d bytes and were told to expect %d bytes" total_read expected_length; + if total_read <> expected_length then raise (DatabaseWrongSize (expected_length, total_read)); - (* Read from the file into the cache *) - let conn = 
Parse_db_conf.make temp_file in - (* ideally, the reading from the file would also respect the latest_response_time *) - let db = Backend_xml.populate (Datamodel_schema.of_datamodel ()) conn in - Db_ref.update_database db_ref (fun _ -> db); + (* Read from the file into the cache *) + let conn = Parse_db_conf.make temp_file in + (* ideally, the reading from the file would also respect the latest_response_time *) + let db = Backend_xml.populate (Datamodel_schema.of_datamodel ()) conn in + Db_ref.update_database db_ref (fun _ -> db); - R.debug "Finished reading database from %s into cache (generation = %Ld)" temp_file gen_count; + R.debug "Finished reading database from %s into cache (generation = %Ld)" temp_file gen_count; - (* Set the generation count *) - latest_generation := Some gen_count + (* Set the generation count *) + latest_generation := Some gen_count ) (fun () -> - (* Remove the temporary file *) - Stdext.Unixext.unlink_safe temp_file + (* Remove the temporary file *) + Stdext.Unixext.unlink_safe temp_file ) in @@ -64,7 +64,7 @@ let read_from_redo_log log staging_path db_ref = match !latest_generation with | None -> raise NoGeneration (* we should have already read in a database with a generation count *) | Some g -> - if gen_count > g + if gen_count > g then latest_generation := Some gen_count else raise DeltaTooOld (* the delta should be at least as new as the database to which it applies *) in @@ -72,12 +72,12 @@ let read_from_redo_log log staging_path db_ref = R.debug "Reading from redo log"; Redo_log.apply read_db read_delta log; - (* 3. Write the database and generation to a file - * Note: if there were no deltas applied then this is semantically - * equivalent to copying the temp_file used above in read_db rather than - * deleting it. - * Note: we don't do this using the DB lock since this is only executed at - * startup, before the database engine has been started, so there's no + (* 3. 
Write the database and generation to a file + * Note: if there were no deltas applied then this is semantically + * equivalent to copying the temp_file used above in read_db rather than + * deleting it. + * Note: we don't do this using the DB lock since this is only executed at + * startup, before the database engine has been started, so there's no * danger of conflicting writes. *) R.debug "Staging redo log to file %s" staging_path; (* Remove any existing file *) diff --git a/ocaml/xapi/redo_log_usage.mli b/ocaml/xapi/redo_log_usage.mli index 9c3615beabb..2aae25dadaa 100644 --- a/ocaml/xapi/redo_log_usage.mli +++ b/ocaml/xapi/redo_log_usage.mli @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** +(** * @group Redo-log - *) - -(** Connect to the block device and write the latest version of the database +*) + +(** Connect to the block device and write the latest version of the database * on it to a file with a given name. 
*) val read_from_redo_log : Redo_log.redo_log -> string -> Db_ref.t -> unit diff --git a/ocaml/xapi/remote_requests.ml b/ocaml/xapi/remote_requests.ml index 3feaaafd508..d034fee4008 100644 --- a/ocaml/xapi/remote_requests.ml +++ b/ocaml/xapi/remote_requests.ml @@ -64,7 +64,7 @@ let make_queued_request task verify_cert host port request handler verify_cert = verify_cert; host = host; port = port; - request = request; + request = request; handler = handler; resp = resp; resp_mutex = resp_mutex; @@ -77,7 +77,7 @@ let request_queue : queued_request list ref = ref [] let request_mutex = Mutex.create() let request_cond = Condition.create() -let signal_result' req result () = +let signal_result' req result () = if !(req.resp) = NoResponse then begin req.resp := result; @@ -89,34 +89,34 @@ let signal_result req result = let watcher_thread = function | (__context, timeout, delay, req) -> - ignore (Delay.wait delay timeout); - Mutex.execute req.resp_mutex - (fun () -> - if !(req.resp) = NoResponse then - begin - warn "Remote request timed out"; - let resources = Locking_helpers.Thread_state.get_acquired_resources_by_task req.task in - List.iter Locking_helpers.kill_resource resources; - signal_result' req (Exception Timed_out) () - end) + ignore (Delay.wait delay timeout); + Mutex.execute req.resp_mutex + (fun () -> + if !(req.resp) = NoResponse then + begin + warn "Remote request timed out"; + let resources = Locking_helpers.Thread_state.get_acquired_resources_by_task req.task in + List.iter Locking_helpers.kill_resource resources; + signal_result' req (Exception Timed_out) () + end) let handle_request req = try - let open Xmlrpc_client in - let transport = SSL(SSL.make ~verify_cert:req.verify_cert ~task_id:(Ref.string_of req.task) (), req.host, req.port) in - with_transport transport - (with_http req.request - (fun (response, s) -> - req.handler response s; - signal_result req Success - ) - ) + let open Xmlrpc_client in + let transport = SSL(SSL.make 
~verify_cert:req.verify_cert ~task_id:(Ref.string_of req.task) (), req.host, req.port) in + with_transport transport + (with_http req.request + (fun (response, s) -> + req.handler response s; + signal_result req Success + ) + ) with - | exn -> - if req.enable_log then - warn "Exception handling remote request %s: %s" (Opt.default "" req.request.Http.Request.body) - (ExnHelper.string_of_exn exn); - signal_result req (Exception exn) + | exn -> + if req.enable_log then + warn "Exception handling remote request %s: %s" (Opt.default "" req.request.Http.Request.body) + (ExnHelper.string_of_exn exn); + signal_result req (Exception exn) let handle_requests () = while Mutex.execute request_mutex (fun () -> not !shutting_down) do @@ -128,18 +128,18 @@ let handle_requests () = Condition.wait request_cond request_mutex; done; let q = !request_queue in - request_queue := List.tl q; - List.hd q) + request_queue := List.tl q; + List.hd q) in - handle_request req + handle_request req with - | exn -> - error "Exception in handle_requests thread! %s" - (ExnHelper.string_of_exn exn); - Thread.delay 30. + | exn -> + error "Exception in handle_requests thread! %s" + (ExnHelper.string_of_exn exn); + Thread.delay 30. 
done -let start_watcher __context timeout delay req = +let start_watcher __context timeout delay req = ignore (Thread.create watcher_thread (__context, timeout, delay, req)) let queue_request req = @@ -154,28 +154,28 @@ let perform_request ~__context ~timeout ~verify_cert ~host ~port let resp = ref NoResponse in let resp_mutex = Mutex.create() in let resp_cond = Condition.create() in - Mutex.execute resp_mutex - (fun () -> - let delay = Delay.make () in - let req = - make_queued_request - task verify_cert host port request handler - resp resp_mutex resp_cond enable_log - in - start_watcher __context timeout delay req; - queue_request req; - - Condition.wait resp_cond resp_mutex; - Delay.signal delay; - - match !resp with - | Success -> - () - | Exception exn -> - raise exn - | NoResponse -> - error "No response in perform_request!"; - raise Internal_error) + Mutex.execute resp_mutex + (fun () -> + let delay = Delay.make () in + let req = + make_queued_request + task verify_cert host port request handler + resp resp_mutex resp_cond enable_log + in + start_watcher __context timeout delay req; + queue_request req; + + Condition.wait resp_cond resp_mutex; + Delay.signal delay; + + match !resp with + | Success -> + () + | Exception exn -> + raise exn + | NoResponse -> + error "No response in perform_request!"; + raise Internal_error) let stop_request_thread () = Mutex.execute request_mutex @@ -187,22 +187,22 @@ let read_response result response s = try result := Unixext.string_of_fd s with - | Unix.Unix_error(Unix.ECONNRESET, _, _) -> - raise Xmlrpc_client.Connection_reset + | Unix.Unix_error(Unix.ECONNRESET, _, _) -> + raise Xmlrpc_client.Connection_reset let send_test_post ~__context ~host ~port ~body = try let result = ref "" in - let request = Xapi_http.http_request ~keep_alive:false ~body - ~headers:["Host", host] Http.Post "/" in - perform_request ~__context ~timeout:30.0 ~verify_cert:true - ~host ~port:(Int64.to_int port) ~request - ~handler:(read_response 
result) ~enable_log:true; - !result + let request = Xapi_http.http_request ~keep_alive:false ~body + ~headers:["Host", host] Http.Post "/" in + perform_request ~__context ~timeout:30.0 ~verify_cert:true + ~host ~port:(Int64.to_int port) ~request + ~handler:(read_response result) ~enable_log:true; + !result with - | Timed_out -> - raise (Api_errors.Server_error - (Api_errors.wlb_timeout, ["30.0"])) - | Stunnel.Stunnel_verify_error reason -> - raise (Api_errors.Server_error - (Api_errors.ssl_verify_error, [reason])) + | Timed_out -> + raise (Api_errors.Server_error + (Api_errors.wlb_timeout, ["30.0"])) + | Stunnel.Stunnel_verify_error reason -> + raise (Api_errors.Server_error + (Api_errors.ssl_verify_error, [reason])) diff --git a/ocaml/xapi/rrdd_helper.ml b/ocaml/xapi/rrdd_helper.ml index 3d1a5e32a58..2c8d4c50031 100644 --- a/ocaml/xapi/rrdd_helper.ml +++ b/ocaml/xapi/rrdd_helper.ml @@ -16,12 +16,12 @@ open Data_source let to_API_data_source (ds : t) = { - API.data_source_name_label = ds.name; - data_source_name_description = ds.description; - data_source_enabled = ds.enabled; - data_source_standard = ds.standard; - data_source_units = ds.units; - data_source_min = ds.min; - data_source_max = ds.max; - data_source_value = 0.; + API.data_source_name_label = ds.name; + data_source_name_description = ds.description; + data_source_enabled = ds.enabled; + data_source_standard = ds.standard; + data_source_units = ds.units; + data_source_min = ds.min; + data_source_max = ds.max; + data_source_value = 0.; } diff --git a/ocaml/xapi/rrdd_proxy.ml b/ocaml/xapi/rrdd_proxy.ml index 174b477737e..c4d83b75fee 100644 --- a/ocaml/xapi/rrdd_proxy.ml +++ b/ocaml/xapi/rrdd_proxy.ml @@ -13,12 +13,12 @@ *) (** * @group Performance Monitoring - *) +*) (* This module is used for easier interaction of xapi with rrdd. Mainly, it * looks up the required information that is available to xapi, and calls * same-named methods in rrdd. 
- *) +*) module D = Debug.Make(struct let name="rrdd_proxy" end) open D @@ -27,218 +27,218 @@ module Rrdd = Rrd_client.Client (* Helper methods. Should probably be moved to the Http.Request module. *) let get_query_string_from_query ~(query : (string * string) list) : string = - String.concat "&" (List.map (fun (k, v) -> k ^ "=" ^ v) query) + String.concat "&" (List.map (fun (k, v) -> k ^ "=" ^ v) query) let get_query_string ~(req : Http.Request.t) : string = - get_query_string_from_query ~query:req.Http.Request.query + get_query_string_from_query ~query:req.Http.Request.query let make_url_from_query ~(address : string) ~(uri : string) - ~(query : (string * string) list) : string = - let query_string = get_query_string_from_query query in - Printf.sprintf "https://%s%s?%s" address uri query_string + ~(query : (string * string) list) : string = + let query_string = get_query_string_from_query query in + Printf.sprintf "https://%s%s?%s" address uri query_string let make_url ~(address : string) ~(req : Http.Request.t) : string = - let open Http.Request in - make_url_from_query ~address ~uri:req.uri ~query:req.query + let open Http.Request in + make_url_from_query ~address ~uri:req.uri ~query:req.query let fail_req_with (s : Unix.file_descr) msg (http_err : unit -> string list) = - error msg; - Http_svr.headers s (http_err ()) + error msg; + Http_svr.headers s (http_err ()) (* End of helper methods. *) (* If the host contains the RRD for the requested VM then simply forward the * HTTP request to rrdd_http_handler. Otherwise, we redirect to the host that * contains the corresponding VM. The last resort is to unarchive the RRD on * the master. The exact logic can be seen under "The logic." below. 
- *) +*) let get_vm_rrd_forwarder (req : Http.Request.t) (s : Unix.file_descr) _ = - debug "put_rrd_forwarder: start"; - let query = req.Http.Request.query in - req.Http.Request.close <- true; - let vm_uuid = List.assoc "uuid" query in - if not (List.mem_assoc "ref" query) && not (List.mem_assoc "uuid" query) then - fail_req_with s "get_vm_rrd: missing the 'uuid' parameter" - Http.http_400_badrequest - else if Rrdd.has_vm_rrd ~vm_uuid then ( - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - ) else ( - Xapi_http.with_context ~dummy:true "Get VM RRD." req s - (fun __context -> - let open Http.Request in - (* List of possible actions. *) - let read_at_owner owner = - let address = Db.Host.get_address ~__context ~self:owner in - let url = make_url ~address ~req in - Http_svr.headers s (Http.http_302_redirect url) in - let unarchive_at_master () = - let address = Pool_role.get_master_address () in - let query = (Constants.rrd_unarchive, "") :: query in - let url = make_url_from_query ~address ~uri:req.uri ~query in - Http_svr.headers s (Http.http_302_redirect url) in - let unarchive () = - let req = {req with uri = Constants.rrd_unarchive_uri} in - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) in - (* List of conditions involved. *) - let is_unarchive_request = List.mem_assoc Constants.rrd_unarchive query in - let is_master = Pool_role.is_master () in - let is_owner_online owner = Db.is_valid_ref __context owner in - let is_xapi_initialising = List.mem_assoc "dbsync" query in - (* The logic. 
*) - if is_unarchive_request then unarchive () - else ( - let localhost_uuid = Helpers.get_localhost_uuid () in - let vm_ref = Db.VM.get_by_uuid ~__context ~uuid:vm_uuid in - let owner = Db.VM.get_resident_on ~__context ~self:vm_ref in - let owner_uuid = Ref.string_of owner in - let is_owner_localhost = (owner_uuid = localhost_uuid) in - if is_owner_localhost then ( - if is_master then unarchive () else unarchive_at_master () - ) else ( - if is_owner_online owner && not is_xapi_initialising - then read_at_owner owner - else unarchive_at_master () - ) - ) - ) - ) + debug "put_rrd_forwarder: start"; + let query = req.Http.Request.query in + req.Http.Request.close <- true; + let vm_uuid = List.assoc "uuid" query in + if not (List.mem_assoc "ref" query) && not (List.mem_assoc "uuid" query) then + fail_req_with s "get_vm_rrd: missing the 'uuid' parameter" + Http.http_400_badrequest + else if Rrdd.has_vm_rrd ~vm_uuid then ( + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + ) else ( + Xapi_http.with_context ~dummy:true "Get VM RRD." req s + (fun __context -> + let open Http.Request in + (* List of possible actions. *) + let read_at_owner owner = + let address = Db.Host.get_address ~__context ~self:owner in + let url = make_url ~address ~req in + Http_svr.headers s (Http.http_302_redirect url) in + let unarchive_at_master () = + let address = Pool_role.get_master_address () in + let query = (Constants.rrd_unarchive, "") :: query in + let url = make_url_from_query ~address ~uri:req.uri ~query in + Http_svr.headers s (Http.http_302_redirect url) in + let unarchive () = + let req = {req with uri = Constants.rrd_unarchive_uri} in + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) in + (* List of conditions involved. 
*) + let is_unarchive_request = List.mem_assoc Constants.rrd_unarchive query in + let is_master = Pool_role.is_master () in + let is_owner_online owner = Db.is_valid_ref __context owner in + let is_xapi_initialising = List.mem_assoc "dbsync" query in + (* The logic. *) + if is_unarchive_request then unarchive () + else ( + let localhost_uuid = Helpers.get_localhost_uuid () in + let vm_ref = Db.VM.get_by_uuid ~__context ~uuid:vm_uuid in + let owner = Db.VM.get_resident_on ~__context ~self:vm_ref in + let owner_uuid = Ref.string_of owner in + let is_owner_localhost = (owner_uuid = localhost_uuid) in + if is_owner_localhost then ( + if is_master then unarchive () else unarchive_at_master () + ) else ( + if is_owner_online owner && not is_xapi_initialising + then read_at_owner owner + else unarchive_at_master () + ) + ) + ) + ) (* Forward the request for host RRD data to the RRDD HTTP handler. If the host * is initialising, send the unarchive command to the host instead. - *) +*) let get_host_rrd_forwarder (req: Http.Request.t) (s : Unix.file_descr) _ = - debug "get_host_rrd_forwarder"; - let query = req.Http.Request.query in - req.Http.Request.close <- true; - Xapi_http.with_context ~dummy:true "Get Host RRD." req s - (fun __context -> - debug "get_host_rrd_forwarder: obtained context"; - if List.mem_assoc "dbsync" query then ( (* Host initialising. *) - debug "get_host_rrd_forwarder: dbsync"; - if not (List.mem_assoc "uuid" query) then ( - fail_req_with s "get_host_rrd: missing the 'uuid' parameter" - Http.http_400_badrequest - ) else ( - debug "get_host_rrd_forwarder: forward to unarchive"; - let req = {req with Http.Request.uri = Constants.rrd_unarchive_uri} in - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - ) - ) else ( (* Normal request. 
*) - debug "get_host_rrd_forwarder: normal"; - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - ) - ) + debug "get_host_rrd_forwarder"; + let query = req.Http.Request.query in + req.Http.Request.close <- true; + Xapi_http.with_context ~dummy:true "Get Host RRD." req s + (fun __context -> + debug "get_host_rrd_forwarder: obtained context"; + if List.mem_assoc "dbsync" query then ( (* Host initialising. *) + debug "get_host_rrd_forwarder: dbsync"; + if not (List.mem_assoc "uuid" query) then ( + fail_req_with s "get_host_rrd: missing the 'uuid' parameter" + Http.http_400_badrequest + ) else ( + debug "get_host_rrd_forwarder: forward to unarchive"; + let req = {req with Http.Request.uri = Constants.rrd_unarchive_uri} in + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + ) + ) else ( (* Normal request. *) + debug "get_host_rrd_forwarder: normal"; + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + ) + ) (* Forward the request for SR RRD data to the RRDD HTTP handler. *) let get_sr_rrd_forwarder (req: Http.Request.t) (s: Unix.file_descr) _ = - debug "get_sr_rrd_forwarder"; - let query = req.Http.Request.query in - req.Http.Request.close <- true; - Xapi_http.with_context ~dummy:true "Get SR RRD." req s - (fun __context -> - debug "get_sr_rrd_forwarder: obtained context"; - if not (List.mem_assoc "uuid" query) then - fail_req_with s "get_sr_rrd: missing the 'uuid' parameter" - Http.http_400_badrequest - else - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - ) + debug "get_sr_rrd_forwarder"; + let query = req.Http.Request.query in + req.Http.Request.close <- true; + Xapi_http.with_context ~dummy:true "Get SR RRD." 
req s + (fun __context -> + debug "get_sr_rrd_forwarder: obtained context"; + if not (List.mem_assoc "uuid" query) then + fail_req_with s "get_sr_rrd: missing the 'uuid' parameter" + Http.http_400_badrequest + else + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + ) (* Forward the request for obtaining RRD data updates to the RRDD HTTP handler. *) let get_rrd_updates_forwarder (req: Http.Request.t) (s : Unix.file_descr) _ = - (* Do not log this event, since commonly called. *) - let query = req.Http.Request.query in - req.Http.Request.close <- true; - Xapi_http.with_context ~dummy:true "Get RRD updates." req s - (fun __context -> - if List.mem_assoc "start" query then - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - else - fail_req_with s "get_rrd_updates: missing the 'start' parameter" - Http.http_400_badrequest - ) + (* Do not log this event, since commonly called. *) + let query = req.Http.Request.query in + req.Http.Request.close <- true; + Xapi_http.with_context ~dummy:true "Get RRD updates." req s + (fun __context -> + if List.mem_assoc "start" query then + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + else + fail_req_with s "get_rrd_updates: missing the 'start' parameter" + Http.http_400_badrequest + ) let vm_uuid_to_domid ~__context ~(uuid : string) : int = - let vm = Db.VM.get_by_uuid ~__context ~uuid in - Int64.to_int (Db.VM.get_domid ~__context ~self:vm) + let vm = Db.VM.get_by_uuid ~__context ~uuid in + Int64.to_int (Db.VM.get_domid ~__context ~self:vm) (* Given a uuid, this function returns None if uuid is not recognised; * otherwise, it returns (false, domid) for a VM, and (true, 0) for a host. 
*) let uuid_to_domid ~__context ~(uuid : string) : (bool * int) option = - try - ignore (Db.VM.get_by_uuid ~__context ~uuid); - Some (false, vm_uuid_to_domid ~__context ~uuid) - with _ -> ( - try ignore (Db.Host.get_by_uuid ~__context ~uuid); Some (true, 0) - with _ -> None - ) + try + ignore (Db.VM.get_by_uuid ~__context ~uuid); + Some (false, vm_uuid_to_domid ~__context ~uuid) + with _ -> ( + try ignore (Db.Host.get_by_uuid ~__context ~uuid); Some (true, 0) + with _ -> None + ) (* Forward the request for storing RRD. In case an archive is required, the * request is redirected to the master. See * Rrdd_http_handler.put_rrd_handler. *) let put_rrd_forwarder (req : Http.Request.t) (s : Unix.file_descr) _ = - debug "put_rrd_forwarder"; - let query = req.Http.Request.query in - req.Http.Request.close <- true; - let has_uuid = List.mem_assoc "uuid" query in - let should_archive = List.mem_assoc "archive" query in - let is_master = Pool_role.is_master () in - if not has_uuid then ( - fail_req_with s "put_rrd: missing the 'uuid' parameter" - Http.http_400_badrequest; - ) else if should_archive && not is_master then ( - let address = Pool_role.get_master_address () in - let url = make_url ~address ~req in - Http_svr.headers s (Http.http_302_redirect url) - ) else Xapi_http.with_context ~dummy:true "Put VM RRD." 
req s - (fun __context -> - let uuid = List.assoc "uuid" query in - match uuid_to_domid ~__context ~uuid, should_archive with - | None, _ -> - fail_req_with s "put_rrd: invalid 'uuid' parameter" - Http.http_404_missing - | Some (true, _), false -> - fail_req_with s "put_rrd: cannot archive host RRD" - Http.http_400_badrequest - | Some (is_host, domid), _ -> - let is_host_key_val = "is_host", string_of_bool is_host in - let domid_key_val = "domid", string_of_int domid in - let req = {req with - Http.Request.query = is_host_key_val::domid_key_val::query - } in - ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) - ) + debug "put_rrd_forwarder"; + let query = req.Http.Request.query in + req.Http.Request.close <- true; + let has_uuid = List.mem_assoc "uuid" query in + let should_archive = List.mem_assoc "archive" query in + let is_master = Pool_role.is_master () in + if not has_uuid then ( + fail_req_with s "put_rrd: missing the 'uuid' parameter" + Http.http_400_badrequest; + ) else if should_archive && not is_master then ( + let address = Pool_role.get_master_address () in + let url = make_url ~address ~req in + Http_svr.headers s (Http.http_302_redirect url) + ) else Xapi_http.with_context ~dummy:true "Put VM RRD." 
req s + (fun __context -> + let uuid = List.assoc "uuid" query in + match uuid_to_domid ~__context ~uuid, should_archive with + | None, _ -> + fail_req_with s "put_rrd: invalid 'uuid' parameter" + Http.http_404_missing + | Some (true, _), false -> + fail_req_with s "put_rrd: cannot archive host RRD" + Http.http_400_badrequest + | Some (is_host, domid), _ -> + let is_host_key_val = "is_host", string_of_bool is_host in + let domid_key_val = "domid", string_of_int domid in + let req = {req with + Http.Request.query = is_host_key_val::domid_key_val::query + } in + ignore (Xapi_services.hand_over_connection req s !(Rrd_interface.forwarded_path)) + ) let host_for_vm ~__context ~vm_uuid = - let vm = Db.VM.get_by_uuid ~__context ~uuid:vm_uuid in - Db.VM.get_resident_on ~__context ~self:vm + let vm = Db.VM.get_by_uuid ~__context ~uuid:vm_uuid in + Db.VM.get_resident_on ~__context ~self:vm let push_rrd ~__context ~(vm_uuid : string) : unit = - let vm_host = host_for_vm ~__context ~vm_uuid in - if vm_host = (Helpers.get_localhost ~__context) then - let domid = vm_uuid_to_domid ~__context ~uuid:vm_uuid in - log_and_ignore_exn (fun () -> Rrdd.push_rrd_local ~vm_uuid ~domid) - else - let remote_address = Db.Host.get_address ~__context ~self:vm_host in - log_and_ignore_exn (fun () -> Rrdd.push_rrd_remote ~vm_uuid ~remote_address) + let vm_host = host_for_vm ~__context ~vm_uuid in + if vm_host = (Helpers.get_localhost ~__context) then + let domid = vm_uuid_to_domid ~__context ~uuid:vm_uuid in + log_and_ignore_exn (fun () -> Rrdd.push_rrd_local ~vm_uuid ~domid) + else + let remote_address = Db.Host.get_address ~__context ~self:vm_host in + log_and_ignore_exn (fun () -> Rrdd.push_rrd_remote ~vm_uuid ~remote_address) let migrate_rrd ~__context ?remote_address ?session_id ~vm_uuid ~host_uuid () = - let remote_address = match remote_address with - | None -> Db.Host.get_address ~__context ~self:(Ref.of_string host_uuid) - | Some a -> a - in - log_and_ignore_exn (fun () -> - 
Rrdd.migrate_rrd ~remote_address ?session_id ~vm_uuid ~host_uuid - ) + let remote_address = match remote_address with + | None -> Db.Host.get_address ~__context ~self:(Ref.of_string host_uuid) + | Some a -> a + in + log_and_ignore_exn (fun () -> + Rrdd.migrate_rrd ~remote_address ?session_id ~vm_uuid ~host_uuid + ) module Deprecated = struct - let get_timescale ~__context = - let host = Helpers.get_localhost ~__context in - let other_config = Db.Host.get_other_config ~__context ~self:host in - try int_of_string (List.assoc Constants.rrd_update_interval other_config) - with _ -> 0 - - let load_rrd ~__context ~uuid = - let master_address = try Some (Pool_role.get_master_address ()) with _ -> None in - let timescale = get_timescale ~__context in - log_and_ignore_exn (fun () -> Rrdd.Deprecated.load_rrd ~uuid ~master_address ~timescale) + let get_timescale ~__context = + let host = Helpers.get_localhost ~__context in + let other_config = Db.Host.get_other_config ~__context ~self:host in + try int_of_string (List.assoc Constants.rrd_update_interval other_config) + with _ -> 0 + + let load_rrd ~__context ~uuid = + let master_address = try Some (Pool_role.get_master_address ()) with _ -> None in + let timescale = get_timescale ~__context in + log_and_ignore_exn (fun () -> Rrdd.Deprecated.load_rrd ~uuid ~master_address ~timescale) end diff --git a/ocaml/xapi/slave_backup.ml b/ocaml/xapi/slave_backup.ml index 490df095b68..1ad1cd61192 100644 --- a/ocaml/xapi/slave_backup.ml +++ b/ocaml/xapi/slave_backup.ml @@ -13,7 +13,7 @@ *) (** * @group Pool Management - *) +*) (** Immediately fetch a database backup from the master. 
If a flush_spec is given, with a list of db connections, then the backup is flushed to those connections; if no flush spec is given then the backup is flushed to all @@ -42,25 +42,25 @@ let tick_backup_write_table() = with_backup_lock (fun () -> Hashtbl.iter - (fun dbconn write_entry -> - match dbconn.Parse_db_conf.mode with - Parse_db_conf.Write_limit -> - if (int_of_float (Unix.gettimeofday() -. write_entry.period_start_time)) > dbconn.Parse_db_conf.write_limit_period then - Hashtbl.replace backup_write_table dbconn {period_start_time=Unix.gettimeofday(); writes_this_period=0} - | _ -> () - ) - backup_write_table) + (fun dbconn write_entry -> + match dbconn.Parse_db_conf.mode with + Parse_db_conf.Write_limit -> + if (int_of_float (Unix.gettimeofday() -. write_entry.period_start_time)) > dbconn.Parse_db_conf.write_limit_period then + Hashtbl.replace backup_write_table dbconn {period_start_time=Unix.gettimeofday(); writes_this_period=0} + | _ -> () + ) + backup_write_table) (* Can we write to specified connection *) let can_we_write dbconn = with_backup_lock (fun () -> match dbconn.Parse_db_conf.mode with - Parse_db_conf.No_limit -> true + Parse_db_conf.No_limit -> true | Parse_db_conf.Write_limit -> - let write_entry = lookup_write_entry dbconn in - (* we can write if we haven't used up all our write-cycles for this period: *) - (write_entry.writes_this_period < dbconn.Parse_db_conf.write_limit_write_cycles) + let write_entry = lookup_write_entry dbconn in + (* we can write if we haven't used up all our write-cycles for this period: *) + (write_entry.writes_this_period < dbconn.Parse_db_conf.write_limit_write_cycles) ) (* Update writes_this_period for dbconn *) diff --git a/ocaml/xapi/sm.ml b/ocaml/xapi/sm.ml index 811d264c6a4..85d11a7a8ae 100644 --- a/ocaml/xapi/sm.ml +++ b/ocaml/xapi/sm.ml @@ -13,7 +13,7 @@ *) (** Storage manager interface * @group Storage - *) +*) open Stdext open Xstringext @@ -32,10 +32,10 @@ exception Unknown_driver of string exception 
MasterOnly let supported_drivers () = - Hashtbl.fold (fun name _ acc -> name :: acc) driver_info_cache [] + Hashtbl.fold (fun name _ acc -> name :: acc) driver_info_cache [] (** Scans the plugin directory and registers everything it finds there *) -let register () = +let register () = let add_entry driver info = let name = String.lowercase driver in Hashtbl.replace driver_info_cache name info @@ -45,14 +45,14 @@ let register () = let info_of_driver (name: string) = - let name = String.lowercase name in - if not(Hashtbl.mem driver_info_cache name) - then raise (Unknown_driver name) - else (Hashtbl.find driver_info_cache name) + let name = String.lowercase name in + if not(Hashtbl.mem driver_info_cache name) + then raise (Unknown_driver name) + else (Hashtbl.find driver_info_cache name) let features_of_driver (name: string) = (info_of_driver name).sr_driver_features -let driver_filename driver = +let driver_filename driver = let info=info_of_driver driver in info.sr_driver_filename @@ -69,9 +69,9 @@ let debug operation driver msg = debug "SM %s %s %s" driver operation msg let srmaster_only (_,dconf) = - let is_srmaster = try List.assoc "SRmaster" dconf = "true" with _ -> false in - if not is_srmaster - then (warn "srmaster_only: Raising MasterOnly exception"; raise MasterOnly) + let is_srmaster = try List.assoc "SRmaster" dconf = "true" with _ -> false in + if not is_srmaster + then (warn "srmaster_only: Raising MasterOnly exception"; raise MasterOnly) let sr_create dconf driver sr size = let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_create" [ Int64.to_string size ] in @@ -87,61 +87,61 @@ let sr_delete dconf driver sr = let serialize_attach_detach = Locking_helpers.Named_mutex.create "sr_attach/detach" let sr_attach dconf driver sr = - Locking_helpers.Named_mutex.execute serialize_attach_detach + Locking_helpers.Named_mutex.execute serialize_attach_detach (fun ()-> debug "sr_attach" driver (sprintf "sr=%s" (Ref.string_of sr)); let call = Sm_exec.make_call 
~sr_ref:sr dconf "sr_attach" [] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call)) let sr_detach dconf driver sr = - Locking_helpers.Named_mutex.execute serialize_attach_detach + Locking_helpers.Named_mutex.execute serialize_attach_detach (fun ()-> debug "sr_detach" driver (sprintf "sr=%s" (Ref.string_of sr)); let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_detach" [] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call)); Threadext.Mutex.execute sr_content_type_cache_m (fun () -> Hashtbl.remove sr_content_type_cache sr) - + let sr_probe dconf driver sr_sm_config = if List.mem_assoc Sr_probe (features_of_driver driver) then - Locking_helpers.Named_mutex.execute serialize_attach_detach + Locking_helpers.Named_mutex.execute serialize_attach_detach (fun ()-> - debug "sr_probe" driver (sprintf "sm_config=[%s]" (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) sr_sm_config))); - let call = Sm_exec.make_call ~sr_sm_config dconf "sr_probe" [] in - (* sr_probe returns an XML document marshalled within an XMLRPC string *) - XMLRPC.From.string (Sm_exec.exec_xmlrpc (driver_filename driver) call)) + debug "sr_probe" driver (sprintf "sm_config=[%s]" (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) sr_sm_config))); + let call = Sm_exec.make_call ~sr_sm_config dconf "sr_probe" [] in + (* sr_probe returns an XML document marshalled within an XMLRPC string *) + XMLRPC.From.string (Sm_exec.exec_xmlrpc (driver_filename driver) call)) else raise (Api_errors.Server_error (Api_errors.sr_backend_failure, [ ("Operation 'sr_probe' not supported by this SR type"); ""; ""])) -let sr_scan dconf driver sr = - debug "sr_scan" driver (sprintf "sr=%s" (Ref.string_of sr)); - srmaster_only dconf; - let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_scan" [] in - Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) +let sr_scan dconf driver sr = + debug "sr_scan" driver (sprintf "sr=%s" (Ref.string_of sr)); + 
srmaster_only dconf; + let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_scan" [] in + Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) let sr_content_type dconf driver sr = debug "sr_content_type" driver (sprintf "sr=%s" (Ref.string_of sr)); let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_content_type" [] in Sm_exec.parse_sr_content_type (Sm_exec.exec_xmlrpc (driver_filename driver) call) -let sr_update dconf driver sr = +let sr_update dconf driver sr = debug "sr_update" driver (sprintf "sr=%s" (Ref.string_of sr)); let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_update" [] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) let vdi_create dconf driver sr sm_config vdi_type size name_label name_description metadata_of_pool is_a_snapshot snapshot_time snapshot_of read_only = debug "vdi_create" driver (sprintf "sr=%s sm_config=[%s] type=[%s] size=%Ld" (Ref.string_of sr) (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) sm_config)) vdi_type size); - srmaster_only dconf; + srmaster_only dconf; let call = Sm_exec.make_call ~sr_ref:sr ~vdi_sm_config:sm_config ~vdi_type dconf "vdi_create" [ sprintf "%Lu" size; name_label ; name_description; metadata_of_pool; string_of_bool is_a_snapshot; snapshot_time; snapshot_of; string_of_bool read_only ] in Sm_exec.parse_vdi_info (Sm_exec.exec_xmlrpc (driver_filename driver) call) -let vdi_update dconf driver sr vdi = +let vdi_update dconf driver sr vdi = debug "vdi_update" driver (sprintf "sr=%s vdi=%s" (Ref.string_of sr) (Ref.string_of vdi)); let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_update" [] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) -let vdi_introduce dconf driver sr new_uuid sm_config location = +let vdi_introduce dconf driver sr new_uuid sm_config location = debug "vdi_introduce" driver (sprintf "sr=%s new_uuid=%s sm_config=[%s] location=%s" (Ref.string_of sr) new_uuid (String.concat "; " (List.map (fun (k, v) -> k ^ 
"=" ^ v) sm_config)) location); let call = Sm_exec.make_call ~sr_ref:sr ~vdi_location:location ~vdi_sm_config:sm_config ~new_uuid:new_uuid dconf "vdi_introduce" [] in Sm_exec.parse_vdi_info (Sm_exec.exec_xmlrpc (driver_filename driver) call) @@ -169,7 +169,7 @@ let vdi_activate dconf driver sr vdi writable = debug "vdi_activate" driver (sprintf "sr=%s vdi=%s" (Ref.string_of sr) (Ref.string_of vdi)); let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_activate" [ sprintf "%b" writable ] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) - + let vdi_deactivate dconf driver sr vdi = debug "vdi_deactivate" driver (sprintf "sr=%s vdi=%s" (Ref.string_of sr) (Ref.string_of vdi)); let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_deactivate" [] in @@ -180,7 +180,7 @@ let vdi_snapshot dconf driver driver_params sr vdi = srmaster_only dconf; let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi ~driver_params dconf "vdi_snapshot" [] in Sm_exec.parse_vdi_info (Sm_exec.exec_xmlrpc (driver_filename driver) call) - + let vdi_clone dconf driver driver_params sr vdi = debug "vdi_clone" driver (sprintf "sr=%s vdi=%s driver_params=[%s]" (Ref.string_of sr) (Ref.string_of vdi) (String.concat "; " (List.map (fun (k, v) -> k ^ "=" ^ v) driver_params))); srmaster_only dconf; @@ -199,37 +199,37 @@ let vdi_resize_online dconf driver sr vdi newsize = let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_resize_online" [ sprintf "%Lu" newsize ] in Sm_exec.parse_vdi_info (Sm_exec.exec_xmlrpc (driver_filename driver) call) -let vdi_generate_config dconf driver sr vdi = +let vdi_generate_config dconf driver sr vdi = debug "vdi_generate_config" driver (sprintf "sr=%s vdi=%s" (Ref.string_of sr) (Ref.string_of vdi)); let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_generate_config" [] in Sm_exec.parse_string (Sm_exec.exec_xmlrpc (driver_filename driver) call) let vdi_compose dconf driver sr vdi1 vdi2 = - debug "vdi_compose" 
driver (sprintf "sr=%s vdi1=%s vdi2=%s" (Ref.string_of sr) (Ref.string_of vdi1) (Ref.string_of vdi2)); - srmaster_only dconf; - let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi2 dconf "vdi_compose" [ Ref.string_of vdi1] in - Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) + debug "vdi_compose" driver (sprintf "sr=%s vdi1=%s vdi2=%s" (Ref.string_of sr) (Ref.string_of vdi1) (Ref.string_of vdi2)); + srmaster_only dconf; + let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi2 dconf "vdi_compose" [ Ref.string_of vdi1] in + Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) let vdi_epoch_begin dconf driver sr vdi = - debug "vdi_epoch_begin" driver (sprintf "sr=%s vdi=%s" - (Ref.string_of sr) (Ref.string_of vdi)); - let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_epoch_begin" [] in - Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) + debug "vdi_epoch_begin" driver (sprintf "sr=%s vdi=%s" + (Ref.string_of sr) (Ref.string_of vdi)); + let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_epoch_begin" [] in + Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) let vdi_epoch_end dconf driver sr vdi = - debug "vdi_epoch_end" driver (sprintf "sr=%s vdi=%s" - (Ref.string_of sr) (Ref.string_of vdi)); - let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_epoch_end" [] in - Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) + debug "vdi_epoch_end" driver (sprintf "sr=%s vdi=%s" + (Ref.string_of sr) (Ref.string_of vdi)); + let call = Sm_exec.make_call ~sr_ref:sr ~vdi_ref:vdi dconf "vdi_epoch_end" [] in + Sm_exec.parse_unit (Sm_exec.exec_xmlrpc (driver_filename driver) call) -let session_has_internal_sr_access ~__context ~sr = +let session_has_internal_sr_access ~__context ~sr = let session_id = Context.get_session_id __context in (* XXX: need to move this somewhere else eventually *) let other_config = Db.Session.get_other_config ~__context 
~self:session_id in List.mem_assoc Xapi_globs._sm_session other_config && (List.assoc Xapi_globs._sm_session other_config) = Ref.string_of sr -let assert_session_has_internal_sr_access ~__context ~sr = +let assert_session_has_internal_sr_access ~__context ~sr = if not(session_has_internal_sr_access ~__context ~sr) then raise (Api_errors.Server_error(Api_errors.permission_denied, [""])) @@ -238,16 +238,16 @@ let assert_session_has_internal_sr_access ~__context ~sr = let get_my_pbd_for_sr __context sr_id = let me = Helpers.get_localhost __context in - let pbd_ref_and_record = Db.PBD.get_records_where ~__context - ~expr:(Db_filter_types.And ( - Db_filter_types.Eq (Db_filter_types.Field "host", Db_filter_types.Literal (Ref.string_of me)), - Db_filter_types.Eq (Db_filter_types.Field "SR", Db_filter_types.Literal (Ref.string_of sr_id)))) + let pbd_ref_and_record = Db.PBD.get_records_where ~__context + ~expr:(Db_filter_types.And ( + Db_filter_types.Eq (Db_filter_types.Field "host", Db_filter_types.Literal (Ref.string_of me)), + Db_filter_types.Eq (Db_filter_types.Field "SR", Db_filter_types.Literal (Ref.string_of sr_id)))) in match pbd_ref_and_record with - | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr_id ])) - | (x::_) -> x + | [] -> raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr_id ])) + | (x::_) -> x -let assert_pbd_is_plugged ~__context ~sr = +let assert_pbd_is_plugged ~__context ~sr = let _, pbd_r = get_my_pbd_for_sr __context sr in if not(pbd_r.API.pBD_currently_attached) then raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) @@ -258,31 +258,31 @@ let sm_master x = ("SRmaster", string_of_bool x) let __get_my_devconf_for_sr __context sr_id = let srmaster = Helpers.i_am_srmaster ~__context ~sr:sr_id in let (pbdref,pbd) = get_my_pbd_for_sr __context sr_id in - (sm_master srmaster) :: pbd.API.pBD_device_config + (sm_master srmaster) :: pbd.API.pBD_device_config (** Make it easier to 
call SM backend functions on an SR *) -let call_sm_functions ~__context ~sR f = - let srtype = Db.SR.get_type ~__context ~self:sR - and srconf = __get_my_devconf_for_sr __context sR in - let subtask_of = Some (Context.get_task_id __context) in - f (subtask_of,srconf) srtype +let call_sm_functions ~__context ~sR f = + let srtype = Db.SR.get_type ~__context ~self:sR + and srconf = __get_my_devconf_for_sr __context sR in + let subtask_of = Some (Context.get_task_id __context) in + f (subtask_of,srconf) srtype (** Make it easier to call SM backend functions on a VDI directly *) -let call_sm_vdi_functions ~__context ~vdi f = - let sr = Db.VDI.get_SR ~__context ~self:vdi in - let srtype = Db.SR.get_type ~__context ~self:sr - and srconf = __get_my_devconf_for_sr __context sr in - let subtask_of = Some (Context.get_task_id __context) in - f (subtask_of,srconf) srtype sr +let call_sm_vdi_functions ~__context ~vdi f = + let sr = Db.VDI.get_SR ~__context ~self:vdi in + let srtype = Db.SR.get_type ~__context ~self:sr + and srconf = __get_my_devconf_for_sr __context sr in + let subtask_of = Some (Context.get_task_id __context) in + f (subtask_of,srconf) srtype sr (* Use the sr_content_type cache *) -let sr_content_type ~__context ~sr = +let sr_content_type ~__context ~sr = Threadext.Mutex.execute sr_content_type_cache_m (fun () -> if Hashtbl.mem sr_content_type_cache sr then Hashtbl.find sr_content_type_cache sr - else - let ty = call_sm_functions ~__context ~sR:sr (fun srconf srtype -> (sr_content_type srconf srtype sr)) in - Hashtbl.replace sr_content_type_cache sr ty; - ty) + else + let ty = call_sm_functions ~__context ~sR:sr (fun srconf srtype -> (sr_content_type srconf srtype sr)) in + Hashtbl.replace sr_content_type_cache sr ty; + ty) diff --git a/ocaml/xapi/sm_exec.ml b/ocaml/xapi/sm_exec.ml index bf1829d2e2d..e52f7c2dd28 100644 --- a/ocaml/xapi/sm_exec.ml +++ b/ocaml/xapi/sm_exec.ml @@ -13,7 +13,7 @@ *) (** Storage manager backend: external operations through exec * 
@group Storage - *) +*) open Stdext open Pervasiveext @@ -37,7 +37,7 @@ type call = { host_ref: API.ref_host; session_ref: API.ref_session option; device_config: (string * string) list; - + (* SR probe takes sm config at the SR level *) sr_sm_config: (string * string) list option; @@ -73,53 +73,53 @@ type call = { let make_call ?driver_params ?sr_sm_config ?vdi_sm_config ?vdi_type ?vdi_location ?new_uuid ?sr_ref ?vdi_ref (subtask_of,device_config) cmd args = Server_helpers.exec_with_new_task "sm_exec" (fun __context -> - (* Only allow a subset of calls if the SR has been introduced by a DR task. *) - Opt.iter (fun sr -> - if Db.is_valid_ref __context (Db.SR.get_introduced_by ~__context ~self:sr) then - if not(List.mem cmd ["sr_attach"; "sr_detach"; "vdi_attach"; "vdi_detach"; "vdi_activate"; "vdi_deactivate"; "sr_probe"; "sr_scan"; "sr_content_type"]) then - raise (Storage_interface.Backend_error(Api_errors.operation_not_allowed, - [Printf.sprintf "The operation %s is not allowed on this SR as it is being used for disaster recovery." cmd])); - ) sr_ref; - let vdi_location = - if vdi_location <> None - then vdi_location - else may (fun self -> Db.VDI.get_location ~__context ~self) vdi_ref in + (* Only allow a subset of calls if the SR has been introduced by a DR task. *) + Opt.iter (fun sr -> + if Db.is_valid_ref __context (Db.SR.get_introduced_by ~__context ~self:sr) then + if not(List.mem cmd ["sr_attach"; "sr_detach"; "vdi_attach"; "vdi_detach"; "vdi_activate"; "vdi_deactivate"; "sr_probe"; "sr_scan"; "sr_content_type"]) then + raise (Storage_interface.Backend_error(Api_errors.operation_not_allowed, + [Printf.sprintf "The operation %s is not allowed on this SR as it is being used for disaster recovery." 
cmd])); + ) sr_ref; + let vdi_location = + if vdi_location <> None + then vdi_location + else may (fun self -> Db.VDI.get_location ~__context ~self) vdi_ref in let vdi_uuid = may (fun self -> Db.VDI.get_uuid ~__context ~self) vdi_ref in - let vdi_on_boot = may (fun self -> - match Db.VDI.get_on_boot ~__context ~self with `persist -> "persist" | `reset -> "reset") vdi_ref in - let vdi_allow_caching = may (fun self -> string_of_bool (Db.VDI.get_allow_caching ~__context ~self)) vdi_ref in - let local_cache_sr = try Some (Db.SR.get_uuid ~__context ~self:(Db.Host.get_local_cache_sr ~__context ~self:(Helpers.get_localhost __context))) with _ -> None in + let vdi_on_boot = may (fun self -> + match Db.VDI.get_on_boot ~__context ~self with `persist -> "persist" | `reset -> "reset") vdi_ref in + let vdi_allow_caching = may (fun self -> string_of_bool (Db.VDI.get_allow_caching ~__context ~self)) vdi_ref in + let local_cache_sr = try Some (Db.SR.get_uuid ~__context ~self:(Db.Host.get_local_cache_sr ~__context ~self:(Helpers.get_localhost __context))) with _ -> None in let sr_uuid = may (fun self -> Db.SR.get_uuid ~__context ~self) sr_ref in { host_ref = !Xapi_globs.localhost_ref; - session_ref = None; (* filled in at the last minute *) - device_config = device_config; - sr_ref = sr_ref; - sr_uuid = sr_uuid; - driver_params = driver_params; - sr_sm_config = sr_sm_config; - vdi_sm_config = vdi_sm_config; - vdi_type = vdi_type; - vdi_ref = vdi_ref; - vdi_location = vdi_location; - vdi_uuid = vdi_uuid; - vdi_on_boot = vdi_on_boot; - vdi_allow_caching = vdi_allow_caching; - new_uuid = new_uuid; - subtask_of = subtask_of; - local_cache_sr = local_cache_sr; - cmd = cmd; - args = args + session_ref = None; (* filled in at the last minute *) + device_config = device_config; + sr_ref = sr_ref; + sr_uuid = sr_uuid; + driver_params = driver_params; + sr_sm_config = sr_sm_config; + vdi_sm_config = vdi_sm_config; + vdi_type = vdi_type; + vdi_ref = vdi_ref; + vdi_location = vdi_location; + 
vdi_uuid = vdi_uuid; + vdi_on_boot = vdi_on_boot; + vdi_allow_caching = vdi_allow_caching; + new_uuid = new_uuid; + subtask_of = subtask_of; + local_cache_sr = local_cache_sr; + cmd = cmd; + args = args }) let xmlrpc_of_call (call: call) = - let kvpairs kvpairs = - XMLRPC.To.structure + let kvpairs kvpairs = + XMLRPC.To.structure (List.map (fun (k, v) -> k, XMLRPC.To.string v) kvpairs) in - + let common = [ "host_ref", XMLRPC.To.string (Ref.string_of call.host_ref); - "command", XMLRPC.To.string (call.cmd); - "args", XMLRPC.To.array (List.map XMLRPC.To.string call.args); - ] in + "command", XMLRPC.To.string (call.cmd); + "args", XMLRPC.To.array (List.map XMLRPC.To.string call.args); + ] in let dc = [ "device_config", kvpairs call.device_config ] in let session_ref = default [] (may (fun x -> [ "session_ref", XMLRPC.To.string (Ref.string_of x) ]) call.session_ref) in let sr_sm_config = default [] (may (fun x -> [ "sr_sm_config", kvpairs x ]) call.sr_sm_config) in @@ -143,7 +143,7 @@ let xmlrpc_of_call (call: call) = let methodResponse xml = match xml with | Xml.Element("methodResponse", _, [ Xml.Element("params", _, [ Xml.Element("param", _, [ param ]) ]) ]) -> - XMLRPC.Success [ param ] + XMLRPC.Success [ param ] | xml -> XMLRPC.From.methodResponse xml @@ -152,108 +152,108 @@ let methodResponse xml = let with_session sr f = Server_helpers.exec_with_new_task "sm_exec" (fun __context -> - let create_session () = - let host = !Xapi_globs.localhost_ref in - let session=Xapi_session.login_no_password ~__context ~uname:None ~host ~pool:false ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:sm_username ~rbac_permissions:[] in - (* Give this session access to this particular SR *) - maybe (fun sr -> - Db.Session.add_to_other_config ~__context ~self:session - ~key:Xapi_globs._sm_session ~value:(Ref.string_of sr)) sr; - session - in - let destroy_session session_id = - Xapi_session.destroy_db_session ~__context ~self:session_id - in - let 
session_id = create_session () in - Pervasiveext.finally (fun () -> f session_id) (fun () -> destroy_session session_id)) + let create_session () = + let host = !Xapi_globs.localhost_ref in + let session=Xapi_session.login_no_password ~__context ~uname:None ~host ~pool:false ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:sm_username ~rbac_permissions:[] in + (* Give this session access to this particular SR *) + maybe (fun sr -> + Db.Session.add_to_other_config ~__context ~self:session + ~key:Xapi_globs._sm_session ~value:(Ref.string_of sr)) sr; + session + in + let destroy_session session_id = + Xapi_session.destroy_db_session ~__context ~self:session_id + in + let session_id = create_session () in + Pervasiveext.finally (fun () -> f session_id) (fun () -> destroy_session session_id)) let exec_xmlrpc ?context ?(needs_session=true) (driver: string) (call: call) = - let do_call call = + let do_call call = let xml = xmlrpc_of_call call in let name = Printf.sprintf "sm_exec: %s" call.cmd in - let (xml,stderr) = Stats.time_this name (fun () -> - let exe = cmd_name driver in - begin try - (* Logging call.cmd is safe, but call.args could contain a password. 
*) - E.debug "smapiv2=>smapiv1 [label=\"%s\"];" call.cmd; - let output, stderr = Forkhelpers.execute_command_get_output exe [ Xml.to_string xml ] in - begin try - (Xml.parse_string output), stderr - with e -> - error "Failed to parse result from %s: stdout:%s stderr:%s exception:%s" exe output stderr (Printexc.to_string e); - raise (Storage_interface.Backend_error(Api_errors.sr_backend_failure, [ Printexc.to_string e; output; stderr ])) - end - with - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> - raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["exit code: " ^ (string_of_int i); output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> - raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["received signal: " ^ (Unixext.string_of_signal i); output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> - raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["non-zero exit"; output; log ])) - end - ) + let (xml,stderr) = Stats.time_this name (fun () -> + let exe = cmd_name driver in + begin try + (* Logging call.cmd is safe, but call.args could contain a password. 
*) + E.debug "smapiv2=>smapiv1 [label=\"%s\"];" call.cmd; + let output, stderr = Forkhelpers.execute_command_get_output exe [ Xml.to_string xml ] in + begin try + (Xml.parse_string output), stderr + with e -> + error "Failed to parse result from %s: stdout:%s stderr:%s exception:%s" exe output stderr (Printexc.to_string e); + raise (Storage_interface.Backend_error(Api_errors.sr_backend_failure, [ Printexc.to_string e; output; stderr ])) + end + with + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> + raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["exit code: " ^ (string_of_int i); output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> + raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["received signal: " ^ (Unixext.string_of_signal i); output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> + raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, ["non-zero exit"; output; log ])) + end + ) in match methodResponse xml with | XMLRPC.Fault(38l, _) -> raise Not_implemented_in_backend | XMLRPC.Fault(39l, _) -> - raise (Storage_interface.Backend_error (Api_errors.sr_not_empty, [])) - | XMLRPC.Fault(24l, _) -> - raise (Storage_interface.Backend_error (Api_errors.vdi_in_use, [])) - | XMLRPC.Fault(16l, _) -> - raise (Storage_interface.Backend_error (Api_errors.sr_device_in_use, [])) + raise (Storage_interface.Backend_error (Api_errors.sr_not_empty, [])) + | XMLRPC.Fault(24l, _) -> + raise (Storage_interface.Backend_error (Api_errors.vdi_in_use, [])) + | XMLRPC.Fault(16l, _) -> + raise (Storage_interface.Backend_error (Api_errors.sr_device_in_use, [])) | XMLRPC.Fault(144l, _) -> - (* Any call which returns this 'VDIMissing' error really ought to have - been provided both an SR and VDI reference... 
*) - let sr = default "" (may Ref.string_of call.sr_ref) - and vdi = default "" (may Ref.string_of call.vdi_ref) in - raise (Storage_interface.Backend_error (Api_errors.vdi_missing, [ sr; vdi ])) - + (* Any call which returns this 'VDIMissing' error really ought to have + been provided both an SR and VDI reference... *) + let sr = default "" (may Ref.string_of call.sr_ref) + and vdi = default "" (may Ref.string_of call.vdi_ref) in + raise (Storage_interface.Backend_error (Api_errors.vdi_missing, [ sr; vdi ])) + | XMLRPC.Fault(code, reason) -> - let xenapi_code = Api_errors.sr_backend_failure ^ "_" ^ (Int32.to_string code) in - raise (Storage_interface.Backend_error(xenapi_code, [ ""; reason; stderr ])) - + let xenapi_code = Api_errors.sr_backend_failure ^ "_" ^ (Int32.to_string code) in + raise (Storage_interface.Backend_error(xenapi_code, [ ""; reason; stderr ])) + | XMLRPC.Success [ result ] -> result - | _ -> - raise (Storage_interface.Backend_error(Api_errors.internal_error, ["Unexpected response from SM plugin"])) - in + | _ -> + raise (Storage_interface.Backend_error(Api_errors.internal_error, ["Unexpected response from SM plugin"])) + in if needs_session then with_session call.sr_ref (fun session_id -> do_call { call with session_ref = Some session_id }) else do_call call (********************************************************************) -(** Some functions to cope with the XML that the SM backends return *) +(** Some functions to cope with the XML that the SM backends return *) -let xmlrpc_parse_failure (xml: string) (reason: string) = +let xmlrpc_parse_failure (xml: string) (reason: string) = raise (Storage_interface.Backend_error (Api_errors.sr_backend_failure, - [ ""; "XML parse failure: " ^xml; reason ])) + [ ""; "XML parse failure: " ^xml; reason ])) -let rethrow_parse_failures xml f = +let rethrow_parse_failures xml f = try f () - with + with | Backend_missing_field s -> - xmlrpc_parse_failure xml (Printf.sprintf " missing field: %s" s) + 
xmlrpc_parse_failure xml (Printf.sprintf " missing field: %s" s) | XMLRPC.RunTimeTypeError(s, x) -> - xmlrpc_parse_failure xml (Printf.sprintf "XMLRPC unmarshall RunTimeTypeError: looking for %s found %s" s (Xml.to_string_fmt x)) + xmlrpc_parse_failure xml (Printf.sprintf "XMLRPC unmarshall RunTimeTypeError: looking for %s found %s" s (Xml.to_string_fmt x)) | e -> - xmlrpc_parse_failure xml (Printexc.to_string e) + xmlrpc_parse_failure xml (Printexc.to_string e) -let safe_assoc key pairs = - try List.assoc key pairs +let safe_assoc key pairs = + try List.assoc key pairs with Not_found -> raise (Backend_missing_field key) (* Used for both sr_scan, vdi_create and vdi_resize *) -let parse_vdi_info (vdi_info_struct: Xml.xml) = +let parse_vdi_info (vdi_info_struct: Xml.xml) = rethrow_parse_failures (Xml.to_string_fmt vdi_info_struct) (fun () -> let pairs = List.map (fun (key, v) -> key, XMLRPC.From.string v) - (XMLRPC.From.structure vdi_info_struct) in + (XMLRPC.From.structure vdi_info_struct) in { - vdi_info_uuid = Some (safe_assoc "uuid" pairs); - vdi_info_location = safe_assoc "location" pairs + vdi_info_uuid = Some (safe_assoc "uuid" pairs); + vdi_info_location = safe_assoc "location" pairs } ) @@ -264,40 +264,40 @@ let parse_string (xml: Xml.xml) = XMLRPC.From.string xml let parse_unit (xml: Xml.xml) = XMLRPC.From.nil xml -let parse_attach_result (xml : Xml.xml) = - rethrow_parse_failures (Xml.to_string_fmt xml) (fun () -> - let info = XMLRPC.From.structure xml in - let params = XMLRPC.From.string (safe_assoc "params" info) in - let o_direct = - try XMLRPC.From.boolean (safe_assoc "o_direct" info) - with _ -> true - in - let o_direct_reason = - try XMLRPC.From.string (safe_assoc "o_direct_reason" info) - with _ -> "" - in - let xenstore_data = - try - List.map (fun (x,y) -> (x,XMLRPC.From.string y)) - (XMLRPC.From.structure (safe_assoc "xenstore_data" info)) - with _ -> - [] - in - { - params; - o_direct; - o_direct_reason; - xenstore_data; - } - ) +let 
parse_attach_result (xml : Xml.xml) = + rethrow_parse_failures (Xml.to_string_fmt xml) (fun () -> + let info = XMLRPC.From.structure xml in + let params = XMLRPC.From.string (safe_assoc "params" info) in + let o_direct = + try XMLRPC.From.boolean (safe_assoc "o_direct" info) + with _ -> true + in + let o_direct_reason = + try XMLRPC.From.string (safe_assoc "o_direct_reason" info) + with _ -> "" + in + let xenstore_data = + try + List.map (fun (x,y) -> (x,XMLRPC.From.string y)) + (XMLRPC.From.structure (safe_assoc "xenstore_data" info)) + with _ -> + [] + in + { + params; + o_direct; + o_direct_reason; + xenstore_data; + } + ) let parse_attach_result_legacy (xml : Xml.xml) = parse_string xml -let parse_sr_get_driver_info driver (xml: Xml.xml) = +let parse_sr_get_driver_info driver (xml: Xml.xml) = let info = XMLRPC.From.structure xml in (* Parse the standard strings *) - let name = XMLRPC.From.string (safe_assoc "name" info) + let name = XMLRPC.From.string (safe_assoc "name" info) and description = XMLRPC.From.string (safe_assoc "description" info) and vendor = XMLRPC.From.string (safe_assoc "vendor" info) and copyright = XMLRPC.From.string (safe_assoc "copyright" info) @@ -305,17 +305,17 @@ let parse_sr_get_driver_info driver (xml: Xml.xml) = and required_api_version = XMLRPC.From.string (safe_assoc "required_api_version" info) in let strings = XMLRPC.From.array XMLRPC.From.string (safe_assoc "capabilities" info) in - + let features = Smint.parse_capability_int64_features strings in let text_features = List.map Smint.string_of_feature features in - + (* Parse the driver options *) - let configuration = - List.map (fun kvpairs -> - XMLRPC.From.string (safe_assoc "key" kvpairs), - XMLRPC.From.string (safe_assoc "description" kvpairs)) + let configuration = + List.map (fun kvpairs -> + XMLRPC.From.string (safe_assoc "key" kvpairs), + XMLRPC.From.string (safe_assoc "description" kvpairs)) (XMLRPC.From.array XMLRPC.From.structure (safe_assoc "configuration" info)) in - 
+ { sr_driver_filename = driver; sr_driver_name = name; sr_driver_description = description; @@ -329,37 +329,37 @@ let parse_sr_get_driver_info driver (xml: Xml.xml) = sr_driver_required_cluster_stack = []; } -let sr_get_driver_info driver = +let sr_get_driver_info driver = let call = make_call (None,[]) "sr_get_driver_info" [] in parse_sr_get_driver_info driver (exec_xmlrpc ~needs_session:false driver call) - + (* Call the supplied function (passing driver name and driver_info) for every * backend and daemon found. *) let get_supported add_fn = let check_driver entry = - if String.endswith "SR" entry then ( - let driver = String.sub entry 0 (String.length entry - 2) in - if not(Xapi_globs.accept_sm_plugin driver) - then info "Skipping SMAPIv1 plugin %s: not in sm-plugins whitelist in configuration file" driver - else begin - try - Unix.access (cmd_name driver) [ Unix.X_OK ]; - let i = sr_get_driver_info driver in - add_fn driver i; - with e -> - error "Rejecting SM plugin: %s because of exception: %s (executable)" driver (Printexc.to_string e) - end - ) in - - List.iter + if String.endswith "SR" entry then ( + let driver = String.sub entry 0 (String.length entry - 2) in + if not(Xapi_globs.accept_sm_plugin driver) + then info "Skipping SMAPIv1 plugin %s: not in sm-plugins whitelist in configuration file" driver + else begin + try + Unix.access (cmd_name driver) [ Unix.X_OK ]; + let i = sr_get_driver_info driver in + add_fn driver i; + with e -> + error "Rejecting SM plugin: %s because of exception: %s (executable)" driver (Printexc.to_string e) + end + ) in + + List.iter (fun (f, dir) -> - if Sys.file_exists dir then begin - debug "Scanning directory %s for SM plugins" dir; - try Array.iter f (Sys.readdir dir) - with e -> - error "Error checking directory %s for SM backends: %s" dir (ExnHelper.string_of_exn e); - end else error "Not scanning %s for SM backends: directory does not exist" dir - ) + if Sys.file_exists dir then begin + debug "Scanning directory %s for 
SM plugins" dir; + try Array.iter f (Sys.readdir dir) + with e -> + error "Error checking directory %s for SM backends: %s" dir (ExnHelper.string_of_exn e); + end else error "Not scanning %s for SM backends: directory does not exist" dir + ) [ check_driver, !Xapi_globs.sm_dir ] (*********************************************************************) diff --git a/ocaml/xapi/sm_fs_ops.ml b/ocaml/xapi/sm_fs_ops.ml index b047959b9d0..c889c98e84f 100644 --- a/ocaml/xapi/sm_fs_ops.ml +++ b/ocaml/xapi/sm_fs_ops.ml @@ -20,144 +20,144 @@ module D=Debug.Make(struct let name="xapi" end) open D let make_tmp_dir() = - let tmp_file = Filename.temp_file "xapi_mount_" "" in - Unix.unlink tmp_file; - Unix.mkdir tmp_file 0o640; - tmp_file + let tmp_file = Filename.temp_file "xapi_mount_" "" in + Unix.unlink tmp_file; + Unix.mkdir tmp_file 0o640; + tmp_file (** Block-attach a VDI to dom0 and run 'f' with the device name *) let with_block_attached_device __context rpc session_id vdi mode f = - let dom0 = Helpers.get_domain_zero ~__context in - Attach_helpers.with_vbds rpc session_id __context dom0 [ vdi ] mode - (fun vbds -> - let vbd = List.hd vbds in - f ("/dev/" ^ (Db.VBD.get_device ~__context ~self:vbd))) + let dom0 = Helpers.get_domain_zero ~__context in + Attach_helpers.with_vbds rpc session_id __context dom0 [ vdi ] mode + (fun vbds -> + let vbd = List.hd vbds in + f ("/dev/" ^ (Db.VBD.get_device ~__context ~self:vbd))) (** Block-attach a VDI to dom0, open the device and pass the file descriptor to [f] *) let with_open_block_attached_device __context rpc session_id vdi mode f = - with_block_attached_device __context rpc session_id vdi mode - (fun path -> - let mode' = match mode with - | `RO -> [ Unix.O_RDONLY ] - | `RW -> [ Unix.O_RDWR ] in - let fd = Unix.openfile path mode' 0 in - Stdext.Pervasiveext.finally - (fun () -> f fd) - (fun () -> Unix.close fd) - ) + with_block_attached_device __context rpc session_id vdi mode + (fun path -> + let mode' = match mode with + | `RO -> 
[ Unix.O_RDONLY ] + | `RW -> [ Unix.O_RDWR ] in + let fd = Unix.openfile path mode' 0 in + Stdext.Pervasiveext.finally + (fun () -> f fd) + (fun () -> Unix.close fd) + ) (** Execute a function with a list of device paths after attaching a bunch of VDIs to dom0 *) let with_block_attached_devices (__context: Context.t) rpc (session_id: API.ref_session) (vdis: API.ref_VDI list) mode f = - let rec loop acc = function - | [] -> f (List.rev acc) - | vdi :: vdis -> with_block_attached_device __context rpc session_id vdi mode (fun path -> loop (path :: acc) vdis) in - loop [] vdis + let rec loop acc = function + | [] -> f (List.rev acc) + | vdi :: vdis -> with_block_attached_device __context rpc session_id vdi mode (fun path -> loop (path :: acc) vdis) in + loop [] vdis (** Return a URL suitable for passing to the sparse_dd process *) let import_vdi_url ~__context ?(prefer_slaves=false) rpc session_id task_id vdi = - (* Find a suitable host for the SR containing the VDI *) - let sr = Db.VDI.get_SR ~__context ~self:vdi in - let host = Importexport.find_host_for_sr ~__context ~prefer_slaves sr in - let address = Db.Host.get_address ~__context ~self:host in - Printf.sprintf "https://%s%s?vdi=%s&session_id=%s&task_id=%s" - address Constants.import_raw_vdi_uri (Ref.string_of vdi) - (Ref.string_of session_id) (Ref.string_of task_id) + (* Find a suitable host for the SR containing the VDI *) + let sr = Db.VDI.get_SR ~__context ~self:vdi in + let host = Importexport.find_host_for_sr ~__context ~prefer_slaves sr in + let address = Db.Host.get_address ~__context ~self:host in + Printf.sprintf "https://%s%s?vdi=%s&session_id=%s&task_id=%s" + address Constants.import_raw_vdi_uri (Ref.string_of vdi) + (Ref.string_of session_id) (Ref.string_of task_id) (** Catch those smint exceptions and convert into meaningful internal errors *) let with_api_errors f x = - try f x - with - | Smint.Command_failed(ret, status, stdout_log, stderr_log) - | Smint.Command_killed(ret, status, stdout_log, 
stderr_log) -> - let msg = Printf.sprintf "Smint.Command_{failed,killed} ret = %d; status = %s; stdout = %s; stderr = %s" - ret status stdout_log stderr_log in - raise (Api_errors.Server_error (Api_errors.internal_error, [msg])) + try f x + with + | Smint.Command_failed(ret, status, stdout_log, stderr_log) + | Smint.Command_killed(ret, status, stdout_log, stderr_log) -> + let msg = Printf.sprintf "Smint.Command_{failed,killed} ret = %d; status = %s; stdout = %s; stderr = %s" + ret status stdout_log stderr_log in + raise (Api_errors.Server_error (Api_errors.internal_error, [msg])) (** Mount a filesystem somewhere, with optional type *) let mount ?ty:(ty = None) src dest = - let ty = match ty with None -> [] | Some ty -> [ "-t"; ty ] in - ignore(Forkhelpers.execute_command_get_output "/bin/mount" (ty @ [ src; dest ])) + let ty = match ty with None -> [] | Some ty -> [ "-t"; ty ] in + ignore(Forkhelpers.execute_command_get_output "/bin/mount" (ty @ [ src; dest ])) let timeout = 300. (* 5 minutes: something is seriously wrong if we hit this timeout *) exception Umount_timeout (** Unmount a mountpoint. Retries every 5 secs for a total of 5mins before returning failure *) let umount ?(retry=true) dest = - let finished = ref false in - let start = Unix.gettimeofday () in + let finished = ref false in + let start = Unix.gettimeofday () in - while not(!finished) && (Unix.gettimeofday () -. start < timeout) do - try - ignore(Forkhelpers.execute_command_get_output "/bin/umount" [dest] ); - finished := true - with e -> - if not(retry) then raise e; - debug "Caught exception (%s) while unmounting %s: pausing before retrying" - (ExnHelper.string_of_exn e) dest; - Thread.delay 5. - done; - if not(!finished) then raise Umount_timeout + while not(!finished) && (Unix.gettimeofday () -. 
start < timeout) do + try + ignore(Forkhelpers.execute_command_get_output "/bin/umount" [dest] ); + finished := true + with e -> + if not(retry) then raise e; + debug "Caught exception (%s) while unmounting %s: pausing before retrying" + (ExnHelper.string_of_exn e) dest; + Thread.delay 5. + done; + if not(!finished) then raise Umount_timeout let with_mounted_dir device mount_point rmdir f = - Stdext.Pervasiveext.finally - (fun () -> - debug "About to create mount point (perhaps)"; - let output, _ = Forkhelpers.execute_command_get_output "/bin/mkdir" ["-p"; mount_point] in - debug "Mountpoint created (output=%s)" output; - with_api_errors (mount ~ty:(Some "ext2") device) mount_point; - debug "Mounted"; - f mount_point) - (fun () -> - Helpers.log_exn_continue ("with_fs_vdi: unmounting " ^ mount_point) - (fun () -> umount mount_point) (); - Helpers.log_exn_continue ("with_fs_vdi: rmdir " ^ mount_point) - (fun () -> if rmdir then Unix.rmdir mount_point) ()) + Stdext.Pervasiveext.finally + (fun () -> + debug "About to create mount point (perhaps)"; + let output, _ = Forkhelpers.execute_command_get_output "/bin/mkdir" ["-p"; mount_point] in + debug "Mountpoint created (output=%s)" output; + with_api_errors (mount ~ty:(Some "ext2") device) mount_point; + debug "Mounted"; + f mount_point) + (fun () -> + Helpers.log_exn_continue ("with_fs_vdi: unmounting " ^ mount_point) + (fun () -> umount mount_point) (); + Helpers.log_exn_continue ("with_fs_vdi: rmdir " ^ mount_point) + (fun () -> if rmdir then Unix.rmdir mount_point) ()) (** Block-attach a VDI to dom0, mount an ext2 filesystem and run 'f' with the mountpoint *) let with_fs_vdi __context vdi f = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - with_block_attached_device __context rpc session_id vdi `RW - (fun device -> - let mount_point = make_tmp_dir () in - with_mounted_dir device mount_point true f - ) - ) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + with_block_attached_device 
__context rpc session_id vdi `RW + (fun device -> + let mount_point = make_tmp_dir () in + with_mounted_dir device mount_point true f + ) + ) (** Stick ext2 filesystem on VDI and turn off maximal mount count + checking interval *) let mke2fs device = - ignore(Forkhelpers.execute_command_get_output "/sbin/mkfs" ["-t"; "ext2"; device]); - ignore(Forkhelpers.execute_command_get_output "/sbin/tune2fs" ["-i"; "0"; "-c"; "0"; device]) + ignore(Forkhelpers.execute_command_get_output "/sbin/mkfs" ["-t"; "ext2"; device]); + ignore(Forkhelpers.execute_command_get_output "/sbin/tune2fs" ["-i"; "0"; "-c"; "0"; device]) (** Create a new VDI, block attach it to dom0, create an ext2 filesystem, run 'f' with the vdi_ref and the mountpoint. Leave the VDI around, unless there is an exception in which case we delete it. *) let with_new_fs_vdi __context ~name_label ~name_description ~sR ~required_space ~_type ~sm_config f = - let add_fs_overhead req = - let fs_overhead_factor = 1.05 (* allow 5% overhead for ext2 *) in - (Int64.of_float ((Int64.to_float req)*.fs_overhead_factor)) - in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let vdi_ref = Client.VDI.create ~rpc ~session_id - ~name_label ~name_description ~sR ~virtual_size:(add_fs_overhead required_space) - ~sharable:false ~read_only:false ~_type ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in - try - with_block_attached_device __context rpc session_id vdi_ref `RW - (fun device -> - with_api_errors - (fun () -> - mke2fs device; - (* Mount it *) - let mount_point = make_tmp_dir() in - with_mounted_dir device mount_point true (f vdi_ref) - ) () - ) - with e -> - debug "Caught error (%s) during with_new_fs_vdi: deleting created VDI" (ExnHelper.string_of_exn e); - Client.VDI.destroy ~rpc ~session_id ~self:vdi_ref; - raise e - ) + let add_fs_overhead req = + let fs_overhead_factor = 1.05 (* allow 5% overhead for ext2 *) in + (Int64.of_float ((Int64.to_float req)*.fs_overhead_factor)) + in + 
Helpers.call_api_functions ~__context + (fun rpc session_id -> + let vdi_ref = Client.VDI.create ~rpc ~session_id + ~name_label ~name_description ~sR ~virtual_size:(add_fs_overhead required_space) + ~sharable:false ~read_only:false ~_type ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in + try + with_block_attached_device __context rpc session_id vdi_ref `RW + (fun device -> + with_api_errors + (fun () -> + mke2fs device; + (* Mount it *) + let mount_point = make_tmp_dir() in + with_mounted_dir device mount_point true (f vdi_ref) + ) () + ) + with e -> + debug "Caught error (%s) during with_new_fs_vdi: deleting created VDI" (ExnHelper.string_of_exn e); + Client.VDI.destroy ~rpc ~session_id ~self:vdi_ref; + raise e + ) (* SCTX-286: thin provisioning is thrown away over VDI.copy, VM.import(VM.export). Return true if the newly created vdi must have zeroes written into it; default to false @@ -167,115 +167,115 @@ let with_new_fs_vdi __context ~name_label ~name_description ~sR ~required_space This knowledge clearly ought to be in the SM backend rather than here. 
*) let must_write_zeroes_into_new_vdi ~__context vdi = - let vdi_r = Db.VDI.get_record ~__context ~self:vdi in - let sr_r = Db.SR.get_record ~__context ~self:vdi_r.API.vDI_SR in - let potentially_using_lvhd sr_r = List.mem (String.lowercase sr_r.API.sR_type) [ "lvm"; "lvmoiscsi"; "lvmohba" ] in - let requested_raw_vdi vdi_r = List.mem (List.hd Xha_statefile.statefile_sm_config) vdi_r.API.vDI_sm_config in - let upgraded_to_lvhd sr_r = List.mem ("use_vhd", "true") sr_r.API.sR_sm_config in + let vdi_r = Db.VDI.get_record ~__context ~self:vdi in + let sr_r = Db.SR.get_record ~__context ~self:vdi_r.API.vDI_SR in + let potentially_using_lvhd sr_r = List.mem (String.lowercase sr_r.API.sR_type) [ "lvm"; "lvmoiscsi"; "lvmohba" ] in + let requested_raw_vdi vdi_r = List.mem (List.hd Xha_statefile.statefile_sm_config) vdi_r.API.vDI_sm_config in + let upgraded_to_lvhd sr_r = List.mem ("use_vhd", "true") sr_r.API.sR_sm_config in - (* Equallogic arrays in 'thick' mode don't zero disks *) - let using_eql sr_r = String.lowercase sr_r.API.sR_type = "equal" in - let using_eql_thick sr_r = List.mem ("allocation", "thick") (List.map (fun (x, y) -> String.lowercase x, String.lowercase y) sr_r.API.sR_sm_config) in + (* Equallogic arrays in 'thick' mode don't zero disks *) + let using_eql sr_r = String.lowercase sr_r.API.sR_type = "equal" in + let using_eql_thick sr_r = List.mem ("allocation", "thick") (List.map (fun (x, y) -> String.lowercase x, String.lowercase y) sr_r.API.sR_sm_config) in - (* We presume that storagelink arrays don't zero disks either *) - let using_csl sr_r = String.lowercase sr_r.API.sR_type = "cslg" in + (* We presume that storagelink arrays don't zero disks either *) + let using_csl sr_r = String.lowercase sr_r.API.sR_type = "cslg" in - (* Julian agreed with the following logic by email + chat: *) - false - || (potentially_using_lvhd sr_r - && ((requested_raw_vdi vdi_r) || (not (upgraded_to_lvhd sr_r))) - ) - (* After speaking to Julian again: *) - || (using_eql 
sr_r && (using_eql_thick sr_r)) - || (using_csl sr_r) + (* Julian agreed with the following logic by email + chat: *) + false + || (potentially_using_lvhd sr_r + && ((requested_raw_vdi vdi_r) || (not (upgraded_to_lvhd sr_r))) + ) + (* After speaking to Julian again: *) + || (using_eql sr_r && (using_eql_thick sr_r)) + || (using_csl sr_r) let copy_vdi ~__context ?base vdi_src vdi_dst = - TaskHelper.set_cancellable ~__context; - Helpers.call_api_functions ~__context (fun rpc session_id -> + TaskHelper.set_cancellable ~__context; + Helpers.call_api_functions ~__context (fun rpc session_id -> - (* Use the sparse copy unless we must write zeroes into the new VDI *) - let sparse = not (must_write_zeroes_into_new_vdi ~__context vdi_dst) in - if not sparse && (base <> None) then begin - (* This doesn't make sense because we will be forced to write zeroes - into the destination VDI, and then the user will not be able to tell - the difference between zeroes that exist in the user's virtual disk, - and zeroes that we had to write to clear the disk. *) - error "VDI.copy: destination VDI does not support sparse copy BUT a delta-only copy requested. The output will be useless so I refuse to make it."; - raise (Api_errors.Server_error(Api_errors.vdi_not_sparse, [ Ref.string_of vdi_dst ])) - end; + (* Use the sparse copy unless we must write zeroes into the new VDI *) + let sparse = not (must_write_zeroes_into_new_vdi ~__context vdi_dst) in + if not sparse && (base <> None) then begin + (* This doesn't make sense because we will be forced to write zeroes + into the destination VDI, and then the user will not be able to tell + the difference between zeroes that exist in the user's virtual disk, + and zeroes that we had to write to clear the disk. *) + error "VDI.copy: destination VDI does not support sparse copy BUT a delta-only copy requested. 
The output will be useless so I refuse to make it."; + raise (Api_errors.Server_error(Api_errors.vdi_not_sparse, [ Ref.string_of vdi_dst ])) + end; - (* Copy locally unless this host can't see the destination SR *) - let can_local_copy = Importexport.check_sr_availability ~__context (Db.VDI.get_SR ~__context ~self:vdi_dst) in + (* Copy locally unless this host can't see the destination SR *) + let can_local_copy = Importexport.check_sr_availability ~__context (Db.VDI.get_SR ~__context ~self:vdi_dst) in - let size = Db.VDI.get_virtual_size ~__context ~self:vdi_src in + let size = Db.VDI.get_virtual_size ~__context ~self:vdi_src in - let local_copy = can_local_copy && not (Xapi_fist.force_remote_vdi_copy ()) in + let local_copy = can_local_copy && not (Xapi_fist.force_remote_vdi_copy ()) in - debug "Sm_fs_ops.copy_vdi: %s-copying %Ld%s preserving sparseness %s" - (if local_copy then "locally" else "remotely") - size - (if sparse then "" else " NOT") - (match base with None -> "" | Some x -> Printf.sprintf "copying only differences from %s" (Ref.string_of x)); - let progress_cb progress = - TaskHelper.exn_if_cancelling ~__context; - TaskHelper.operate_on_db_task ~__context - (fun self -> Db.Task.set_progress ~__context ~self ~value:progress) in - let copy base = - try - with_block_attached_device __context rpc session_id vdi_src `RO - (fun device_src -> - if local_copy - then with_block_attached_device __context rpc session_id vdi_dst `RW - (fun device_dst -> - Sparse_dd_wrapper.dd ~progress_cb ?base sparse device_src device_dst size - ) - else - (* Create a new subtask for the inter-host sparse_dd. Without - * this there was a race in VM.copy, as both VDI.copy and VM.copy - * would be waiting on the same VDI.copy task. - * - * Now, VDI.copy waits for the sparse_dd task, and VM.copy in turn - * waits for the VDI.copy task. - * - * Note that progress updates are still applied directly to the - * VDI.copy task. 
*) - Server_helpers.exec_with_subtask ~__context ~task_in_database:true "sparse_dd" - (fun ~__context -> - let import_task_id = Context.get_task_id __context in - let remote_uri = - import_vdi_url ~__context ~prefer_slaves:true - rpc session_id import_task_id vdi_dst - in - debug "remote_uri = %s" remote_uri; - try - Sparse_dd_wrapper.dd ~progress_cb ?base sparse device_src remote_uri size; - Tasks.wait_for_all ~rpc ~session_id ~tasks:[import_task_id]; - match Db.Task.get_status ~__context ~self:import_task_id with - | `success -> () - | _ -> - begin match Db.Task.get_error_info ~__context ~self:import_task_id with - | [] -> (* This should never happen *) - failwith("Copy of VDI to remote failed with unspecified error!") - | code :: params -> - debug "Copy of VDI to remote failed: %s [ %s ]" code (String.concat "; " params); - raise (Api_errors.Server_error (code, params)) - end - with e -> - Tasks.wait_for_all ~rpc ~session_id ~tasks:[import_task_id]; - raise e - ) - ) - with - | Unix.Unix_error(Unix.EIO, _, _) as e -> - let e' = Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"]) in - Backtrace.reraise e e' - | e -> - Backtrace.is_important e; - raise e in - match base with - | None -> copy None - | Some base_vdi -> - with_block_attached_device __context rpc session_id base_vdi `RO - (fun device_base -> copy (Some device_base)) - ) + debug "Sm_fs_ops.copy_vdi: %s-copying %Ld%s preserving sparseness %s" + (if local_copy then "locally" else "remotely") + size + (if sparse then "" else " NOT") + (match base with None -> "" | Some x -> Printf.sprintf "copying only differences from %s" (Ref.string_of x)); + let progress_cb progress = + TaskHelper.exn_if_cancelling ~__context; + TaskHelper.operate_on_db_task ~__context + (fun self -> Db.Task.set_progress ~__context ~self ~value:progress) in + let copy base = + try + with_block_attached_device __context rpc session_id vdi_src `RO + (fun device_src -> + if local_copy + then with_block_attached_device 
__context rpc session_id vdi_dst `RW + (fun device_dst -> + Sparse_dd_wrapper.dd ~progress_cb ?base sparse device_src device_dst size + ) + else + (* Create a new subtask for the inter-host sparse_dd. Without + * this there was a race in VM.copy, as both VDI.copy and VM.copy + * would be waiting on the same VDI.copy task. + * + * Now, VDI.copy waits for the sparse_dd task, and VM.copy in turn + * waits for the VDI.copy task. + * + * Note that progress updates are still applied directly to the + * VDI.copy task. *) + Server_helpers.exec_with_subtask ~__context ~task_in_database:true "sparse_dd" + (fun ~__context -> + let import_task_id = Context.get_task_id __context in + let remote_uri = + import_vdi_url ~__context ~prefer_slaves:true + rpc session_id import_task_id vdi_dst + in + debug "remote_uri = %s" remote_uri; + try + Sparse_dd_wrapper.dd ~progress_cb ?base sparse device_src remote_uri size; + Tasks.wait_for_all ~rpc ~session_id ~tasks:[import_task_id]; + match Db.Task.get_status ~__context ~self:import_task_id with + | `success -> () + | _ -> + begin match Db.Task.get_error_info ~__context ~self:import_task_id with + | [] -> (* This should never happen *) + failwith("Copy of VDI to remote failed with unspecified error!") + | code :: params -> + debug "Copy of VDI to remote failed: %s [ %s ]" code (String.concat "; " params); + raise (Api_errors.Server_error (code, params)) + end + with e -> + Tasks.wait_for_all ~rpc ~session_id ~tasks:[import_task_id]; + raise e + ) + ) + with + | Unix.Unix_error(Unix.EIO, _, _) as e -> + let e' = Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"]) in + Backtrace.reraise e e' + | e -> + Backtrace.is_important e; + raise e in + match base with + | None -> copy None + | Some base_vdi -> + with_block_attached_device __context rpc session_id base_vdi `RO + (fun device_base -> copy (Some device_base)) + ) diff --git a/ocaml/xapi/sm_fs_ops.mli b/ocaml/xapi/sm_fs_ops.mli index b48779189c2..f59a2fa0807 100644 --- 
a/ocaml/xapi/sm_fs_ops.mli +++ b/ocaml/xapi/sm_fs_ops.mli @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) val with_block_attached_devices : Context.t -> (Rpc.call -> Rpc.response) -> API.ref_session -> API.ref_VDI list -> API.vbd_mode -> (string list -> 'a) -> 'a val with_block_attached_device : Context.t -> (Rpc.call -> Rpc.response) -> API.ref_session -> API.ref_VDI -> API.vbd_mode -> (string -> 'a) -> 'a diff --git a/ocaml/xapi/smint.ml b/ocaml/xapi/smint.ml index dd68232b2b3..97e94f205db 100644 --- a/ocaml/xapi/smint.ml +++ b/ocaml/xapi/smint.ml @@ -13,35 +13,35 @@ *) (** * @group Storage - *) - +*) + module D=Debug.Make(struct let name="smint" end) open D type vdi_info = { - vdi_info_uuid: string option; - vdi_info_location: string; + vdi_info_uuid: string option; + vdi_info_location: string; } -let make_vdi_info ~location ?uuid () = +let make_vdi_info ~location ?uuid () = { vdi_info_uuid = uuid; vdi_info_location = location; } (** Very primitive first attempt at a set of backend features *) type capability = - | Sr_create | Sr_delete | Sr_attach | Sr_detach | Sr_scan | Sr_probe | Sr_update - | Sr_supports_local_caching - | Sr_stats - | Sr_metadata - | Sr_trim - | Vdi_create | Vdi_delete | Vdi_attach | Vdi_detach | Vdi_mirror - | Vdi_clone | Vdi_snapshot | Vdi_resize | Vdi_activate | Vdi_deactivate - | Vdi_update | Vdi_introduce - | Vdi_resize_online - | Vdi_generate_config - | Vdi_attach_offline - | Vdi_reset_on_boot + | Sr_create | Sr_delete | Sr_attach | Sr_detach | Sr_scan | Sr_probe | Sr_update + | Sr_supports_local_caching + | Sr_stats + | Sr_metadata + | Sr_trim + | Vdi_create | Vdi_delete | Vdi_attach | Vdi_detach | Vdi_mirror + | Vdi_clone | Vdi_snapshot | Vdi_resize | Vdi_activate | Vdi_deactivate + | Vdi_update | Vdi_introduce + | Vdi_resize_online + | Vdi_generate_config + | Vdi_attach_offline + | Vdi_reset_on_boot type feature = capability * int64 @@ -58,35 +58,35 @@ let all_capabilites = ] let string_to_capability_table = [ - "SR_PROBE", 
Sr_probe; - "SR_UPDATE", Sr_update; - "SR_SUPPORTS_LOCAL_CACHING", Sr_supports_local_caching; - "SR_METADATA", Sr_metadata; - "SR_TRIM", Sr_trim; - "VDI_CREATE", Vdi_create; - "VDI_DELETE", Vdi_delete; - "VDI_ATTACH", Vdi_attach; - "VDI_DETACH", Vdi_detach; - "VDI_MIRROR", Vdi_mirror; - "VDI_RESIZE", Vdi_resize; - "VDI_RESIZE_ONLINE",Vdi_resize_online; - "VDI_CLONE", Vdi_clone; - "VDI_SNAPSHOT", Vdi_snapshot; - "VDI_ACTIVATE", Vdi_activate; - "VDI_DEACTIVATE", Vdi_deactivate; - "VDI_UPDATE", Vdi_update; - "VDI_INTRODUCE", Vdi_introduce; - "VDI_GENERATE_CONFIG", Vdi_generate_config; - "VDI_ATTACH_OFFLINE", Vdi_attach_offline; - "VDI_RESET_ON_BOOT", Vdi_reset_on_boot; - "SR_STATS", Sr_stats; + "SR_PROBE", Sr_probe; + "SR_UPDATE", Sr_update; + "SR_SUPPORTS_LOCAL_CACHING", Sr_supports_local_caching; + "SR_METADATA", Sr_metadata; + "SR_TRIM", Sr_trim; + "VDI_CREATE", Vdi_create; + "VDI_DELETE", Vdi_delete; + "VDI_ATTACH", Vdi_attach; + "VDI_DETACH", Vdi_detach; + "VDI_MIRROR", Vdi_mirror; + "VDI_RESIZE", Vdi_resize; + "VDI_RESIZE_ONLINE",Vdi_resize_online; + "VDI_CLONE", Vdi_clone; + "VDI_SNAPSHOT", Vdi_snapshot; + "VDI_ACTIVATE", Vdi_activate; + "VDI_DEACTIVATE", Vdi_deactivate; + "VDI_UPDATE", Vdi_update; + "VDI_INTRODUCE", Vdi_introduce; + "VDI_GENERATE_CONFIG", Vdi_generate_config; + "VDI_ATTACH_OFFLINE", Vdi_attach_offline; + "VDI_RESET_ON_BOOT", Vdi_reset_on_boot; + "SR_STATS", Sr_stats; ] let capability_to_string_table = List.map (fun (k, v) -> v, k) string_to_capability_table let string_of_capability c = List.assoc c capability_to_string_table let string_of_feature (c,v) = - Printf.sprintf "%s/%Ld" (string_of_capability c) v + Printf.sprintf "%s/%Ld" (string_of_capability c) v let has_feature (f : feature) fl = List.mem f fl @@ -95,67 +95,67 @@ let has_capability (c : capability) fl = List.mem_assoc c fl let capability_of_feature : feature -> capability = fst let parse_string_int64_features strings = - let text_features = - List.filter - (fun s -> - let s = 
List.hd (Stdext.Xstringext.String.split '/' s) in - let p = List.mem s (List.map fst string_to_capability_table) in - if not p then debug "SM.feature: unknown feature %s" s; - p) - strings in - List.map - (fun c -> - match Stdext.Xstringext.String.split '/' c with - | [] -> failwith "parse_feature" (* not possible *) - | [cs] -> (cs, 1L) (* default version *) - | [cs; vs] - | cs :: vs :: _ -> - try - let v = int_of_string vs in - (cs, if v < 1 then 1L else Int64.of_int v) - with _ -> - debug "SM.feature %s has bad version %s, defaulting to 1" cs vs; - (cs, 1L)) - text_features + let text_features = + List.filter + (fun s -> + let s = List.hd (Stdext.Xstringext.String.split '/' s) in + let p = List.mem s (List.map fst string_to_capability_table) in + if not p then debug "SM.feature: unknown feature %s" s; + p) + strings in + List.map + (fun c -> + match Stdext.Xstringext.String.split '/' c with + | [] -> failwith "parse_feature" (* not possible *) + | [cs] -> (cs, 1L) (* default version *) + | [cs; vs] + | cs :: vs :: _ -> + try + let v = int_of_string vs in + (cs, if v < 1 then 1L else Int64.of_int v) + with _ -> + debug "SM.feature %s has bad version %s, defaulting to 1" cs vs; + (cs, 1L)) + text_features let parse_capability_int64_features strings = - List.map - (function c,v -> - ((List.assoc c string_to_capability_table), v)) - (parse_string_int64_features strings) + List.map + (function c,v -> + ((List.assoc c string_to_capability_table), v)) + (parse_string_int64_features strings) type sr_driver_info = { - sr_driver_filename: string; - sr_driver_name: string; - sr_driver_description: string; - sr_driver_vendor: string; - sr_driver_copyright: string; - sr_driver_version: string; - sr_driver_required_api_version: string; - sr_driver_features: feature list; - sr_driver_text_features: string list; - sr_driver_configuration: (string * string) list; - sr_driver_required_cluster_stack: string list; + sr_driver_filename: string; + sr_driver_name: string; + 
sr_driver_description: string; + sr_driver_vendor: string; + sr_driver_copyright: string; + sr_driver_version: string; + sr_driver_required_api_version: string; + sr_driver_features: feature list; + sr_driver_text_features: string list; + sr_driver_configuration: (string * string) list; + sr_driver_required_cluster_stack: string list; } let query_result_of_sr_driver_info x = { - Storage_interface.driver = x.sr_driver_filename; - name = x.sr_driver_name; - description = x.sr_driver_description; - vendor = x.sr_driver_vendor; - copyright = x.sr_driver_copyright; - version = x.sr_driver_version; - required_api_version = x.sr_driver_required_api_version; - features = x.sr_driver_text_features; - configuration = x.sr_driver_configuration; - required_cluster_stack = x.sr_driver_required_cluster_stack; + Storage_interface.driver = x.sr_driver_filename; + name = x.sr_driver_name; + description = x.sr_driver_description; + vendor = x.sr_driver_vendor; + copyright = x.sr_driver_copyright; + version = x.sr_driver_version; + required_api_version = x.sr_driver_required_api_version; + features = x.sr_driver_text_features; + configuration = x.sr_driver_configuration; + required_cluster_stack = x.sr_driver_required_cluster_stack; } type attach_info = { - params : string; - o_direct : bool; - o_direct_reason : string; - xenstore_data : (string * string) list; + params : string; + o_direct : bool; + o_direct_reason : string; + xenstore_data : (string * string) list; } @@ -175,5 +175,5 @@ exception Device_in_use type request = string option let string_of_request = function - | Some x -> Printf.sprintf "Some %s" x - | None -> "None" + | Some x -> Printf.sprintf "Some %s" x + | None -> "None" diff --git a/ocaml/xapi/sparse_dd_wrapper.ml b/ocaml/xapi/sparse_dd_wrapper.ml index 9bb89edee44..6a4850aa3f6 100644 --- a/ocaml/xapi/sparse_dd_wrapper.ml +++ b/ocaml/xapi/sparse_dd_wrapper.ml @@ -22,9 +22,9 @@ module D=Debug.Make(struct let name="xapi" end) open D type progress = - | Started of 
Forkhelpers.pidty - | Continuing of float - | Finished of exn option + | Started of Forkhelpers.pidty + | Continuing of float + | Finished of exn option type t = { m : Mutex.t; @@ -58,135 +58,135 @@ exception Cancelled (** Use the new external sparse_dd program *) let dd_internal progress_cb base prezeroed infile outfile size = - let pipe_read, pipe_write = Unix.pipe () in - let to_close = ref [ pipe_read; pipe_write ] in - let close x = if List.mem x !to_close then (Unix.close x; to_close := List.filter (fun y -> y <> x) !to_close) in - Pervasiveext.finally - (fun () -> - try match Forkhelpers.with_logfile_fd "sparse_dd" - (fun log_fd -> - let sparse_dd_path = !Xapi_globs.sparse_dd in - let args = [ - "-machine"; - "-src"; infile; - "-dest"; outfile; - "-size"; Int64.to_string size; - "-good-ciphersuites"; (match !Xapi_globs.ciphersuites_good_outbound with - | Some s -> s - | None -> raise (Api_errors.Server_error - (Api_errors.internal_error,["Vdi_copy found no good ciphersuites in Xapi_globs."])) - ); - "-legacy-ciphersuites"; !Xapi_globs.ciphersuites_legacy_outbound - ] @ (if Stunnel.is_legacy_protocol_and_ciphersuites_allowed () then [ "-ssl-legacy" ] else [] - ) @ (if prezeroed then [ "-prezeroed" ] else [] - ) @ (Opt.default [] (Opt.map (fun x -> [ "-base"; x ]) base)) in - debug "%s %s" sparse_dd_path (String.concat " " args); - let pid = Forkhelpers.safe_close_and_exec None (Some pipe_write) (Some log_fd) [] - sparse_dd_path args in - let intpid = Forkhelpers.getpid pid in - State.add intpid; - close pipe_write; - progress_cb (Started pid); - (* Read Progress: output from the binary *) - let open Sparse_encoding in - Chunk.fold - (fun () chunk -> - debug "sparse_dd: %s" chunk.Chunk.data; - try - Scanf.sscanf chunk.Chunk.data "Progress: %d" - (fun progress -> - progress_cb (Continuing (float_of_int progress /. 
100.)) - ) - with e -> begin - Unix.kill (Forkhelpers.getpid pid) Sys.sigterm; - raise e - end - ) () pipe_read; - let r = Forkhelpers.waitpid pid in - State.remove intpid; - match r with - | (_, Unix.WEXITED 0) -> progress_cb (Finished None) - | (_, Unix.WEXITED 5) -> error "sparse_dd received NBD error"; failwith "sparse_dd NBD error" - | (_, Unix.WEXITED n) -> error "sparse_dd exit: %d" n; failwith "sparse_dd" - | _ -> error "sparse_dd exit with WSTOPPED or WSIGNALED"; failwith "sparse_dd" - ) with - | Forkhelpers.Success _ -> progress_cb (Finished None) - | Forkhelpers.Failure (log, exn) -> - error "Failure from sparse_dd: %s raising %s" log (Printexc.to_string exn); - raise (Api_errors.Server_error ((Api_errors.vdi_copy_failed , [Printexc.to_string exn]))); - with e -> - progress_cb (Finished (Some e)); - raise e - ) - (fun () -> - close pipe_read; - close pipe_write) + let pipe_read, pipe_write = Unix.pipe () in + let to_close = ref [ pipe_read; pipe_write ] in + let close x = if List.mem x !to_close then (Unix.close x; to_close := List.filter (fun y -> y <> x) !to_close) in + Pervasiveext.finally + (fun () -> + try match Forkhelpers.with_logfile_fd "sparse_dd" + (fun log_fd -> + let sparse_dd_path = !Xapi_globs.sparse_dd in + let args = [ + "-machine"; + "-src"; infile; + "-dest"; outfile; + "-size"; Int64.to_string size; + "-good-ciphersuites"; (match !Xapi_globs.ciphersuites_good_outbound with + | Some s -> s + | None -> raise (Api_errors.Server_error + (Api_errors.internal_error,["Vdi_copy found no good ciphersuites in Xapi_globs."])) + ); + "-legacy-ciphersuites"; !Xapi_globs.ciphersuites_legacy_outbound + ] @ (if Stunnel.is_legacy_protocol_and_ciphersuites_allowed () then [ "-ssl-legacy" ] else [] + ) @ (if prezeroed then [ "-prezeroed" ] else [] + ) @ (Opt.default [] (Opt.map (fun x -> [ "-base"; x ]) base)) in + debug "%s %s" sparse_dd_path (String.concat " " args); + let pid = Forkhelpers.safe_close_and_exec None (Some pipe_write) (Some log_fd) [] + 
sparse_dd_path args in + let intpid = Forkhelpers.getpid pid in + State.add intpid; + close pipe_write; + progress_cb (Started pid); + (* Read Progress: output from the binary *) + let open Sparse_encoding in + Chunk.fold + (fun () chunk -> + debug "sparse_dd: %s" chunk.Chunk.data; + try + Scanf.sscanf chunk.Chunk.data "Progress: %d" + (fun progress -> + progress_cb (Continuing (float_of_int progress /. 100.)) + ) + with e -> begin + Unix.kill (Forkhelpers.getpid pid) Sys.sigterm; + raise e + end + ) () pipe_read; + let r = Forkhelpers.waitpid pid in + State.remove intpid; + match r with + | (_, Unix.WEXITED 0) -> progress_cb (Finished None) + | (_, Unix.WEXITED 5) -> error "sparse_dd received NBD error"; failwith "sparse_dd NBD error" + | (_, Unix.WEXITED n) -> error "sparse_dd exit: %d" n; failwith "sparse_dd" + | _ -> error "sparse_dd exit with WSTOPPED or WSIGNALED"; failwith "sparse_dd" + ) with + | Forkhelpers.Success _ -> progress_cb (Finished None) + | Forkhelpers.Failure (log, exn) -> + error "Failure from sparse_dd: %s raising %s" log (Printexc.to_string exn); + raise (Api_errors.Server_error ((Api_errors.vdi_copy_failed , [Printexc.to_string exn]))); + with e -> + progress_cb (Finished (Some e)); + raise e + ) + (fun () -> + close pipe_read; + close pipe_write) let dd ?(progress_cb=(fun _ -> ())) ?base prezeroed = - dd_internal (function | Continuing x -> progress_cb x | _ -> ()) base prezeroed + dd_internal (function | Continuing x -> progress_cb x | _ -> ()) base prezeroed let start ?(progress_cb=(fun _ -> ())) ?base prezeroed infile outfile size = - let m = Mutex.create () in - let c = Condition.create () in - let pid = ref None in - let finished = ref false in - let cancelled = ref false in - let exn = ref None in - let thread_progress_cb = function - | Started pid' -> - pid := Some pid'; - Mutex.execute m (fun () -> Condition.broadcast c) - | Continuing progress -> progress_cb progress - | Finished exn' -> - finished := true; - exn := exn'; - 
Mutex.execute m (fun () -> Condition.broadcast c) - in - let _ = Thread.create (fun () -> - dd_internal thread_progress_cb base prezeroed infile outfile size) () in - Mutex.execute m (fun () -> - while (!pid = None) && (!finished = false) && (!cancelled = false) do - Condition.wait c m - done); - match (!pid,!exn) with - | Some pid, None -> - {m; c; pid; finished; cancelled; exn} - | _, Some e -> - raise e - | _ -> - failwith "Unexpected error in start_dd" + let m = Mutex.create () in + let c = Condition.create () in + let pid = ref None in + let finished = ref false in + let cancelled = ref false in + let exn = ref None in + let thread_progress_cb = function + | Started pid' -> + pid := Some pid'; + Mutex.execute m (fun () -> Condition.broadcast c) + | Continuing progress -> progress_cb progress + | Finished exn' -> + finished := true; + exn := exn'; + Mutex.execute m (fun () -> Condition.broadcast c) + in + let _ = Thread.create (fun () -> + dd_internal thread_progress_cb base prezeroed infile outfile size) () in + Mutex.execute m (fun () -> + while (!pid = None) && (!finished = false) && (!cancelled = false) do + Condition.wait c m + done); + match (!pid,!exn) with + | Some pid, None -> + {m; c; pid; finished; cancelled; exn} + | _, Some e -> + raise e + | _ -> + failwith "Unexpected error in start_dd" let wait t = - Mutex.execute t.m (fun () -> - while (!(t.finished) = false) do - Condition.wait t.c t.m - done); - if !(t.cancelled) then raise Cancelled; - match !(t.exn) with - | Some exn -> raise exn - | None -> () + Mutex.execute t.m (fun () -> + while (!(t.finished) = false) do + Condition.wait t.c t.m + done); + if !(t.cancelled) then raise Cancelled; + match !(t.exn) with + | Some exn -> raise exn + | None -> () let cancel t = - t.cancelled := true; - let pid = Forkhelpers.getpid t.pid in - try Unix.kill pid Sys.sigkill with _ -> () + t.cancelled := true; + let pid = Forkhelpers.getpid t.pid in + try Unix.kill pid Sys.sigkill with _ -> () (* This function 
will kill all sparse_dd pids that have been started by xapi *) (* Only to be used on xapi restart *) let killall () = - let pids = State.list () in - List.iter (fun pid -> - try - Pervasiveext.finally - (fun () -> - let exe = Unix.readlink (Printf.sprintf "/proc/%d/exe" pid) in - debug "checking pid %d exe=%s globs=%s" pid exe !Xapi_globs.sparse_dd; - if Filename.basename exe = Filename.basename !Xapi_globs.sparse_dd - then Unix.kill pid Sys.sigkill - else ()) - (fun () -> - State.remove pid) - with _ -> () - ) pids + let pids = State.list () in + List.iter (fun pid -> + try + Pervasiveext.finally + (fun () -> + let exe = Unix.readlink (Printf.sprintf "/proc/%d/exe" pid) in + debug "checking pid %d exe=%s globs=%s" pid exe !Xapi_globs.sparse_dd; + if Filename.basename exe = Filename.basename !Xapi_globs.sparse_dd + then Unix.kill pid Sys.sigkill + else ()) + (fun () -> + State.remove pid) + with _ -> () + ) pids diff --git a/ocaml/xapi/sparse_encoding.ml b/ocaml/xapi/sparse_encoding.ml index 4afa88c6566..c7ce673ac59 100644 --- a/ocaml/xapi/sparse_encoding.ml +++ b/ocaml/xapi/sparse_encoding.ml @@ -13,111 +13,111 @@ *) (** Utility functions for reading and writing disk blocks to/from a network stream. 
* @group Import and Export - *) +*) module Unmarshal = struct - let int64 (s, offset) = - let (<<<) a b = Int64.shift_left a b - and (|||) a b = Int64.logor a b in - let a = Int64.of_int (int_of_char (s.[offset + 0])) - and b = Int64.of_int (int_of_char (s.[offset + 1])) - and c = Int64.of_int (int_of_char (s.[offset + 2])) - and d = Int64.of_int (int_of_char (s.[offset + 3])) - and e = Int64.of_int (int_of_char (s.[offset + 4])) - and f = Int64.of_int (int_of_char (s.[offset + 5])) - and g = Int64.of_int (int_of_char (s.[offset + 6])) - and h = Int64.of_int (int_of_char (s.[offset + 7])) in - (a <<< 0) ||| (b <<< 8) ||| (c <<< 16) ||| (d <<< 24) ||| (e <<< 32) ||| (f <<< 40) ||| (g <<< 48) ||| (h <<< 56), - (s, offset + 8) - let int32 (s, offset) = - let (<<<) a b = Int32.shift_left a b - and (|||) a b = Int32.logor a b in - let a = Int32.of_int (int_of_char (s.[offset + 0])) - and b = Int32.of_int (int_of_char (s.[offset + 1])) - and c = Int32.of_int (int_of_char (s.[offset + 2])) - and d = Int32.of_int (int_of_char (s.[offset + 3])) in - (a <<< 0) ||| (b <<< 8) ||| (c <<< 16) ||| (d <<< 24), (s, offset + 4) + let int64 (s, offset) = + let (<<<) a b = Int64.shift_left a b + and (|||) a b = Int64.logor a b in + let a = Int64.of_int (int_of_char (s.[offset + 0])) + and b = Int64.of_int (int_of_char (s.[offset + 1])) + and c = Int64.of_int (int_of_char (s.[offset + 2])) + and d = Int64.of_int (int_of_char (s.[offset + 3])) + and e = Int64.of_int (int_of_char (s.[offset + 4])) + and f = Int64.of_int (int_of_char (s.[offset + 5])) + and g = Int64.of_int (int_of_char (s.[offset + 6])) + and h = Int64.of_int (int_of_char (s.[offset + 7])) in + (a <<< 0) ||| (b <<< 8) ||| (c <<< 16) ||| (d <<< 24) ||| (e <<< 32) ||| (f <<< 40) ||| (g <<< 48) ||| (h <<< 56), + (s, offset + 8) + let int32 (s, offset) = + let (<<<) a b = Int32.shift_left a b + and (|||) a b = Int32.logor a b in + let a = Int32.of_int (int_of_char (s.[offset + 0])) + and b = Int32.of_int (int_of_char 
(s.[offset + 1])) + and c = Int32.of_int (int_of_char (s.[offset + 2])) + and d = Int32.of_int (int_of_char (s.[offset + 3])) in + (a <<< 0) ||| (b <<< 8) ||| (c <<< 16) ||| (d <<< 24), (s, offset + 4) end module Marshal = struct - let int64 x = - let (>>>) a b = Int64.shift_right_logical a b - and (&&&) a b = Int64.logand a b in - let a = (x >>> 0) &&& 0xffL - and b = (x >>> 8) &&& 0xffL - and c = (x >>> 16) &&& 0xffL - and d = (x >>> 24) &&& 0xffL - and e = (x >>> 32) &&& 0xffL - and f = (x >>> 40) &&& 0xffL - and g = (x >>> 48) &&& 0xffL - and h = (x >>> 56) &&& 0xffL in - let result = String.make 8 '\000' in - result.[0] <- char_of_int (Int64.to_int a); - result.[1] <- char_of_int (Int64.to_int b); - result.[2] <- char_of_int (Int64.to_int c); - result.[3] <- char_of_int (Int64.to_int d); - result.[4] <- char_of_int (Int64.to_int e); - result.[5] <- char_of_int (Int64.to_int f); - result.[6] <- char_of_int (Int64.to_int g); - result.[7] <- char_of_int (Int64.to_int h); - result - let int32 x = - let (>>>) a b = Int32.shift_right_logical a b - and (&&&) a b = Int32.logand a b in - let a = (x >>> 0) &&& 0xffl - and b = (x >>> 8) &&& 0xffl - and c = (x >>> 16) &&& 0xffl - and d = (x >>> 24) &&& 0xffl in - let result = String.make 4 '\000' in - result.[0] <- char_of_int (Int32.to_int a); - result.[1] <- char_of_int (Int32.to_int b); - result.[2] <- char_of_int (Int32.to_int c); - result.[3] <- char_of_int (Int32.to_int d); - result + let int64 x = + let (>>>) a b = Int64.shift_right_logical a b + and (&&&) a b = Int64.logand a b in + let a = (x >>> 0) &&& 0xffL + and b = (x >>> 8) &&& 0xffL + and c = (x >>> 16) &&& 0xffL + and d = (x >>> 24) &&& 0xffL + and e = (x >>> 32) &&& 0xffL + and f = (x >>> 40) &&& 0xffL + and g = (x >>> 48) &&& 0xffL + and h = (x >>> 56) &&& 0xffL in + let result = String.make 8 '\000' in + result.[0] <- char_of_int (Int64.to_int a); + result.[1] <- char_of_int (Int64.to_int b); + result.[2] <- char_of_int (Int64.to_int c); + result.[3] <- 
char_of_int (Int64.to_int d); + result.[4] <- char_of_int (Int64.to_int e); + result.[5] <- char_of_int (Int64.to_int f); + result.[6] <- char_of_int (Int64.to_int g); + result.[7] <- char_of_int (Int64.to_int h); + result + let int32 x = + let (>>>) a b = Int32.shift_right_logical a b + and (&&&) a b = Int32.logand a b in + let a = (x >>> 0) &&& 0xffl + and b = (x >>> 8) &&& 0xffl + and c = (x >>> 16) &&& 0xffl + and d = (x >>> 24) &&& 0xffl in + let result = String.make 4 '\000' in + result.[0] <- char_of_int (Int32.to_int a); + result.[1] <- char_of_int (Int32.to_int b); + result.[2] <- char_of_int (Int32.to_int c); + result.[3] <- char_of_int (Int32.to_int d); + result end module Chunk = struct - (** Represents an single block of data to write *) - type t = { - start: int64; - data: string; - } + (** Represents a single block of data to write *) + type t = { + start: int64; + data: string; + } - let really_write fd offset buf off len = - let n = Unix.write fd buf off len in - if n < len - then failwith (Printf.sprintf "Short write: attempted to write %d bytes at %Ld, only wrote %d" len offset n) + let really_write fd offset buf off len = + let n = Unix.write fd buf off len in + if n < len + then failwith (Printf.sprintf "Short write: attempted to write %d bytes at %Ld, only wrote %d" len offset n) - (** Writes a single block of data to the output device *) - let write fd x = - ignore(Unix.LargeFile.lseek fd x.start Unix.SEEK_SET); - really_write fd x.start x.data 0 (String.length x.data) + (** Writes a single block of data to the output device *) + let write fd x = + ignore(Unix.LargeFile.lseek fd x.start Unix.SEEK_SET); + really_write fd x.start x.data 0 (String.length x.data) - (** Reads a type t from a file descriptor *) - let unmarshal fd = - let buf = String.make 12 '\000' in - Stdext.Unixext.really_read fd buf 0 (String.length buf); - let stream = (buf, 0) in - let start, stream = Unmarshal.int64 stream in - let len, stream = Unmarshal.int32 stream in - 
let payload = String.make (Int32.to_int len) '\000' in - Stdext.Unixext.really_read fd payload 0 (String.length payload); - { start = start; data = payload } + (** Reads a type t from a file descriptor *) + let unmarshal fd = + let buf = String.make 12 '\000' in + Stdext.Unixext.really_read fd buf 0 (String.length buf); + let stream = (buf, 0) in + let start, stream = Unmarshal.int64 stream in + let len, stream = Unmarshal.int32 stream in + let payload = String.make (Int32.to_int len) '\000' in + Stdext.Unixext.really_read fd payload 0 (String.length payload); + { start = start; data = payload } - (** Writes a type t from a file descriptor *) - let marshal fd x = - let start' = Marshal.int64 x.start in - let len' = Marshal.int32 (Int32.of_int (String.length x.data)) in - really_write fd 0L start' 0 (String.length start'); - really_write fd 8L len' 0 (String.length len'); - really_write fd 12L x.data 0 (String.length x.data) + (** Writes a type t from a file descriptor *) + let marshal fd x = + let start' = Marshal.int64 x.start in + let len' = Marshal.int32 (Int32.of_int (String.length x.data)) in + really_write fd 0L start' 0 (String.length start'); + really_write fd 8L len' 0 (String.length len'); + really_write fd 12L x.data 0 (String.length x.data) - (** Fold [f] across all ts unmarshalled from [fd] *) - let rec fold f init fd = - let x = unmarshal fd in - if x.data = "" - then init - else fold f (f init x) fd + (** Fold [f] across all ts unmarshalled from [fd] *) + let rec fold f init fd = + let x = unmarshal fd in + if x.data = "" + then init + else fold f (f init x) fd end diff --git a/ocaml/xapi/startup.ml b/ocaml/xapi/startup.ml index 655891ab689..2515824d345 100644 --- a/ocaml/xapi/startup.ml +++ b/ocaml/xapi/startup.ml @@ -13,10 +13,10 @@ *) (** * @group Main Loop and Start-up - *) +*) open Stdext.Threadext - + module D=Debug.Make(struct let name="startup" end) open D @@ -24,10 +24,10 @@ type flag = OnlyMaster | OnlySlave | NoExnRaising | OnThread let 
thread_exn_wrapper thread_name f = begin try - f (); - with exn -> - warn "thread [%s] dying on exception: %s" thread_name (Printexc.to_string exn); - raise exn + f (); + with exn -> + warn "thread [%s] dying on exception: %s" thread_name (Printexc.to_string exn); + raise exn end; warn "thread [%s] died" thread_name; () @@ -43,46 +43,46 @@ let run ~__context tasks = let only_master = ref false and only_slave = ref false and exnraise = ref true and onthread = ref false in List.iter (fun flag -> - match flag with - | OnlyMaster -> only_master := true - | OnlySlave -> only_slave := true - | NoExnRaising -> exnraise := false - | OnThread -> onthread := true - ) flags; + match flag with + | OnlyMaster -> only_master := true + | OnlySlave -> only_slave := true + | NoExnRaising -> exnraise := false + | OnThread -> onthread := true + ) flags; !only_master, !only_slave, !exnraise, !onthread - in + in (* get pool role status *) let is_master = Pool_role.is_master() in (* iterate tasks *) List.iter (fun (tsk_name, tsk_flags, tsk_fct) -> - (* Wrap the function with a timer *) - let tsk_fct () = Stats.time_this tsk_name tsk_fct in + (* Wrap the function with a timer *) + let tsk_fct () = Stats.time_this tsk_name tsk_fct in - let only_master, only_slave, exnraise, onthread = get_flags_of_list tsk_flags in - try - if (only_master && is_master) - || (only_slave && (not is_master)) - || ((not only_slave) && (not only_master)) then ( - if not dummy_task then begin - Db.Task.remove_from_other_config ~__context ~self:task_id ~key:"startup_operation"; - Db.Task.add_to_other_config ~__context ~self:task_id ~key:"startup_operation" ~value:tsk_name - end; - if onthread then ( - debug "task [starting thread %s]" tsk_name; - ignore (Thread.create (fun tsk_fct -> - Server_helpers.exec_with_new_task ~subtask_of:(Context.get_task_id __context) tsk_name (fun __context -> - thread_exn_wrapper tsk_name tsk_fct)) tsk_fct) - ) else ( - debug "task [%s]" tsk_name; - Server_helpers.exec_with_new_task 
tsk_name ~subtask_of:(Context.get_task_id __context) (fun __context -> tsk_fct ()) - ) - ) - with exn -> - warn "task [%s] exception: %s" tsk_name (Printexc.to_string exn); - if exnraise then - raise exn - ) tasks + let only_master, only_slave, exnraise, onthread = get_flags_of_list tsk_flags in + try + if (only_master && is_master) + || (only_slave && (not is_master)) + || ((not only_slave) && (not only_master)) then ( + if not dummy_task then begin + Db.Task.remove_from_other_config ~__context ~self:task_id ~key:"startup_operation"; + Db.Task.add_to_other_config ~__context ~self:task_id ~key:"startup_operation" ~value:tsk_name + end; + if onthread then ( + debug "task [starting thread %s]" tsk_name; + ignore (Thread.create (fun tsk_fct -> + Server_helpers.exec_with_new_task ~subtask_of:(Context.get_task_id __context) tsk_name (fun __context -> + thread_exn_wrapper tsk_name tsk_fct)) tsk_fct) + ) else ( + debug "task [%s]" tsk_name; + Server_helpers.exec_with_new_task tsk_name ~subtask_of:(Context.get_task_id __context) (fun __context -> tsk_fct ()) + ) + ) + with exn -> + warn "task [%s] exception: %s" tsk_name (Printexc.to_string exn); + if exnraise then + raise exn + ) tasks let run ~__context tasks = Stats.time_this "overall xapi startup" (fun () -> run ~__context tasks) diff --git a/ocaml/xapi/static_vdis.ml b/ocaml/xapi/static_vdis.ml index 612f33ee3aa..2dec306cb92 100644 --- a/ocaml/xapi/static_vdis.ml +++ b/ocaml/xapi/static_vdis.ml @@ -13,7 +13,7 @@ *) (** Manage VDIs which are attached to dom0 on boot (eg HA statefile, remote database) * @group Storage - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -25,73 +25,73 @@ include Static_vdis_list (* include the vdi type and the list() function *) (** Generate the static configuration and attach the VDI now *) let permanent_vdi_attach ~__context ~vdi ~reason = - info "permanent_vdi_attach: vdi = %s; sr = %s" - (Ref.string_of vdi) (Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi)); - 
ignore (Helpers.call_script !Xapi_globs.static_vdis [ "add"; Db.VDI.get_uuid ~__context ~self:vdi; reason ]); - (* VDI will be attached on next boot; attach it now too *) - String.rtrim (Helpers.call_script !Xapi_globs.static_vdis - [ "attach"; Db.VDI.get_uuid ~__context ~self:vdi ]) + info "permanent_vdi_attach: vdi = %s; sr = %s" + (Ref.string_of vdi) (Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi)); + ignore (Helpers.call_script !Xapi_globs.static_vdis [ "add"; Db.VDI.get_uuid ~__context ~self:vdi; reason ]); + (* VDI will be attached on next boot; attach it now too *) + String.rtrim (Helpers.call_script !Xapi_globs.static_vdis + [ "attach"; Db.VDI.get_uuid ~__context ~self:vdi ]) (** Detach the VDI (by reference) now and destroy the static configuration *) let permanent_vdi_detach ~__context ~vdi = - info "permanent_vdi_detach: vdi = %s; sr = %s" - (Ref.string_of vdi) (Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi)); - Sm.call_sm_vdi_functions ~__context ~vdi - (fun srconf srtype sr -> Sm.vdi_detach srconf srtype sr vdi); - ignore(Helpers.call_script !Xapi_globs.static_vdis - [ "del"; Db.VDI.get_uuid ~__context ~self:vdi ]) + info "permanent_vdi_detach: vdi = %s; sr = %s" + (Ref.string_of vdi) (Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi)); + Sm.call_sm_vdi_functions ~__context ~vdi + (fun srconf srtype sr -> Sm.vdi_detach srconf srtype sr vdi); + ignore(Helpers.call_script !Xapi_globs.static_vdis + [ "del"; Db.VDI.get_uuid ~__context ~self:vdi ]) (** Detach the VDI (by uuid) now and destroy the static configuration *) let permanent_vdi_detach_by_uuid ~__context ~uuid = - info "permanent_vdi_detach: vdi-uuid = %s" uuid; - begin - try - (* This might fail because the VDI has been destroyed *) - let vdi = Db.VDI.get_by_uuid ~__context ~uuid in - Sm.call_sm_vdi_functions ~__context ~vdi - (fun srconf srtype sr -> Sm.vdi_detach srconf srtype sr vdi) - with e -> - warn "Ignoring exception calling SM vdi_detach for VDI uuid %s: %s (possibly VDI has been 
deleted while we were offline" uuid (ExnHelper.string_of_exn e) - end; - ignore(Helpers.call_script !Xapi_globs.static_vdis [ "del"; uuid ]) + info "permanent_vdi_detach: vdi-uuid = %s" uuid; + begin + try + (* This might fail because the VDI has been destroyed *) + let vdi = Db.VDI.get_by_uuid ~__context ~uuid in + Sm.call_sm_vdi_functions ~__context ~vdi + (fun srconf srtype sr -> Sm.vdi_detach srconf srtype sr vdi) + with e -> + warn "Ignoring exception calling SM vdi_detach for VDI uuid %s: %s (possibly VDI has been deleted while we were offline" uuid (ExnHelper.string_of_exn e) + end; + ignore(Helpers.call_script !Xapi_globs.static_vdis [ "del"; uuid ]) let detach_only vdi = - if vdi.currently_attached then begin - info "vdi_detach_by_uuid: vdi-uuid = %s" vdi.uuid; - ignore (Helpers.call_script !Xapi_globs.static_vdis ["detach"; vdi.uuid]) - end + if vdi.currently_attached then begin + info "vdi_detach_by_uuid: vdi-uuid = %s" vdi.uuid; + ignore (Helpers.call_script !Xapi_globs.static_vdis ["detach"; vdi.uuid]) + end (** Added for CA-48539. Deactivates a vdi. You should probably follow - this call with one of the previous vdi_detach functions. *) + this call with one of the previous vdi_detach functions. 
*) let permanent_vdi_deactivate_by_uuid ~__context ~uuid = - info "permanent_vdi_detach: vdi-uuid = %s" uuid ; - try - let vdi = Db.VDI.get_by_uuid ~__context ~uuid in - Sm.call_sm_vdi_functions ~__context ~vdi - (fun srconf srtype sr -> Sm.vdi_deactivate srconf srtype sr vdi) - with e -> - warn "Ignoring exception calling SM vdi_deactivate for VDI uuid %s: %s (possibly VDI has been deleted while we were offline" - uuid - (ExnHelper.string_of_exn e) + info "permanent_vdi_detach: vdi-uuid = %s" uuid ; + try + let vdi = Db.VDI.get_by_uuid ~__context ~uuid in + Sm.call_sm_vdi_functions ~__context ~vdi + (fun srconf srtype sr -> Sm.vdi_deactivate srconf srtype sr vdi) + with e -> + warn "Ignoring exception calling SM vdi_deactivate for VDI uuid %s: %s (possibly VDI has been deleted while we were offline" + uuid + (ExnHelper.string_of_exn e) (** Detaches and removes records for VDIs which have been deleted *) let gc () = - Server_helpers.exec_with_new_task "GCing on-boot VDIs" (fun __context -> - List.iter - (fun vdi -> - let exists = try ignore(Db.VDI.get_by_uuid ~__context ~uuid:vdi.uuid); true with _ -> false in - if not(exists) then begin - warn "static-vdi %s cannot be found in database; removing on-boot configuration" vdi.uuid; - (* NB we can't call the SM functions since the record has gone *) - ignore(Helpers.call_script !Xapi_globs.static_vdis [ "del"; vdi.uuid ]) - end - ) (list ())) + Server_helpers.exec_with_new_task "GCing on-boot VDIs" (fun __context -> + List.iter + (fun vdi -> + let exists = try ignore(Db.VDI.get_by_uuid ~__context ~uuid:vdi.uuid); true with _ -> false in + if not(exists) then begin + warn "static-vdi %s cannot be found in database; removing on-boot configuration" vdi.uuid; + (* NB we can't call the SM functions since the record has gone *) + ignore(Helpers.call_script !Xapi_globs.static_vdis [ "del"; vdi.uuid ]) + end + ) (list ())) (** If we just rebooted and failed to attach our static VDIs then this can be called to reattempt the 
attach: - this is necessary for HA to start. *) + this is necessary for HA to start. *) let reattempt_on_boot_attach () = - let script = "attach-static-vdis" in - try - ignore(Helpers.call_script "/sbin/service" [ script; "start" ]) - with e -> - warn "Attempt to reattach static VDIs via '%s start' failed: %s" script (ExnHelper.string_of_exn e) + let script = "attach-static-vdis" in + try + ignore(Helpers.call_script "/sbin/service" [ script; "start" ]) + with e -> + warn "Attempt to reattach static VDIs via '%s start' failed: %s" script (ExnHelper.string_of_exn e) diff --git a/ocaml/xapi/static_vdis_list.ml b/ocaml/xapi/static_vdis_list.ml index 61e480ddee1..0a064b83914 100644 --- a/ocaml/xapi/static_vdis_list.ml +++ b/ocaml/xapi/static_vdis_list.ml @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) open Stdext @@ -27,24 +27,24 @@ type vdi = { } (** Returns a list of vdi records, one for each VDI statically configured on this host *) -let list () = +let list () = (* Read the filesystem structure directly *) let main_dir = !Xapi_globs.static_vdis_dir in let all = try Array.to_list (Sys.readdir main_dir) with Sys_error _ -> [] in List.map (fun x -> - let path = Filename.concat main_dir x in - let uuid = Unixext.string_of_file (Filename.concat path "vdi-uuid") in - let reason = Unixext.string_of_file (Filename.concat path "reason") in - (* let bool_of_string x = String.lowercase x = "true" in *) - let delete_next_boot = - try ignore(Unix.stat (Filename.concat path "delete-next-boot")); true - with _ -> false in - let currently_attached = - try ignore(Unix.stat (Filename.concat path "disk")); true - with _ -> false in - let path = - try Some (Unix.readlink (Filename.concat path "disk")) - with _ -> None in - { uuid = uuid; reason = reason; delete_next_boot = delete_next_boot; - currently_attached = currently_attached; path = path }) all + let path = Filename.concat main_dir x in + let uuid = Unixext.string_of_file (Filename.concat path "vdi-uuid") in + let reason = 
Unixext.string_of_file (Filename.concat path "reason") in + (* let bool_of_string x = String.lowercase x = "true" in *) + let delete_next_boot = + try ignore(Unix.stat (Filename.concat path "delete-next-boot")); true + with _ -> false in + let currently_attached = + try ignore(Unix.stat (Filename.concat path "disk")); true + with _ -> false in + let path = + try Some (Unix.readlink (Filename.concat path "disk")) + with _ -> None in + { uuid = uuid; reason = reason; delete_next_boot = delete_next_boot; + currently_attached = currently_attached; path = path }) all diff --git a/ocaml/xapi/storage_access.ml b/ocaml/xapi/storage_access.ml index 8dc3934a115..d60b71214dc 100644 --- a/ocaml/xapi/storage_access.ml +++ b/ocaml/xapi/storage_access.ml @@ -24,881 +24,881 @@ module D=Debug.Make(struct let name="storage_access" end) open D let transform_storage_exn f = - try - f () - with - | Backend_error(code, params) as e -> - Backtrace.reraise e (Api_errors.Server_error(code, params)) - | Backend_error_with_backtrace(code, backtrace :: params) as e -> - let backtrace = Backtrace.Interop.of_json "SM" backtrace in - Backtrace.add e backtrace; - Backtrace.reraise e (Api_errors.Server_error(code, params)) - | Api_errors.Server_error(code, params) as e -> raise e - | No_storage_plugin_for_sr sr as e -> - Server_helpers.exec_with_new_task "transform_storage_exn" - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - Backtrace.reraise e (Api_errors.Server_error(Api_errors.sr_not_attached, [ Ref.string_of sr ])) - ) - | e -> - Backtrace.reraise e (Api_errors.Server_error(Api_errors.internal_error, [ Printexc.to_string e ])) + try + f () + with + | Backend_error(code, params) as e -> + Backtrace.reraise e (Api_errors.Server_error(code, params)) + | Backend_error_with_backtrace(code, backtrace :: params) as e -> + let backtrace = Backtrace.Interop.of_json "SM" backtrace in + Backtrace.add e backtrace; + Backtrace.reraise e (Api_errors.Server_error(code, params)) + | 
Api_errors.Server_error(code, params) as e -> raise e + | No_storage_plugin_for_sr sr as e -> + Server_helpers.exec_with_new_task "transform_storage_exn" + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + Backtrace.reraise e (Api_errors.Server_error(Api_errors.sr_not_attached, [ Ref.string_of sr ])) + ) + | e -> + Backtrace.reraise e (Api_errors.Server_error(Api_errors.internal_error, [ Printexc.to_string e ])) exception No_VDI (* Find a VDI given a storage-layer SR and VDI *) let find_vdi ~__context sr vdi = - let open Db_filter_types in - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - match Db.VDI.get_records_where ~__context ~expr:(And((Eq (Field "location", Literal vdi)),Eq (Field "SR", Literal (Ref.string_of sr)))) with - | x :: _ -> x - | _ -> raise No_VDI + let open Db_filter_types in + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + match Db.VDI.get_records_where ~__context ~expr:(And((Eq (Field "location", Literal vdi)),Eq (Field "SR", Literal (Ref.string_of sr)))) with + | x :: _ -> x + | _ -> raise No_VDI (* Find a VDI reference given a name *) let find_content ~__context ?sr name = - (* PR-1255: the backend should do this for us *) - let open Db_filter_types in - let expr = Opt.default True (Opt.map (fun sr -> Eq(Field "SR", Literal (Ref.string_of (Db.SR.get_by_uuid ~__context ~uuid:sr)))) sr) in - let all = Db.VDI.get_records_where ~__context ~expr in - List.find - (fun (_, vdi_rec) -> - false - || (vdi_rec.API.vDI_location = name) (* PR-1255 *) - ) all + (* PR-1255: the backend should do this for us *) + let open Db_filter_types in + let expr = Opt.default True (Opt.map (fun sr -> Eq(Field "SR", Literal (Ref.string_of (Db.SR.get_by_uuid ~__context ~uuid:sr)))) sr) in + let all = Db.VDI.get_records_where ~__context ~expr in + List.find + (fun (_, vdi_rec) -> + false + || (vdi_rec.API.vDI_location = name) (* PR-1255 *) + ) all let redirect sr = - raise (Redirect (Some (Pool_role.get_master_address ()))) + raise (Redirect 
(Some (Pool_role.get_master_address ()))) module SMAPIv1 = struct - (** xapi's builtin ability to call local SM plugins using the existing - protocol. The code here should only call the SM functions and encapsulate - the return or error properly. It should not perform side-effects on - the xapi database: these should be handled in the layer above so they - can be shared with other SM implementation types. - - Where this layer has to perform interface adjustments (see VDI.activate - and the read/write debacle), this highlights desirable improvements to - the backend interface. - *) - - type context = Smint.request - - let vdi_info_of_vdi_rec __context vdi_rec = - let content_id = - if List.mem_assoc "content_id" vdi_rec.API.vDI_other_config - then List.assoc "content_id" vdi_rec.API.vDI_other_config - else vdi_rec.API.vDI_location (* PR-1255 *) - in { - vdi = vdi_rec.API.vDI_location; - uuid = Some vdi_rec.API.vDI_uuid; - content_id = content_id; (* PR-1255 *) - name_label = vdi_rec.API.vDI_name_label; - name_description = vdi_rec.API.vDI_name_description; - ty = Storage_utils.string_of_vdi_type vdi_rec.API.vDI_type; - metadata_of_pool = Ref.string_of vdi_rec.API.vDI_metadata_of_pool; - is_a_snapshot = vdi_rec.API.vDI_is_a_snapshot; - snapshot_time = Date.to_string vdi_rec.API.vDI_snapshot_time; - snapshot_of = - if Db.is_valid_ref __context vdi_rec.API.vDI_snapshot_of - then Db.VDI.get_uuid ~__context ~self:vdi_rec.API.vDI_snapshot_of - else ""; - read_only = vdi_rec.API.vDI_read_only; - virtual_size = vdi_rec.API.vDI_virtual_size; - physical_utilisation = vdi_rec.API.vDI_physical_utilisation; - persistent = vdi_rec.API.vDI_on_boot = `persist; - sm_config = vdi_rec.API.vDI_sm_config; - } - - let vdi_info_from_db ~__context self = - let vdi_rec = Db.VDI.get_record ~__context ~self in - vdi_info_of_vdi_rec __context vdi_rec - - (* For SMAPIv1, is_a_snapshot, snapshot_time and snapshot_of are stored in - * xapi's database. 
For SMAPIv2 they should be implemented by the storage - * backend. *) - let set_is_a_snapshot context ~dbg ~sr ~vdi ~is_a_snapshot = - Server_helpers.exec_with_new_task "VDI.set_is_a_snapshot" - ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let vdi, _ = find_vdi ~__context sr vdi in - Db.VDI.set_is_a_snapshot ~__context ~self:vdi ~value:is_a_snapshot - ) - - let set_snapshot_time context ~dbg ~sr ~vdi ~snapshot_time = - Server_helpers.exec_with_new_task "VDI.set_snapshot_time" - ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let vdi, _ = find_vdi ~__context sr vdi in - let snapshot_time = Date.of_string snapshot_time in - Db.VDI.set_snapshot_time ~__context ~self:vdi ~value:snapshot_time - ) - - let set_snapshot_of context ~dbg ~sr ~vdi ~snapshot_of = - Server_helpers.exec_with_new_task "VDI.set_snapshot_of" - ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let vdi, _ = find_vdi ~__context sr vdi in - let snapshot_of, _ = find_vdi ~__context sr snapshot_of in - Db.VDI.set_snapshot_of ~__context ~self:vdi ~value:snapshot_of - ) - - module Query = struct - let query context ~dbg = { - driver = "storage_access"; - name = "SMAPIv1 adapter"; - description = "Allows legacy SMAPIv1 adapters to expose an SMAPIv2 interface"; - vendor = "XCP"; - copyright = "see the source code"; - version = "2.0"; - required_api_version = "2.0"; - features = []; - configuration = []; - required_cluster_stack = []; - } - - let diagnostics context ~dbg = - "No diagnostics are available for SMAPIv1 plugins" - end - - module DP = struct - let create context ~dbg ~id = assert false - let destroy context ~dbg ~dp = assert false - let diagnostics context () = assert false - let attach_info context ~dbg ~sr ~vdi ~dp = assert false - let stat_vdi context ~dbg ~sr ~vdi = assert false - end - - module SR = struct - include Storage_skeleton.SR - - let probe context ~dbg ~queue ~device_config ~sm_config = - let _type = - (* SMAPIv1 plugins have no namespaces, so strip off everything 
up to - the final dot *) - try - let i = String.rindex queue '.' in - String.sub queue (i + 1) (String.length queue -i - 1) - with Not_found -> - queue in - Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let task = Context.get_task_id __context in - Storage_interface.Raw (Sm.sr_probe (Some task,(Sm.sm_master true :: device_config)) _type sm_config) - ) - - let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = - Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let subtask_of = Some (Context.get_task_id __context) in - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - Db.SR.set_name_label ~__context ~self:sr ~value:name_label; - Db.SR.set_name_description ~__context ~self:sr ~value:name_description; - let device_config = (Sm.sm_master true) :: device_config in - Sm.call_sm_functions ~__context ~sR:sr - (fun _ _type -> - try - Sm.sr_create (subtask_of, device_config) _type sr physical_size - with - | Smint.Not_implemented_in_backend -> - error "SR.create failed SR:%s Not_implemented_in_backend" (Ref.string_of sr); - raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - let e' = ExnHelper.string_of_exn e in - error "SR.create failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - - let set_name_label context ~dbg ~sr ~new_name_label = - Server_helpers.exec_with_new_task "SR.set_name_label" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - Db.SR.set_name_label ~__context ~self:sr ~value:new_name_label - ) - - let set_name_description context ~dbg ~sr ~new_name_description = - Server_helpers.exec_with_new_task "SR.set_name_description" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context 
~uuid:sr in - Db.SR.set_name_description ~__context ~self:sr ~value:new_name_description - ) - - let attach context ~dbg ~sr ~device_config = - Server_helpers.exec_with_new_task "SR.attach" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - - (* Existing backends expect an SRMaster flag to be added - through the device-config. *) - let srmaster = Helpers.i_am_srmaster ~__context ~sr in - let device_config = (Sm.sm_master srmaster) :: device_config in - Sm.call_sm_functions ~__context ~sR:sr - (fun _ _type -> - try - Sm.sr_attach (Some (Context.get_task_id __context), device_config) _type sr - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - let e' = ExnHelper.string_of_exn e in - error "SR.attach failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - let detach context ~dbg ~sr = - Server_helpers.exec_with_new_task "SR.detach" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config _type -> - try - Sm.sr_detach device_config _type sr - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - let e' = ExnHelper.string_of_exn e in - error "SR.detach failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - - let reset context ~dbg ~sr = assert false - - let destroy context ~dbg ~sr = - Server_helpers.exec_with_new_task "SR.destroy" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config _type -> - try - Sm.sr_delete device_config _type sr - with - | Smint.Not_implemented_in_backend -> - raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - let e' 
= ExnHelper.string_of_exn e in - error "SR.detach failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - - let stat context ~dbg ~sr:sr' = - Server_helpers.exec_with_new_task "SR.stat" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr' in - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config _type -> - try - Sm.sr_update device_config _type sr; - let r = Db.SR.get_record ~__context ~self:sr in - let name_label = r.API.sR_name_label in - let name_description = r.API.sR_name_description in - let total_space = r.API.sR_physical_size in - let free_space = Int64.sub r.API.sR_physical_size r.API.sR_physical_utilisation in - let clustered = false in - let health = Storage_interface.Healthy in - { name_label; name_description; total_space; free_space; clustered; health } - with - | Smint.Not_implemented_in_backend -> - raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) - | Api_errors.Server_error(code, params) -> - error "SR.scan failed SR:%s code=%s params=[%s]" (Ref.string_of sr) code (String.concat "; " params); - raise (Backend_error(code, params)) - | Sm.MasterOnly -> redirect sr - | e -> - let e' = ExnHelper.string_of_exn e in - error "SR.scan failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - - let scan context ~dbg ~sr:sr' = - Server_helpers.exec_with_new_task "SR.scan" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr' in - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config _type -> - try - Sm.sr_scan device_config _type sr; - let open Db_filter_types in - let vdis = Db.VDI.get_records_where ~__context ~expr:(Eq(Field "SR", Literal (Ref.string_of sr))) |> List.map snd in - List.map (vdi_info_of_vdi_rec __context) vdis - with - | Smint.Not_implemented_in_backend -> - raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) - | 
Api_errors.Server_error(code, params) -> - error "SR.scan failed SR:%s code=%s params=[%s]" (Ref.string_of sr) code (String.concat "; " params); - raise (Backend_error(code, params)) - | Sm.MasterOnly -> redirect sr - | e -> - let e' = ExnHelper.string_of_exn e in - error "SR.scan failed SR:%s error:%s" (Ref.string_of sr) e'; - raise e - ) - ) - - - let list context ~dbg = assert false - - let update_snapshot_info_src context ~dbg ~sr ~vdi - ~url ~dest ~dest_vdi ~snapshot_pairs = - assert false - - let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = - Server_helpers.exec_with_new_task "SR.update_snapshot_info_dest" - ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let local_vdis = scan __context ~dbg ~sr in - let find_sm_vdi ~vdi ~vdi_info_list = - try List.find (fun x -> x.vdi = vdi) vdi_info_list - with Not_found -> raise (Vdi_does_not_exist vdi) - in - let assert_content_ids_match ~vdi_info1 ~vdi_info2 = - if vdi_info1.content_id <> vdi_info2.content_id - then raise (Content_ids_do_not_match (vdi_info1.vdi, vdi_info2.vdi)) - in - (* For each (local snapshot vdi, source snapshot vdi) pair: - * - Check that the content_ids are the same - * - Copy snapshot_time from the source VDI to the local VDI - * - Set the local VDI's snapshot_of to vdi - * - Set is_a_snapshot = true for the local snapshot *) - List.iter - (fun (local_snapshot, src_snapshot_info) -> - let local_snapshot_info = - find_sm_vdi ~vdi:local_snapshot ~vdi_info_list:local_vdis in - assert_content_ids_match local_snapshot_info src_snapshot_info; - set_snapshot_time __context ~dbg ~sr - ~vdi:local_snapshot - ~snapshot_time:src_snapshot_info.snapshot_time; - set_snapshot_of __context ~dbg ~sr - ~vdi:local_snapshot ~snapshot_of:vdi; - set_is_a_snapshot __context ~dbg ~sr - ~vdi:local_snapshot ~is_a_snapshot:true;) - snapshot_pairs) - end - - module VDI = struct - let for_vdi ~dbg ~sr ~vdi op_name f = - Server_helpers.exec_with_new_task op_name ~subtask_of:(Ref.of_string 
dbg) - (fun __context -> - let open Db_filter_types in - let self = find_vdi ~__context sr vdi |> fst in - Sm.call_sm_vdi_functions ~__context ~vdi:self - (fun device_config _type sr -> - f device_config _type sr self - ) - ) - (* Allow us to remember whether a VDI is attached read/only or read/write. - If this is meaningful to the backend then this should be recorded there! *) - let vdi_read_write = Hashtbl.create 10 - let vdi_read_write_m = Mutex.create () - let vdi_read_caching_m = Mutex.create () - let per_host_key ~__context ~prefix = - let host_uuid = Db.Host.get_uuid ~__context ~self:(Helpers.get_localhost ~__context) in - Printf.sprintf "%s-%s" prefix host_uuid - let read_caching_key ~__context = - per_host_key ~__context ~prefix:"read-caching-enabled-on" - let read_caching_reason_key ~__context = - per_host_key ~__context ~prefix:"read-caching-reason" - - - let epoch_begin context ~dbg ~sr ~vdi ~persistent = - try - for_vdi ~dbg ~sr ~vdi "VDI.epoch_begin" - (fun device_config _type sr self -> - Sm.vdi_epoch_begin device_config _type sr self) - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let attach context ~dbg ~dp ~sr ~vdi ~read_write = - try - let attach_info = - for_vdi ~dbg ~sr ~vdi "VDI.attach" - (fun device_config _type sr self -> - let attach_info_v1 = Sm.vdi_attach device_config _type sr self read_write in - (* Record whether the VDI is benefiting from read caching *) - Server_helpers.exec_with_new_task "VDI.attach" ~subtask_of:(Ref.of_string dbg) (fun __context -> - let read_caching = not attach_info_v1.Smint.o_direct in - let on_key = read_caching_key ~__context in - let reason_key = read_caching_reason_key ~__context in - Mutex.execute vdi_read_caching_m (fun () -> - Db.VDI.remove_from_sm_config ~__context ~self ~key:on_key; - Db.VDI.remove_from_sm_config ~__context ~self ~key:reason_key; - Db.VDI.add_to_sm_config ~__context ~self ~key:on_key - ~value:(string_of_bool read_caching); - if not 
read_caching then - Db.VDI.add_to_sm_config ~__context ~self ~key:reason_key - ~value:(attach_info_v1.Smint.o_direct_reason) - ) - ); - { params = attach_info_v1.Smint.params; - o_direct = attach_info_v1.Smint.o_direct; - o_direct_reason = attach_info_v1.Smint.o_direct_reason; - xenstore_data = attach_info_v1.Smint.xenstore_data; } - ) in - Mutex.execute vdi_read_write_m - (fun () -> Hashtbl.replace vdi_read_write (sr, vdi) read_write); - attach_info - with Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let activate context ~dbg ~dp ~sr ~vdi = - try - let read_write = Mutex.execute vdi_read_write_m - (fun () -> - if not (Hashtbl.mem vdi_read_write (sr, vdi)) then error "VDI.activate: doesn't know if sr:%s vdi:%s is RO or RW" sr vdi; - Hashtbl.find vdi_read_write (sr, vdi)) in - for_vdi ~dbg ~sr ~vdi "VDI.activate" - (fun device_config _type sr self -> - Server_helpers.exec_with_new_task "VDI.activate" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - (if read_write - then Db.VDI.remove_from_other_config ~__context ~self ~key:"content_id")); - (* If the backend doesn't advertise the capability then do nothing *) - if List.mem_assoc Smint.Vdi_activate (Sm.features_of_driver _type) - then Sm.vdi_activate device_config _type sr self read_write - else info "%s sr:%s does not support vdi_activate: doing nothing" dp (Ref.string_of sr) - ) - with Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let deactivate context ~dbg ~dp ~sr ~vdi = - try - for_vdi ~dbg ~sr ~vdi "VDI.deactivate" - (fun device_config _type sr self -> - Server_helpers.exec_with_new_task "VDI.deactivate" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let other_config = Db.VDI.get_other_config ~__context ~self in - if not (List.mem_assoc "content_id" other_config) - then Db.VDI.add_to_other_config ~__context ~self ~key:"content_id" ~value:(Uuid.string_of_uuid (Uuid.make_uuid ()))); - (* If the backend doesn't advertise the 
capability then do nothing *) - if List.mem_assoc Smint.Vdi_deactivate (Sm.features_of_driver _type) - then Sm.vdi_deactivate device_config _type sr self - else info "%s sr:%s does not support vdi_deactivate: doing nothing" dp (Ref.string_of sr) - ) - with Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let detach context ~dbg ~dp ~sr ~vdi = - try - for_vdi ~dbg ~sr ~vdi "VDI.detach" - (fun device_config _type sr self -> - Sm.vdi_detach device_config _type sr self; - Server_helpers.exec_with_new_task "VDI.detach" ~subtask_of:(Ref.of_string dbg) (fun __context -> - let on_key = read_caching_key ~__context in - let reason_key = read_caching_reason_key ~__context in - Mutex.execute vdi_read_caching_m (fun () -> - Db.VDI.remove_from_sm_config ~__context ~self ~key:on_key; - Db.VDI.remove_from_sm_config ~__context ~self ~key:reason_key - ) - ) - ); - Mutex.execute vdi_read_write_m - (fun () -> Hashtbl.remove vdi_read_write (sr, vdi)) - with Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let epoch_end context ~dbg ~sr ~vdi = - try - for_vdi ~dbg ~sr ~vdi "VDI.epoch_end" - (fun device_config _type sr self -> - Sm.vdi_epoch_end device_config _type sr self) - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - - let require_uuid vdi_info = - match vdi_info.Smint.vdi_info_uuid with - | Some uuid -> uuid - | None -> failwith "SM backend failed to return field" - - let newvdi ~__context vi = - (* The current backends stash data directly in the db *) - let uuid = require_uuid vi in - vdi_info_from_db ~__context (Db.VDI.get_by_uuid ~__context ~uuid) - - let create context ~dbg ~sr ~vdi_info = - try - Server_helpers.exec_with_new_task "VDI.create" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - let vi = - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config _type -> - Sm.vdi_create device_config _type sr 
vdi_info.sm_config vdi_info.ty - vdi_info.virtual_size vdi_info.name_label vdi_info.name_description - vdi_info.metadata_of_pool vdi_info.is_a_snapshot - vdi_info.snapshot_time vdi_info.snapshot_of vdi_info.read_only - ) in - newvdi ~__context vi - ) - with - | Api_errors.Server_error(code, params) -> raise (Backend_error(code, params)) - | Sm.MasterOnly -> redirect sr - - (* A list of keys in sm-config that will be preserved on clone/snapshot *) - let sm_config_keys_to_preserve_on_clone = [ - "base_mirror" - ] - - let snapshot_and_clone call_name call_f is_a_snapshot context ~dbg ~sr ~vdi_info = - try - Server_helpers.exec_with_new_task call_name ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let vi = for_vdi ~dbg ~sr ~vdi:vdi_info.vdi call_name - (fun device_config _type sr self -> - call_f device_config _type vdi_info.sm_config sr self - ) in - (* PR-1255: modify clone, snapshot to take the same parameters as create? *) - let self, _ = find_vdi ~__context sr vi.Smint.vdi_info_location in - let clonee, _ = find_vdi ~__context sr vdi_info.vdi in - let content_id = - try - List.assoc "content_id" - (Db.VDI.get_other_config ~__context ~self:clonee) - with _ -> - Uuid.string_of_uuid (Uuid.make_uuid ()) - in - Db.VDI.set_name_label ~__context ~self ~value:vdi_info.name_label; - Db.VDI.set_name_description ~__context ~self ~value:vdi_info.name_description; - Db.VDI.set_snapshot_time ~__context ~self ~value:(Date.of_string vdi_info.snapshot_time); - Db.VDI.set_is_a_snapshot ~__context ~self ~value:is_a_snapshot; - Db.VDI.remove_from_other_config ~__context ~self ~key:"content_id"; - Db.VDI.add_to_other_config ~__context ~self ~key:"content_id" ~value:content_id; - debug "copying sm-config"; - List.iter (fun (key, value) -> - let preserve = List.mem key sm_config_keys_to_preserve_on_clone in - if preserve then ( - Db.VDI.remove_from_sm_config ~__context ~self ~key; - Db.VDI.add_to_sm_config ~__context ~self ~key ~value; - ) - ) vdi_info.sm_config; - for_vdi ~dbg 
~sr ~vdi:vi.Smint.vdi_info_location "VDI.update" - (fun device_config _type sr self -> - Sm.vdi_update device_config _type sr self - ); - let vdi = vdi_info_from_db ~__context self in - debug "vdi = %s" (string_of_vdi_info vdi); - vdi - ) - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | Smint.Not_implemented_in_backend -> - raise (Unimplemented call_name) - | Sm.MasterOnly -> redirect sr - - - let snapshot = snapshot_and_clone "VDI.snapshot" Sm.vdi_snapshot true - let clone = snapshot_and_clone "VDI.clone" Sm.vdi_clone false - - let set_name_label context ~dbg ~sr ~vdi ~new_name_label = - Server_helpers.exec_with_new_task "VDI.set_name_label" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let self, _ = find_vdi ~__context sr vdi in - Db.VDI.set_name_label ~__context ~self ~value:new_name_label - ) - - let set_name_description context ~dbg ~sr ~vdi ~new_name_description = - Server_helpers.exec_with_new_task "VDI.set_name_description" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let self, _ = find_vdi ~__context sr vdi in - Db.VDI.set_name_description ~__context ~self ~value:new_name_description - ) - - let resize context ~dbg ~sr ~vdi ~new_size = - try - let vi = for_vdi ~dbg ~sr ~vdi "VDI.resize" - (fun device_config _type sr self -> - Sm.vdi_resize device_config _type sr self new_size - ) in - Server_helpers.exec_with_new_task "VDI.resize" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let self, _ = find_vdi ~__context sr vi.Smint.vdi_info_location in - Db.VDI.get_virtual_size ~__context ~self - ) - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | Smint.Not_implemented_in_backend -> - raise (Unimplemented "VDI.resize") - | Sm.MasterOnly -> redirect sr - - let destroy context ~dbg ~sr ~vdi = - try - for_vdi ~dbg ~sr ~vdi "VDI.destroy" - (fun device_config _type sr self -> - Sm.vdi_delete device_config _type sr self - ); - Mutex.execute vdi_read_write_m - 
(fun () -> Hashtbl.remove vdi_read_write (sr, vdi)) - with - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | No_VDI -> - raise (Vdi_does_not_exist vdi) - | Sm.MasterOnly -> redirect sr - - let stat context ~dbg ~sr ~vdi = - try - Server_helpers.exec_with_new_task "VDI.stat" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - for_vdi ~dbg ~sr ~vdi "VDI.stat" - (fun device_config _type sr self -> - Sm.vdi_update device_config _type sr self; - vdi_info_of_vdi_rec __context (Db.VDI.get_record ~__context ~self) - ) - ) - with e -> - error "VDI.stat caught: %s" (Printexc.to_string e); - raise (Vdi_does_not_exist vdi) - - let introduce context ~dbg ~sr ~uuid ~sm_config ~location = - try - Server_helpers.exec_with_new_task "VDI.introduce" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in - let vi = - Sm.call_sm_functions ~__context ~sR:sr - (fun device_config sr_type -> - Sm.vdi_introduce device_config sr_type sr uuid sm_config location) in - newvdi ~__context vi - ) - with e -> - error "VDI.introduce caught: %s" (Printexc.to_string e); - raise (Vdi_does_not_exist location) - - let set_persistent context ~dbg ~sr ~vdi ~persistent = - try - Server_helpers.exec_with_new_task "VDI.set_persistent" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - if not persistent then begin - info "VDI.set_persistent: calling VDI.clone and VDI.destroy to make an empty vhd-leaf"; - let location = for_vdi ~dbg ~sr ~vdi "VDI.clone" - (fun device_config _type sr self -> - let vi = Sm.vdi_clone device_config _type [] sr self in - vi.Smint.vdi_info_location - ) in - for_vdi ~dbg ~sr ~vdi:location "VDI.destroy" - (fun device_config _type sr self -> - Sm.vdi_delete device_config _type sr self - ) - end - ) - with - | Api_errors.Server_error(code, params) -> raise (Backend_error(code, params)) - | Sm.MasterOnly -> redirect sr - - let get_by_name context ~dbg ~sr ~name = - info "VDI.get_by_name dbg:%s sr:%s 
name:%s" dbg sr name; - (* PR-1255: the backend should do this for us *) - Server_helpers.exec_with_new_task "VDI.get_by_name" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - (* PR-1255: the backend should do this for us *) - try - let _, vdi = find_content ~__context ~sr name in - let vi = vdi_info_of_vdi_rec __context vdi in - debug "VDI.get_by_name returning successfully"; - vi - with e -> - error "VDI.get_by_name caught: %s" (Printexc.to_string e); - raise (Vdi_does_not_exist name) - ) - - let set_content_id context ~dbg ~sr ~vdi ~content_id = - info "VDI.get_by_content dbg:%s sr:%s vdi:%s content_id:%s" dbg sr vdi content_id; - (* PR-1255: the backend should do this for us *) - Server_helpers.exec_with_new_task "VDI.set_content_id" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let vdi, _ = find_vdi ~__context sr vdi in - Db.VDI.remove_from_other_config ~__context ~self:vdi ~key:"content_id"; - Db.VDI.add_to_other_config ~__context ~self:vdi ~key:"content_id" ~value:content_id - ) - - let similar_content context ~dbg ~sr ~vdi = - info "VDI.similar_content dbg:%s sr:%s vdi:%s" dbg sr vdi; - Server_helpers.exec_with_new_task "VDI.similar_content" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - (* PR-1255: the backend should do this for us. *) - let sr_ref = Db.SR.get_by_uuid ~__context ~uuid:sr in - (* Return a nearest-first list of similar VDIs. 
"near" should mean - "has similar blocks" but we approximate this with distance in the tree *) - let module StringMap = Map.Make(struct type t = string let compare = compare end) in - let _vhdparent = "vhd-parent" in - let open Db_filter_types in - let all = Db.VDI.get_records_where ~__context ~expr:(Eq (Field "SR", Literal (Ref.string_of sr_ref))) in - let locations = List.fold_left - (fun acc (_, vdi_rec) -> StringMap.add vdi_rec.API.vDI_location vdi_rec acc) - StringMap.empty all in - (* Compute a map of parent location -> children locations *) - let children, parents = List.fold_left - (fun (children, parents) (vdi_r, vdi_rec) -> - if List.mem_assoc _vhdparent vdi_rec.API.vDI_sm_config then begin - let me = vdi_rec.API.vDI_location in - let parent = List.assoc _vhdparent vdi_rec.API.vDI_sm_config in - let other_children = if StringMap.mem parent children then StringMap.find parent children else [] in - (StringMap.add parent (me :: other_children) children), - (StringMap.add me parent parents) - end else (children, parents)) (StringMap.empty, StringMap.empty) all in - - let rec explore current_distance acc vdi = - (* add me *) - let acc = StringMap.add vdi current_distance acc in - (* add the parent *) - let parent = if StringMap.mem vdi parents then [ StringMap.find vdi parents ] else [] in - let children = if StringMap.mem vdi children then StringMap.find vdi children else [] in - List.fold_left - (fun acc vdi -> - if not(StringMap.mem vdi acc) - then explore (current_distance + 1) acc vdi - else acc) acc (parent @ children) in - let module IntMap = Map.Make(struct type t = int let compare = compare end) in - let invert map = - StringMap.fold - (fun vdi n acc -> - let current = if IntMap.mem n acc then IntMap.find n acc else [] in - IntMap.add n (vdi :: current) acc - ) map IntMap.empty in - let _, vdi_rec = find_vdi ~__context sr vdi in - let vdis = explore 0 StringMap.empty vdi_rec.API.vDI_location |> invert |> IntMap.bindings |> List.map snd |> List.concat 
in - let vdi_recs = List.map (fun l -> StringMap.find l locations) vdis in - List.map (fun x -> vdi_info_of_vdi_rec __context x) vdi_recs - ) - - let compose context ~dbg ~sr ~vdi1 ~vdi2 = - info "VDI.compose dbg:%s sr:%s vdi1:%s vdi2:%s" dbg sr vdi1 vdi2; - try - Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - (* This call 'operates' on vdi2 *) - let vdi1 = find_vdi ~__context sr vdi1 |> fst in - for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.activate" - (fun device_config _type sr self -> - Sm.vdi_compose device_config _type sr vdi1 self - ) - ) - with - | Smint.Not_implemented_in_backend -> - raise (Unimplemented "VDI.compose") - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | No_VDI -> - raise (Vdi_does_not_exist vdi1) - | Sm.MasterOnly -> redirect sr - - let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = - info "VDI.add_to_sm_config dbg:%s sr:%s vdi:%s key:%s value:%s" dbg sr vdi key value; - Server_helpers.exec_with_new_task "VDI.add_to_sm_config" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let self = find_vdi ~__context sr vdi |> fst in - Db.VDI.add_to_sm_config ~__context ~self ~key ~value) - - let remove_from_sm_config context ~dbg ~sr ~vdi ~key = - info "VDI.remove_from_sm_config dbg:%s sr:%s vdi:%s key:%s" dbg sr vdi key; - Server_helpers.exec_with_new_task "VDI.remove_from_sm_config" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let self = find_vdi ~__context sr vdi |> fst in - Db.VDI.remove_from_sm_config ~__context ~self ~key) - - let get_url context ~dbg ~sr ~vdi = - info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; - (* XXX: PR-1255: tapdisk shouldn't hardcode xapi urls *) - (* peer_ip/session_ref/vdi_ref *) - Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) - (fun __context -> - let ip = Helpers.get_management_ip_addr ~__context |> Opt.unbox in - let rpc = Helpers.make_rpc ~__context in - let localhost = 
Helpers.get_localhost ~__context in - (* XXX: leaked *) - let session_ref = XenAPI.Session.slave_login rpc localhost !Xapi_globs.pool_secret in - let vdi, _ = find_vdi ~__context sr vdi in - Printf.sprintf "%s/%s/%s" ip (Ref.string_of session_ref) (Ref.string_of vdi)) - - end - - let get_by_name context ~dbg ~name = assert false - - module DATA = struct - let copy_into context ~dbg ~sr ~vdi ~url ~dest = assert false - let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false - module MIRROR = struct - let start context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false - let stop context ~dbg ~id = assert false - let list context ~dbg = assert false - let stat context ~dbg ~id = assert false - let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = assert false - let receive_finalize context ~dbg ~id = assert false - let receive_cancel context ~dbg ~id = assert false - end - end - - module Policy = struct - let get_backend_vm context ~dbg ~vm ~sr ~vdi = assert false - end - - module TASK = struct - let stat context ~dbg ~task = assert false - let destroy context ~dbg ~task = assert false - let cancel context ~dbg ~task = assert false - let list context ~dbg = assert false - end - - module UPDATES = struct - let get context ~dbg ~from ~timeout = assert false - end + (** xapi's builtin ability to call local SM plugins using the existing + protocol. The code here should only call the SM functions and encapsulate + the return or error properly. It should not perform side-effects on + the xapi database: these should be handled in the layer above so they + can be shared with other SM implementation types. + + Where this layer has to perform interface adjustments (see VDI.activate + and the read/write debacle), this highlights desirable improvements to + the backend interface. 
+ *) + + type context = Smint.request + + let vdi_info_of_vdi_rec __context vdi_rec = + let content_id = + if List.mem_assoc "content_id" vdi_rec.API.vDI_other_config + then List.assoc "content_id" vdi_rec.API.vDI_other_config + else vdi_rec.API.vDI_location (* PR-1255 *) + in { + vdi = vdi_rec.API.vDI_location; + uuid = Some vdi_rec.API.vDI_uuid; + content_id = content_id; (* PR-1255 *) + name_label = vdi_rec.API.vDI_name_label; + name_description = vdi_rec.API.vDI_name_description; + ty = Storage_utils.string_of_vdi_type vdi_rec.API.vDI_type; + metadata_of_pool = Ref.string_of vdi_rec.API.vDI_metadata_of_pool; + is_a_snapshot = vdi_rec.API.vDI_is_a_snapshot; + snapshot_time = Date.to_string vdi_rec.API.vDI_snapshot_time; + snapshot_of = + if Db.is_valid_ref __context vdi_rec.API.vDI_snapshot_of + then Db.VDI.get_uuid ~__context ~self:vdi_rec.API.vDI_snapshot_of + else ""; + read_only = vdi_rec.API.vDI_read_only; + virtual_size = vdi_rec.API.vDI_virtual_size; + physical_utilisation = vdi_rec.API.vDI_physical_utilisation; + persistent = vdi_rec.API.vDI_on_boot = `persist; + sm_config = vdi_rec.API.vDI_sm_config; + } + + let vdi_info_from_db ~__context self = + let vdi_rec = Db.VDI.get_record ~__context ~self in + vdi_info_of_vdi_rec __context vdi_rec + + (* For SMAPIv1, is_a_snapshot, snapshot_time and snapshot_of are stored in + * xapi's database. For SMAPIv2 they should be implemented by the storage + * backend. 
*) + let set_is_a_snapshot context ~dbg ~sr ~vdi ~is_a_snapshot = + Server_helpers.exec_with_new_task "VDI.set_is_a_snapshot" + ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let vdi, _ = find_vdi ~__context sr vdi in + Db.VDI.set_is_a_snapshot ~__context ~self:vdi ~value:is_a_snapshot + ) + + let set_snapshot_time context ~dbg ~sr ~vdi ~snapshot_time = + Server_helpers.exec_with_new_task "VDI.set_snapshot_time" + ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let vdi, _ = find_vdi ~__context sr vdi in + let snapshot_time = Date.of_string snapshot_time in + Db.VDI.set_snapshot_time ~__context ~self:vdi ~value:snapshot_time + ) + + let set_snapshot_of context ~dbg ~sr ~vdi ~snapshot_of = + Server_helpers.exec_with_new_task "VDI.set_snapshot_of" + ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let vdi, _ = find_vdi ~__context sr vdi in + let snapshot_of, _ = find_vdi ~__context sr snapshot_of in + Db.VDI.set_snapshot_of ~__context ~self:vdi ~value:snapshot_of + ) + + module Query = struct + let query context ~dbg = { + driver = "storage_access"; + name = "SMAPIv1 adapter"; + description = "Allows legacy SMAPIv1 adapters to expose an SMAPIv2 interface"; + vendor = "XCP"; + copyright = "see the source code"; + version = "2.0"; + required_api_version = "2.0"; + features = []; + configuration = []; + required_cluster_stack = []; + } + + let diagnostics context ~dbg = + "No diagnostics are available for SMAPIv1 plugins" + end + + module DP = struct + let create context ~dbg ~id = assert false + let destroy context ~dbg ~dp = assert false + let diagnostics context () = assert false + let attach_info context ~dbg ~sr ~vdi ~dp = assert false + let stat_vdi context ~dbg ~sr ~vdi = assert false + end + + module SR = struct + include Storage_skeleton.SR + + let probe context ~dbg ~queue ~device_config ~sm_config = + let _type = + (* SMAPIv1 plugins have no namespaces, so strip off everything up to + the final dot *) + try + let i = String.rindex queue '.' 
in + String.sub queue (i + 1) (String.length queue -i - 1) + with Not_found -> + queue in + Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let task = Context.get_task_id __context in + Storage_interface.Raw (Sm.sr_probe (Some task,(Sm.sm_master true :: device_config)) _type sm_config) + ) + + let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = + Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let subtask_of = Some (Context.get_task_id __context) in + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + Db.SR.set_name_label ~__context ~self:sr ~value:name_label; + Db.SR.set_name_description ~__context ~self:sr ~value:name_description; + let device_config = (Sm.sm_master true) :: device_config in + Sm.call_sm_functions ~__context ~sR:sr + (fun _ _type -> + try + Sm.sr_create (subtask_of, device_config) _type sr physical_size + with + | Smint.Not_implemented_in_backend -> + error "SR.create failed SR:%s Not_implemented_in_backend" (Ref.string_of sr); + raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.create failed SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + + let set_name_label context ~dbg ~sr ~new_name_label = + Server_helpers.exec_with_new_task "SR.set_name_label" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + Db.SR.set_name_label ~__context ~self:sr ~value:new_name_label + ) + + let set_name_description context ~dbg ~sr ~new_name_description = + Server_helpers.exec_with_new_task "SR.set_name_description" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + Db.SR.set_name_description ~__context ~self:sr 
~value:new_name_description + ) + + let attach context ~dbg ~sr ~device_config = + Server_helpers.exec_with_new_task "SR.attach" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + + (* Existing backends expect an SRMaster flag to be added + through the device-config. *) + let srmaster = Helpers.i_am_srmaster ~__context ~sr in + let device_config = (Sm.sm_master srmaster) :: device_config in + Sm.call_sm_functions ~__context ~sR:sr + (fun _ _type -> + try + Sm.sr_attach (Some (Context.get_task_id __context), device_config) _type sr + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.attach failed SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + let detach context ~dbg ~sr = + Server_helpers.exec_with_new_task "SR.detach" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config _type -> + try + Sm.sr_detach device_config _type sr + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.detach failed SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + + let reset context ~dbg ~sr = assert false + + let destroy context ~dbg ~sr = + Server_helpers.exec_with_new_task "SR.destroy" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config _type -> + try + Sm.sr_delete device_config _type sr + with + | Smint.Not_implemented_in_backend -> + raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.detach failed 
SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + + let stat context ~dbg ~sr:sr' = + Server_helpers.exec_with_new_task "SR.stat" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr' in + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config _type -> + try + Sm.sr_update device_config _type sr; + let r = Db.SR.get_record ~__context ~self:sr in + let name_label = r.API.sR_name_label in + let name_description = r.API.sR_name_description in + let total_space = r.API.sR_physical_size in + let free_space = Int64.sub r.API.sR_physical_size r.API.sR_physical_utilisation in + let clustered = false in + let health = Storage_interface.Healthy in + { name_label; name_description; total_space; free_space; clustered; health } + with + | Smint.Not_implemented_in_backend -> + raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) + | Api_errors.Server_error(code, params) -> + error "SR.scan failed SR:%s code=%s params=[%s]" (Ref.string_of sr) code (String.concat "; " params); + raise (Backend_error(code, params)) + | Sm.MasterOnly -> redirect sr + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.scan failed SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + + let scan context ~dbg ~sr:sr' = + Server_helpers.exec_with_new_task "SR.scan" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr' in + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config _type -> + try + Sm.sr_scan device_config _type sr; + let open Db_filter_types in + let vdis = Db.VDI.get_records_where ~__context ~expr:(Eq(Field "SR", Literal (Ref.string_of sr))) |> List.map snd in + List.map (vdi_info_of_vdi_rec __context) vdis + with + | Smint.Not_implemented_in_backend -> + raise (Storage_interface.Backend_error(Api_errors.sr_operation_not_supported, [ Ref.string_of sr ])) + | Api_errors.Server_error(code, params) -> + error "SR.scan 
failed SR:%s code=%s params=[%s]" (Ref.string_of sr) code (String.concat "; " params); + raise (Backend_error(code, params)) + | Sm.MasterOnly -> redirect sr + | e -> + let e' = ExnHelper.string_of_exn e in + error "SR.scan failed SR:%s error:%s" (Ref.string_of sr) e'; + raise e + ) + ) + + + let list context ~dbg = assert false + + let update_snapshot_info_src context ~dbg ~sr ~vdi + ~url ~dest ~dest_vdi ~snapshot_pairs = + assert false + + let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = + Server_helpers.exec_with_new_task "SR.update_snapshot_info_dest" + ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let local_vdis = scan __context ~dbg ~sr in + let find_sm_vdi ~vdi ~vdi_info_list = + try List.find (fun x -> x.vdi = vdi) vdi_info_list + with Not_found -> raise (Vdi_does_not_exist vdi) + in + let assert_content_ids_match ~vdi_info1 ~vdi_info2 = + if vdi_info1.content_id <> vdi_info2.content_id + then raise (Content_ids_do_not_match (vdi_info1.vdi, vdi_info2.vdi)) + in + (* For each (local snapshot vdi, source snapshot vdi) pair: + * - Check that the content_ids are the same + * - Copy snapshot_time from the source VDI to the local VDI + * - Set the local VDI's snapshot_of to vdi + * - Set is_a_snapshot = true for the local snapshot *) + List.iter + (fun (local_snapshot, src_snapshot_info) -> + let local_snapshot_info = + find_sm_vdi ~vdi:local_snapshot ~vdi_info_list:local_vdis in + assert_content_ids_match local_snapshot_info src_snapshot_info; + set_snapshot_time __context ~dbg ~sr + ~vdi:local_snapshot + ~snapshot_time:src_snapshot_info.snapshot_time; + set_snapshot_of __context ~dbg ~sr + ~vdi:local_snapshot ~snapshot_of:vdi; + set_is_a_snapshot __context ~dbg ~sr + ~vdi:local_snapshot ~is_a_snapshot:true;) + snapshot_pairs) + end + + module VDI = struct + let for_vdi ~dbg ~sr ~vdi op_name f = + Server_helpers.exec_with_new_task op_name ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let open Db_filter_types in + let 
self = find_vdi ~__context sr vdi |> fst in + Sm.call_sm_vdi_functions ~__context ~vdi:self + (fun device_config _type sr -> + f device_config _type sr self + ) + ) + (* Allow us to remember whether a VDI is attached read/only or read/write. + If this is meaningful to the backend then this should be recorded there! *) + let vdi_read_write = Hashtbl.create 10 + let vdi_read_write_m = Mutex.create () + let vdi_read_caching_m = Mutex.create () + let per_host_key ~__context ~prefix = + let host_uuid = Db.Host.get_uuid ~__context ~self:(Helpers.get_localhost ~__context) in + Printf.sprintf "%s-%s" prefix host_uuid + let read_caching_key ~__context = + per_host_key ~__context ~prefix:"read-caching-enabled-on" + let read_caching_reason_key ~__context = + per_host_key ~__context ~prefix:"read-caching-reason" + + + let epoch_begin context ~dbg ~sr ~vdi ~persistent = + try + for_vdi ~dbg ~sr ~vdi "VDI.epoch_begin" + (fun device_config _type sr self -> + Sm.vdi_epoch_begin device_config _type sr self) + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let attach context ~dbg ~dp ~sr ~vdi ~read_write = + try + let attach_info = + for_vdi ~dbg ~sr ~vdi "VDI.attach" + (fun device_config _type sr self -> + let attach_info_v1 = Sm.vdi_attach device_config _type sr self read_write in + (* Record whether the VDI is benefiting from read caching *) + Server_helpers.exec_with_new_task "VDI.attach" ~subtask_of:(Ref.of_string dbg) (fun __context -> + let read_caching = not attach_info_v1.Smint.o_direct in + let on_key = read_caching_key ~__context in + let reason_key = read_caching_reason_key ~__context in + Mutex.execute vdi_read_caching_m (fun () -> + Db.VDI.remove_from_sm_config ~__context ~self ~key:on_key; + Db.VDI.remove_from_sm_config ~__context ~self ~key:reason_key; + Db.VDI.add_to_sm_config ~__context ~self ~key:on_key + ~value:(string_of_bool read_caching); + if not read_caching then + Db.VDI.add_to_sm_config ~__context ~self 
~key:reason_key + ~value:(attach_info_v1.Smint.o_direct_reason) + ) + ); + { params = attach_info_v1.Smint.params; + o_direct = attach_info_v1.Smint.o_direct; + o_direct_reason = attach_info_v1.Smint.o_direct_reason; + xenstore_data = attach_info_v1.Smint.xenstore_data; } + ) in + Mutex.execute vdi_read_write_m + (fun () -> Hashtbl.replace vdi_read_write (sr, vdi) read_write); + attach_info + with Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let activate context ~dbg ~dp ~sr ~vdi = + try + let read_write = Mutex.execute vdi_read_write_m + (fun () -> + if not (Hashtbl.mem vdi_read_write (sr, vdi)) then error "VDI.activate: doesn't know if sr:%s vdi:%s is RO or RW" sr vdi; + Hashtbl.find vdi_read_write (sr, vdi)) in + for_vdi ~dbg ~sr ~vdi "VDI.activate" + (fun device_config _type sr self -> + Server_helpers.exec_with_new_task "VDI.activate" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + (if read_write + then Db.VDI.remove_from_other_config ~__context ~self ~key:"content_id")); + (* If the backend doesn't advertise the capability then do nothing *) + if List.mem_assoc Smint.Vdi_activate (Sm.features_of_driver _type) + then Sm.vdi_activate device_config _type sr self read_write + else info "%s sr:%s does not support vdi_activate: doing nothing" dp (Ref.string_of sr) + ) + with Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let deactivate context ~dbg ~dp ~sr ~vdi = + try + for_vdi ~dbg ~sr ~vdi "VDI.deactivate" + (fun device_config _type sr self -> + Server_helpers.exec_with_new_task "VDI.deactivate" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let other_config = Db.VDI.get_other_config ~__context ~self in + if not (List.mem_assoc "content_id" other_config) + then Db.VDI.add_to_other_config ~__context ~self ~key:"content_id" ~value:(Uuid.string_of_uuid (Uuid.make_uuid ()))); + (* If the backend doesn't advertise the capability then do nothing *) + if List.mem_assoc 
Smint.Vdi_deactivate (Sm.features_of_driver _type) + then Sm.vdi_deactivate device_config _type sr self + else info "%s sr:%s does not support vdi_deactivate: doing nothing" dp (Ref.string_of sr) + ) + with Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let detach context ~dbg ~dp ~sr ~vdi = + try + for_vdi ~dbg ~sr ~vdi "VDI.detach" + (fun device_config _type sr self -> + Sm.vdi_detach device_config _type sr self; + Server_helpers.exec_with_new_task "VDI.detach" ~subtask_of:(Ref.of_string dbg) (fun __context -> + let on_key = read_caching_key ~__context in + let reason_key = read_caching_reason_key ~__context in + Mutex.execute vdi_read_caching_m (fun () -> + Db.VDI.remove_from_sm_config ~__context ~self ~key:on_key; + Db.VDI.remove_from_sm_config ~__context ~self ~key:reason_key + ) + ) + ); + Mutex.execute vdi_read_write_m + (fun () -> Hashtbl.remove vdi_read_write (sr, vdi)) + with Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let epoch_end context ~dbg ~sr ~vdi = + try + for_vdi ~dbg ~sr ~vdi "VDI.epoch_end" + (fun device_config _type sr self -> + Sm.vdi_epoch_end device_config _type sr self) + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + + let require_uuid vdi_info = + match vdi_info.Smint.vdi_info_uuid with + | Some uuid -> uuid + | None -> failwith "SM backend failed to return field" + + let newvdi ~__context vi = + (* The current backends stash data directly in the db *) + let uuid = require_uuid vi in + vdi_info_from_db ~__context (Db.VDI.get_by_uuid ~__context ~uuid) + + let create context ~dbg ~sr ~vdi_info = + try + Server_helpers.exec_with_new_task "VDI.create" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + let vi = + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config _type -> + Sm.vdi_create device_config _type sr vdi_info.sm_config vdi_info.ty + vdi_info.virtual_size 
vdi_info.name_label vdi_info.name_description + vdi_info.metadata_of_pool vdi_info.is_a_snapshot + vdi_info.snapshot_time vdi_info.snapshot_of vdi_info.read_only + ) in + newvdi ~__context vi + ) + with + | Api_errors.Server_error(code, params) -> raise (Backend_error(code, params)) + | Sm.MasterOnly -> redirect sr + + (* A list of keys in sm-config that will be preserved on clone/snapshot *) + let sm_config_keys_to_preserve_on_clone = [ + "base_mirror" + ] + + let snapshot_and_clone call_name call_f is_a_snapshot context ~dbg ~sr ~vdi_info = + try + Server_helpers.exec_with_new_task call_name ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let vi = for_vdi ~dbg ~sr ~vdi:vdi_info.vdi call_name + (fun device_config _type sr self -> + call_f device_config _type vdi_info.sm_config sr self + ) in + (* PR-1255: modify clone, snapshot to take the same parameters as create? *) + let self, _ = find_vdi ~__context sr vi.Smint.vdi_info_location in + let clonee, _ = find_vdi ~__context sr vdi_info.vdi in + let content_id = + try + List.assoc "content_id" + (Db.VDI.get_other_config ~__context ~self:clonee) + with _ -> + Uuid.string_of_uuid (Uuid.make_uuid ()) + in + Db.VDI.set_name_label ~__context ~self ~value:vdi_info.name_label; + Db.VDI.set_name_description ~__context ~self ~value:vdi_info.name_description; + Db.VDI.set_snapshot_time ~__context ~self ~value:(Date.of_string vdi_info.snapshot_time); + Db.VDI.set_is_a_snapshot ~__context ~self ~value:is_a_snapshot; + Db.VDI.remove_from_other_config ~__context ~self ~key:"content_id"; + Db.VDI.add_to_other_config ~__context ~self ~key:"content_id" ~value:content_id; + debug "copying sm-config"; + List.iter (fun (key, value) -> + let preserve = List.mem key sm_config_keys_to_preserve_on_clone in + if preserve then ( + Db.VDI.remove_from_sm_config ~__context ~self ~key; + Db.VDI.add_to_sm_config ~__context ~self ~key ~value; + ) + ) vdi_info.sm_config; + for_vdi ~dbg ~sr ~vdi:vi.Smint.vdi_info_location "VDI.update" + (fun 
device_config _type sr self -> + Sm.vdi_update device_config _type sr self + ); + let vdi = vdi_info_from_db ~__context self in + debug "vdi = %s" (string_of_vdi_info vdi); + vdi + ) + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | Smint.Not_implemented_in_backend -> + raise (Unimplemented call_name) + | Sm.MasterOnly -> redirect sr + + + let snapshot = snapshot_and_clone "VDI.snapshot" Sm.vdi_snapshot true + let clone = snapshot_and_clone "VDI.clone" Sm.vdi_clone false + + let set_name_label context ~dbg ~sr ~vdi ~new_name_label = + Server_helpers.exec_with_new_task "VDI.set_name_label" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let self, _ = find_vdi ~__context sr vdi in + Db.VDI.set_name_label ~__context ~self ~value:new_name_label + ) + + let set_name_description context ~dbg ~sr ~vdi ~new_name_description = + Server_helpers.exec_with_new_task "VDI.set_name_description" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let self, _ = find_vdi ~__context sr vdi in + Db.VDI.set_name_description ~__context ~self ~value:new_name_description + ) + + let resize context ~dbg ~sr ~vdi ~new_size = + try + let vi = for_vdi ~dbg ~sr ~vdi "VDI.resize" + (fun device_config _type sr self -> + Sm.vdi_resize device_config _type sr self new_size + ) in + Server_helpers.exec_with_new_task "VDI.resize" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let self, _ = find_vdi ~__context sr vi.Smint.vdi_info_location in + Db.VDI.get_virtual_size ~__context ~self + ) + with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | Smint.Not_implemented_in_backend -> + raise (Unimplemented "VDI.resize") + | Sm.MasterOnly -> redirect sr + + let destroy context ~dbg ~sr ~vdi = + try + for_vdi ~dbg ~sr ~vdi "VDI.destroy" + (fun device_config _type sr self -> + Sm.vdi_delete device_config _type sr self + ); + Mutex.execute vdi_read_write_m + (fun () -> Hashtbl.remove vdi_read_write (sr, vdi)) + 
with + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | No_VDI -> + raise (Vdi_does_not_exist vdi) + | Sm.MasterOnly -> redirect sr + + let stat context ~dbg ~sr ~vdi = + try + Server_helpers.exec_with_new_task "VDI.stat" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + for_vdi ~dbg ~sr ~vdi "VDI.stat" + (fun device_config _type sr self -> + Sm.vdi_update device_config _type sr self; + vdi_info_of_vdi_rec __context (Db.VDI.get_record ~__context ~self) + ) + ) + with e -> + error "VDI.stat caught: %s" (Printexc.to_string e); + raise (Vdi_does_not_exist vdi) + + let introduce context ~dbg ~sr ~uuid ~sm_config ~location = + try + Server_helpers.exec_with_new_task "VDI.introduce" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr in + let vi = + Sm.call_sm_functions ~__context ~sR:sr + (fun device_config sr_type -> + Sm.vdi_introduce device_config sr_type sr uuid sm_config location) in + newvdi ~__context vi + ) + with e -> + error "VDI.introduce caught: %s" (Printexc.to_string e); + raise (Vdi_does_not_exist location) + + let set_persistent context ~dbg ~sr ~vdi ~persistent = + try + Server_helpers.exec_with_new_task "VDI.set_persistent" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + if not persistent then begin + info "VDI.set_persistent: calling VDI.clone and VDI.destroy to make an empty vhd-leaf"; + let location = for_vdi ~dbg ~sr ~vdi "VDI.clone" + (fun device_config _type sr self -> + let vi = Sm.vdi_clone device_config _type [] sr self in + vi.Smint.vdi_info_location + ) in + for_vdi ~dbg ~sr ~vdi:location "VDI.destroy" + (fun device_config _type sr self -> + Sm.vdi_delete device_config _type sr self + ) + end + ) + with + | Api_errors.Server_error(code, params) -> raise (Backend_error(code, params)) + | Sm.MasterOnly -> redirect sr + + let get_by_name context ~dbg ~sr ~name = + info "VDI.get_by_name dbg:%s sr:%s name:%s" dbg sr name; + (* PR-1255: the backend 
should do this for us *) + Server_helpers.exec_with_new_task "VDI.get_by_name" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + (* PR-1255: the backend should do this for us *) + try + let _, vdi = find_content ~__context ~sr name in + let vi = vdi_info_of_vdi_rec __context vdi in + debug "VDI.get_by_name returning successfully"; + vi + with e -> + error "VDI.get_by_name caught: %s" (Printexc.to_string e); + raise (Vdi_does_not_exist name) + ) + + let set_content_id context ~dbg ~sr ~vdi ~content_id = + info "VDI.get_by_content dbg:%s sr:%s vdi:%s content_id:%s" dbg sr vdi content_id; + (* PR-1255: the backend should do this for us *) + Server_helpers.exec_with_new_task "VDI.set_content_id" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let vdi, _ = find_vdi ~__context sr vdi in + Db.VDI.remove_from_other_config ~__context ~self:vdi ~key:"content_id"; + Db.VDI.add_to_other_config ~__context ~self:vdi ~key:"content_id" ~value:content_id + ) + + let similar_content context ~dbg ~sr ~vdi = + info "VDI.similar_content dbg:%s sr:%s vdi:%s" dbg sr vdi; + Server_helpers.exec_with_new_task "VDI.similar_content" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + (* PR-1255: the backend should do this for us. *) + let sr_ref = Db.SR.get_by_uuid ~__context ~uuid:sr in + (* Return a nearest-first list of similar VDIs. 
"near" should mean + "has similar blocks" but we approximate this with distance in the tree *) + let module StringMap = Map.Make(struct type t = string let compare = compare end) in + let _vhdparent = "vhd-parent" in + let open Db_filter_types in + let all = Db.VDI.get_records_where ~__context ~expr:(Eq (Field "SR", Literal (Ref.string_of sr_ref))) in + let locations = List.fold_left + (fun acc (_, vdi_rec) -> StringMap.add vdi_rec.API.vDI_location vdi_rec acc) + StringMap.empty all in + (* Compute a map of parent location -> children locations *) + let children, parents = List.fold_left + (fun (children, parents) (vdi_r, vdi_rec) -> + if List.mem_assoc _vhdparent vdi_rec.API.vDI_sm_config then begin + let me = vdi_rec.API.vDI_location in + let parent = List.assoc _vhdparent vdi_rec.API.vDI_sm_config in + let other_children = if StringMap.mem parent children then StringMap.find parent children else [] in + (StringMap.add parent (me :: other_children) children), + (StringMap.add me parent parents) + end else (children, parents)) (StringMap.empty, StringMap.empty) all in + + let rec explore current_distance acc vdi = + (* add me *) + let acc = StringMap.add vdi current_distance acc in + (* add the parent *) + let parent = if StringMap.mem vdi parents then [ StringMap.find vdi parents ] else [] in + let children = if StringMap.mem vdi children then StringMap.find vdi children else [] in + List.fold_left + (fun acc vdi -> + if not(StringMap.mem vdi acc) + then explore (current_distance + 1) acc vdi + else acc) acc (parent @ children) in + let module IntMap = Map.Make(struct type t = int let compare = compare end) in + let invert map = + StringMap.fold + (fun vdi n acc -> + let current = if IntMap.mem n acc then IntMap.find n acc else [] in + IntMap.add n (vdi :: current) acc + ) map IntMap.empty in + let _, vdi_rec = find_vdi ~__context sr vdi in + let vdis = explore 0 StringMap.empty vdi_rec.API.vDI_location |> invert |> IntMap.bindings |> List.map snd |> List.concat 
in + let vdi_recs = List.map (fun l -> StringMap.find l locations) vdis in + List.map (fun x -> vdi_info_of_vdi_rec __context x) vdi_recs + ) + + let compose context ~dbg ~sr ~vdi1 ~vdi2 = + info "VDI.compose dbg:%s sr:%s vdi1:%s vdi2:%s" dbg sr vdi1 vdi2; + try + Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + (* This call 'operates' on vdi2 *) + let vdi1 = find_vdi ~__context sr vdi1 |> fst in + for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.activate" + (fun device_config _type sr self -> + Sm.vdi_compose device_config _type sr vdi1 self + ) + ) + with + | Smint.Not_implemented_in_backend -> + raise (Unimplemented "VDI.compose") + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | No_VDI -> + raise (Vdi_does_not_exist vdi1) + | Sm.MasterOnly -> redirect sr + + let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = + info "VDI.add_to_sm_config dbg:%s sr:%s vdi:%s key:%s value:%s" dbg sr vdi key value; + Server_helpers.exec_with_new_task "VDI.add_to_sm_config" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let self = find_vdi ~__context sr vdi |> fst in + Db.VDI.add_to_sm_config ~__context ~self ~key ~value) + + let remove_from_sm_config context ~dbg ~sr ~vdi ~key = + info "VDI.remove_from_sm_config dbg:%s sr:%s vdi:%s key:%s" dbg sr vdi key; + Server_helpers.exec_with_new_task "VDI.remove_from_sm_config" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let self = find_vdi ~__context sr vdi |> fst in + Db.VDI.remove_from_sm_config ~__context ~self ~key) + + let get_url context ~dbg ~sr ~vdi = + info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; + (* XXX: PR-1255: tapdisk shouldn't hardcode xapi urls *) + (* peer_ip/session_ref/vdi_ref *) + Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) + (fun __context -> + let ip = Helpers.get_management_ip_addr ~__context |> Opt.unbox in + let rpc = Helpers.make_rpc ~__context in + let localhost = 
Helpers.get_localhost ~__context in + (* XXX: leaked *) + let session_ref = XenAPI.Session.slave_login rpc localhost !Xapi_globs.pool_secret in + let vdi, _ = find_vdi ~__context sr vdi in + Printf.sprintf "%s/%s/%s" ip (Ref.string_of session_ref) (Ref.string_of vdi)) + + end + + let get_by_name context ~dbg ~name = assert false + + module DATA = struct + let copy_into context ~dbg ~sr ~vdi ~url ~dest = assert false + let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false + module MIRROR = struct + let start context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false + let stop context ~dbg ~id = assert false + let list context ~dbg = assert false + let stat context ~dbg ~id = assert false + let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = assert false + let receive_finalize context ~dbg ~id = assert false + let receive_cancel context ~dbg ~id = assert false + end + end + + module Policy = struct + let get_backend_vm context ~dbg ~vm ~sr ~vdi = assert false + end + + module TASK = struct + let stat context ~dbg ~task = assert false + let destroy context ~dbg ~task = assert false + let cancel context ~dbg ~task = assert false + let list context ~dbg = assert false + end + + module UPDATES = struct + let get context ~dbg ~from ~timeout = assert false + end end module type SERVER = sig - val process : Smint.request -> Rpc.call -> Rpc.response + val process : Smint.request -> Rpc.call -> Rpc.response end (* Start a set of servers for all SMAPIv1 plugins *) let start_smapiv1_servers () = - let drivers = - if Pool_role.is_master () - then Sm.supported_drivers () - else Server_helpers.exec_with_new_task "start_smapiv1_servers" (fun __context -> - let all = Db.SM.get_all_records ~__context in - let needed = List.filter (fun (_, x) -> - let version = Xapi_sm.version_of_string x.API.sM_version in - version < [ 2; 0 ] - ) all in - List.map (fun (_, x) -> x.API.sM_type) needed - ) in - List.iter (fun ty -> - let path = !Storage_interface.default_path ^ ".d/" ^ ty in - 
let queue_name = !Storage_interface.queue_name ^ "." ^ ty in - let module S = Storage_interface.Server(SMAPIv1) in - let s = Xcp_service.make ~path ~queue_name ~rpc_fn:(S.process None) () in - let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in - () - ) drivers + let drivers = + if Pool_role.is_master () + then Sm.supported_drivers () + else Server_helpers.exec_with_new_task "start_smapiv1_servers" (fun __context -> + let all = Db.SM.get_all_records ~__context in + let needed = List.filter (fun (_, x) -> + let version = Xapi_sm.version_of_string x.API.sM_version in + version < [ 2; 0 ] + ) all in + List.map (fun (_, x) -> x.API.sM_type) needed + ) in + List.iter (fun ty -> + let path = !Storage_interface.default_path ^ ".d/" ^ ty in + let queue_name = !Storage_interface.queue_name ^ "." ^ ty in + let module S = Storage_interface.Server(SMAPIv1) in + let s = Xcp_service.make ~path ~queue_name ~rpc_fn:(S.process None) () in + let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in + () + ) drivers let make_service uuid ty = - { - System_domains.uuid = uuid; - ty = Constants._SM; - instance = ty; - url = Constants.path [ Constants._services; Constants._driver; uuid; Constants._SM; ty ]; - } + { + System_domains.uuid = uuid; + ty = Constants._SM; + instance = ty; + url = Constants.path [ Constants._services; Constants._driver; uuid; Constants._SM; ty ]; + } let external_rpc queue_name uri = - fun call -> - let open Xcp_client in - if !use_switch - then json_switch_rpc queue_name call - else xml_http_rpc - ~srcstr:(get_user_agent ()) - ~dststr:queue_name - uri - call + fun call -> + let open Xcp_client in + if !use_switch + then json_switch_rpc queue_name call + else xml_http_rpc + ~srcstr:(get_user_agent ()) + ~dststr:queue_name + uri + call (* Internal exception, never escapes the module *) exception Message_switch_failure @@ -906,179 +906,179 @@ exception Message_switch_failure (** Synchronise the SM table with the 
SMAPIv1 plugins on the disk and the SMAPIv2 plugins mentioned in the configuration file whitelist. *) let on_xapi_start ~__context = - let existing = List.map (fun (rf, rc) -> rc.API.sM_type, (rf, rc)) (Db.SM.get_all_records ~__context) in - let explicitly_configured_drivers = List.filter_map (function `Sm x -> Some x | `All -> None) !Xapi_globs.sm_plugins in - let smapiv1_drivers = Sm.supported_drivers () in - let configured_drivers = explicitly_configured_drivers @ smapiv1_drivers in - let in_use_drivers = List.map (fun (rf, rc) -> rc.API.sR_type) (Db.SR.get_all_records ~__context) in - let to_keep = configured_drivers @ in_use_drivers in - (* Delete all records which aren't configured or in-use *) - List.iter - (fun ty -> - info "Unregistering SM plugin %s since not in the whitelist and not in-use" ty; - let self, _ = List.assoc ty existing in - try - Db.SM.destroy ~__context ~self - with _ -> () - ) (List.set_difference (List.map fst existing) to_keep); - (* Create all missing SMAPIv1 plugins *) - List.iter - (fun ty -> - let query_result = Sm.info_of_driver ty |> Smint.query_result_of_sr_driver_info in - Xapi_sm.create_from_query_result ~__context query_result - ) (List.set_difference smapiv1_drivers (List.map fst existing)); - (* Update all existing SMAPIv1 plugins *) - List.iter - (fun ty -> - let query_result = Sm.info_of_driver ty |> Smint.query_result_of_sr_driver_info in - Xapi_sm.update_from_query_result ~__context (List.assoc ty existing) query_result - ) (List.intersect smapiv1_drivers (List.map fst existing)); - let smapiv2_drivers = List.set_difference to_keep smapiv1_drivers in - (* Query the message switch to detect running SMAPIv2 plugins. 
*) - let running_smapiv2_drivers = - if !Xcp_client.use_switch then begin - try - let open Message_switch in - let open Protocol_unix in - let (>>|) result f = - match Client.error_to_msg result with - | `Error (`Msg x) -> - error "Error %s while querying message switch queues" x; - raise Message_switch_failure - | `Ok x -> f x - in - Client.connect ~switch:!Xcp_client.switch_path () - >>| fun t -> - Client.list ~t ~prefix:!Storage_interface.queue_name ~filter:`Alive () - >>| fun running_smapiv2_driver_queues -> - List.filter - (fun driver -> - List.exists - (Xstringext.String.endswith driver) - running_smapiv2_driver_queues) - smapiv2_drivers - with - | Message_switch_failure -> [] (* no more logging *) - | e -> - error "Unexpected error querying the message switch: %s" (Printexc.to_string e); - Debug.log_backtrace e (Backtrace.get e); - [] - end - else smapiv2_drivers - in - (* Create all missing SMAPIv2 plugins *) - let query ty = - let queue_name = !Storage_interface.queue_name ^ "." 
^ ty in - let uri () = Storage_interface.uri () ^ ".d/" ^ ty in - let rpc = external_rpc queue_name uri in - let module C = Storage_interface.Client(struct let rpc = rpc end) in - let dbg = Context.string_of_task __context in - C.Query.query ~dbg in - List.iter - (fun ty -> - Xapi_sm.create_from_query_result ~__context (query ty) - ) (List.set_difference running_smapiv2_drivers (List.map fst existing)); - (* Update all existing SMAPIv2 plugins *) - List.iter - (fun ty -> - Xapi_sm.update_from_query_result ~__context (List.assoc ty existing) (query ty) - ) (List.intersect running_smapiv2_drivers (List.map fst existing)) + let existing = List.map (fun (rf, rc) -> rc.API.sM_type, (rf, rc)) (Db.SM.get_all_records ~__context) in + let explicitly_configured_drivers = List.filter_map (function `Sm x -> Some x | `All -> None) !Xapi_globs.sm_plugins in + let smapiv1_drivers = Sm.supported_drivers () in + let configured_drivers = explicitly_configured_drivers @ smapiv1_drivers in + let in_use_drivers = List.map (fun (rf, rc) -> rc.API.sR_type) (Db.SR.get_all_records ~__context) in + let to_keep = configured_drivers @ in_use_drivers in + (* Delete all records which aren't configured or in-use *) + List.iter + (fun ty -> + info "Unregistering SM plugin %s since not in the whitelist and not in-use" ty; + let self, _ = List.assoc ty existing in + try + Db.SM.destroy ~__context ~self + with _ -> () + ) (List.set_difference (List.map fst existing) to_keep); + (* Create all missing SMAPIv1 plugins *) + List.iter + (fun ty -> + let query_result = Sm.info_of_driver ty |> Smint.query_result_of_sr_driver_info in + Xapi_sm.create_from_query_result ~__context query_result + ) (List.set_difference smapiv1_drivers (List.map fst existing)); + (* Update all existing SMAPIv1 plugins *) + List.iter + (fun ty -> + let query_result = Sm.info_of_driver ty |> Smint.query_result_of_sr_driver_info in + Xapi_sm.update_from_query_result ~__context (List.assoc ty existing) query_result + ) 
(List.intersect smapiv1_drivers (List.map fst existing)); + let smapiv2_drivers = List.set_difference to_keep smapiv1_drivers in + (* Query the message switch to detect running SMAPIv2 plugins. *) + let running_smapiv2_drivers = + if !Xcp_client.use_switch then begin + try + let open Message_switch in + let open Protocol_unix in + let (>>|) result f = + match Client.error_to_msg result with + | `Error (`Msg x) -> + error "Error %s while querying message switch queues" x; + raise Message_switch_failure + | `Ok x -> f x + in + Client.connect ~switch:!Xcp_client.switch_path () + >>| fun t -> + Client.list ~t ~prefix:!Storage_interface.queue_name ~filter:`Alive () + >>| fun running_smapiv2_driver_queues -> + List.filter + (fun driver -> + List.exists + (Xstringext.String.endswith driver) + running_smapiv2_driver_queues) + smapiv2_drivers + with + | Message_switch_failure -> [] (* no more logging *) + | e -> + error "Unexpected error querying the message switch: %s" (Printexc.to_string e); + Debug.log_backtrace e (Backtrace.get e); + [] + end + else smapiv2_drivers + in + (* Create all missing SMAPIv2 plugins *) + let query ty = + let queue_name = !Storage_interface.queue_name ^ "." 
^ ty in + let uri () = Storage_interface.uri () ^ ".d/" ^ ty in + let rpc = external_rpc queue_name uri in + let module C = Storage_interface.Client(struct let rpc = rpc end) in + let dbg = Context.string_of_task __context in + C.Query.query ~dbg in + List.iter + (fun ty -> + Xapi_sm.create_from_query_result ~__context (query ty) + ) (List.set_difference running_smapiv2_drivers (List.map fst existing)); + (* Update all existing SMAPIv2 plugins *) + List.iter + (fun ty -> + Xapi_sm.update_from_query_result ~__context (List.assoc ty existing) (query ty) + ) (List.intersect running_smapiv2_drivers (List.map fst existing)) let bind ~__context ~pbd = - (* Start the VM if necessary, record its uuid *) - let driver = System_domains.storage_driver_domain_of_pbd ~__context ~pbd in - if Db.VM.get_power_state ~__context ~self:driver = `Halted then begin - info "PBD %s driver domain %s is offline: starting" (Ref.string_of pbd) (Ref.string_of driver); - try - Helpers.call_api_functions ~__context - (fun rpc session_id -> XenAPI.VM.start rpc session_id driver false false) - with (Api_errors.Server_error(code, params)) when code = Api_errors.vm_bad_power_state -> - error "Caught VM_BAD_POWER_STATE [ %s ]" (String.concat "; " params); - (* ignore for now *) - end; - let uuid = Db.VM.get_uuid ~__context ~self:driver in - - let sr = Db.PBD.get_SR ~__context ~self:pbd in - let ty = Db.SR.get_type ~__context ~self:sr in - let sr = Db.SR.get_uuid ~__context ~self:sr in - let queue_name = !Storage_interface.queue_name ^ "." 
^ ty in - let uri () = Storage_interface.uri () ^ ".d/" ^ ty in - let rpc = external_rpc queue_name uri in - let service = make_service uuid ty in - System_domains.register_service service queue_name; - let module Client = Storage_interface.Client(struct let rpc = rpc end) in - let dbg = Context.string_of_task __context in - let info = Client.Query.query ~dbg in - Storage_mux.register sr rpc uuid info; - info + (* Start the VM if necessary, record its uuid *) + let driver = System_domains.storage_driver_domain_of_pbd ~__context ~pbd in + if Db.VM.get_power_state ~__context ~self:driver = `Halted then begin + info "PBD %s driver domain %s is offline: starting" (Ref.string_of pbd) (Ref.string_of driver); + try + Helpers.call_api_functions ~__context + (fun rpc session_id -> XenAPI.VM.start rpc session_id driver false false) + with (Api_errors.Server_error(code, params)) when code = Api_errors.vm_bad_power_state -> + error "Caught VM_BAD_POWER_STATE [ %s ]" (String.concat "; " params); + (* ignore for now *) + end; + let uuid = Db.VM.get_uuid ~__context ~self:driver in + + let sr = Db.PBD.get_SR ~__context ~self:pbd in + let ty = Db.SR.get_type ~__context ~self:sr in + let sr = Db.SR.get_uuid ~__context ~self:sr in + let queue_name = !Storage_interface.queue_name ^ "." 
^ ty in + let uri () = Storage_interface.uri () ^ ".d/" ^ ty in + let rpc = external_rpc queue_name uri in + let service = make_service uuid ty in + System_domains.register_service service queue_name; + let module Client = Storage_interface.Client(struct let rpc = rpc end) in + let dbg = Context.string_of_task __context in + let info = Client.Query.query ~dbg in + Storage_mux.register sr rpc uuid info; + info let unbind ~__context ~pbd = - let driver = System_domains.storage_driver_domain_of_pbd ~__context ~pbd in - let uuid = Db.VM.get_uuid ~__context ~self:driver in + let driver = System_domains.storage_driver_domain_of_pbd ~__context ~pbd in + let uuid = Db.VM.get_uuid ~__context ~self:driver in - let sr = Db.PBD.get_SR ~__context ~self:pbd in - let ty = Db.SR.get_type ~__context ~self:sr in + let sr = Db.PBD.get_SR ~__context ~self:pbd in + let ty = Db.SR.get_type ~__context ~self:sr in - let sr = Db.SR.get_uuid ~__context ~self:sr in - info "SR %s will nolonger be implemented by VM %s" sr (Ref.string_of driver); - Storage_mux.unregister sr; + let sr = Db.SR.get_uuid ~__context ~self:sr in + info "SR %s will nolonger be implemented by VM %s" sr (Ref.string_of driver); + Storage_mux.unregister sr; - let service = make_service uuid ty in - System_domains.unregister_service service + let service = make_service uuid ty in + System_domains.unregister_service service let rpc call = Storage_mux.Server.process None call module Client = Client(struct let rpc = rpc end) let print_delta d = - debug "Received update: %s" (Jsonrpc.to_string (Storage_interface.Dynamic.rpc_of_id d)) + debug "Received update: %s" (Jsonrpc.to_string (Storage_interface.Dynamic.rpc_of_id d)) let event_wait dbg p = - let finished = ref false in - let event_id = ref "" in - while not !finished do - debug "Calling UPDATES.get %s %s 30" dbg !event_id; - let deltas, next_id = Client.UPDATES.get dbg !event_id (Some 30) in - List.iter (fun d -> print_delta d) deltas; - event_id := next_id; - List.iter 
(fun d -> if p d then finished := true) deltas; - done + let finished = ref false in + let event_id = ref "" in + while not !finished do + debug "Calling UPDATES.get %s %s 30" dbg !event_id; + let deltas, next_id = Client.UPDATES.get dbg !event_id (Some 30) in + List.iter (fun d -> print_delta d) deltas; + event_id := next_id; + List.iter (fun d -> if p d then finished := true) deltas; + done let task_ended dbg id = - match (Client.TASK.stat dbg id).Task.state with - | Task.Completed _ - | Task.Failed _ -> true - | Task.Pending _ -> false + match (Client.TASK.stat dbg id).Task.state with + | Task.Completed _ + | Task.Failed _ -> true + | Task.Pending _ -> false let success_task dbg id = - let t = Client.TASK.stat dbg id in - Client.TASK.destroy dbg id; - match t.Task.state with - | Task.Completed _ -> t - | Task.Failed x -> raise (exn_of_exnty (Exception.exnty_of_rpc x)) - | Task.Pending _ -> failwith "task pending" + let t = Client.TASK.stat dbg id in + Client.TASK.destroy dbg id; + match t.Task.state with + | Task.Completed _ -> t + | Task.Failed x -> raise (exn_of_exnty (Exception.exnty_of_rpc x)) + | Task.Pending _ -> failwith "task pending" let wait_for_task dbg id = - debug "Waiting for task id=%s to finish" id; - let finished = function - | Dynamic.Task id' -> - id = id' && (task_ended dbg id) - | _ -> - false in - event_wait dbg finished; - id + debug "Waiting for task id=%s to finish" id; + let finished = function + | Dynamic.Task id' -> + id = id' && (task_ended dbg id) + | _ -> + false in + event_wait dbg finished; + id let vdi_of_task dbg t = - match t.Task.state with - | Task.Completed { Task.result = Some Vdi_info v } -> v - | Task.Completed _ -> failwith "Runtime type error in vdi_of_task" - | _ -> failwith "Task not completed" + match t.Task.state with + | Task.Completed { Task.result = Some Vdi_info v } -> v + | Task.Completed _ -> failwith "Runtime type error in vdi_of_task" + | _ -> failwith "Task not completed" let mirror_of_task dbg t = - match 
t.Task.state with - | Task.Completed { Task.result = Some Mirror_id i } -> i - | Task.Completed _ -> failwith "Runtime type error in mirror_of_task" - | _ -> failwith "Task not complete" + match t.Task.state with + | Task.Completed { Task.result = Some Mirror_id i } -> i + | Task.Completed _ -> failwith "Runtime type error in mirror_of_task" + | _ -> failwith "Task not complete" let progress_map_tbl = Hashtbl.create 10 let mirror_task_tbl = Hashtbl.create 10 @@ -1088,10 +1088,10 @@ let add_to_progress_map f id = Mutex.execute progress_map_m (fun () -> Hashtbl.a let remove_from_progress_map id = Mutex.execute progress_map_m (fun () -> Hashtbl.remove progress_map_tbl id); id let get_progress_map id = Mutex.execute progress_map_m (fun () -> try Hashtbl.find progress_map_tbl id with _ -> (fun x -> x)) -let register_mirror __context vdi = - let task = Context.get_task_id __context in - debug "Registering mirror of vdi %s with task %s" vdi (Ref.string_of task); - Mutex.execute progress_map_m (fun () -> Hashtbl.add mirror_task_tbl vdi task); vdi +let register_mirror __context vdi = + let task = Context.get_task_id __context in + debug "Registering mirror of vdi %s with task %s" vdi (Ref.string_of task); + Mutex.execute progress_map_m (fun () -> Hashtbl.add mirror_task_tbl vdi task); vdi let unregister_mirror vdi = Mutex.execute progress_map_m (fun () -> Hashtbl.remove mirror_task_tbl vdi); vdi let get_mirror_task vdi = Mutex.execute progress_map_m (fun () -> Hashtbl.find mirror_task_tbl vdi) @@ -1102,218 +1102,218 @@ let register_task __context id = TaskHelper.register_task __context (wrap id); i let unregister_task __context id = TaskHelper.unregister_task __context (wrap id); id let update_task ~__context id = - try - let self = TaskHelper.id_to_task_exn (TaskHelper.Sm id) in (* throws Not_found *) - let dbg = Context.string_of_task __context in - let task_t = Client.TASK.stat dbg id in - let map = get_progress_map id in - match task_t.Task.state with - | Task.Pending x 
-> - Db.Task.set_progress ~__context ~self ~value:(map x) - | _ -> () - with Not_found -> - (* Since this is called on all tasks, possibly after the task has been - destroyed, it's safe to ignore a Not_found exception here. *) - () - | e -> - error "storage event: Caught %s while updating task" (Printexc.to_string e) + try + let self = TaskHelper.id_to_task_exn (TaskHelper.Sm id) in (* throws Not_found *) + let dbg = Context.string_of_task __context in + let task_t = Client.TASK.stat dbg id in + let map = get_progress_map id in + match task_t.Task.state with + | Task.Pending x -> + Db.Task.set_progress ~__context ~self ~value:(map x) + | _ -> () + with Not_found -> + (* Since this is called on all tasks, possibly after the task has been + destroyed, it's safe to ignore a Not_found exception here. *) + () + | e -> + error "storage event: Caught %s while updating task" (Printexc.to_string e) let update_mirror ~__context id = - try - let dbg = Context.string_of_task __context in - let m = Client.DATA.MIRROR.stat dbg id in - if m.Mirror.failed - then - debug "Mirror %s has failed" id; - let task = get_mirror_task m.Mirror.source_vdi in - debug "Mirror associated with task: %s" (Ref.string_of task); - (* Just to get a nice error message *) - Db.Task.remove_from_other_config ~__context ~self:task ~key:"mirror_failed"; - Db.Task.add_to_other_config ~__context ~self:task ~key:"mirror_failed" ~value:m.Mirror.source_vdi; - Helpers.call_api_functions ~__context - (fun rpc session_id -> XenAPI.Task.cancel rpc session_id task) - with - | Not_found -> - debug "Couldn't find mirror id: %s" id - | Does_not_exist _ -> () - | e -> - error "storage event: Caught %s while updating mirror" (Printexc.to_string e) - + try + let dbg = Context.string_of_task __context in + let m = Client.DATA.MIRROR.stat dbg id in + if m.Mirror.failed + then + debug "Mirror %s has failed" id; + let task = get_mirror_task m.Mirror.source_vdi in + debug "Mirror associated with task: %s" (Ref.string_of task); 
+ (* Just to get a nice error message *) + Db.Task.remove_from_other_config ~__context ~self:task ~key:"mirror_failed"; + Db.Task.add_to_other_config ~__context ~self:task ~key:"mirror_failed" ~value:m.Mirror.source_vdi; + Helpers.call_api_functions ~__context + (fun rpc session_id -> XenAPI.Task.cancel rpc session_id task) + with + | Not_found -> + debug "Couldn't find mirror id: %s" id + | Does_not_exist _ -> () + | e -> + error "storage event: Caught %s while updating mirror" (Printexc.to_string e) + let rec events_watch ~__context from = - let dbg = Context.string_of_task __context in - let events, next = Client.UPDATES.get dbg from None in - let open Dynamic in - List.iter - (function - | Task id -> - debug "sm event on Task %s" id; - update_task ~__context id - | Vdi vdi -> - debug "sm event on VDI %s: ignoring" vdi - | Dp dp -> - debug "sm event on DP %s: ignoring" dp - | Mirror id -> - debug "sm event on mirror: %s" id; - update_mirror ~__context id - ) events; - events_watch ~__context next + let dbg = Context.string_of_task __context in + let events, next = Client.UPDATES.get dbg from None in + let open Dynamic in + List.iter + (function + | Task id -> + debug "sm event on Task %s" id; + update_task ~__context id + | Vdi vdi -> + debug "sm event on VDI %s: ignoring" vdi + | Dp dp -> + debug "sm event on DP %s: ignoring" dp + | Mirror id -> + debug "sm event on mirror: %s" id; + update_mirror ~__context id + ) events; + events_watch ~__context next let events_from_sm () = - ignore(Thread.create (fun () -> - Server_helpers.exec_with_new_task "sm_events" - (fun __context -> - while true do - try - events_watch ~__context ""; - with e -> - error "event thread caught: %s" (Printexc.to_string e); - Thread.delay 10. 
- done - )) ()) + ignore(Thread.create (fun () -> + Server_helpers.exec_with_new_task "sm_events" + (fun __context -> + while true do + try + events_watch ~__context ""; + with e -> + error "event thread caught: %s" (Printexc.to_string e); + Thread.delay 10. + done + )) ()) let start () = - let open Storage_impl.Local_domain_socket in - let s = Xcp_service.make ~path:Xapi_globs.storage_unix_domain_socket ~queue_name:"org.xen.xapi.storage" ~rpc_fn:(Storage_mux.Server.process None) () in - info "Started service on org.xen.xapi.storage"; - let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in - () + let open Storage_impl.Local_domain_socket in + let s = Xcp_service.make ~path:Xapi_globs.storage_unix_domain_socket ~queue_name:"org.xen.xapi.storage" ~rpc_fn:(Storage_mux.Server.process None) () in + info "Started service on org.xen.xapi.storage"; + let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in + () (** [datapath_of_vbd domid userdevice] returns the name of the datapath which corresponds to device [userdevice] on domain [domid] *) let datapath_of_vbd ~domid ~device = - Printf.sprintf "vbd/%d/%s" domid device + Printf.sprintf "vbd/%d/%s" domid device let presentative_datapath_of_vbd ~__context ~vm ~vdi = - try - let vbds = Db.VDI.get_VBDs ~__context ~self:vdi in - let vbd = List.find (fun self -> Db.VBD.get_VM ~__context ~self = vm) vbds in - let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in - let device = Db.VBD.get_device ~__context ~self:vbd in - if domid < 0 || device = "" then raise Not_found; - datapath_of_vbd ~domid ~device - with Not_found -> - let vm_uuid = Db.VM.get_uuid ~__context ~self:vm in - let vdi_uuid = Db.VDI.get_uuid ~__context ~self:vdi in - Printf.sprintf "vbd/%s/%s" vm_uuid vdi_uuid + try + let vbds = Db.VDI.get_VBDs ~__context ~self:vdi in + let vbd = List.find (fun self -> Db.VBD.get_VM ~__context ~self = vm) vbds in + let domid = Int64.to_int (Db.VM.get_domid ~__context 
~self:vm) in + let device = Db.VBD.get_device ~__context ~self:vbd in + if domid < 0 || device = "" then raise Not_found; + datapath_of_vbd ~domid ~device + with Not_found -> + let vm_uuid = Db.VM.get_uuid ~__context ~self:vm in + let vdi_uuid = Db.VDI.get_uuid ~__context ~self:vdi in + Printf.sprintf "vbd/%s/%s" vm_uuid vdi_uuid let of_vbd ~__context ~vbd ~domid = - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - let location = Db.VDI.get_location ~__context ~self:vdi in - let sr = Db.VDI.get_SR ~__context ~self:vdi in - let userdevice = Db.VBD.get_userdevice ~__context ~self:vbd in - let hvm = Helpers.will_boot_hvm ~__context ~self:(Db.VBD.get_VM ~__context ~self:vbd) in - let dbg = Context.get_task_id __context in - let device_number = Device_number.of_string hvm userdevice in - let device = Device_number.to_linux_device device_number in - let dp = datapath_of_vbd ~domid ~device in - rpc, (Ref.string_of dbg), dp, (Db.SR.get_uuid ~__context ~self:sr), location + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + let location = Db.VDI.get_location ~__context ~self:vdi in + let sr = Db.VDI.get_SR ~__context ~self:vdi in + let userdevice = Db.VBD.get_userdevice ~__context ~self:vbd in + let hvm = Helpers.will_boot_hvm ~__context ~self:(Db.VBD.get_VM ~__context ~self:vbd) in + let dbg = Context.get_task_id __context in + let device_number = Device_number.of_string hvm userdevice in + let device = Device_number.to_linux_device device_number in + let dp = datapath_of_vbd ~domid ~device in + rpc, (Ref.string_of dbg), dp, (Db.SR.get_uuid ~__context ~self:sr), location (** [is_attached __context vbd] returns true if the [vbd] has an attached or activated datapath. 
*) let is_attached ~__context ~vbd ~domid = - transform_storage_exn - (fun () -> - let rpc, dbg, dp, sr, vdi = of_vbd ~__context ~vbd ~domid in - let open Vdi_automaton in - let module C = Storage_interface.Client(struct let rpc = rpc end) in - try - let x = C.DP.stat_vdi ~dbg ~sr ~vdi () in - x.superstate <> Detached - with - | e -> error "Unable to query state of VDI: %s, %s" vdi (Printexc.to_string e); false - ) + transform_storage_exn + (fun () -> + let rpc, dbg, dp, sr, vdi = of_vbd ~__context ~vbd ~domid in + let open Vdi_automaton in + let module C = Storage_interface.Client(struct let rpc = rpc end) in + try + let x = C.DP.stat_vdi ~dbg ~sr ~vdi () in + x.superstate <> Detached + with + | e -> error "Unable to query state of VDI: %s, %s" vdi (Printexc.to_string e); false + ) (** [on_vdi __context vbd domid f] calls [f rpc dp sr vdi] which is useful for executing Storage_interface.Client.VDI functions *) let on_vdi ~__context ~vbd ~domid f = - let rpc, dbg, dp, sr, vdi = of_vbd ~__context ~vbd ~domid in - let module C = Storage_interface.Client(struct let rpc = rpc end) in - let dp = C.DP.create dbg dp in - transform_storage_exn - (fun () -> - f rpc dbg dp sr vdi - ) + let rpc, dbg, dp, sr, vdi = of_vbd ~__context ~vbd ~domid in + let module C = Storage_interface.Client(struct let rpc = rpc end) in + let dp = C.DP.create dbg dp in + transform_storage_exn + (fun () -> + f rpc dbg dp sr vdi + ) let reset ~__context ~vm = - let dbg = Context.get_task_id __context in - transform_storage_exn - (fun () -> - Opt.iter - (fun pbd -> - let sr = Db.SR.get_uuid ~__context ~self:(Db.PBD.get_SR ~__context ~self:pbd) in - info "Resetting all state associated with SR: %s" sr; - Client.SR.reset (Ref.string_of dbg) sr; - Db.PBD.set_currently_attached ~__context ~self:pbd ~value:false; - ) (System_domains.pbd_of_vm ~__context ~vm) - ) + let dbg = Context.get_task_id __context in + transform_storage_exn + (fun () -> + Opt.iter + (fun pbd -> + let sr = Db.SR.get_uuid ~__context 
~self:(Db.PBD.get_SR ~__context ~self:pbd) in + info "Resetting all state associated with SR: %s" sr; + Client.SR.reset (Ref.string_of dbg) sr; + Db.PBD.set_currently_attached ~__context ~self:pbd ~value:false; + ) (System_domains.pbd_of_vm ~__context ~vm) + ) (** [attach_and_activate __context vbd domid f] calls [f attach_info] where [attach_info] is the result of attaching a VDI which is also activated. This should be used everywhere except the migrate code, where we want fine-grained control of the ordering of attach/activate/deactivate/detach *) let attach_and_activate ~__context ~vbd ~domid ~hvm f = - transform_storage_exn - (fun () -> - let read_write = Db.VBD.get_mode ~__context ~self:vbd = `RW in - on_vdi ~__context ~vbd ~domid - (fun rpc dbg dp sr vdi -> - let module C = Storage_interface.Client(struct let rpc = rpc end) in - let attach_info = C.VDI.attach dbg dp sr vdi read_write in - C.VDI.activate dbg dp sr vdi; - f attach_info - ) - ) + transform_storage_exn + (fun () -> + let read_write = Db.VBD.get_mode ~__context ~self:vbd = `RW in + on_vdi ~__context ~vbd ~domid + (fun rpc dbg dp sr vdi -> + let module C = Storage_interface.Client(struct let rpc = rpc end) in + let attach_info = C.VDI.attach dbg dp sr vdi read_write in + C.VDI.activate dbg dp sr vdi; + f attach_info + ) + ) (** [deactivate_and_detach __context vbd domid] idempotent function which ensures that any attached or activated VDI gets properly deactivated and detached. *) let deactivate_and_detach ~__context ~vbd ~domid = - transform_storage_exn - (fun () -> - (* It suffices to destroy the datapath: any attached or activated VDIs will be - automatically detached and deactivated. 
*) - on_vdi ~__context ~vbd ~domid - (fun rpc dbg dp sr vdi -> - let module C = Storage_interface.Client(struct let rpc = rpc end) in - C.DP.destroy dbg dp false - ) - ) + transform_storage_exn + (fun () -> + (* It suffices to destroy the datapath: any attached or activated VDIs will be + automatically detached and deactivated. *) + on_vdi ~__context ~vbd ~domid + (fun rpc dbg dp sr vdi -> + let module C = Storage_interface.Client(struct let rpc = rpc end) in + C.DP.destroy dbg dp false + ) + ) let diagnostics ~__context = - let dbg = Context.get_task_id __context |> Ref.string_of in - String.concat "\n" [ - "DataPath information:"; - Client.DP.diagnostics (); - "Backend information:"; - Client.Query.diagnostics dbg - ] + let dbg = Context.get_task_id __context |> Ref.string_of in + String.concat "\n" [ + "DataPath information:"; + Client.DP.diagnostics (); + "Backend information:"; + Client.Query.diagnostics dbg + ] let dp_destroy ~__context dp allow_leak = - transform_storage_exn - (fun () -> - let dbg = Context.get_task_id __context in - Client.DP.destroy (Ref.string_of dbg) dp allow_leak - ) + transform_storage_exn + (fun () -> + let dbg = Context.get_task_id __context in + Client.DP.destroy (Ref.string_of dbg) dp allow_leak + ) (* Set my PBD.currently_attached fields in the Pool database to match the local one *) let resynchronise_pbds ~__context ~pbds = - let dbg = Context.get_task_id __context in - let srs = Client.SR.list (Ref.string_of dbg) in - debug "Currently-attached SRs: [ %s ]" (String.concat "; " srs); - List.iter - (fun self -> - let sr = Db.SR.get_uuid ~__context ~self:(Db.PBD.get_SR ~__context ~self) in - let value = List.mem sr srs in - debug "Setting PBD %s currently_attached <- %b" (Ref.string_of self) value; - try - if value then (let (_:query_result) = bind ~__context ~pbd:self in ()); - Db.PBD.set_currently_attached ~__context ~self ~value - with e -> - (* Unchecked this will block the dbsync code *) - error "Service implementing SR %s has 
failed. Performing emergency reset of SR state" sr; - Client.SR.reset (Ref.string_of dbg) sr; - Db.PBD.set_currently_attached ~__context ~self ~value:false; - ) pbds + let dbg = Context.get_task_id __context in + let srs = Client.SR.list (Ref.string_of dbg) in + debug "Currently-attached SRs: [ %s ]" (String.concat "; " srs); + List.iter + (fun self -> + let sr = Db.SR.get_uuid ~__context ~self:(Db.PBD.get_SR ~__context ~self) in + let value = List.mem sr srs in + debug "Setting PBD %s currently_attached <- %b" (Ref.string_of self) value; + try + if value then (let (_:query_result) = bind ~__context ~pbd:self in ()); + Db.PBD.set_currently_attached ~__context ~self ~value + with e -> + (* Unchecked this will block the dbsync code *) + error "Service implementing SR %s has failed. Performing emergency reset of SR state" sr; + Client.SR.reset (Ref.string_of dbg) sr; + Db.PBD.set_currently_attached ~__context ~self ~value:false; + ) pbds (* -------------------------------------------------------------------------------- *) (* The following functions are symptoms of a broken interface with the SM layer. @@ -1328,141 +1328,141 @@ let resynchronise_pbds ~__context ~pbds = and must be synchronised against the state of the world. Therefore we must synchronise the xapi view with the storage_impl view here. *) let refresh_local_vdi_activations ~__context = - let all_vdi_recs = Db.VDI.get_all_records ~__context in - let localhost = Helpers.get_localhost ~__context in - let all_hosts = Db.Host.get_all ~__context in - - let key host = Printf.sprintf "host_%s" (Ref.string_of host) in - let hosts_of vdi_t = - let prefix = "host_" in - let ks = List.map fst vdi_t.API.vDI_sm_config in - let ks = List.filter (String.startswith prefix) ks in - let ks = List.map (fun k -> String.sub k (String.length prefix) (String.length k - (String.length prefix))) ks in - List.map Ref.of_string ks in - - (* If this VDI is currently locked to this host, remove the lock. 
- If this VDI is currently locked to a non-existent host (note host references - change across pool join), remove the lock. *) - let unlock_vdi (vdi_ref, vdi_rec) = - (* VDI is already unlocked is the common case: avoid eggregious logspam *) - let hosts = hosts_of vdi_rec in - let i_locked_it = List.mem localhost hosts in - let all = List.fold_left (&&) true in - let someone_leaked_it = all (List.map (fun h -> not(List.mem h hosts)) all_hosts) in - if i_locked_it || someone_leaked_it then begin - info "Unlocking VDI %s (because %s)" (Ref.string_of vdi_ref) - (if i_locked_it then "I locked it and then restarted" else "it was leaked (pool join?)"); - try - List.iter (fun h -> Db.VDI.remove_from_sm_config ~__context ~self:vdi_ref ~key:(key h)) hosts - with e -> - error "Failed to unlock VDI %s: %s" (Ref.string_of vdi_ref) (ExnHelper.string_of_exn e) - end in - let open Vdi_automaton in - (* Lock this VDI to this host *) - let lock_vdi (vdi_ref, vdi_rec) ro_rw = - info "Locking VDI %s" (Ref.string_of vdi_ref); - if not(List.mem_assoc (key localhost) vdi_rec.API.vDI_sm_config) then begin - try - Db.VDI.add_to_sm_config ~__context ~self:vdi_ref ~key:(key localhost) ~value:(string_of_ro_rw ro_rw) - with e -> - error "Failed to lock VDI %s: %s" (Ref.string_of vdi_ref) (ExnHelper.string_of_exn e) - end in - let remember key ro_rw = - (* The module above contains a hashtable of R/O vs R/W-ness *) - Mutex.execute SMAPIv1.VDI.vdi_read_write_m - (fun () -> Hashtbl.replace SMAPIv1.VDI.vdi_read_write key (ro_rw = RW)) in - - let dbg = Ref.string_of (Context.get_task_id __context) in - let srs = Client.SR.list dbg in - let sr_uuids = List.map (fun sr -> (sr, Db.SR.get_uuid ~__context ~self:sr)) (Db.SR.get_all ~__context) in - List.iter - (fun (vdi_ref, vdi_rec) -> - let sr = List.assoc vdi_rec.API.vDI_SR sr_uuids in - let vdi = vdi_rec.API.vDI_location in - if List.mem sr srs - then - try - let x = Client.DP.stat_vdi ~dbg ~sr ~vdi () in - match x.superstate with - | Activated RO 
-> - lock_vdi (vdi_ref, vdi_rec) RO; - remember (sr, vdi) RO - | Activated RW -> - lock_vdi (vdi_ref, vdi_rec) RW; - remember (sr, vdi) RW - | Attached RO -> - unlock_vdi (vdi_ref, vdi_rec); - remember (sr, vdi) RO - | Attached RW -> - unlock_vdi (vdi_ref, vdi_rec); - remember (sr, vdi) RW - | Detached -> - unlock_vdi (vdi_ref, vdi_rec) - with - | e -> error "Unable to query state of VDI: %s, %s" vdi (Printexc.to_string e) - else unlock_vdi (vdi_ref, vdi_rec) - ) all_vdi_recs + let all_vdi_recs = Db.VDI.get_all_records ~__context in + let localhost = Helpers.get_localhost ~__context in + let all_hosts = Db.Host.get_all ~__context in + + let key host = Printf.sprintf "host_%s" (Ref.string_of host) in + let hosts_of vdi_t = + let prefix = "host_" in + let ks = List.map fst vdi_t.API.vDI_sm_config in + let ks = List.filter (String.startswith prefix) ks in + let ks = List.map (fun k -> String.sub k (String.length prefix) (String.length k - (String.length prefix))) ks in + List.map Ref.of_string ks in + + (* If this VDI is currently locked to this host, remove the lock. + If this VDI is currently locked to a non-existent host (note host references + change across pool join), remove the lock. 
*) + let unlock_vdi (vdi_ref, vdi_rec) = + (* VDI is already unlocked is the common case: avoid eggregious logspam *) + let hosts = hosts_of vdi_rec in + let i_locked_it = List.mem localhost hosts in + let all = List.fold_left (&&) true in + let someone_leaked_it = all (List.map (fun h -> not(List.mem h hosts)) all_hosts) in + if i_locked_it || someone_leaked_it then begin + info "Unlocking VDI %s (because %s)" (Ref.string_of vdi_ref) + (if i_locked_it then "I locked it and then restarted" else "it was leaked (pool join?)"); + try + List.iter (fun h -> Db.VDI.remove_from_sm_config ~__context ~self:vdi_ref ~key:(key h)) hosts + with e -> + error "Failed to unlock VDI %s: %s" (Ref.string_of vdi_ref) (ExnHelper.string_of_exn e) + end in + let open Vdi_automaton in + (* Lock this VDI to this host *) + let lock_vdi (vdi_ref, vdi_rec) ro_rw = + info "Locking VDI %s" (Ref.string_of vdi_ref); + if not(List.mem_assoc (key localhost) vdi_rec.API.vDI_sm_config) then begin + try + Db.VDI.add_to_sm_config ~__context ~self:vdi_ref ~key:(key localhost) ~value:(string_of_ro_rw ro_rw) + with e -> + error "Failed to lock VDI %s: %s" (Ref.string_of vdi_ref) (ExnHelper.string_of_exn e) + end in + let remember key ro_rw = + (* The module above contains a hashtable of R/O vs R/W-ness *) + Mutex.execute SMAPIv1.VDI.vdi_read_write_m + (fun () -> Hashtbl.replace SMAPIv1.VDI.vdi_read_write key (ro_rw = RW)) in + + let dbg = Ref.string_of (Context.get_task_id __context) in + let srs = Client.SR.list dbg in + let sr_uuids = List.map (fun sr -> (sr, Db.SR.get_uuid ~__context ~self:sr)) (Db.SR.get_all ~__context) in + List.iter + (fun (vdi_ref, vdi_rec) -> + let sr = List.assoc vdi_rec.API.vDI_SR sr_uuids in + let vdi = vdi_rec.API.vDI_location in + if List.mem sr srs + then + try + let x = Client.DP.stat_vdi ~dbg ~sr ~vdi () in + match x.superstate with + | Activated RO -> + lock_vdi (vdi_ref, vdi_rec) RO; + remember (sr, vdi) RO + | Activated RW -> + lock_vdi (vdi_ref, vdi_rec) RW; + remember 
(sr, vdi) RW + | Attached RO -> + unlock_vdi (vdi_ref, vdi_rec); + remember (sr, vdi) RO + | Attached RW -> + unlock_vdi (vdi_ref, vdi_rec); + remember (sr, vdi) RW + | Detached -> + unlock_vdi (vdi_ref, vdi_rec) + with + | e -> error "Unable to query state of VDI: %s, %s" vdi (Printexc.to_string e) + else unlock_vdi (vdi_ref, vdi_rec) + ) all_vdi_recs (* This is a symptom of the ordering-sensitivity of the SM backend: it is not possible to upgrade RO -> RW or downgrade RW -> RO on the fly. One possible fix is to always attach RW and enforce read/only-ness at the VBD-level. However we would need to fix the LVHD "attach provisioning mode". *) -let vbd_attach_order ~__context vbds = - (* return RW devices first since the storage layer can't upgrade a - 'RO attach' into a 'RW attach' *) - let rw, ro = List.partition (fun self -> Db.VBD.get_mode ~__context ~self = `RW) vbds in - rw @ ro +let vbd_attach_order ~__context vbds = + (* return RW devices first since the storage layer can't upgrade a + 'RO attach' into a 'RW attach' *) + let rw, ro = List.partition (fun self -> Db.VBD.get_mode ~__context ~self = `RW) vbds in + rw @ ro let vbd_detach_order ~__context vbds = List.rev (vbd_attach_order ~__context vbds) let create_sr ~__context ~sr ~name_label ~name_description ~physical_size = - transform_storage_exn - (fun () -> - let pbd, pbd_t = Sm.get_my_pbd_for_sr __context sr in - let (_ : query_result) = bind ~__context ~pbd in - let dbg = Ref.string_of (Context.get_task_id __context) in - Client.SR.create dbg (Db.SR.get_uuid ~__context ~self:sr) name_label name_description pbd_t.API.pBD_device_config physical_size; - unbind ~__context ~pbd - ) + transform_storage_exn + (fun () -> + let pbd, pbd_t = Sm.get_my_pbd_for_sr __context sr in + let (_ : query_result) = bind ~__context ~pbd in + let dbg = Ref.string_of (Context.get_task_id __context) in + Client.SR.create dbg (Db.SR.get_uuid ~__context ~self:sr) name_label name_description pbd_t.API.pBD_device_config 
physical_size; + unbind ~__context ~pbd + ) (* This is because the current backends want SR.attached <=> PBD.currently_attached=true. It would be better not to plug in the PBD, so that other API calls will be blocked. *) let destroy_sr ~__context ~sr ~and_vdis = - transform_storage_exn - (fun () -> - let pbd, pbd_t = Sm.get_my_pbd_for_sr __context sr in - let (_ : query_result) = bind ~__context ~pbd in - let dbg = Ref.string_of (Context.get_task_id __context) in - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - Client.SR.attach dbg sr_uuid pbd_t.API.pBD_device_config; - (* The current backends expect the PBD to be temporarily set to currently_attached = true *) - Db.PBD.set_currently_attached ~__context ~self:pbd ~value:true; - Pervasiveext.finally - (fun () -> - try - List.iter (fun vdi -> - let location = Db.VDI.get_location ~__context ~self:vdi in - Client.VDI.destroy dbg sr_uuid location) - and_vdis; - Client.SR.destroy dbg sr_uuid - with exn -> - (* Clean up: SR is left attached if destroy fails *) - Client.SR.detach dbg sr_uuid; - raise exn - ) - (fun () -> - (* All PBDs are clearly currently_attached = false now *) - Db.PBD.set_currently_attached ~__context ~self:pbd ~value:false); - unbind ~__context ~pbd - ) + transform_storage_exn + (fun () -> + let pbd, pbd_t = Sm.get_my_pbd_for_sr __context sr in + let (_ : query_result) = bind ~__context ~pbd in + let dbg = Ref.string_of (Context.get_task_id __context) in + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + Client.SR.attach dbg sr_uuid pbd_t.API.pBD_device_config; + (* The current backends expect the PBD to be temporarily set to currently_attached = true *) + Db.PBD.set_currently_attached ~__context ~self:pbd ~value:true; + Pervasiveext.finally + (fun () -> + try + List.iter (fun vdi -> + let location = Db.VDI.get_location ~__context ~self:vdi in + Client.VDI.destroy dbg sr_uuid location) + and_vdis; + Client.SR.destroy dbg sr_uuid + with exn -> + (* Clean up: SR is left attached if destroy 
fails *) + Client.SR.detach dbg sr_uuid; + raise exn + ) + (fun () -> + (* All PBDs are clearly currently_attached = false now *) + Db.PBD.set_currently_attached ~__context ~self:pbd ~value:false); + unbind ~__context ~pbd + ) let task_cancel ~__context ~self = - try - let id = TaskHelper.task_to_id_exn self |> unwrap in - let dbg = Context.string_of_task __context in - info "storage_access: TASK.cancel %s" id; - Client.TASK.cancel dbg id |> ignore; - true - with - | Not_found -> false - | Not_an_sm_task -> false + try + let id = TaskHelper.task_to_id_exn self |> unwrap in + let dbg = Context.string_of_task __context in + info "storage_access: TASK.cancel %s" id; + Client.TASK.cancel dbg id |> ignore; + true + with + | Not_found -> false + | Not_an_sm_task -> false diff --git a/ocaml/xapi/storage_access.mli b/ocaml/xapi/storage_access.mli index 2340eca65c1..34947016e8d 100644 --- a/ocaml/xapi/storage_access.mli +++ b/ocaml/xapi/storage_access.mli @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) val start_smapiv1_servers: unit -> unit (** start listening for requests backed by SMAPIv1-style plugins *) @@ -26,7 +26,7 @@ val start: unit -> unit its unix domain socket. *) (** [find_vdi __context sr vdi] returns the XenAPI VDI ref associated - with (sr, vdi) *) + with (sr, vdi) *) val find_vdi: __context:Context.t -> Storage_interface.sr -> Storage_interface.vdi -> API.ref_VDI * API.vDI_t (** [find_content __context ?sr content_id] returns the XenAPI VDI ref associated @@ -90,16 +90,16 @@ val on_vdi: __context:Context.t -> vbd:API.ref_VBD -> domid:int -> ((Rpc.call -> each of [pbd] to match the state of the storage layer. *) val resynchronise_pbds: __context:Context.t -> pbds:API.ref_PBD list -> unit -(** [refresh_local_vdi_activations __context] updates the VDI.sm_config fields to +(** [refresh_local_vdi_activations __context] updates the VDI.sm_config fields to match the state stored within the storage layer. 
*) val refresh_local_vdi_activations: __context:Context.t -> unit (** [vbd_attach_order __context vbds] returns vbds in the order which xapi should - attempt to attach them. *) + attempt to attach them. *) val vbd_attach_order: __context:Context.t -> API.ref_VBD list -> API.ref_VBD list (** [vbd_detach_order __context vbds] returns vbds in the order which xapi should - attempt to detach them. *) + attempt to detach them. *) val vbd_detach_order: __context:Context.t -> API.ref_VBD list -> API.ref_VBD list (** [diagnostics __context] returns a printable snapshot of SM system state *) @@ -122,9 +122,9 @@ val success_task : Storage_interface.debug_info -> Storage_interface.Task.id -> val wait_for_task : Storage_interface.debug_info -> Storage_interface.Task.id -> Storage_interface.Task.id -val vdi_of_task : Storage_interface.debug_info -> Storage_interface.Task.t -> Storage_interface.vdi_info +val vdi_of_task : Storage_interface.debug_info -> Storage_interface.Task.t -> Storage_interface.vdi_info -val mirror_of_task : Storage_interface.debug_info -> Storage_interface.Task.t -> Storage_interface.Mirror.id +val mirror_of_task : Storage_interface.debug_info -> Storage_interface.Task.t -> Storage_interface.Mirror.id val register_task : Context.t -> Storage_interface.Task.id -> Storage_interface.Task.id diff --git a/ocaml/xapi/storage_impl.ml b/ocaml/xapi/storage_impl.ml index aaf9f42ea56..f3d2d321888 100644 --- a/ocaml/xapi/storage_impl.ml +++ b/ocaml/xapi/storage_impl.ml @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) (** Notes on failure handling: FH1: we always perform SMAPI "side-effects" before @@ -59,7 +59,7 @@ VDI_1_a, VDI_1, b, ... 
VDI_n_z: one lock per VDI for serialising all VDI.* ops per-SR + various locks to protect accesses to individual tables - We hold locks in one of the following sequences: + We hold locks in one of the following sequences: VDI_p_q : for a VDI operation SR_p : for an SR.attach SR_p, VDI_p_a, VDI_p_b, ..., VDI_p_z : for an SR.detach (to "quiesce" the SR) @@ -74,21 +74,21 @@ open Storage_task let print_debug = ref false let log_to_stdout prefix (fmt: ('a , unit, string, unit) format4) = - let time_of_float x = - let time = Unix.gmtime x in - Printf.sprintf "%04d%02d%02dT%02d:%02d:%02dZ (%d)" - (time.Unix.tm_year+1900) - (time.Unix.tm_mon+1) - time.Unix.tm_mday - time.Unix.tm_hour - time.Unix.tm_min - time.Unix.tm_sec - (Thread.id (Thread.self ())) - in - Printf.kprintf - (fun s -> - Printf.printf "%s %s %s\n" (time_of_float (Unix.gettimeofday ())) prefix s; - flush stdout) fmt + let time_of_float x = + let time = Unix.gmtime x in + Printf.sprintf "%04d%02d%02dT%02d:%02d:%02dZ (%d)" + (time.Unix.tm_year+1900) + (time.Unix.tm_mon+1) + time.Unix.tm_mday + time.Unix.tm_hour + time.Unix.tm_min + time.Unix.tm_sec + (Thread.id (Thread.self ())) + in + Printf.kprintf + (fun s -> + Printf.printf "%s %s %s\n" (time_of_float (Unix.gettimeofday ())) prefix s; + flush stdout) fmt module D=Debug.Make(struct let name="storage_impl" end) let debug (fmt: ('a, unit, string, unit) format4) = if !print_debug then log_to_stdout "debug" fmt else D.debug fmt @@ -98,8 +98,8 @@ let info (fmt: ('a, unit, string, unit) format4) = if !print_debug then log_to_ let host_state_path = ref "/var/run/nonpersistent/xapi/storage.db" module Dp = struct - type t = string with rpc - let make username = username + type t = string with rpc + let make username = username end let indent x = " " ^ x @@ -107,798 +107,798 @@ let indent x = " " ^ x let string_of_date x = Date.to_string (Date.of_float x) module Vdi = struct - (** Represents the information known about a VDI *) - type t = { - attach_info : 
attach_info option; (** Some path when attached; None otherwise *) - dps: (Dp.t * Vdi_automaton.state) list; (** state of the VDI from each dp's PoV *) - leaked: Dp.t list; (** "leaked" dps *) - } with rpc - let empty () = { - attach_info = None; - dps = []; - leaked = []; - } - (** [superstate x] returns the actual state of the backing VDI by finding the "max" of - the states from the clients' PsoV *) - let superstate x = Vdi_automaton.superstate (List.map snd x.dps) - - let get_dp_state dp t = - if List.mem_assoc dp t.dps - then List.assoc dp t.dps - else Vdi_automaton.Detached - - let set_dp_state dp state t = - let rest = List.filter (fun (u, _) -> u <> dp) t.dps in - { t with dps = if state = Vdi_automaton.Detached then rest else (dp, state) :: rest } - - let get_leaked t = t.leaked - - let leaked t (x: Dp.t) = List.mem x t.leaked - let all _ _ = true - - let remove_leaked dp t = - { t with leaked = List.filter (fun u -> u <> dp) t.leaked } - - let add_leaked dp t = - let t' = remove_leaked dp t in - { t' with leaked = dp :: t'.leaked } - - let dps t = List.map fst t.dps - - (** [perform dp op t] updates VDI [t] given the request to perform [op] by [dp] *) - let perform dp op t = - let state = get_dp_state dp t in - let state' = Vdi_automaton.(+) state op in - set_dp_state dp state' t - - let to_string_list x = - let title = Printf.sprintf "%s (device=%s)" (Vdi_automaton.string_of_state (superstate x)) (Opt.default "None" (Opt.map (fun x -> "Some " ^ Jsonrpc.to_string (rpc_of_attach_info x)) x.attach_info)) in - let of_dp (dp, state) = Printf.sprintf "DP: %s: %s%s" dp (Vdi_automaton.string_of_state state) (if List.mem dp x.leaked then " ** LEAKED" else "") in - title :: (List.map indent (List.map of_dp x.dps)) + (** Represents the information known about a VDI *) + type t = { + attach_info : attach_info option; (** Some path when attached; None otherwise *) + dps: (Dp.t * Vdi_automaton.state) list; (** state of the VDI from each dp's PoV *) + leaked: Dp.t 
list; (** "leaked" dps *) + } with rpc + let empty () = { + attach_info = None; + dps = []; + leaked = []; + } + (** [superstate x] returns the actual state of the backing VDI by finding the "max" of + the states from the clients' PsoV *) + let superstate x = Vdi_automaton.superstate (List.map snd x.dps) + + let get_dp_state dp t = + if List.mem_assoc dp t.dps + then List.assoc dp t.dps + else Vdi_automaton.Detached + + let set_dp_state dp state t = + let rest = List.filter (fun (u, _) -> u <> dp) t.dps in + { t with dps = if state = Vdi_automaton.Detached then rest else (dp, state) :: rest } + + let get_leaked t = t.leaked + + let leaked t (x: Dp.t) = List.mem x t.leaked + let all _ _ = true + + let remove_leaked dp t = + { t with leaked = List.filter (fun u -> u <> dp) t.leaked } + + let add_leaked dp t = + let t' = remove_leaked dp t in + { t' with leaked = dp :: t'.leaked } + + let dps t = List.map fst t.dps + + (** [perform dp op t] updates VDI [t] given the request to perform [op] by [dp] *) + let perform dp op t = + let state = get_dp_state dp t in + let state' = Vdi_automaton.(+) state op in + set_dp_state dp state' t + + let to_string_list x = + let title = Printf.sprintf "%s (device=%s)" (Vdi_automaton.string_of_state (superstate x)) (Opt.default "None" (Opt.map (fun x -> "Some " ^ Jsonrpc.to_string (rpc_of_attach_info x)) x.attach_info)) in + let of_dp (dp, state) = Printf.sprintf "DP: %s: %s%s" dp (Vdi_automaton.string_of_state state) (if List.mem dp x.leaked then " ** LEAKED" else "") in + title :: (List.map indent (List.map of_dp x.dps)) end module Sr = struct - (** Represents the state of an SR *) - type vdis = (string, Vdi.t) Hashtbl.t with rpc - - type t = { - vdis: vdis; (** All tracked VDIs *) - } with rpc - - let empty () = { - vdis = Hashtbl.create 10; - } - - let m = Mutex.create () - let find vdi sr = Mutex.execute m (fun () -> try Some (Hashtbl.find sr.vdis vdi) with Not_found -> None) - let replace vdi vdi_t sr = - Mutex.execute m (fun () 
-> Hashtbl.replace sr.vdis vdi vdi_t) - let list sr = Mutex.execute m (fun () -> Hashtbl.fold (fun k v acc -> (k, v) :: acc) sr.vdis []) - let remove vdi sr = - Mutex.execute m (fun () -> Hashtbl.remove sr.vdis vdi) - let to_string_list x = - Hashtbl.fold (fun vdi vdi_t acc-> (Printf.sprintf "VDI %s" vdi :: (List.map indent (Vdi.to_string_list vdi_t))) @ acc) x.vdis [] + (** Represents the state of an SR *) + type vdis = (string, Vdi.t) Hashtbl.t with rpc + + type t = { + vdis: vdis; (** All tracked VDIs *) + } with rpc + + let empty () = { + vdis = Hashtbl.create 10; + } + + let m = Mutex.create () + let find vdi sr = Mutex.execute m (fun () -> try Some (Hashtbl.find sr.vdis vdi) with Not_found -> None) + let replace vdi vdi_t sr = + Mutex.execute m (fun () -> Hashtbl.replace sr.vdis vdi vdi_t) + let list sr = Mutex.execute m (fun () -> Hashtbl.fold (fun k v acc -> (k, v) :: acc) sr.vdis []) + let remove vdi sr = + Mutex.execute m (fun () -> Hashtbl.remove sr.vdis vdi) + let to_string_list x = + Hashtbl.fold (fun vdi vdi_t acc-> (Printf.sprintf "VDI %s" vdi :: (List.map indent (Vdi.to_string_list vdi_t))) @ acc) x.vdis [] end module Host = struct - (** Represents the state of a host *) - type t = { - srs: (string, Sr.t) Hashtbl.t; - } with rpc - - let empty () = { - srs = Hashtbl.create 10 - } - let m = Mutex.create () - let find sr h = Mutex.execute m (fun () -> try Some (Hashtbl.find h.srs sr) with Not_found -> None) - let remove sr h = Mutex.execute m (fun () -> Hashtbl.remove h.srs sr) - let replace sr sr_t h = Mutex.execute m (fun () -> Hashtbl.replace h.srs sr sr_t) - let list h = Mutex.execute m (fun () -> Hashtbl.fold (fun k v acc -> (k, v) :: acc) h.srs []) - - (** All global state held here *) - let host = ref (empty ()) + (** Represents the state of a host *) + type t = { + srs: (string, Sr.t) Hashtbl.t; + } with rpc + + let empty () = { + srs = Hashtbl.create 10 + } + let m = Mutex.create () + let find sr h = Mutex.execute m (fun () -> try Some 
(Hashtbl.find h.srs sr) with Not_found -> None) + let remove sr h = Mutex.execute m (fun () -> Hashtbl.remove h.srs sr) + let replace sr sr_t h = Mutex.execute m (fun () -> Hashtbl.replace h.srs sr sr_t) + let list h = Mutex.execute m (fun () -> Hashtbl.fold (fun k v acc -> (k, v) :: acc) h.srs []) + + (** All global state held here *) + let host = ref (empty ()) end module Errors = struct - (** Used for remembering the last [max] errors *) - type error = { - dp: string; (** person who triggered the error *) - time: float; (** time the error happened *) - sr: string; - vdi: string; - error: string - } with rpc - - type t = error list with rpc - - let max = 100 - let errors = ref [] - let errors_m = Mutex.create () - let add dp sr vdi code = - Mutex.execute errors_m - (fun () -> - let t = { - dp = dp; - time = Unix.gettimeofday (); - sr = sr; vdi = vdi; error = code - } in - errors := Listext.List.take 100 (t :: !errors) - ) - let list () = Mutex.execute errors_m (fun () -> !errors) - let to_string x = - Printf.sprintf "%s @ %s; sr:%s vdi:%s error:%s" x.dp - (string_of_date x.time) x.sr x.vdi x.error + (** Used for remembering the last [max] errors *) + type error = { + dp: string; (** person who triggered the error *) + time: float; (** time the error happened *) + sr: string; + vdi: string; + error: string + } with rpc + + type t = error list with rpc + + let max = 100 + let errors = ref [] + let errors_m = Mutex.create () + let add dp sr vdi code = + Mutex.execute errors_m + (fun () -> + let t = { + dp = dp; + time = Unix.gettimeofday (); + sr = sr; vdi = vdi; error = code + } in + errors := Listext.List.take 100 (t :: !errors) + ) + let list () = Mutex.execute errors_m (fun () -> !errors) + let to_string x = + Printf.sprintf "%s @ %s; sr:%s vdi:%s error:%s" x.dp + (string_of_date x.time) x.sr x.vdi x.error end module Everything = struct - type t = { - host: Host.t; - errors: Errors.t; - } with rpc - - let make () = { host = !Host.host; errors = !Errors.errors } 
- - let to_file filename h = - let rpc = Mutex.execute Host.m (fun () -> rpc_of_t h) in - let s = Jsonrpc.to_string rpc in - Unixext.write_string_to_file filename s - let of_file filename = - let s = Unixext.string_of_file filename in - let rpc = Jsonrpc.of_string s in - t_of_rpc rpc - - let set h = Host.host := h.host; Errors.errors := h.errors + type t = { + host: Host.t; + errors: Errors.t; + } with rpc + + let make () = { host = !Host.host; errors = !Errors.errors } + + let to_file filename h = + let rpc = Mutex.execute Host.m (fun () -> rpc_of_t h) in + let s = Jsonrpc.to_string rpc in + Unixext.write_string_to_file filename s + let of_file filename = + let s = Unixext.string_of_file filename in + let rpc = Jsonrpc.of_string s in + t_of_rpc rpc + + let set h = Host.host := h.host; Errors.errors := h.errors end module Wrapper = functor(Impl: Server_impl) -> struct - type context = Smint.request - - module Query = struct - let query = Impl.Query.query - let diagnostics = Impl.Query.diagnostics - end - - module VDI = struct - type vdi_locks = (string, unit) Storage_locks.t - - (** Map of SR name to vdi_locks table *) - let locks : (string, vdi_locks) Hashtbl.t = Hashtbl.create 10 - - (* This protects the 'locks' table only *) - let locks_m = Mutex.create () - let locks_find sr = - Mutex.execute locks_m - (fun () -> - if not(Hashtbl.mem locks sr) - then - let result = Storage_locks.make () in - Hashtbl.replace locks sr result; - result - else Hashtbl.find locks sr) - let locks_remove sr = - Mutex.execute locks_m (fun () -> Hashtbl.remove locks sr) - - let with_vdi sr vdi f = - let locks = locks_find sr in - Storage_locks.with_instance_lock locks vdi f - - let with_all_vdis sr f = - let locks = locks_find sr in - Storage_locks.with_master_lock locks f - - let side_effects context dbg dp sr sr_t vdi vdi_t ops = - let perform_one vdi_t (op, state_on_fail) = - try - let vdi_t = Vdi.perform (Dp.make dp) op vdi_t in - let new_vdi_t = match op with - | 
Vdi_automaton.Nothing -> vdi_t - | Vdi_automaton.Attach ro_rw -> - let read_write = (ro_rw = Vdi_automaton.RW) in - let x = Impl.VDI.attach context ~dbg ~dp ~sr ~vdi ~read_write in - { vdi_t with Vdi.attach_info = Some x } - | Vdi_automaton.Activate -> - Impl.VDI.activate context ~dbg ~dp ~sr ~vdi; vdi_t - | Vdi_automaton.Deactivate -> - Storage_migrate.pre_deactivate_hook ~dbg ~dp ~sr ~vdi; - Impl.VDI.deactivate context ~dbg ~dp ~sr ~vdi; vdi_t - | Vdi_automaton.Detach -> - Impl.VDI.detach context ~dbg ~dp ~sr ~vdi; - Storage_migrate.post_detach_hook ~sr ~vdi ~dp; - vdi_t - in - Sr.replace vdi new_vdi_t sr_t; - new_vdi_t - with - | Storage_interface.Internal_error("Storage_access.No_VDI") as e - when ( op == Vdi_automaton.Deactivate || op == Vdi_automaton.Detach ) -> - error "Storage_impl: caught exception %s while doing %s . Continuing as if succesful, being optimistic" - (Printexc.to_string e) (Vdi_automaton.string_of_op op); - vdi_t - | e -> - error "Storage_impl: dp:%s sr:%s vdi:%s op:%s error:%s backtrace:%s" dp sr vdi - (Vdi_automaton.string_of_op op) (Printexc.to_string e) (Printexc.get_backtrace ()); - raise e - in - List.fold_left perform_one vdi_t ops - - let perform_nolock context ~dbg ~dp ~sr ~vdi this_op = - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some sr_t -> - let vdi_t = Opt.default (Vdi.empty ()) (Sr.find vdi sr_t) in - let vdi_t' = - try - (* Compute the overall state ('superstate') of the VDI *) - let superstate = Vdi.superstate vdi_t in - (* We first assume the operation succeeds and compute the new - datapath+VDI state *) - let new_vdi_t = Vdi.perform (Dp.make dp) this_op vdi_t in - (* Compute the new overall state ('superstate') *) - let superstate' = Vdi.superstate new_vdi_t in - (* Compute the real operations which would drive the system from - superstate to superstate'. These may fail: if so we revert the - datapath+VDI state to the most appropriate value. 
*) - let ops = Vdi_automaton.(-) superstate superstate' in - side_effects context dbg dp sr sr_t vdi vdi_t ops - with e -> - let e = match e with Vdi_automaton.No_operation(a, b) -> Illegal_transition(a,b) | e -> e in - Errors.add dp sr vdi (Printexc.to_string e); - raise e - in - - (* Even if there were no side effects on the underlying VDI, we still need - to update the SR to update this DP's view of the state. - However if nothing changed (e.g. because this was the detach of a DP - which had not attached this VDI) then we won't need to update our on-disk state *) - let vdi_t' = Vdi.perform (Dp.make dp) this_op vdi_t' in - if vdi_t <> vdi_t' then begin - Sr.replace vdi vdi_t' sr_t; - (* If the new VDI state is "detached" then we remove it from the table - altogether *) - debug "dbg:%s dp:%s sr:%s vdi:%s superstate:%s" dbg dp sr vdi (Vdi_automaton.string_of_state (Vdi.superstate vdi_t')); - if Vdi.superstate vdi_t' = Vdi_automaton.Detached - then Sr.remove vdi sr_t; - - (* FH1: Perform the side-effect first: in the case of a failure half-way - through we would rather perform the side-effect twice than never at - all. 
*) - Everything.to_file !host_state_path (Everything.make ()); - end; - vdi_t' - - (* Attempt to remove a possibly-active datapath associated with [vdi] *) - let destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak = - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some sr_t -> - Opt.iter (fun vdi_t -> - let current_state = Vdi.get_dp_state dp vdi_t in - let desired_state = Vdi_automaton.Detached in - let ops = List.map fst (Vdi_automaton.(-) current_state desired_state) in - begin - try - ignore(List.fold_left (fun _ op -> - perform_nolock context ~dbg ~dp ~sr ~vdi op - ) vdi_t ops) - with e -> - if not allow_leak - then (ignore(Vdi.add_leaked dp vdi_t); raise e) - else begin - (* allow_leak means we can forget this dp *) - info "setting dp:%s state to %s, even though operation failed because allow_leak set" dp (Vdi_automaton.string_of_state desired_state); - let vdi_t = Vdi.set_dp_state dp desired_state vdi_t in - - if Vdi.superstate vdi_t = Vdi_automaton.Detached - then Sr.remove vdi sr_t - else Sr.replace vdi vdi_t sr_t; - - Everything.to_file !host_state_path (Everything.make ()); - end - end) (Sr.find vdi sr_t) - - (* Attempt to clear leaked datapaths associed with this vdi *) - let remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi which next = - let dps = match Host.find sr !Host.host with - | None -> [] - | Some sr_t -> - begin match Sr.find vdi sr_t with - | Some vdi_t -> - List.filter (which vdi_t) (Vdi.dps vdi_t) - | None -> [] - end in - let failures = List.fold_left (fun acc dp -> - info "Attempting to destroy datapath dp:%s sr:%s vdi:%s" dp sr vdi; - try - destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak:false; - acc - with e -> e :: acc - ) [] dps in - match failures with - | [] -> next () - | f :: fs -> raise f - - let epoch_begin context ~dbg ~sr ~vdi ~persistent = - info "VDI.epoch_begin dbg:%s sr:%s vdi:%s persistent:%b" dbg sr vdi persistent; - with_vdi sr vdi - (fun () -> - 
remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - Impl.VDI.epoch_begin context ~dbg ~sr ~vdi ~persistent - )) - - let attach context ~dbg ~dp ~sr ~vdi ~read_write = - info "VDI.attach dbg:%s dp:%s sr:%s vdi:%s read_write:%b" dbg dp sr vdi read_write; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - let state = perform_nolock context ~dbg ~dp ~sr ~vdi - (Vdi_automaton.Attach (if read_write then Vdi_automaton.RW else Vdi_automaton.RO)) in - Opt.unbox state.Vdi.attach_info - )) - - let activate context ~dbg ~dp ~sr ~vdi = - info "VDI.activate dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - ignore(perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Activate))) - - let deactivate context ~dbg ~dp ~sr ~vdi = - info "VDI.deactivate dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - ignore (perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Deactivate))) - - let detach context ~dbg ~dp ~sr ~vdi = - info "VDI.detach dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - ignore (perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Detach))) - - let epoch_end context ~dbg ~sr ~vdi = - info "VDI.epoch_end dbg:%s sr:%s vdi:%s" dbg sr vdi; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked - (fun () -> - Impl.VDI.epoch_end context ~dbg ~sr ~vdi - )) - - let create context ~dbg ~sr ~vdi_info = - info "VDI.create dbg:%s sr:%s vdi_info:%s" dbg sr (string_of_vdi_info vdi_info); - let result = Impl.VDI.create context ~dbg ~sr ~vdi_info in - match result with - | { virtual_size = virtual_size' } when virtual_size' < 
vdi_info.virtual_size -> - error "VDI.create dbg:%s created a smaller VDI (%Ld)" dbg virtual_size'; - raise (Backend_error("SR_BACKEND_FAILURE", ["Disk too small"; Int64.to_string vdi_info.virtual_size; Int64.to_string virtual_size'])) - | result -> result - - let snapshot_and_clone call_name call_f context ~dbg ~sr ~vdi_info = - info "%s dbg:%s sr:%s vdi_info:%s" call_name dbg sr (string_of_vdi_info vdi_info); - with_vdi sr vdi_info.vdi - (fun () -> - call_f context ~dbg ~sr ~vdi_info - ) - - let snapshot = snapshot_and_clone "VDI.snapshot" Impl.VDI.snapshot - let clone = snapshot_and_clone "VDI.clone" Impl.VDI.clone - - let set_name_label context ~dbg ~sr ~vdi ~new_name_label = - info "VDI.set_name_label dbg:%s sr:%s vdi:%s new_name_label:%s" dbg sr vdi new_name_label; - with_vdi sr vdi - (fun () -> - Impl.VDI.set_name_label context ~dbg ~sr ~vdi ~new_name_label - ) - - let set_name_description context ~dbg ~sr ~vdi ~new_name_description = - info "VDI.set_name_description dbg:%s sr:%s vdi:%s new_name_description:%s" dbg sr vdi new_name_description; - with_vdi sr vdi - (fun () -> - Impl.VDI.set_name_description context ~dbg ~sr ~vdi ~new_name_description - ) - - let resize context ~dbg ~sr ~vdi ~new_size = - info "VDI.resize dbg:%s sr:%s vdi:%s new_size:%Ld" dbg sr vdi new_size; - with_vdi sr vdi - (fun () -> - Impl.VDI.resize context ~dbg ~sr ~vdi ~new_size - ) - - let destroy context ~dbg ~sr ~vdi = - info "VDI.destroy dbg:%s sr:%s vdi:%s" dbg sr vdi; - with_vdi sr vdi - (fun () -> - remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.all - (fun () -> - Impl.VDI.destroy context ~dbg ~sr ~vdi - ) - ) - - let stat context ~dbg ~sr ~vdi = - info "VDI.stat dbg:%s sr:%s vdi:%s" dbg sr vdi; - Impl.VDI.stat context ~dbg ~sr ~vdi - - let introduce context ~dbg ~sr ~uuid ~sm_config ~location = - info "VDI.introduce dbg:%s sr:%s uuid:%s sm_config:%s location:%s" dbg sr uuid (String.concat ", " (List.map (fun (k, v) -> k ^ ":" ^ v) sm_config)) location; - 
Impl.VDI.introduce context ~dbg ~sr ~uuid ~sm_config ~location - - let set_persistent context ~dbg ~sr ~vdi ~persistent = - info "VDI.set_persistent dbg:%s sr:%s vdi:%s persistent:%b" dbg sr vdi persistent; - with_vdi sr vdi - (fun () -> - Impl.VDI.set_persistent context ~dbg ~sr ~vdi ~persistent - ) - - let get_by_name context ~dbg ~sr ~name = - info "VDI.get_by_name dbg:%s sr:%s name:%s" dbg sr name; - Impl.VDI.get_by_name context ~dbg ~sr ~name - - let set_content_id context ~dbg ~sr ~vdi ~content_id = - info "VDI.set_content_id dbg:%s sr:%s vdi:%s content_id:%s" dbg sr vdi content_id; - Impl.VDI.set_content_id context ~dbg ~sr ~vdi ~content_id - - let similar_content context ~dbg ~sr ~vdi = - info "VDI.similar_content dbg:%s sr:%s vdi:%s" dbg sr vdi; - Impl.VDI.similar_content context ~dbg ~sr ~vdi - - let compose context ~dbg ~sr ~vdi1 ~vdi2 = - info "VDI.compose dbg:%s sr:%s vdi1:%s vdi2:%s" dbg sr vdi1 vdi2; - Impl.VDI.compose context ~dbg ~sr ~vdi1 ~vdi2 - - let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = - info "VDI.add_to_other_config dbg:%s sr:%s vdi:%s key:%s valu:%s" dbg sr vdi key value; - Impl.VDI.add_to_sm_config context ~dbg ~sr ~vdi ~key ~value - - let remove_from_sm_config context ~dbg ~sr ~vdi ~key = - info "VDI.remove_from_sm_config dbg:%s sr:%s vdi:%s key:%s" dbg sr vdi key; - Impl.VDI.remove_from_sm_config context ~dbg ~sr ~vdi ~key - - let get_url context ~dbg ~sr ~vdi = - info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; - Impl.VDI.get_url context ~dbg ~sr ~vdi - - end - - let get_by_name context ~dbg ~name = - debug "get_by_name dbg:%s name:%s" dbg name; - Impl.get_by_name context ~dbg ~name - - module DATA = struct - let copy_into context ~dbg ~sr ~vdi ~url ~dest = - info "DATA.copy_into dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; - Impl.DATA.copy_into context ~dbg ~sr ~vdi ~url ~dest - - let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = - info "DATA.copy dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; - 
Impl.DATA.copy context ~dbg ~sr ~vdi ~dp ~url ~dest - - module MIRROR = struct - let start context ~dbg ~sr ~vdi ~dp ~url ~dest = - info "DATA.MIRROR.start dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; - Impl.DATA.MIRROR.start context ~dbg ~sr ~vdi ~dp ~url ~dest - - let stop context ~dbg ~id = - info "DATA.MIRROR.stop dbg:%s id:%s" dbg id; - Impl.DATA.MIRROR.stop context ~dbg ~id - - let list context ~dbg = - info "DATA.MIRROR.active dbg:%s" dbg; - Impl.DATA.MIRROR.list context ~dbg - - let stat context ~dbg ~id = - info "DATA.MIRROR.stat dbg:%s id:%s" dbg id; - Impl.DATA.MIRROR.stat context ~dbg ~id - - let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = - info "DATA.MIRROR.receive_start dbg:%s sr:%s id:%s similar:[%s]" - dbg sr id (String.concat "," similar); - Impl.DATA.MIRROR.receive_start context ~dbg ~sr ~vdi_info ~id ~similar - - let receive_finalize context ~dbg ~id = - info "DATA.MIRROR.receive_finalize dbg:%s id:%s" dbg id; - Impl.DATA.MIRROR.receive_finalize context ~dbg ~id - - let receive_cancel context ~dbg ~id = - info "DATA.MIRROR.receive_cancel dbg:%s id:%s" dbg id; - Impl.DATA.MIRROR.receive_cancel context ~dbg ~id - - end - - end - - module DP = struct - let create context ~dbg ~id = id - - (** [destroy_sr context dp sr allow_leak vdi_already_locked] attempts to free - the resources associated with [dp] in [sr]. If [vdi_already_locked] then - it is assumed that all VDIs are already locked. 
*) - let destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak vdi_already_locked = - (* Every VDI in use by this session should be detached and deactivated *) - let vdis = Sr.list sr_t in - List.fold_left (fun acc (vdi, vdi_t) -> - let locker = - if vdi_already_locked - then fun f -> f () - else VDI.with_vdi sr vdi in - locker - (fun () -> - try - VDI.destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak; acc - with e -> e::acc - )) [] vdis - - - let destroy context ~dbg ~dp ~allow_leak = - info "DP.destroy dbg:%s dp:%s allow_leak:%b" dbg dp allow_leak; - let failures = List.fold_left (fun acc (sr, sr_t) -> acc @ (destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak false)) [] (Host.list !Host.host) in - match failures, allow_leak with - | [], _ -> () - | f :: _, false -> - error "Leaked datapath: dp: %s" dp; - raise f - | _ :: _, true -> - info "Forgetting leaked datapath: dp: %s" dp; - () - - let diagnostics context () = - let srs = Host.list !Host.host in - let of_sr (sr, sr_t) = - let title = Printf.sprintf "SR %s" sr in - title :: (List.map indent (Sr.to_string_list sr_t)) in - let srs = List.concat (List.map of_sr srs) in - let errors = List.map Errors.to_string (Errors.list ()) in - let errors = (if errors <> [] then "The following errors have been logged:" else "No errors have been logged.") :: errors in - let lines = [ "The following SRs are attached:" ] @ (List.map indent srs) @ [ "" ] @ errors in - String.concat "" (List.map (fun x -> x ^ "\n") lines) - - let attach_info context ~dbg ~sr ~vdi ~dp = - let srs = Host.list !Host.host in - let sr_state = List.assoc sr srs in - let vdi_state = Hashtbl.find sr_state.Sr.vdis vdi in - let dp_state = Vdi.get_dp_state dp vdi_state in - debug "Looking for dp: %s" dp; - match dp_state,vdi_state.Vdi.attach_info with - | Vdi_automaton.Activated _, Some attach_info -> - attach_info - | _ -> - raise (Internal_error (Printf.sprintf "sr: %s vdi: %s Datapath %s not attached" sr vdi dp)) - - - let stat_vdi context ~dbg 
~sr ~vdi () = - info "DP.stat_vdi dbg:%s sr:%s vdi:%s" dbg sr vdi; - VDI.with_vdi sr vdi - (fun () -> - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some sr_t -> - let vdi_t = Opt.default (Vdi.empty ()) (Sr.find vdi sr_t) in - { - superstate = Vdi.superstate vdi_t; - dps = List.map (fun dp -> dp, Vdi.get_dp_state dp vdi_t) (Vdi.dps vdi_t) - } - ) - - end - - module SR = struct - include Storage_skeleton.SR - let locks : (string, unit) Storage_locks.t = Storage_locks.make () - let with_sr sr f = Storage_locks.with_instance_lock locks sr f - - let probe context ~dbg ~queue ~device_config ~sm_config = - Impl.SR.probe context ~dbg ~queue ~device_config ~sm_config - - let list context ~dbg = - List.map fst (Host.list !Host.host) - - let stat context ~dbg ~sr = - info "SR.stat dbg:%s sr:%s" dbg sr; - with_sr sr - (fun () -> - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some _ -> - Impl.SR.stat context ~dbg ~sr - ) - - let scan context ~dbg ~sr = - info "SR.scan dbg:%s sr:%s" dbg sr; - with_sr sr - (fun () -> - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some _ -> - Impl.SR.scan context ~dbg ~sr - ) - - let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = - with_sr sr - (fun () -> - match Host.find sr !Host.host with - | None -> - Impl.SR.create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size - | Some _ -> - error "SR %s is already attached" sr; - raise (Sr_attached sr) - ) - - let set_name_label context ~dbg ~sr ~new_name_label = - info "SR.set_name_label dbg:%s sr:%s new_name_label:%s" dbg sr new_name_label; - Impl.SR.set_name_label context ~dbg ~sr ~new_name_label - - let set_name_description context ~dbg ~sr ~new_name_description = - info "SR.set_name_description dbg:%s sr:%s new_name_description:%s" dbg sr new_name_description; - Impl.SR.set_name_description context ~dbg ~sr ~new_name_description - - 
let attach context ~dbg ~sr ~device_config = - let censor_key = ["password"] in - let device_config_str = String.concat "; " (List.map (fun (k, v) -> - let v' = (if List.exists (Xstringext.String.has_substr k) censor_key then "(omitted)" else v) in - (k ^ ":" ^ v')) device_config) - in - info "SR.attach dbg:%s sr:%s device_config:[%s]" dbg sr device_config_str; - with_sr sr - (fun () -> - match Host.find sr !Host.host with - | None -> - Impl.SR.attach context ~dbg ~sr ~device_config; - Host.replace sr (Sr.empty ()) !Host.host; - (* FH1: Perform the side-effect first: in the case of a - failure half-way through we would rather perform the - side-effect twice than never at all. *) - Everything.to_file !host_state_path (Everything.make ()) - | Some _ -> - (* Operation is idempotent *) - () - ) - - let detach_destroy_common context ~dbg ~sr f = - let active_dps sr_t = - (* Enumerate all active datapaths *) - List.concat (List.map (fun (_, vdi_t) -> Vdi.dps vdi_t) (Sr.list sr_t)) in - - with_sr sr - (fun () -> - match Host.find sr !Host.host with - | None -> raise (Sr_not_attached sr) - | Some sr_t -> - VDI.with_all_vdis sr - (fun () -> - let dps = active_dps sr_t in - List.iter - (fun dp -> - let ( _ : exn list) = DP.destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak:false true in () - ) dps; - let dps = active_dps sr_t in - if dps <> [] - then error "The following datapaths have leaked: %s" (String.concat "; " dps); - f context ~dbg ~sr; - Host.remove sr !Host.host; - Everything.to_file !host_state_path (Everything.make ()); - VDI.locks_remove sr - ) - ) - - let detach context ~dbg ~sr = - info "SR.detach dbg:%s sr:%s" dbg sr; - detach_destroy_common context ~dbg ~sr Impl.SR.detach - - let reset context ~dbg ~sr = - info "SR.reset dbg:%s sr:%s" dbg sr; - with_sr sr - (fun () -> - Host.remove sr !Host.host; - Everything.to_file !host_state_path (Everything.make ()); - VDI.locks_remove sr - ) - - let destroy context ~dbg ~sr = - info "SR.destroy dbg:%s sr:%s" dbg sr; - 
detach_destroy_common context ~dbg ~sr Impl.SR.destroy - - let update_snapshot_info_src context ~dbg ~sr ~vdi ~url - ~dest ~dest_vdi ~snapshot_pairs= - info - "SR.update_snapshot_info_src dbg:%s sr:%s vdi:%s url:%s dest:%s dest_vdi:%s snapshot_pairs:%s" - dbg sr vdi url dest dest_vdi - (List.map - (fun (local_snapshot, dest_snapshot) -> - Printf.sprintf "local:%s, dest:%s" local_snapshot dest_snapshot) - snapshot_pairs - |> String.concat "; " - |> Printf.sprintf "[%s]"); - Impl.SR.update_snapshot_info_src context ~dbg ~sr ~vdi ~url - ~dest ~dest_vdi ~snapshot_pairs - - let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = - info - "SR.update_snapshot_info_dest dbg:%s sr:%s vdi:%s ~src_vdi:%s snapshot_pairs:%s" - dbg sr vdi src_vdi.vdi - (List.map - (fun (local_snapshot, src_snapshot_info) -> - Printf.sprintf "local:%s, src:%s" local_snapshot src_snapshot_info.vdi) - snapshot_pairs - |> String.concat "; " - |> Printf.sprintf "[%s]"); - Impl.SR.update_snapshot_info_dest context ~dbg ~sr ~vdi - ~src_vdi ~snapshot_pairs - end - - module Policy = struct - let get_backend_vm context ~dbg ~vm ~sr ~vdi = - Impl.Policy.get_backend_vm context ~dbg ~vm ~sr ~vdi - end - - module TASK = struct - open Storage_task - let t x = { - Task.id = x.id; - debug_info = x.dbg; - ctime = x.ctime; - state = x.state; - subtasks = x.subtasks; - } - - let cancel _ ~dbg ~task = - Storage_task.cancel tasks task - let stat' task = - Mutex.execute tasks.m - (fun () -> - find_locked tasks task |> t - ) - let stat _ ~dbg ~task = stat' task - let destroy' ~task = - destroy tasks task; - Updates.remove (Dynamic.Task task) updates - let destroy _ ~dbg ~task = destroy' ~task - let list _ ~dbg = list tasks |> List.map t - end - - module UPDATES = struct - let get _ ~dbg ~from ~timeout = - let from = try Some (int_of_string from) with _ -> None in - let _, ids, next = Updates.get dbg from timeout updates in - (ids, string_of_int next) - end + type context = Smint.request + + 
module Query = struct + let query = Impl.Query.query + let diagnostics = Impl.Query.diagnostics + end + + module VDI = struct + type vdi_locks = (string, unit) Storage_locks.t + + (** Map of SR name to vdi_locks table *) + let locks : (string, vdi_locks) Hashtbl.t = Hashtbl.create 10 + + (* This protects the 'locks' table only *) + let locks_m = Mutex.create () + let locks_find sr = + Mutex.execute locks_m + (fun () -> + if not(Hashtbl.mem locks sr) + then + let result = Storage_locks.make () in + Hashtbl.replace locks sr result; + result + else Hashtbl.find locks sr) + let locks_remove sr = + Mutex.execute locks_m (fun () -> Hashtbl.remove locks sr) + + let with_vdi sr vdi f = + let locks = locks_find sr in + Storage_locks.with_instance_lock locks vdi f + + let with_all_vdis sr f = + let locks = locks_find sr in + Storage_locks.with_master_lock locks f + + let side_effects context dbg dp sr sr_t vdi vdi_t ops = + let perform_one vdi_t (op, state_on_fail) = + try + let vdi_t = Vdi.perform (Dp.make dp) op vdi_t in + let new_vdi_t = match op with + | Vdi_automaton.Nothing -> vdi_t + | Vdi_automaton.Attach ro_rw -> + let read_write = (ro_rw = Vdi_automaton.RW) in + let x = Impl.VDI.attach context ~dbg ~dp ~sr ~vdi ~read_write in + { vdi_t with Vdi.attach_info = Some x } + | Vdi_automaton.Activate -> + Impl.VDI.activate context ~dbg ~dp ~sr ~vdi; vdi_t + | Vdi_automaton.Deactivate -> + Storage_migrate.pre_deactivate_hook ~dbg ~dp ~sr ~vdi; + Impl.VDI.deactivate context ~dbg ~dp ~sr ~vdi; vdi_t + | Vdi_automaton.Detach -> + Impl.VDI.detach context ~dbg ~dp ~sr ~vdi; + Storage_migrate.post_detach_hook ~sr ~vdi ~dp; + vdi_t + in + Sr.replace vdi new_vdi_t sr_t; + new_vdi_t + with + | Storage_interface.Internal_error("Storage_access.No_VDI") as e + when ( op == Vdi_automaton.Deactivate || op == Vdi_automaton.Detach ) -> + error "Storage_impl: caught exception %s while doing %s . 
Continuing as if succesful, being optimistic" + (Printexc.to_string e) (Vdi_automaton.string_of_op op); + vdi_t + | e -> + error "Storage_impl: dp:%s sr:%s vdi:%s op:%s error:%s backtrace:%s" dp sr vdi + (Vdi_automaton.string_of_op op) (Printexc.to_string e) (Printexc.get_backtrace ()); + raise e + in + List.fold_left perform_one vdi_t ops + + let perform_nolock context ~dbg ~dp ~sr ~vdi this_op = + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some sr_t -> + let vdi_t = Opt.default (Vdi.empty ()) (Sr.find vdi sr_t) in + let vdi_t' = + try + (* Compute the overall state ('superstate') of the VDI *) + let superstate = Vdi.superstate vdi_t in + (* We first assume the operation succeeds and compute the new + datapath+VDI state *) + let new_vdi_t = Vdi.perform (Dp.make dp) this_op vdi_t in + (* Compute the new overall state ('superstate') *) + let superstate' = Vdi.superstate new_vdi_t in + (* Compute the real operations which would drive the system from + superstate to superstate'. These may fail: if so we revert the + datapath+VDI state to the most appropriate value. *) + let ops = Vdi_automaton.(-) superstate superstate' in + side_effects context dbg dp sr sr_t vdi vdi_t ops + with e -> + let e = match e with Vdi_automaton.No_operation(a, b) -> Illegal_transition(a,b) | e -> e in + Errors.add dp sr vdi (Printexc.to_string e); + raise e + in + + (* Even if there were no side effects on the underlying VDI, we still need + to update the SR to update this DP's view of the state. + However if nothing changed (e.g. 
because this was the detach of a DP + which had not attached this VDI) then we won't need to update our on-disk state *) + let vdi_t' = Vdi.perform (Dp.make dp) this_op vdi_t' in + if vdi_t <> vdi_t' then begin + Sr.replace vdi vdi_t' sr_t; + (* If the new VDI state is "detached" then we remove it from the table + altogether *) + debug "dbg:%s dp:%s sr:%s vdi:%s superstate:%s" dbg dp sr vdi (Vdi_automaton.string_of_state (Vdi.superstate vdi_t')); + if Vdi.superstate vdi_t' = Vdi_automaton.Detached + then Sr.remove vdi sr_t; + + (* FH1: Perform the side-effect first: in the case of a failure half-way + through we would rather perform the side-effect twice than never at + all. *) + Everything.to_file !host_state_path (Everything.make ()); + end; + vdi_t' + + (* Attempt to remove a possibly-active datapath associated with [vdi] *) + let destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak = + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some sr_t -> + Opt.iter (fun vdi_t -> + let current_state = Vdi.get_dp_state dp vdi_t in + let desired_state = Vdi_automaton.Detached in + let ops = List.map fst (Vdi_automaton.(-) current_state desired_state) in + begin + try + ignore(List.fold_left (fun _ op -> + perform_nolock context ~dbg ~dp ~sr ~vdi op + ) vdi_t ops) + with e -> + if not allow_leak + then (ignore(Vdi.add_leaked dp vdi_t); raise e) + else begin + (* allow_leak means we can forget this dp *) + info "setting dp:%s state to %s, even though operation failed because allow_leak set" dp (Vdi_automaton.string_of_state desired_state); + let vdi_t = Vdi.set_dp_state dp desired_state vdi_t in + + if Vdi.superstate vdi_t = Vdi_automaton.Detached + then Sr.remove vdi sr_t + else Sr.replace vdi vdi_t sr_t; + + Everything.to_file !host_state_path (Everything.make ()); + end + end) (Sr.find vdi sr_t) + + (* Attempt to clear leaked datapaths associed with this vdi *) + let remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi which next = 
+ let dps = match Host.find sr !Host.host with + | None -> [] + | Some sr_t -> + begin match Sr.find vdi sr_t with + | Some vdi_t -> + List.filter (which vdi_t) (Vdi.dps vdi_t) + | None -> [] + end in + let failures = List.fold_left (fun acc dp -> + info "Attempting to destroy datapath dp:%s sr:%s vdi:%s" dp sr vdi; + try + destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak:false; + acc + with e -> e :: acc + ) [] dps in + match failures with + | [] -> next () + | f :: fs -> raise f + + let epoch_begin context ~dbg ~sr ~vdi ~persistent = + info "VDI.epoch_begin dbg:%s sr:%s vdi:%s persistent:%b" dbg sr vdi persistent; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + Impl.VDI.epoch_begin context ~dbg ~sr ~vdi ~persistent + )) + + let attach context ~dbg ~dp ~sr ~vdi ~read_write = + info "VDI.attach dbg:%s dp:%s sr:%s vdi:%s read_write:%b" dbg dp sr vdi read_write; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + let state = perform_nolock context ~dbg ~dp ~sr ~vdi + (Vdi_automaton.Attach (if read_write then Vdi_automaton.RW else Vdi_automaton.RO)) in + Opt.unbox state.Vdi.attach_info + )) + + let activate context ~dbg ~dp ~sr ~vdi = + info "VDI.activate dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + ignore(perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Activate))) + + let deactivate context ~dbg ~dp ~sr ~vdi = + info "VDI.deactivate dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + ignore (perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Deactivate))) + + let detach context ~dbg ~dp ~sr ~vdi = + info "VDI.detach dbg:%s dp:%s sr:%s vdi:%s" dbg dp sr vdi; + with_vdi sr vdi + (fun () -> + 
remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + ignore (perform_nolock context ~dbg ~dp ~sr ~vdi Vdi_automaton.Detach))) + + let epoch_end context ~dbg ~sr ~vdi = + info "VDI.epoch_end dbg:%s sr:%s vdi:%s" dbg sr vdi; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.leaked + (fun () -> + Impl.VDI.epoch_end context ~dbg ~sr ~vdi + )) + + let create context ~dbg ~sr ~vdi_info = + info "VDI.create dbg:%s sr:%s vdi_info:%s" dbg sr (string_of_vdi_info vdi_info); + let result = Impl.VDI.create context ~dbg ~sr ~vdi_info in + match result with + | { virtual_size = virtual_size' } when virtual_size' < vdi_info.virtual_size -> + error "VDI.create dbg:%s created a smaller VDI (%Ld)" dbg virtual_size'; + raise (Backend_error("SR_BACKEND_FAILURE", ["Disk too small"; Int64.to_string vdi_info.virtual_size; Int64.to_string virtual_size'])) + | result -> result + + let snapshot_and_clone call_name call_f context ~dbg ~sr ~vdi_info = + info "%s dbg:%s sr:%s vdi_info:%s" call_name dbg sr (string_of_vdi_info vdi_info); + with_vdi sr vdi_info.vdi + (fun () -> + call_f context ~dbg ~sr ~vdi_info + ) + + let snapshot = snapshot_and_clone "VDI.snapshot" Impl.VDI.snapshot + let clone = snapshot_and_clone "VDI.clone" Impl.VDI.clone + + let set_name_label context ~dbg ~sr ~vdi ~new_name_label = + info "VDI.set_name_label dbg:%s sr:%s vdi:%s new_name_label:%s" dbg sr vdi new_name_label; + with_vdi sr vdi + (fun () -> + Impl.VDI.set_name_label context ~dbg ~sr ~vdi ~new_name_label + ) + + let set_name_description context ~dbg ~sr ~vdi ~new_name_description = + info "VDI.set_name_description dbg:%s sr:%s vdi:%s new_name_description:%s" dbg sr vdi new_name_description; + with_vdi sr vdi + (fun () -> + Impl.VDI.set_name_description context ~dbg ~sr ~vdi ~new_name_description + ) + + let resize context ~dbg ~sr ~vdi ~new_size = + info "VDI.resize dbg:%s sr:%s vdi:%s new_size:%Ld" dbg sr vdi new_size; + with_vdi sr vdi + 
(fun () -> + Impl.VDI.resize context ~dbg ~sr ~vdi ~new_size + ) + + let destroy context ~dbg ~sr ~vdi = + info "VDI.destroy dbg:%s sr:%s vdi:%s" dbg sr vdi; + with_vdi sr vdi + (fun () -> + remove_datapaths_andthen_nolock context ~dbg ~sr ~vdi Vdi.all + (fun () -> + Impl.VDI.destroy context ~dbg ~sr ~vdi + ) + ) + + let stat context ~dbg ~sr ~vdi = + info "VDI.stat dbg:%s sr:%s vdi:%s" dbg sr vdi; + Impl.VDI.stat context ~dbg ~sr ~vdi + + let introduce context ~dbg ~sr ~uuid ~sm_config ~location = + info "VDI.introduce dbg:%s sr:%s uuid:%s sm_config:%s location:%s" dbg sr uuid (String.concat ", " (List.map (fun (k, v) -> k ^ ":" ^ v) sm_config)) location; + Impl.VDI.introduce context ~dbg ~sr ~uuid ~sm_config ~location + + let set_persistent context ~dbg ~sr ~vdi ~persistent = + info "VDI.set_persistent dbg:%s sr:%s vdi:%s persistent:%b" dbg sr vdi persistent; + with_vdi sr vdi + (fun () -> + Impl.VDI.set_persistent context ~dbg ~sr ~vdi ~persistent + ) + + let get_by_name context ~dbg ~sr ~name = + info "VDI.get_by_name dbg:%s sr:%s name:%s" dbg sr name; + Impl.VDI.get_by_name context ~dbg ~sr ~name + + let set_content_id context ~dbg ~sr ~vdi ~content_id = + info "VDI.set_content_id dbg:%s sr:%s vdi:%s content_id:%s" dbg sr vdi content_id; + Impl.VDI.set_content_id context ~dbg ~sr ~vdi ~content_id + + let similar_content context ~dbg ~sr ~vdi = + info "VDI.similar_content dbg:%s sr:%s vdi:%s" dbg sr vdi; + Impl.VDI.similar_content context ~dbg ~sr ~vdi + + let compose context ~dbg ~sr ~vdi1 ~vdi2 = + info "VDI.compose dbg:%s sr:%s vdi1:%s vdi2:%s" dbg sr vdi1 vdi2; + Impl.VDI.compose context ~dbg ~sr ~vdi1 ~vdi2 + + let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = + info "VDI.add_to_other_config dbg:%s sr:%s vdi:%s key:%s valu:%s" dbg sr vdi key value; + Impl.VDI.add_to_sm_config context ~dbg ~sr ~vdi ~key ~value + + let remove_from_sm_config context ~dbg ~sr ~vdi ~key = + info "VDI.remove_from_sm_config dbg:%s sr:%s vdi:%s key:%s" dbg sr vdi key; + 
Impl.VDI.remove_from_sm_config context ~dbg ~sr ~vdi ~key + + let get_url context ~dbg ~sr ~vdi = + info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; + Impl.VDI.get_url context ~dbg ~sr ~vdi + + end + + let get_by_name context ~dbg ~name = + debug "get_by_name dbg:%s name:%s" dbg name; + Impl.get_by_name context ~dbg ~name + + module DATA = struct + let copy_into context ~dbg ~sr ~vdi ~url ~dest = + info "DATA.copy_into dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; + Impl.DATA.copy_into context ~dbg ~sr ~vdi ~url ~dest + + let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = + info "DATA.copy dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; + Impl.DATA.copy context ~dbg ~sr ~vdi ~dp ~url ~dest + + module MIRROR = struct + let start context ~dbg ~sr ~vdi ~dp ~url ~dest = + info "DATA.MIRROR.start dbg:%s sr:%s vdi:%s url:%s dest:%s" dbg sr vdi url dest; + Impl.DATA.MIRROR.start context ~dbg ~sr ~vdi ~dp ~url ~dest + + let stop context ~dbg ~id = + info "DATA.MIRROR.stop dbg:%s id:%s" dbg id; + Impl.DATA.MIRROR.stop context ~dbg ~id + + let list context ~dbg = + info "DATA.MIRROR.active dbg:%s" dbg; + Impl.DATA.MIRROR.list context ~dbg + + let stat context ~dbg ~id = + info "DATA.MIRROR.stat dbg:%s id:%s" dbg id; + Impl.DATA.MIRROR.stat context ~dbg ~id + + let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = + info "DATA.MIRROR.receive_start dbg:%s sr:%s id:%s similar:[%s]" + dbg sr id (String.concat "," similar); + Impl.DATA.MIRROR.receive_start context ~dbg ~sr ~vdi_info ~id ~similar + + let receive_finalize context ~dbg ~id = + info "DATA.MIRROR.receive_finalize dbg:%s id:%s" dbg id; + Impl.DATA.MIRROR.receive_finalize context ~dbg ~id + + let receive_cancel context ~dbg ~id = + info "DATA.MIRROR.receive_cancel dbg:%s id:%s" dbg id; + Impl.DATA.MIRROR.receive_cancel context ~dbg ~id + + end + + end + + module DP = struct + let create context ~dbg ~id = id + + (** [destroy_sr context dp sr allow_leak vdi_already_locked] attempts to free + the 
resources associated with [dp] in [sr]. If [vdi_already_locked] then + it is assumed that all VDIs are already locked. *) + let destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak vdi_already_locked = + (* Every VDI in use by this session should be detached and deactivated *) + let vdis = Sr.list sr_t in + List.fold_left (fun acc (vdi, vdi_t) -> + let locker = + if vdi_already_locked + then fun f -> f () + else VDI.with_vdi sr vdi in + locker + (fun () -> + try + VDI.destroy_datapath_nolock context ~dbg ~dp ~sr ~vdi ~allow_leak; acc + with e -> e::acc + )) [] vdis + + + let destroy context ~dbg ~dp ~allow_leak = + info "DP.destroy dbg:%s dp:%s allow_leak:%b" dbg dp allow_leak; + let failures = List.fold_left (fun acc (sr, sr_t) -> acc @ (destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak false)) [] (Host.list !Host.host) in + match failures, allow_leak with + | [], _ -> () + | f :: _, false -> + error "Leaked datapath: dp: %s" dp; + raise f + | _ :: _, true -> + info "Forgetting leaked datapath: dp: %s" dp; + () + + let diagnostics context () = + let srs = Host.list !Host.host in + let of_sr (sr, sr_t) = + let title = Printf.sprintf "SR %s" sr in + title :: (List.map indent (Sr.to_string_list sr_t)) in + let srs = List.concat (List.map of_sr srs) in + let errors = List.map Errors.to_string (Errors.list ()) in + let errors = (if errors <> [] then "The following errors have been logged:" else "No errors have been logged.") :: errors in + let lines = [ "The following SRs are attached:" ] @ (List.map indent srs) @ [ "" ] @ errors in + String.concat "" (List.map (fun x -> x ^ "\n") lines) + + let attach_info context ~dbg ~sr ~vdi ~dp = + let srs = Host.list !Host.host in + let sr_state = List.assoc sr srs in + let vdi_state = Hashtbl.find sr_state.Sr.vdis vdi in + let dp_state = Vdi.get_dp_state dp vdi_state in + debug "Looking for dp: %s" dp; + match dp_state,vdi_state.Vdi.attach_info with + | Vdi_automaton.Activated _, Some attach_info -> + attach_info + | _ -> + raise 
(Internal_error (Printf.sprintf "sr: %s vdi: %s Datapath %s not attached" sr vdi dp)) + + + let stat_vdi context ~dbg ~sr ~vdi () = + info "DP.stat_vdi dbg:%s sr:%s vdi:%s" dbg sr vdi; + VDI.with_vdi sr vdi + (fun () -> + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some sr_t -> + let vdi_t = Opt.default (Vdi.empty ()) (Sr.find vdi sr_t) in + { + superstate = Vdi.superstate vdi_t; + dps = List.map (fun dp -> dp, Vdi.get_dp_state dp vdi_t) (Vdi.dps vdi_t) + } + ) + + end + + module SR = struct + include Storage_skeleton.SR + let locks : (string, unit) Storage_locks.t = Storage_locks.make () + let with_sr sr f = Storage_locks.with_instance_lock locks sr f + + let probe context ~dbg ~queue ~device_config ~sm_config = + Impl.SR.probe context ~dbg ~queue ~device_config ~sm_config + + let list context ~dbg = + List.map fst (Host.list !Host.host) + + let stat context ~dbg ~sr = + info "SR.stat dbg:%s sr:%s" dbg sr; + with_sr sr + (fun () -> + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some _ -> + Impl.SR.stat context ~dbg ~sr + ) + + let scan context ~dbg ~sr = + info "SR.scan dbg:%s sr:%s" dbg sr; + with_sr sr + (fun () -> + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some _ -> + Impl.SR.scan context ~dbg ~sr + ) + + let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = + with_sr sr + (fun () -> + match Host.find sr !Host.host with + | None -> + Impl.SR.create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size + | Some _ -> + error "SR %s is already attached" sr; + raise (Sr_attached sr) + ) + + let set_name_label context ~dbg ~sr ~new_name_label = + info "SR.set_name_label dbg:%s sr:%s new_name_label:%s" dbg sr new_name_label; + Impl.SR.set_name_label context ~dbg ~sr ~new_name_label + + let set_name_description context ~dbg ~sr ~new_name_description = + info "SR.set_name_description dbg:%s sr:%s 
new_name_description:%s" dbg sr new_name_description; + Impl.SR.set_name_description context ~dbg ~sr ~new_name_description + + let attach context ~dbg ~sr ~device_config = + let censor_key = ["password"] in + let device_config_str = String.concat "; " (List.map (fun (k, v) -> + let v' = (if List.exists (Xstringext.String.has_substr k) censor_key then "(omitted)" else v) in + (k ^ ":" ^ v')) device_config) + in + info "SR.attach dbg:%s sr:%s device_config:[%s]" dbg sr device_config_str; + with_sr sr + (fun () -> + match Host.find sr !Host.host with + | None -> + Impl.SR.attach context ~dbg ~sr ~device_config; + Host.replace sr (Sr.empty ()) !Host.host; + (* FH1: Perform the side-effect first: in the case of a + failure half-way through we would rather perform the + side-effect twice than never at all. *) + Everything.to_file !host_state_path (Everything.make ()) + | Some _ -> + (* Operation is idempotent *) + () + ) + + let detach_destroy_common context ~dbg ~sr f = + let active_dps sr_t = + (* Enumerate all active datapaths *) + List.concat (List.map (fun (_, vdi_t) -> Vdi.dps vdi_t) (Sr.list sr_t)) in + + with_sr sr + (fun () -> + match Host.find sr !Host.host with + | None -> raise (Sr_not_attached sr) + | Some sr_t -> + VDI.with_all_vdis sr + (fun () -> + let dps = active_dps sr_t in + List.iter + (fun dp -> + let ( _ : exn list) = DP.destroy_sr context ~dbg ~dp ~sr ~sr_t ~allow_leak:false true in () + ) dps; + let dps = active_dps sr_t in + if dps <> [] + then error "The following datapaths have leaked: %s" (String.concat "; " dps); + f context ~dbg ~sr; + Host.remove sr !Host.host; + Everything.to_file !host_state_path (Everything.make ()); + VDI.locks_remove sr + ) + ) + + let detach context ~dbg ~sr = + info "SR.detach dbg:%s sr:%s" dbg sr; + detach_destroy_common context ~dbg ~sr Impl.SR.detach + + let reset context ~dbg ~sr = + info "SR.reset dbg:%s sr:%s" dbg sr; + with_sr sr + (fun () -> + Host.remove sr !Host.host; + Everything.to_file !host_state_path 
(Everything.make ()); + VDI.locks_remove sr + ) + + let destroy context ~dbg ~sr = + info "SR.destroy dbg:%s sr:%s" dbg sr; + detach_destroy_common context ~dbg ~sr Impl.SR.destroy + + let update_snapshot_info_src context ~dbg ~sr ~vdi ~url + ~dest ~dest_vdi ~snapshot_pairs= + info + "SR.update_snapshot_info_src dbg:%s sr:%s vdi:%s url:%s dest:%s dest_vdi:%s snapshot_pairs:%s" + dbg sr vdi url dest dest_vdi + (List.map + (fun (local_snapshot, dest_snapshot) -> + Printf.sprintf "local:%s, dest:%s" local_snapshot dest_snapshot) + snapshot_pairs + |> String.concat "; " + |> Printf.sprintf "[%s]"); + Impl.SR.update_snapshot_info_src context ~dbg ~sr ~vdi ~url + ~dest ~dest_vdi ~snapshot_pairs + + let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = + info + "SR.update_snapshot_info_dest dbg:%s sr:%s vdi:%s ~src_vdi:%s snapshot_pairs:%s" + dbg sr vdi src_vdi.vdi + (List.map + (fun (local_snapshot, src_snapshot_info) -> + Printf.sprintf "local:%s, src:%s" local_snapshot src_snapshot_info.vdi) + snapshot_pairs + |> String.concat "; " + |> Printf.sprintf "[%s]"); + Impl.SR.update_snapshot_info_dest context ~dbg ~sr ~vdi + ~src_vdi ~snapshot_pairs + end + + module Policy = struct + let get_backend_vm context ~dbg ~vm ~sr ~vdi = + Impl.Policy.get_backend_vm context ~dbg ~vm ~sr ~vdi + end + + module TASK = struct + open Storage_task + let t x = { + Task.id = x.id; + debug_info = x.dbg; + ctime = x.ctime; + state = x.state; + subtasks = x.subtasks; + } + + let cancel _ ~dbg ~task = + Storage_task.cancel tasks task + let stat' task = + Mutex.execute tasks.m + (fun () -> + find_locked tasks task |> t + ) + let stat _ ~dbg ~task = stat' task + let destroy' ~task = + destroy tasks task; + Updates.remove (Dynamic.Task task) updates + let destroy _ ~dbg ~task = destroy' ~task + let list _ ~dbg = list tasks |> List.map t + end + + module UPDATES = struct + let get _ ~dbg ~from ~timeout = + let from = try Some (int_of_string from) with _ -> None in + let _, 
ids, next = Updates.get dbg from timeout updates in + (ids, string_of_int next) + end end let initialise () = - Unixext.mkdir_rec (Filename.dirname !host_state_path) 0o700; - if Sys.file_exists !host_state_path then begin - info "Loading storage state from: %s" !host_state_path; - try - let state = Everything.of_file !host_state_path in - Everything.set state - with e -> - error "Failed to load storage state from: %s; creating blank database (error: %s)" !host_state_path (Printexc.to_string e) - end else info "No storage state is persisted in %s; creating blank database" !host_state_path + Unixext.mkdir_rec (Filename.dirname !host_state_path) 0o700; + if Sys.file_exists !host_state_path then begin + info "Loading storage state from: %s" !host_state_path; + try + let state = Everything.of_file !host_state_path in + Everything.set state + with e -> + error "Failed to load storage state from: %s; creating blank database (error: %s)" !host_state_path (Printexc.to_string e) + end else info "No storage state is persisted in %s; creating blank database" !host_state_path module Local_domain_socket = struct - let path = Filename.concat "/var/lib/xcp" "storage" - - let xmlrpc_handler process req bio _ = - let body = Http_svr.read_body req bio in - let s = Buf_io.fd_of bio in - let rpc = Xmlrpc.call_of_string body in - (* Printf.fprintf stderr "Request: %s %s\n%!" rpc.Rpc.name (Rpc.to_string (List.hd rpc.Rpc.params)); *) - let result = process (Some req.Http.Request.uri) rpc in - (* Printf.fprintf stderr "Response: %s\n%!" (Rpc.to_string result.Rpc.contents); *) - let str = Xmlrpc.string_of_response result in - Http_svr.response_str req s str + let path = Filename.concat "/var/lib/xcp" "storage" + + let xmlrpc_handler process req bio _ = + let body = Http_svr.read_body req bio in + let s = Buf_io.fd_of bio in + let rpc = Xmlrpc.call_of_string body in + (* Printf.fprintf stderr "Request: %s %s\n%!" 
rpc.Rpc.name (Rpc.to_string (List.hd rpc.Rpc.params)); *) + let result = process (Some req.Http.Request.uri) rpc in + (* Printf.fprintf stderr "Response: %s\n%!" (Rpc.to_string result.Rpc.contents); *) + let str = Xmlrpc.string_of_response result in + Http_svr.response_str req s str end open Xmlrpc_client let local_url = Http.Url.(File { path = Filename.concat "/var/lib/xcp" "storage" }, { uri = "/"; query_params = [] }) let rpc ~srcstr ~dststr url call = - XMLRPC_protocol.rpc ~transport:(transport_of_url url) - ~srcstr ~dststr ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call + XMLRPC_protocol.rpc ~transport:(transport_of_url url) + ~srcstr ~dststr ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call module Local = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"smapiv2" local_url end) diff --git a/ocaml/xapi/storage_impl_test.ml b/ocaml/xapi/storage_impl_test.ml index 36f5c4e17da..eb126ba729e 100644 --- a/ocaml/xapi/storage_impl_test.ml +++ b/ocaml/xapi/storage_impl_test.ml @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) open Stdext open Pervasiveext @@ -28,215 +28,215 @@ let error = Storage_impl.error let info = Storage_impl.info let inc_errors () = - Mutex.execute total_errors_m - (fun () -> - incr total_errors; - ) + Mutex.execute total_errors_m + (fun () -> + incr total_errors; + ) exception Api_error of string * (string list) module Debug_print_impl = struct - type context = Smint.request - module Query = struct - let query context ~dbg = assert false - let diagnostics context ~dbg = assert false - end - module DP = struct - let create context ~dbg ~id = assert false - let destroy context ~dbg ~dp = assert false - let diagnostics context () = assert false - let attach_info context ~dbg ~sr ~vdi ~dp = assert false - let stat_vdi context ~dbg ~sr ~vdi () = assert false - end - module VDI = struct - let m 
= Mutex.create () - let attached = Hashtbl.create 10 - let activated = Hashtbl.create 10 - let created = Hashtbl.create 10 - let key_of sr vdi = Printf.sprintf "%s/%s" sr vdi - - let create context ~dbg ~sr ~vdi_info = - let vdi = "newvdi" in - let info = - if List.mem_assoc "toosmall" vdi_info.sm_config - then { vdi_info with virtual_size = Int64.sub vdi_info.virtual_size 1L } - else vdi_info in - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - Hashtbl.replace created key info - ); - info - - let set_name_label context ~dbg ~sr ~vdi ~new_name_label = () - let set_name_description context ~dbg ~sr ~vdi ~new_name_description = () - - let snapshot context ~dbg ~sr ~vdi_info = - create context ~dbg ~sr ~vdi_info - let clone = snapshot - - let destroy context ~dbg ~sr ~vdi = - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - if not(Hashtbl.mem created key) - then raise (Backend_error("ENOENT", [ sr; vdi ])) - else if Hashtbl.mem activated key - then raise (Backend_error("Still activated", [ sr; vdi])) - else if Hashtbl.mem attached key - then raise (Backend_error("Still attached", [ sr; vdi])) - else begin - Hashtbl.remove created key - end - ) - - let epoch_begin context ~dbg ~sr ~vdi ~persistent = () - - let stat context ~dbg ~sr ~vdi = assert false - - let introduce context ~dbg ~sr ~uuid ~sm_config ~location = assert false - - let set_persistent context ~dbg ~sr ~vdi ~persistent = () - - let attach context ~dbg ~dp ~sr ~vdi ~read_write = - info "VDI.attach dp:%s sr:%s vdi:%s read_write:%b" dp sr vdi read_write; - if dp = "error" - then raise (Api_error("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])); - if dp = "error2" - then raise (Backend_error ("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])) - else begin - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - if Hashtbl.mem attached key then begin - inc_errors (); - error "VDI.attach dp:%s sr:%s vdi:%s : double attach" dp sr vdi; - failwith "double 
attach" - end else Hashtbl.replace attached key ()); - { params="XXX"; o_direct=true; o_direct_reason=""; xenstore_data=[] } - end - let activate context ~dbg ~dp ~sr ~vdi = - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - if Hashtbl.mem activated key then begin - inc_errors (); - error "VDI.detach dp:%s sr:%s vdi:%s : double activate" dp sr vdi; - failwith "double activate" - end else Hashtbl.replace activated key ()); - info "VDI.activate dp:%s sr:%s vdi:%s" dp sr vdi - - let working = ref false - - let epoch_end context ~dbg ~sr ~vdi = () - - let detach context ~dbg ~dp ~sr ~vdi = - if vdi = "error" && not(!working) - then raise (Api_error("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])); - if vdi = "error2" && not(!working) - then raise (Backend_error ("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])) - else begin - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - if not (Hashtbl.mem attached key) then begin - inc_errors (); - error "VDI.detach dp:%s sr:%s vdi:%s : double detach" dp sr vdi; - failwith "double detach" - end else Hashtbl.remove attached key); - info "VDI.detach dp:%s sr:%s vdi:%s" dp sr vdi - end - let deactivate context ~dbg ~dp ~sr ~vdi = - Mutex.execute m - (fun () -> - let key = key_of sr vdi in - if not (Hashtbl.mem activated key) then begin - inc_errors (); - error "VDI.deactivate dp:%s sr:%s vdi:%s : double deactivate" dp sr vdi; - failwith "double deactivate" - end else Hashtbl.remove activated key); - info "VDI.deactivate dp:%s sr:%s vdi:%s" dp sr vdi - - let resize context ~dbg ~sr ~vdi ~new_size = assert false - - let get_url context ~dbg ~sr ~vdi = assert false - let compose context ~dbg ~sr ~vdi1 ~vdi2 = assert false - let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = assert false - let remove_from_sm_config context ~dbg ~sr ~vdi ~key = assert false - let set_content_id context ~dbg ~sr ~vdi ~content_id = assert false - let get_by_name context ~dbg ~sr ~name = assert false - 
let similar_content context ~dbg ~sr ~vdi = assert false - - - end - - let get_by_name context ~dbg ~name = assert false - - module DATA = struct - let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false - let copy_into context ~dbg ~sr ~vdi ~url ~dest = assert false - module MIRROR = struct - let start context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false - let stop context ~dbg ~id = assert false - let list context ~dbg = assert false - let stat context ~dbg ~id = assert false - let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = assert false - let receive_finalize context ~dbg ~id = assert false - let receive_cancel context ~dbg ~id = assert false - end - end - - module SR = struct - include Storage_skeleton.SR - let list context ~dbg = assert false - let scan context ~dbg ~sr = assert false - let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = assert false - let attach context ~dbg ~sr ~device_config = - info "SR.attach sr:%s" sr - let fail_if_anything_leaked () = - Mutex.execute VDI.m - (fun () -> - Hashtbl.iter - (fun k _ -> - error "leaked attach: %s" k; - inc_errors (); - ) VDI.attached; - Hashtbl.iter - (fun k _ -> - error "leaked activate: %s" k; - inc_errors (); - ) VDI.activated - ) - let detach context ~dbg ~sr = - info "SR.detach sr:%s" sr; - fail_if_anything_leaked () - let reset context ~dbg ~sr = assert false - let stat context ~dbg ~sr = assert false - let destroy context ~dbg ~sr = - info "SR.destroy sr:%s" sr; - fail_if_anything_leaked () - let update_snapshot_info_src context ~dbg ~sr ~vdi - ~url ~dest ~dest_vdi ~snapshot_pairs = - assert false - let update_snapshot_info_dest context ~dbg ~sr ~vdi - ~src_vdi ~snapshot_pairs = - assert false - end - - module Policy = struct - let get_backend_vm context ~dbg ~vm ~sr ~vdi = assert false - end - - module TASK = struct - let stat context ~dbg ~task = assert false - let destroy context ~dbg ~task = assert false - let cancel context ~dbg ~task = assert 
false - let list context ~dbg = assert false - end - - module UPDATES = struct - let get context ~dbg ~from ~timeout = assert false - end + type context = Smint.request + module Query = struct + let query context ~dbg = assert false + let diagnostics context ~dbg = assert false + end + module DP = struct + let create context ~dbg ~id = assert false + let destroy context ~dbg ~dp = assert false + let diagnostics context () = assert false + let attach_info context ~dbg ~sr ~vdi ~dp = assert false + let stat_vdi context ~dbg ~sr ~vdi () = assert false + end + module VDI = struct + let m = Mutex.create () + let attached = Hashtbl.create 10 + let activated = Hashtbl.create 10 + let created = Hashtbl.create 10 + let key_of sr vdi = Printf.sprintf "%s/%s" sr vdi + + let create context ~dbg ~sr ~vdi_info = + let vdi = "newvdi" in + let info = + if List.mem_assoc "toosmall" vdi_info.sm_config + then { vdi_info with virtual_size = Int64.sub vdi_info.virtual_size 1L } + else vdi_info in + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + Hashtbl.replace created key info + ); + info + + let set_name_label context ~dbg ~sr ~vdi ~new_name_label = () + let set_name_description context ~dbg ~sr ~vdi ~new_name_description = () + + let snapshot context ~dbg ~sr ~vdi_info = + create context ~dbg ~sr ~vdi_info + let clone = snapshot + + let destroy context ~dbg ~sr ~vdi = + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + if not(Hashtbl.mem created key) + then raise (Backend_error("ENOENT", [ sr; vdi ])) + else if Hashtbl.mem activated key + then raise (Backend_error("Still activated", [ sr; vdi])) + else if Hashtbl.mem attached key + then raise (Backend_error("Still attached", [ sr; vdi])) + else begin + Hashtbl.remove created key + end + ) + + let epoch_begin context ~dbg ~sr ~vdi ~persistent = () + + let stat context ~dbg ~sr ~vdi = assert false + + let introduce context ~dbg ~sr ~uuid ~sm_config ~location = assert false + + let set_persistent context ~dbg ~sr 
~vdi ~persistent = () + + let attach context ~dbg ~dp ~sr ~vdi ~read_write = + info "VDI.attach dp:%s sr:%s vdi:%s read_write:%b" dp sr vdi read_write; + if dp = "error" + then raise (Api_error("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])); + if dp = "error2" + then raise (Backend_error ("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])) + else begin + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + if Hashtbl.mem attached key then begin + inc_errors (); + error "VDI.attach dp:%s sr:%s vdi:%s : double attach" dp sr vdi; + failwith "double attach" + end else Hashtbl.replace attached key ()); + { params="XXX"; o_direct=true; o_direct_reason=""; xenstore_data=[] } + end + let activate context ~dbg ~dp ~sr ~vdi = + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + if Hashtbl.mem activated key then begin + inc_errors (); + error "VDI.detach dp:%s sr:%s vdi:%s : double activate" dp sr vdi; + failwith "double activate" + end else Hashtbl.replace activated key ()); + info "VDI.activate dp:%s sr:%s vdi:%s" dp sr vdi + + let working = ref false + + let epoch_end context ~dbg ~sr ~vdi = () + + let detach context ~dbg ~dp ~sr ~vdi = + if vdi = "error" && not(!working) + then raise (Api_error("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])); + if vdi = "error2" && not(!working) + then raise (Backend_error ("SR_BACKEND_FAILURE_test", [ "this"; "is"; "an"; "example" ])) + else begin + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + if not (Hashtbl.mem attached key) then begin + inc_errors (); + error "VDI.detach dp:%s sr:%s vdi:%s : double detach" dp sr vdi; + failwith "double detach" + end else Hashtbl.remove attached key); + info "VDI.detach dp:%s sr:%s vdi:%s" dp sr vdi + end + let deactivate context ~dbg ~dp ~sr ~vdi = + Mutex.execute m + (fun () -> + let key = key_of sr vdi in + if not (Hashtbl.mem activated key) then begin + inc_errors (); + error "VDI.deactivate dp:%s sr:%s vdi:%s : double 
deactivate" dp sr vdi; + failwith "double deactivate" + end else Hashtbl.remove activated key); + info "VDI.deactivate dp:%s sr:%s vdi:%s" dp sr vdi + + let resize context ~dbg ~sr ~vdi ~new_size = assert false + + let get_url context ~dbg ~sr ~vdi = assert false + let compose context ~dbg ~sr ~vdi1 ~vdi2 = assert false + let add_to_sm_config context ~dbg ~sr ~vdi ~key ~value = assert false + let remove_from_sm_config context ~dbg ~sr ~vdi ~key = assert false + let set_content_id context ~dbg ~sr ~vdi ~content_id = assert false + let get_by_name context ~dbg ~sr ~name = assert false + let similar_content context ~dbg ~sr ~vdi = assert false + + + end + + let get_by_name context ~dbg ~name = assert false + + module DATA = struct + let copy context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false + let copy_into context ~dbg ~sr ~vdi ~url ~dest = assert false + module MIRROR = struct + let start context ~dbg ~sr ~vdi ~dp ~url ~dest = assert false + let stop context ~dbg ~id = assert false + let list context ~dbg = assert false + let stat context ~dbg ~id = assert false + let receive_start context ~dbg ~sr ~vdi_info ~id ~similar = assert false + let receive_finalize context ~dbg ~id = assert false + let receive_cancel context ~dbg ~id = assert false + end + end + + module SR = struct + include Storage_skeleton.SR + let list context ~dbg = assert false + let scan context ~dbg ~sr = assert false + let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = assert false + let attach context ~dbg ~sr ~device_config = + info "SR.attach sr:%s" sr + let fail_if_anything_leaked () = + Mutex.execute VDI.m + (fun () -> + Hashtbl.iter + (fun k _ -> + error "leaked attach: %s" k; + inc_errors (); + ) VDI.attached; + Hashtbl.iter + (fun k _ -> + error "leaked activate: %s" k; + inc_errors (); + ) VDI.activated + ) + let detach context ~dbg ~sr = + info "SR.detach sr:%s" sr; + fail_if_anything_leaked () + let reset context ~dbg ~sr = assert false + let stat 
context ~dbg ~sr = assert false + let destroy context ~dbg ~sr = + info "SR.destroy sr:%s" sr; + fail_if_anything_leaked () + let update_snapshot_info_src context ~dbg ~sr ~vdi + ~url ~dest ~dest_vdi ~snapshot_pairs = + assert false + let update_snapshot_info_dest context ~dbg ~sr ~vdi + ~src_vdi ~snapshot_pairs = + assert false + end + + module Policy = struct + let get_backend_vm context ~dbg ~vm ~sr ~vdi = assert false + end + + module TASK = struct + let stat context ~dbg ~task = assert false + let destroy context ~dbg ~task = assert false + let cancel context ~dbg ~task = assert false + let list context ~dbg = assert false + end + + module UPDATES = struct + let get context ~dbg ~from ~timeout = assert false + end end @@ -244,9 +244,9 @@ module Server=Server(Storage_impl.Wrapper(Debug_print_impl)) let path = "/tmp/storage" -let rpc_unix call = - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~transport:(Unix path) ~http:(xmlrpc ~version:"1.0" "/") call +let rpc_unix call = + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~transport:(Unix path) ~http:(xmlrpc ~version:"1.0" "/") call let rpc_inprocess call = Server.process (Some "") call let use_inprocess_rpc = ref true @@ -260,271 +260,271 @@ module Client=Client(struct let rpc=rpc end) let datapath_of_id id = Client.DP.create ~dbg ~id let expect expected f x = - if not(f x) then begin - error "error: expected %s" expected; - inc_errors (); - end - -let backend_error f = - try ignore(f ()); false with - | (Backend_error(code, params)) when code = "SR_BACKEND_FAILURE_test" -> true - | e -> - debug "backend_error: Expecting SR_BACKEND_FAILURE_test, got '%s'" (Printexc.to_string e); - false - -let too_small_backend_error f = - try ignore(f ()); false with - | (Backend_error(code, params)) when code = "SR_BACKEND_FAILURE" && (List.hd params = "Disk too small") -> true - | _ -> false - -let internal_error f = - try ignore(f ()); false with - | (Internal_error 
"Storage_impl_test.Api_error(\"SR_BACKEND_FAILURE_test\", _)") -> true - | _ -> false + if not(f x) then begin + error "error: expected %s" expected; + inc_errors (); + end + +let backend_error f = + try ignore(f ()); false with + | (Backend_error(code, params)) when code = "SR_BACKEND_FAILURE_test" -> true + | e -> + debug "backend_error: Expecting SR_BACKEND_FAILURE_test, got '%s'" (Printexc.to_string e); + false + +let too_small_backend_error f = + try ignore(f ()); false with + | (Backend_error(code, params)) when code = "SR_BACKEND_FAILURE" && (List.hd params = "Disk too small") -> true + | _ -> false + +let internal_error f = + try ignore(f ()); false with + | (Internal_error "Storage_impl_test.Api_error(\"SR_BACKEND_FAILURE_test\", _)") -> true + | _ -> false let dp_is dp state s = - if not (List.mem_assoc dp s.dps) - then state = Vdi_automaton.Detached - else - let state' = List.assoc dp s.dps in - let result = state = state' in - if not result then begin - debug "dp_is: returning false: actual state=%s passed state=%s" - (Vdi_automaton.string_of_state state') - (Vdi_automaton.string_of_state state) - end; - result + if not (List.mem_assoc dp s.dps) + then state = Vdi_automaton.Detached + else + let state' = List.assoc dp s.dps in + let result = state = state' in + if not result then begin + debug "dp_is: returning false: actual state=%s passed state=%s" + (Vdi_automaton.string_of_state state') + (Vdi_automaton.string_of_state state) + end; + result let test_vdis sr : unit = - let num_users = 10 in - let num_vdis = 10 in - let iterations = 10 in - let one id sr vdi () = - let dp = datapath_of_id id in - for i = 0 to iterations - 1 do - expect "_" (function _ -> true) - (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); - expect "Attached(RO) 1" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "() 2" (fun x -> x = ()) - (Client.VDI.detach ~dbg ~dp ~sr ~vdi); - expect "Detached 3" (dp_is dp 
Vdi_automaton.Detached) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "()" (fun x -> x = ()) - (Client.VDI.detach ~dbg ~dp ~sr ~vdi); - expect "Params _ 4" (function _ -> true) - (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); - expect "Attached(RO) 5" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "() 6" (fun x -> x = ()) - (Client.VDI.activate ~dbg ~dp ~sr ~vdi); - expect "Activated(RO) 7" (dp_is dp (Vdi_automaton.Activated Vdi_automaton.RO)) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "() 8" (fun x -> x = ()) - (Client.VDI.deactivate ~dbg ~dp ~sr ~vdi); - expect "Attached(RO) 9" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "() 10" (fun x -> x = ()) - (Client.VDI.detach ~dbg ~dp ~sr ~vdi); - expect "Detached 11" (dp_is dp Vdi_automaton.Detached) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - done in - let vdis = Range.to_list (Range.make 0 num_vdis) in - let users = Range.to_list (Range.make 0 num_users) in - let bodies = - List.concat ( - List.map (fun user -> - List.map (fun vdi -> one (Printf.sprintf "vdi:%d/user:%d" vdi user) sr (string_of_int vdi)) vdis - ) users - ) in - info "Starting %d threads%!" (List.length bodies); - let threads = List.map (fun f -> Thread.create f ()) bodies in - info "Joining %d threads%!" 
(List.length bodies); - List.iter Thread.join threads + let num_users = 10 in + let num_vdis = 10 in + let iterations = 10 in + let one id sr vdi () = + let dp = datapath_of_id id in + for i = 0 to iterations - 1 do + expect "_" (function _ -> true) + (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); + expect "Attached(RO) 1" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "() 2" (fun x -> x = ()) + (Client.VDI.detach ~dbg ~dp ~sr ~vdi); + expect "Detached 3" (dp_is dp Vdi_automaton.Detached) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "()" (fun x -> x = ()) + (Client.VDI.detach ~dbg ~dp ~sr ~vdi); + expect "Params _ 4" (function _ -> true) + (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); + expect "Attached(RO) 5" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "() 6" (fun x -> x = ()) + (Client.VDI.activate ~dbg ~dp ~sr ~vdi); + expect "Activated(RO) 7" (dp_is dp (Vdi_automaton.Activated Vdi_automaton.RO)) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "() 8" (fun x -> x = ()) + (Client.VDI.deactivate ~dbg ~dp ~sr ~vdi); + expect "Attached(RO) 9" (dp_is dp (Vdi_automaton.Attached Vdi_automaton.RO)) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "() 10" (fun x -> x = ()) + (Client.VDI.detach ~dbg ~dp ~sr ~vdi); + expect "Detached 11" (dp_is dp Vdi_automaton.Detached) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + done in + let vdis = Range.to_list (Range.make 0 num_vdis) in + let users = Range.to_list (Range.make 0 num_users) in + let bodies = + List.concat ( + List.map (fun user -> + List.map (fun vdi -> one (Printf.sprintf "vdi:%d/user:%d" vdi user) sr (string_of_int vdi)) vdis + ) users + ) in + info "Starting %d threads%!" (List.length bodies); + let threads = List.map (fun f -> Thread.create f ()) bodies in + info "Joining %d threads%!" 
(List.length bodies); + List.iter Thread.join threads let leak dp sr activate vdi = - info "Leaking some attaches and activates"; - expect "Params _" (function _ -> true) - (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:true); - if activate - then expect "()" (function () -> true) - (Client.VDI.activate ~dbg ~dp ~sr ~vdi) + info "Leaking some attaches and activates"; + expect "Params _" (function _ -> true) + (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:true); + if activate + then expect "()" (function () -> true) + (Client.VDI.activate ~dbg ~dp ~sr ~vdi) let test_sr sr = - let dp = datapath_of_id "pbd" in - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - test_vdis sr; - leak dp sr false "leaked"; - leak dp sr true "leaked2"; - info "About to SR.detach"; - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr); - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - leak dp sr false "leaked"; - leak dp sr true "leaked2"; - info "About to logout"; - expect "()" (fun x -> x = ()) - (Client.DP.destroy ~dbg ~dp ~allow_leak:false); - info "About to SR.detach"; - expect "()" (function () -> true) - (Client.SR.detach ~dbg ~sr); - (* About to check the error handling *) - let dp = datapath_of_id "error" in - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - debug "This VDI.attach should fail:"; - expect "internal_error" internal_error - (fun () -> Client.VDI.attach ~dbg ~dp ~sr ~vdi:"leaked" ~read_write:true); - let dp = datapath_of_id "error2" in - debug "This VDI.attach should fail:"; - expect "backend_error" backend_error - (fun () -> Client.VDI.attach ~dbg ~dp ~sr ~vdi:"leaked" ~read_write:true); - debug "Detaching and cleaning up"; - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr) + let dp = datapath_of_id "pbd" in + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + test_vdis sr; + leak dp sr false "leaked"; + leak dp sr true 
"leaked2"; + info "About to SR.detach"; + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr); + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + leak dp sr false "leaked"; + leak dp sr true "leaked2"; + info "About to logout"; + expect "()" (fun x -> x = ()) + (Client.DP.destroy ~dbg ~dp ~allow_leak:false); + info "About to SR.detach"; + expect "()" (function () -> true) + (Client.SR.detach ~dbg ~sr); + (* About to check the error handling *) + let dp = datapath_of_id "error" in + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + debug "This VDI.attach should fail:"; + expect "internal_error" internal_error + (fun () -> Client.VDI.attach ~dbg ~dp ~sr ~vdi:"leaked" ~read_write:true); + let dp = datapath_of_id "error2" in + debug "This VDI.attach should fail:"; + expect "backend_error" backend_error + (fun () -> Client.VDI.attach ~dbg ~dp ~sr ~vdi:"leaked" ~read_write:true); + debug "Detaching and cleaning up"; + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr) (* Check the DP.stat_vdi function works *) -let test_stat sr vdi = - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - let dp1 = datapath_of_id "dp1" (* will be r/w *) - and dp2 = datapath_of_id "dp2" (* will be r/o *) in - expect "Params _" (function _ -> true) - (Client.VDI.attach ~dbg ~dp:dp1 ~sr ~vdi ~read_write:true); - (* dp1: Attached(RW) dp2: Detached superstate: Attached(RW) *) - expect "Attached(RW)" (dp_is dp1 (Vdi_automaton.Attached Vdi_automaton.RW)) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "Params _" (function _ -> true) - (Client.VDI.attach ~dbg ~dp:dp2 ~sr ~vdi ~read_write:false); - (* dp1: Attached(RW) dp2: Attached(RO) superstate: Attached(RW) *) - expect "Attached(RO)" (dp_is dp2 (Vdi_automaton.Attached Vdi_automaton.RO)) - 
(Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "Illegal transition" (fun () -> - try - Client.VDI.detach ~dbg ~dp:dp1 ~sr ~vdi; - false - with - | Illegal_transition(Vdi_automaton.Attached(Vdi_automaton.RW), Vdi_automaton.Attached(Vdi_automaton.RO)) -> true - | e -> false) (); - expect "()" (fun () -> true) - (Client.VDI.detach ~dbg ~dp:dp2 ~sr ~vdi); - (* dp1: Attached(RW) dp2: Detached superstate: Attached(RW) *) - expect "Detached" (dp_is dp2 Vdi_automaton.Detached) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "()" (fun x -> x = ()) - (Client.VDI.detach ~dbg ~dp:dp1 ~sr ~vdi); - (* dp1: Detached dp1: Detached superstate: Detached *) - expect "Detached" (function x -> x.superstate = Vdi_automaton.Detached) - (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr) +let test_stat sr vdi = + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + let dp1 = datapath_of_id "dp1" (* will be r/w *) + and dp2 = datapath_of_id "dp2" (* will be r/o *) in + expect "Params _" (function _ -> true) + (Client.VDI.attach ~dbg ~dp:dp1 ~sr ~vdi ~read_write:true); + (* dp1: Attached(RW) dp2: Detached superstate: Attached(RW) *) + expect "Attached(RW)" (dp_is dp1 (Vdi_automaton.Attached Vdi_automaton.RW)) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "Params _" (function _ -> true) + (Client.VDI.attach ~dbg ~dp:dp2 ~sr ~vdi ~read_write:false); + (* dp1: Attached(RW) dp2: Attached(RO) superstate: Attached(RW) *) + expect "Attached(RO)" (dp_is dp2 (Vdi_automaton.Attached Vdi_automaton.RO)) + 
(Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "Illegal transition" (fun () -> + try + Client.VDI.detach ~dbg ~dp:dp1 ~sr ~vdi; + false + with + | Illegal_transition(Vdi_automaton.Attached(Vdi_automaton.RW), Vdi_automaton.Attached(Vdi_automaton.RO)) -> true + | e -> false) (); + expect "()" (fun () -> true) + (Client.VDI.detach ~dbg ~dp:dp2 ~sr ~vdi); + (* dp1: Attached(RW) dp2: Detached superstate: Attached(RW) *) + expect "Detached" (dp_is dp2 Vdi_automaton.Detached) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "Attached(RW)" (function x -> x.superstate = Vdi_automaton.Attached Vdi_automaton.RW) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "()" (fun x -> x = ()) + (Client.VDI.detach ~dbg ~dp:dp1 ~sr ~vdi); + (* dp1: Detached dp1: Detached superstate: Detached *) + expect "Detached" (function x -> x.superstate = Vdi_automaton.Detached) + (Client.DP.stat_vdi ~dbg ~sr ~vdi ()); + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr) (* Manual cleanup with VDI.detach (fails) and then SR.detach (succeeds) *) let test_sr_detach_cleanup_errors_1 sr vdi = - Debug_print_impl.VDI.working := false; - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - let dp = datapath_of_id "datapath" in - leak dp sr true vdi; - expect "()" (fun x -> x = ()) - (Client.VDI.deactivate ~dbg ~dp ~sr ~vdi); - if vdi = "error2" - then expect "backend_error in test_sr_detach_cleanup" backend_error - (fun () -> Client.VDI.detach ~dbg ~dp ~sr ~vdi) - else expect "internal_error in test sr detach cleanup" internal_error - (fun () -> Client.VDI.detach ~dbg ~dp ~sr ~vdi); - debug "Detaching and cleaning up"; - Debug_print_impl.VDI.working := true; - (* Should succeed because the VDI.attach state will have been forgotten: FH2 *) - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr) + Debug_print_impl.VDI.working := 
false; + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + let dp = datapath_of_id "datapath" in + leak dp sr true vdi; + expect "()" (fun x -> x = ()) + (Client.VDI.deactivate ~dbg ~dp ~sr ~vdi); + if vdi = "error2" + then expect "backend_error in test_sr_detach_cleanup" backend_error + (fun () -> Client.VDI.detach ~dbg ~dp ~sr ~vdi) + else expect "internal_error in test sr detach cleanup" internal_error + (fun () -> Client.VDI.detach ~dbg ~dp ~sr ~vdi); + debug "Detaching and cleaning up"; + Debug_print_impl.VDI.working := true; + (* Should succeed because the VDI.attach state will have been forgotten: FH2 *) + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr) let test_sr_detach_cleanup_errors_2 sr vdi = - Debug_print_impl.VDI.working := false; - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - let dp = datapath_of_id "datapath" in - leak dp sr true vdi; - if vdi = "error2" - then expect "backend_error" backend_error - (fun () -> Client.DP.destroy ~dbg ~dp ~allow_leak:false) - else expect "internal_error" internal_error - (fun () -> Client.DP.destroy ~dbg ~dp ~allow_leak:false); - Debug_print_impl.VDI.working := true; - debug "Attempting to attach RO (having failed a detach of a RW detach)"; - expect "Params _" (function _ -> true) - (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); - debug "Detaching and cleaning up"; - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr) + Debug_print_impl.VDI.working := false; + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + let dp = datapath_of_id "datapath" in + leak dp sr true vdi; + if vdi = "error2" + then expect "backend_error" backend_error + (fun () -> Client.DP.destroy ~dbg ~dp ~allow_leak:false) + else expect "internal_error" internal_error + (fun () -> Client.DP.destroy ~dbg ~dp ~allow_leak:false); + Debug_print_impl.VDI.working := true; + debug "Attempting to attach RO (having failed a detach of 
a RW detach)"; + expect "Params _" (function _ -> true) + (Client.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false); + debug "Detaching and cleaning up"; + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr) let create_vdi_test sr = - let dp = datapath_of_id "datapath" in - expect "()" (fun x -> x = ()) - (Client.SR.attach ~dbg ~sr ~device_config:[]); - let vdi_info = { - vdi = ""; - uuid = None; - content_id = ""; - name_label = "name_label"; - name_description = "name_description"; - virtual_size = 10L; - ty = "user"; - is_a_snapshot = false; - snapshot_time = ""; - snapshot_of = ""; - read_only = false; - physical_utilisation = 10L; - metadata_of_pool = ""; - persistent = true; - sm_config = []; - } in - expect "too_small_backend_error" too_small_backend_error - (fun () -> - let vdi_info = { vdi_info with sm_config = ["toosmall", ""] } in - Client.VDI.create ~dbg ~sr ~vdi_info); - let vdi = Client.VDI.create ~dbg ~sr ~vdi_info in - expect "attach_info" (fun _ -> true) - (Client.VDI.attach ~dbg ~dp ~sr ~vdi:vdi.vdi ~read_write:false); - debug "Detaching and cleaning up"; - expect "()" (fun x -> x = ()) - (Client.SR.detach ~dbg ~sr) + let dp = datapath_of_id "datapath" in + expect "()" (fun x -> x = ()) + (Client.SR.attach ~dbg ~sr ~device_config:[]); + let vdi_info = { + vdi = ""; + uuid = None; + content_id = ""; + name_label = "name_label"; + name_description = "name_description"; + virtual_size = 10L; + ty = "user"; + is_a_snapshot = false; + snapshot_time = ""; + snapshot_of = ""; + read_only = false; + physical_utilisation = 10L; + metadata_of_pool = ""; + persistent = true; + sm_config = []; + } in + expect "too_small_backend_error" too_small_backend_error + (fun () -> + let vdi_info = { vdi_info with sm_config = ["toosmall", ""] } in + Client.VDI.create ~dbg ~sr ~vdi_info); + let vdi = Client.VDI.create ~dbg ~sr ~vdi_info in + expect "attach_info" (fun _ -> true) + (Client.VDI.attach ~dbg ~dp ~sr ~vdi:vdi.vdi ~read_write:false); + debug "Detaching 
and cleaning up"; + expect "()" (fun x -> x = ()) + (Client.SR.detach ~dbg ~sr) let _ = - Storage_impl.print_debug := true; - Storage_impl.host_state_path := "/tmp/storage.db"; - Vdi_automaton.test (); - Unixext.unlink_safe !Storage_impl.host_state_path; - let s = Xcp_service.make ~path:Xapi_globs.storage_unix_domain_socket ~queue_name:"org.xen.xapi.storage" ~rpc_fn:(Server.process None) () in - info "Started service on org.xen.xapi.storage"; - let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in - - info "Listening on %s" Xapi_globs.storage_unix_domain_socket; - test_sr "sr"; - - test_sr_detach_cleanup_errors_1 "sr" "error2"; - test_sr_detach_cleanup_errors_1 "sr" "error"; - - test_sr_detach_cleanup_errors_2 "sr" "error2"; - test_sr_detach_cleanup_errors_2 "sr" "error"; - - test_stat "sr" "vdi"; - - create_vdi_test "sr"; - - if !total_errors = 0 then begin - info "OK"; - exit 0; - end else begin - info "%d errors detected" !total_errors; - exit 1 - end + Storage_impl.print_debug := true; + Storage_impl.host_state_path := "/tmp/storage.db"; + Vdi_automaton.test (); + Unixext.unlink_safe !Storage_impl.host_state_path; + let s = Xcp_service.make ~path:Xapi_globs.storage_unix_domain_socket ~queue_name:"org.xen.xapi.storage" ~rpc_fn:(Server.process None) () in + info "Started service on org.xen.xapi.storage"; + let (_: Thread.t) = Thread.create (fun () -> Xcp_service.serve_forever s) () in + + info "Listening on %s" Xapi_globs.storage_unix_domain_socket; + test_sr "sr"; + + test_sr_detach_cleanup_errors_1 "sr" "error2"; + test_sr_detach_cleanup_errors_1 "sr" "error"; + + test_sr_detach_cleanup_errors_2 "sr" "error2"; + test_sr_detach_cleanup_errors_2 "sr" "error"; + + test_stat "sr" "vdi"; + + create_vdi_test "sr"; + + if !total_errors = 0 then begin + info "OK"; + exit 0; + end else begin + info "%d errors detected" !total_errors; + exit 1 + end diff --git a/ocaml/xapi/storage_locks.ml b/ocaml/xapi/storage_locks.ml index 
8f704799beb..1375c6d1b9d 100644 --- a/ocaml/xapi/storage_locks.ml +++ b/ocaml/xapi/storage_locks.ml @@ -16,62 +16,62 @@ open Stdext.Threadext (** A table of 'instance' locks with a single master lock *) type ('a, 'b) t = { - m: Mutex.t; - c: Condition.t; - t: ('a, 'b) Hashtbl.t; - mutable master_lock: bool; (* Acquire this to prevent other locks being held *) + m: Mutex.t; + c: Condition.t; + t: ('a, 'b) Hashtbl.t; + mutable master_lock: bool; (* Acquire this to prevent other locks being held *) } let make () = { - m = Mutex.create (); - c = Condition.create (); - t = Hashtbl.create 10; - master_lock = false + m = Mutex.create (); + c = Condition.create (); + t = Hashtbl.create 10; + master_lock = false } (** Execute the function with the specified instance locked *) let with_instance_lock t key f = - let r = Locking_helpers.Lock ("SM/" ^ (Ref.really_pretty_and_small (Ref.of_string key))) in - Locking_helpers.Thread_state.waiting_for r; - Mutex.execute t.m - (fun () -> - (* Wait for the lock to be free (ie the table entry to be removed and the master lock to be released *) - while Hashtbl.mem t.t key || t.master_lock do Condition.wait t.c t.m done; - Hashtbl.replace t.t key () - ); - Locking_helpers.Thread_state.acquired r; - Stdext.Pervasiveext.finally f - (fun () -> - Mutex.execute t.m - (fun () -> - Hashtbl.remove t.t key; - Condition.broadcast t.c - ); - Locking_helpers.Thread_state.released r; - ) + let r = Locking_helpers.Lock ("SM/" ^ (Ref.really_pretty_and_small (Ref.of_string key))) in + Locking_helpers.Thread_state.waiting_for r; + Mutex.execute t.m + (fun () -> + (* Wait for the lock to be free (ie the table entry to be removed and the master lock to be released *) + while Hashtbl.mem t.t key || t.master_lock do Condition.wait t.c t.m done; + Hashtbl.replace t.t key () + ); + Locking_helpers.Thread_state.acquired r; + Stdext.Pervasiveext.finally f + (fun () -> + Mutex.execute t.m + (fun () -> + Hashtbl.remove t.t key; + Condition.broadcast t.c + ); + 
Locking_helpers.Thread_state.released r; + ) (** Execute the function with the master_lock held and no instance locks held *) let with_master_lock t f = - let r = Locking_helpers.Lock "SM" in - Locking_helpers.Thread_state.waiting_for r; - Mutex.execute t.m - (fun () -> - (* Wait for the master_lock to be released *) - while t.master_lock do Condition.wait t.c t.m done; - (* Grab the master_lock *) - t.master_lock <- true; - (* Wait for all instance locks to be released *) - while Hashtbl.length t.t > 0 do Condition.wait t.c t.m done - ); - Locking_helpers.Thread_state.acquired r; - Stdext.Pervasiveext.finally f - (fun () -> - Mutex.execute t.m - (fun () -> - t.master_lock <- false; - Condition.broadcast t.c - ); - Locking_helpers.Thread_state.released r; - ) + let r = Locking_helpers.Lock "SM" in + Locking_helpers.Thread_state.waiting_for r; + Mutex.execute t.m + (fun () -> + (* Wait for the master_lock to be released *) + while t.master_lock do Condition.wait t.c t.m done; + (* Grab the master_lock *) + t.master_lock <- true; + (* Wait for all instance locks to be released *) + while Hashtbl.length t.t > 0 do Condition.wait t.c t.m done + ); + Locking_helpers.Thread_state.acquired r; + Stdext.Pervasiveext.finally f + (fun () -> + Mutex.execute t.m + (fun () -> + t.master_lock <- false; + Condition.broadcast t.c + ); + Locking_helpers.Thread_state.released r; + ) diff --git a/ocaml/xapi/storage_migrate.ml b/ocaml/xapi/storage_migrate.ml index 940650eb79d..51bc9135b91 100644 --- a/ocaml/xapi/storage_migrate.ml +++ b/ocaml/xapi/storage_migrate.ml @@ -33,878 +33,878 @@ open Storage_task module State = struct - module Receive_state = struct - type t = { - sr : sr; - dummy_vdi : vdi; - leaf_vdi : vdi; - leaf_dp : dp; - parent_vdi : vdi; - remote_vdi : vdi; - } with rpc - end - - module Send_state = struct - type t = { - url : string; - dest_sr : sr; - remote_dp : dp; - local_dp : dp; - mirror_vdi : vdi; - remote_url : string; - tapdev : Tapctl.tapdev; - mutable failed 
: bool; - mutable watchdog : Updates.Scheduler.t option; - } with rpc - end - - module Copy_state = struct - type t = { - base_dp : dp; - leaf_dp : dp; - remote_dp : dp; - dest_sr: sr; - copy_vdi: vdi; - remote_url : string; - } with rpc - end - - let loaded = ref false - let mutex = Mutex.create () - - type send_table = (string, Send_state.t) Hashtbl.t with rpc - type recv_table = (string, Receive_state.t) Hashtbl.t with rpc - type copy_table = (string, Copy_state.t) Hashtbl.t with rpc - - type osend - type orecv - type ocopy - - type _ operation = - | Send_op : Send_state.t -> osend operation - | Recv_op : Receive_state.t -> orecv operation - | Copy_op : Copy_state.t -> ocopy operation - - type _ table = - | Send_table : send_table -> osend table - | Recv_table : recv_table -> orecv table - | Copy_table : copy_table -> ocopy table - - let active_send : send_table = Hashtbl.create 10 - let active_recv : recv_table = Hashtbl.create 10 - let active_copy : copy_table = Hashtbl.create 10 - - let table_of_op : type a. a operation -> a table = function - | Send_op _ -> Send_table active_send - | Recv_op _ -> Recv_table active_recv - | Copy_op _ -> Copy_table active_copy - - let persist_root = ref "/var/run/nonpersistent" - let path_of_table : type a. a table -> string = function - | Send_table _ -> Filename.concat !persist_root "storage_mirrors_send.json" - | Recv_table _ -> Filename.concat !persist_root "storage_mirrors_recv.json" - | Copy_table _ -> Filename.concat !persist_root "storage_mirrors_copy.json" - - let rpc_of_table : type a. a table -> Rpc.t = function - | Send_table send_table -> rpc_of_send_table send_table - | Recv_table recv_table -> rpc_of_recv_table recv_table - | Copy_table copy_table -> rpc_of_copy_table copy_table - - let to_string : type a. a table -> string = - (fun table -> rpc_of_table table |> Jsonrpc.to_string) - - let rpc_of_path path = - Unixext.string_of_file path |> Jsonrpc.of_string - - let load_one : type a. 
a table -> unit = (fun table -> - let rpc = path_of_table table |> rpc_of_path in - match table with - | Send_table table -> - Hashtbl.iter (Hashtbl.replace table) (send_table_of_rpc rpc) - | Recv_table table -> - Hashtbl.iter (Hashtbl.replace table) (recv_table_of_rpc rpc) - | Copy_table table -> - Hashtbl.iter (Hashtbl.replace table) (copy_table_of_rpc rpc)) - - let load () = - try load_one (Send_table active_send) with _ -> (); - try load_one (Recv_table active_recv) with _ -> (); - try load_one (Copy_table active_copy) with _ -> (); - loaded := true - - let save_one : type a. a table -> unit = (fun table -> - to_string table |> Unixext.write_string_to_file (path_of_table table)) - - let save () = - Unixext.mkdir_rec !persist_root 0o700; - save_one (Send_table active_send); - save_one (Recv_table active_recv); - save_one (Copy_table active_copy) - - let access_table ~save_after f table = - Mutex.execute mutex - (fun () -> - if not !loaded then load (); - let result = f table in - if save_after then save (); - result) - - let map_of () = - let contents_of table = Hashtbl.fold (fun k v acc -> (k,v)::acc) table [] in - let send_ops = access_table ~save_after:false contents_of active_send in - let recv_ops = access_table ~save_after:false contents_of active_recv in - let copy_ops = access_table ~save_after:false contents_of active_copy in - send_ops, recv_ops, copy_ops - - let add : type a. string -> a operation -> unit = fun id op -> - let add' : type a. 
string -> a operation -> a table -> unit = fun id op table -> - match (table, op) with - | Send_table table, Send_op op -> Hashtbl.replace table id op - | Recv_table table, Recv_op op -> Hashtbl.replace table id op - | Copy_table table, Copy_op op -> Hashtbl.replace table id op - in - access_table ~save_after:true - (fun table -> add' id op table) (table_of_op op) - - let find id table = - access_table ~save_after:false - (fun table -> try Some (Hashtbl.find table id) with Not_found -> None) - table - - let remove id table = - access_table ~save_after:true - (fun table -> Hashtbl.remove table id) - table - - let clear () = - access_table ~save_after:true (fun table -> Hashtbl.clear table) active_send; - access_table ~save_after:true (fun table -> Hashtbl.clear table) active_recv; - access_table ~save_after:true (fun table -> Hashtbl.clear table) active_copy - - let remove_local_mirror id = remove id active_send - let remove_receive_mirror id = remove id active_recv - let remove_copy id = remove id active_copy - - let find_active_local_mirror id = find id active_send - let find_active_receive_mirror id = find id active_recv - let find_active_copy id = find id active_copy - - let mirror_id_of (sr,vdi) = Printf.sprintf "%s/%s" sr vdi - let of_mirror_id id = match String.split '/' id with - | sr::rest -> (sr,String.concat "/" rest) - | _ -> failwith "Bad id" - let copy_id_of (sr,vdi) = Printf.sprintf "copy/%s/%s" sr vdi - let of_copy_id id = - match String.split '/' id with - | op :: sr :: rest when op="copy" -> (sr,(String.concat "/" rest)) - | _ -> failwith "Bad id" + module Receive_state = struct + type t = { + sr : sr; + dummy_vdi : vdi; + leaf_vdi : vdi; + leaf_dp : dp; + parent_vdi : vdi; + remote_vdi : vdi; + } with rpc + end + + module Send_state = struct + type t = { + url : string; + dest_sr : sr; + remote_dp : dp; + local_dp : dp; + mirror_vdi : vdi; + remote_url : string; + tapdev : Tapctl.tapdev; + mutable failed : bool; + mutable watchdog : 
Updates.Scheduler.t option; + } with rpc + end + + module Copy_state = struct + type t = { + base_dp : dp; + leaf_dp : dp; + remote_dp : dp; + dest_sr: sr; + copy_vdi: vdi; + remote_url : string; + } with rpc + end + + let loaded = ref false + let mutex = Mutex.create () + + type send_table = (string, Send_state.t) Hashtbl.t with rpc + type recv_table = (string, Receive_state.t) Hashtbl.t with rpc + type copy_table = (string, Copy_state.t) Hashtbl.t with rpc + + type osend + type orecv + type ocopy + + type _ operation = + | Send_op : Send_state.t -> osend operation + | Recv_op : Receive_state.t -> orecv operation + | Copy_op : Copy_state.t -> ocopy operation + + type _ table = + | Send_table : send_table -> osend table + | Recv_table : recv_table -> orecv table + | Copy_table : copy_table -> ocopy table + + let active_send : send_table = Hashtbl.create 10 + let active_recv : recv_table = Hashtbl.create 10 + let active_copy : copy_table = Hashtbl.create 10 + + let table_of_op : type a. a operation -> a table = function + | Send_op _ -> Send_table active_send + | Recv_op _ -> Recv_table active_recv + | Copy_op _ -> Copy_table active_copy + + let persist_root = ref "/var/run/nonpersistent" + let path_of_table : type a. a table -> string = function + | Send_table _ -> Filename.concat !persist_root "storage_mirrors_send.json" + | Recv_table _ -> Filename.concat !persist_root "storage_mirrors_recv.json" + | Copy_table _ -> Filename.concat !persist_root "storage_mirrors_copy.json" + + let rpc_of_table : type a. a table -> Rpc.t = function + | Send_table send_table -> rpc_of_send_table send_table + | Recv_table recv_table -> rpc_of_recv_table recv_table + | Copy_table copy_table -> rpc_of_copy_table copy_table + + let to_string : type a. a table -> string = + (fun table -> rpc_of_table table |> Jsonrpc.to_string) + + let rpc_of_path path = + Unixext.string_of_file path |> Jsonrpc.of_string + + let load_one : type a. 
a table -> unit = (fun table -> + let rpc = path_of_table table |> rpc_of_path in + match table with + | Send_table table -> + Hashtbl.iter (Hashtbl.replace table) (send_table_of_rpc rpc) + | Recv_table table -> + Hashtbl.iter (Hashtbl.replace table) (recv_table_of_rpc rpc) + | Copy_table table -> + Hashtbl.iter (Hashtbl.replace table) (copy_table_of_rpc rpc)) + + let load () = + try load_one (Send_table active_send) with _ -> (); + try load_one (Recv_table active_recv) with _ -> (); + try load_one (Copy_table active_copy) with _ -> (); + loaded := true + + let save_one : type a. a table -> unit = (fun table -> + to_string table |> Unixext.write_string_to_file (path_of_table table)) + + let save () = + Unixext.mkdir_rec !persist_root 0o700; + save_one (Send_table active_send); + save_one (Recv_table active_recv); + save_one (Copy_table active_copy) + + let access_table ~save_after f table = + Mutex.execute mutex + (fun () -> + if not !loaded then load (); + let result = f table in + if save_after then save (); + result) + + let map_of () = + let contents_of table = Hashtbl.fold (fun k v acc -> (k,v)::acc) table [] in + let send_ops = access_table ~save_after:false contents_of active_send in + let recv_ops = access_table ~save_after:false contents_of active_recv in + let copy_ops = access_table ~save_after:false contents_of active_copy in + send_ops, recv_ops, copy_ops + + let add : type a. string -> a operation -> unit = fun id op -> + let add' : type a. 
string -> a operation -> a table -> unit = fun id op table -> + match (table, op) with + | Send_table table, Send_op op -> Hashtbl.replace table id op + | Recv_table table, Recv_op op -> Hashtbl.replace table id op + | Copy_table table, Copy_op op -> Hashtbl.replace table id op + in + access_table ~save_after:true + (fun table -> add' id op table) (table_of_op op) + + let find id table = + access_table ~save_after:false + (fun table -> try Some (Hashtbl.find table id) with Not_found -> None) + table + + let remove id table = + access_table ~save_after:true + (fun table -> Hashtbl.remove table id) + table + + let clear () = + access_table ~save_after:true (fun table -> Hashtbl.clear table) active_send; + access_table ~save_after:true (fun table -> Hashtbl.clear table) active_recv; + access_table ~save_after:true (fun table -> Hashtbl.clear table) active_copy + + let remove_local_mirror id = remove id active_send + let remove_receive_mirror id = remove id active_recv + let remove_copy id = remove id active_copy + + let find_active_local_mirror id = find id active_send + let find_active_receive_mirror id = find id active_recv + let find_active_copy id = find id active_copy + + let mirror_id_of (sr,vdi) = Printf.sprintf "%s/%s" sr vdi + let of_mirror_id id = match String.split '/' id with + | sr::rest -> (sr,String.concat "/" rest) + | _ -> failwith "Bad id" + let copy_id_of (sr,vdi) = Printf.sprintf "copy/%s/%s" sr vdi + let of_copy_id id = + match String.split '/' id with + | op :: sr :: rest when op="copy" -> (sr,(String.concat "/" rest)) + | _ -> failwith "Bad id" end let rec rpc ~srcstr ~dststr url call = - let result = XMLRPC_protocol.rpc ~transport:(transport_of_url url) - ~srcstr ~dststr ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call - in - if not result.Rpc.success then begin - debug "Got failure: checking for redirect"; - debug "Call was: %s" (Rpc.string_of_call call); - debug 
"result.contents: %s" (Jsonrpc.to_string result.Rpc.contents); - match Storage_interface.Exception.exnty_of_rpc result.Rpc.contents with - | Storage_interface.Exception.Redirect (Some ip) -> - let open Http.Url in - let newurl = - match url with - | (Http h, d) -> - (Http {h with host=ip}, d) - | _ -> - remote_url ip in - debug "Redirecting to ip: %s" ip; - let r = rpc ~srcstr ~dststr newurl call in - debug "Successfully redirected. Returning"; - r - | _ -> - debug "Not a redirect"; - result - end - else result + let result = XMLRPC_protocol.rpc ~transport:(transport_of_url url) + ~srcstr ~dststr ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call + in + if not result.Rpc.success then begin + debug "Got failure: checking for redirect"; + debug "Call was: %s" (Rpc.string_of_call call); + debug "result.contents: %s" (Jsonrpc.to_string result.Rpc.contents); + match Storage_interface.Exception.exnty_of_rpc result.Rpc.contents with + | Storage_interface.Exception.Redirect (Some ip) -> + let open Http.Url in + let newurl = + match url with + | (Http h, d) -> + (Http {h with host=ip}, d) + | _ -> + remote_url ip in + debug "Redirecting to ip: %s" ip; + let r = rpc ~srcstr ~dststr newurl call in + debug "Successfully redirected. 
Returning"; + r + | _ -> + debug "Not a redirect"; + result + end + else result let vdi_info x = - match x with - | Some (Vdi_info v) -> v - | _ -> failwith "Runtime type error: expecting Vdi_info" + match x with + | Some (Vdi_info v) -> v + | _ -> failwith "Runtime type error: expecting Vdi_info" module Local = Client(struct let rpc call = rpc ~srcstr:"smapiv2" ~dststr:"smapiv2" (local_url ()) call end) let tapdisk_of_attach_info attach_info = - let path = attach_info.params in - try - match Tapctl.of_device (Tapctl.create ()) path with - | tapdev, _, _ -> Some tapdev - with Tapctl.Not_blktap -> - debug "Device %s is not controlled by blktap" path; - None - | Tapctl.Not_a_device -> - debug "%s is not a device" path; - None - | _ -> - debug "Device %s has an unknown driver" path; - None + let path = attach_info.params in + try + match Tapctl.of_device (Tapctl.create ()) path with + | tapdev, _, _ -> Some tapdev + with Tapctl.Not_blktap -> + debug "Device %s is not controlled by blktap" path; + None + | Tapctl.Not_a_device -> + debug "%s is not a device" path; + None + | _ -> + debug "Device %s has an unknown driver" path; + None let with_activated_disk ~dbg ~sr ~vdi ~dp f = - let path = - Opt.map (fun vdi -> - let attach_info = Local.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false in - let path = attach_info.params in - Local.VDI.activate ~dbg ~dp ~sr ~vdi; - path) vdi in - finally - (fun () -> f path) - (fun () -> - Opt.iter - (fun vdi -> - Local.VDI.deactivate ~dbg ~dp ~sr ~vdi; - Local.VDI.detach ~dbg ~dp ~sr ~vdi) - vdi) + let path = + Opt.map (fun vdi -> + let attach_info = Local.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write:false in + let path = attach_info.params in + Local.VDI.activate ~dbg ~dp ~sr ~vdi; + path) vdi in + finally + (fun () -> f path) + (fun () -> + Opt.iter + (fun vdi -> + Local.VDI.deactivate ~dbg ~dp ~sr ~vdi; + Local.VDI.detach ~dbg ~dp ~sr ~vdi) + vdi) let perform_cleanup_actions = - List.iter - (fun f -> - try f () with e -> error "Caught %s 
while performing cleanup actions" (Printexc.to_string e) - ) + List.iter + (fun f -> + try f () with e -> error "Caught %s while performing cleanup actions" (Printexc.to_string e) + ) let progress_callback start len t y = - let new_progress = start +. (y *. len) in - t.Storage_task.state <- Task.Pending new_progress; - signal t.Storage_task.id + let new_progress = start +. (y *. len) in + t.Storage_task.state <- Task.Pending new_progress; + signal t.Storage_task.id let copy' ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi = - let remote_url = Http.Url.of_string url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - debug "copy local=%s/%s url=%s remote=%s/%s" sr vdi url dest dest_vdi; - - (* Check the remote SR exists *) - let srs = Remote.SR.list ~dbg in - if not(List.mem dest srs) - then failwith (Printf.sprintf "Remote SR %s not found" dest); - - let vdis = Remote.SR.scan ~dbg ~sr:dest in - let remote_vdi = - try List.find (fun x -> x.vdi = dest_vdi) vdis - with Not_found -> failwith (Printf.sprintf "Remote VDI %s not found" dest_vdi) - in - - let dest_content_id = remote_vdi.content_id in - - (* Find the local VDI *) - let vdis = Local.SR.scan ~dbg ~sr in - let local_vdi = - try List.find (fun x -> x.vdi = vdi) vdis - with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in - - debug "copy local=%s/%s content_id=%s" sr vdi local_vdi.content_id; - debug "copy remote=%s/%s content_id=%s" dest dest_vdi remote_vdi.content_id; - - if local_vdi.virtual_size > remote_vdi.virtual_size then begin - (* This should never happen provided the higher-level logic is working properly *) - error "copy local=%s/%s virtual_size=%Ld > remote=%s/%s virtual_size = %Ld" sr vdi local_vdi.virtual_size dest dest_vdi remote_vdi.virtual_size; - failwith "local VDI is larger than the remote VDI"; - end; - - let on_fail : (unit -> unit) list ref = ref [] in - - let base_vdi = - try - let x = (List.find (fun x -> 
x.content_id = dest_content_id) vdis).vdi in - debug "local VDI %s has content_id = %s; we will perform an incremental copy" x dest_content_id; - Some x - with _ -> - debug "no local VDI has content_id = %s; we will perform a full copy" dest_content_id; - None - in - - - try - let remote_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in - let base_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in - let leaf_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in - let dest_vdi_url = Http.Url.set_uri remote_url (Printf.sprintf "%s/nbd/%s/%s/%s" (Http.Url.get_uri remote_url) dest dest_vdi remote_dp) |> Http.Url.to_string in - - debug "copy remote=%s/%s NBD URL = %s" dest dest_vdi dest_vdi_url; - - let id=State.copy_id_of (sr,vdi) in - debug "Persisting state for copy (id=%s)" id; - State.add id State.(Copy_op Copy_state.({ - base_dp; leaf_dp; remote_dp; dest_sr=dest; copy_vdi=remote_vdi.vdi; remote_url=url})); - - SMPERF.debug "mirror.copy: copy initiated local_vdi:%s dest_vdi:%s" vdi dest_vdi; - - Pervasiveext.finally (fun () -> - debug "activating RW datapath %s on remote=%s/%s" remote_dp dest dest_vdi; - ignore(Remote.VDI.attach ~dbg ~sr:dest ~vdi:dest_vdi ~dp:remote_dp ~read_write:true); - Remote.VDI.activate ~dbg ~dp:remote_dp ~sr:dest ~vdi:dest_vdi; - - with_activated_disk ~dbg ~sr ~vdi:base_vdi ~dp:base_dp - (fun base_path -> - with_activated_disk ~dbg ~sr ~vdi:(Some vdi) ~dp:leaf_dp - (fun src -> - let dd = Sparse_dd_wrapper.start ~progress_cb:(progress_callback 0.05 0.9 task) ?base:base_path true (Opt.unbox src) - dest_vdi_url remote_vdi.virtual_size in - Storage_task.with_cancel task - (fun () -> Sparse_dd_wrapper.cancel dd) - (fun () -> - try Sparse_dd_wrapper.wait dd - with Sparse_dd_wrapper.Cancelled -> Storage_task.raise_cancelled task) - ) - ); - ) - (fun () -> - Remote.DP.destroy ~dbg ~dp:remote_dp ~allow_leak:false; - State.remove_copy id - ); - - SMPERF.debug "mirror.copy: copy complete local_vdi:%s dest_vdi:%s" vdi dest_vdi; - - debug "setting remote=%s/%s 
content_id <- %s" dest dest_vdi local_vdi.content_id; - Remote.VDI.set_content_id ~dbg ~sr:dest ~vdi:dest_vdi ~content_id:local_vdi.content_id; - (* PR-1255: XXX: this is useful because we don't have content_ids by default *) - debug "setting local=%s/%s content_id <- %s" sr local_vdi.vdi local_vdi.content_id; - Local.VDI.set_content_id ~dbg ~sr ~vdi:local_vdi.vdi ~content_id:local_vdi.content_id; - Some (Vdi_info remote_vdi) - with e -> - error "Caught %s: performing cleanup actions" (Printexc.to_string e); - perform_cleanup_actions !on_fail; - raise e - - -let copy_into ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi = - copy' ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi + let remote_url = Http.Url.of_string url in + let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + debug "copy local=%s/%s url=%s remote=%s/%s" sr vdi url dest dest_vdi; + + (* Check the remote SR exists *) + let srs = Remote.SR.list ~dbg in + if not(List.mem dest srs) + then failwith (Printf.sprintf "Remote SR %s not found" dest); + + let vdis = Remote.SR.scan ~dbg ~sr:dest in + let remote_vdi = + try List.find (fun x -> x.vdi = dest_vdi) vdis + with Not_found -> failwith (Printf.sprintf "Remote VDI %s not found" dest_vdi) + in + + let dest_content_id = remote_vdi.content_id in + + (* Find the local VDI *) + let vdis = Local.SR.scan ~dbg ~sr in + let local_vdi = + try List.find (fun x -> x.vdi = vdi) vdis + with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in + + debug "copy local=%s/%s content_id=%s" sr vdi local_vdi.content_id; + debug "copy remote=%s/%s content_id=%s" dest dest_vdi remote_vdi.content_id; + + if local_vdi.virtual_size > remote_vdi.virtual_size then begin + (* This should never happen provided the higher-level logic is working properly *) + error "copy local=%s/%s virtual_size=%Ld > remote=%s/%s virtual_size = %Ld" sr vdi local_vdi.virtual_size dest dest_vdi remote_vdi.virtual_size; + failwith "local VDI is 
larger than the remote VDI"; + end; + + let on_fail : (unit -> unit) list ref = ref [] in + + let base_vdi = + try + let x = (List.find (fun x -> x.content_id = dest_content_id) vdis).vdi in + debug "local VDI %s has content_id = %s; we will perform an incremental copy" x dest_content_id; + Some x + with _ -> + debug "no local VDI has content_id = %s; we will perform a full copy" dest_content_id; + None + in + + + try + let remote_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in + let base_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in + let leaf_dp = Uuid.string_of_uuid (Uuid.make_uuid ()) in + let dest_vdi_url = Http.Url.set_uri remote_url (Printf.sprintf "%s/nbd/%s/%s/%s" (Http.Url.get_uri remote_url) dest dest_vdi remote_dp) |> Http.Url.to_string in + + debug "copy remote=%s/%s NBD URL = %s" dest dest_vdi dest_vdi_url; + + let id=State.copy_id_of (sr,vdi) in + debug "Persisting state for copy (id=%s)" id; + State.add id State.(Copy_op Copy_state.({ + base_dp; leaf_dp; remote_dp; dest_sr=dest; copy_vdi=remote_vdi.vdi; remote_url=url})); + + SMPERF.debug "mirror.copy: copy initiated local_vdi:%s dest_vdi:%s" vdi dest_vdi; + + Pervasiveext.finally (fun () -> + debug "activating RW datapath %s on remote=%s/%s" remote_dp dest dest_vdi; + ignore(Remote.VDI.attach ~dbg ~sr:dest ~vdi:dest_vdi ~dp:remote_dp ~read_write:true); + Remote.VDI.activate ~dbg ~dp:remote_dp ~sr:dest ~vdi:dest_vdi; + + with_activated_disk ~dbg ~sr ~vdi:base_vdi ~dp:base_dp + (fun base_path -> + with_activated_disk ~dbg ~sr ~vdi:(Some vdi) ~dp:leaf_dp + (fun src -> + let dd = Sparse_dd_wrapper.start ~progress_cb:(progress_callback 0.05 0.9 task) ?base:base_path true (Opt.unbox src) + dest_vdi_url remote_vdi.virtual_size in + Storage_task.with_cancel task + (fun () -> Sparse_dd_wrapper.cancel dd) + (fun () -> + try Sparse_dd_wrapper.wait dd + with Sparse_dd_wrapper.Cancelled -> Storage_task.raise_cancelled task) + ) + ); + ) + (fun () -> + Remote.DP.destroy ~dbg ~dp:remote_dp ~allow_leak:false; + 
State.remove_copy id + ); + + SMPERF.debug "mirror.copy: copy complete local_vdi:%s dest_vdi:%s" vdi dest_vdi; + + debug "setting remote=%s/%s content_id <- %s" dest dest_vdi local_vdi.content_id; + Remote.VDI.set_content_id ~dbg ~sr:dest ~vdi:dest_vdi ~content_id:local_vdi.content_id; + (* PR-1255: XXX: this is useful because we don't have content_ids by default *) + debug "setting local=%s/%s content_id <- %s" sr local_vdi.vdi local_vdi.content_id; + Local.VDI.set_content_id ~dbg ~sr ~vdi:local_vdi.vdi ~content_id:local_vdi.content_id; + Some (Vdi_info remote_vdi) + with e -> + error "Caught %s: performing cleanup actions" (Printexc.to_string e); + perform_cleanup_actions !on_fail; + raise e + + +let copy_into ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi = + copy' ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi let remove_from_sm_config vdi_info key = { vdi_info with sm_config = List.filter (fun (k,v) -> k <> key) vdi_info.sm_config } let add_to_sm_config vdi_info key value = - let vdi_info = remove_from_sm_config vdi_info key in - { vdi_info with sm_config = (key,value) :: vdi_info.sm_config } + let vdi_info = remove_from_sm_config vdi_info key in + { vdi_info with sm_config = (key,value) :: vdi_info.sm_config } let stop ~dbg ~id = - (* Find the local VDI *) - let alm = State.find_active_local_mirror id in - match alm with - | Some alm -> - let sr,vdi = State.of_mirror_id id in - let vdis = Local.SR.scan ~dbg ~sr in - let local_vdi = - try List.find (fun x -> x.vdi = vdi) vdis - with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in - let local_vdi = add_to_sm_config local_vdi "mirror" "null" in - let local_vdi = remove_from_sm_config local_vdi "base_mirror" in - (* Disable mirroring on the local machine *) - let snapshot = Local.VDI.snapshot ~dbg ~sr ~vdi_info:local_vdi in - Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi; - (* Destroy the snapshot, if it still exists *) - let snap = try Some (List.find (fun x -> List.mem_assoc "base_mirror" x.sm_config 
&& List.assoc "base_mirror" x.sm_config = id) vdis) with _ -> None in - begin - match snap with - | Some s -> - debug "Found snapshot VDI: %s" s.vdi; - Local.VDI.destroy ~dbg ~sr ~vdi:s.vdi - | None -> - debug "Snapshot VDI already cleaned up" - end; - let remote_url = Http.Url.of_string alm.State.Send_state.remote_url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - (try Remote.DATA.MIRROR.receive_cancel ~dbg ~id with _ -> ()); - State.remove_local_mirror id - | None -> - raise (Does_not_exist ("mirror",id)) + (* Find the local VDI *) + let alm = State.find_active_local_mirror id in + match alm with + | Some alm -> + let sr,vdi = State.of_mirror_id id in + let vdis = Local.SR.scan ~dbg ~sr in + let local_vdi = + try List.find (fun x -> x.vdi = vdi) vdis + with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in + let local_vdi = add_to_sm_config local_vdi "mirror" "null" in + let local_vdi = remove_from_sm_config local_vdi "base_mirror" in + (* Disable mirroring on the local machine *) + let snapshot = Local.VDI.snapshot ~dbg ~sr ~vdi_info:local_vdi in + Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi; + (* Destroy the snapshot, if it still exists *) + let snap = try Some (List.find (fun x -> List.mem_assoc "base_mirror" x.sm_config && List.assoc "base_mirror" x.sm_config = id) vdis) with _ -> None in + begin + match snap with + | Some s -> + debug "Found snapshot VDI: %s" s.vdi; + Local.VDI.destroy ~dbg ~sr ~vdi:s.vdi + | None -> + debug "Snapshot VDI already cleaned up" + end; + let remote_url = Http.Url.of_string alm.State.Send_state.remote_url in + let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + (try Remote.DATA.MIRROR.receive_cancel ~dbg ~id with _ -> ()); + State.remove_local_mirror id + | None -> + raise (Does_not_exist ("mirror",id)) let start' ~task ~dbg ~sr ~vdi ~dp ~url ~dest = - debug "Mirror.start sr:%s vdi:%s 
url:%s dest:%s" sr vdi url dest; - SMPERF.debug "mirror.start called sr:%s vdi:%s url:%s dest:%s" sr vdi url dest; - let remote_url = Http.Url.of_string url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - - (* Find the local VDI *) - let vdis = Local.SR.scan ~dbg ~sr in - let local_vdi = - try List.find (fun x -> x.vdi = vdi) vdis - with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in - - let id = State.mirror_id_of (sr,local_vdi.vdi) in - - (* A list of cleanup actions to perform if the operation should fail. *) - let on_fail : (unit -> unit) list ref = ref [] in - try - let similar_vdis = Local.VDI.similar_content ~dbg ~sr ~vdi in - let similars = List.filter (fun x -> x <> "") (List.map (fun vdi -> vdi.content_id) similar_vdis) in - debug "Similar VDIs to %s = [ %s ]" vdi (String.concat "; " (List.map (fun x -> Printf.sprintf "(vdi=%s,content_id=%s)" x.vdi x.content_id) vdis)); - let result_ty = Remote.DATA.MIRROR.receive_start ~dbg ~sr:dest ~vdi_info:local_vdi ~id ~similar:similars in - let result = match result_ty with - Mirror.Vhd_mirror x -> x - in - - (* Enable mirroring on the local machine *) - let mirror_dp = result.Mirror.mirror_datapath in - - let uri = (Printf.sprintf "/services/SM/nbd/%s/%s/%s" dest result.Mirror.mirror_vdi.vdi mirror_dp) in - let dest_url = Http.Url.set_uri remote_url uri in - let request = Http.Request.make ~query:(Http.Url.get_query_params dest_url) ~version:"1.0" ~user_agent:"smapiv2" Http.Put uri in - let transport = Xmlrpc_client.transport_of_url dest_url in - debug "Searching for data path: %s" dp; - let attach_info = Local.DP.attach_info ~dbg:"nbd" ~sr ~vdi ~dp in - debug "Got it!"; - - let tapdev = match tapdisk_of_attach_info attach_info with - | Some tapdev -> - debug "Got tapdev"; - let pid = Tapctl.get_tapdisk_pid tapdev in - let path = Printf.sprintf "/var/run/blktap-control/nbdclient%d" pid in - with_transport transport (with_http 
request (fun (response, s) -> - debug "Here inside the with_transport"; - let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in - finally - (fun () -> - debug "Connecting to path: %s" path; - Unix.connect control_fd (Unix.ADDR_UNIX path); - let msg = dp in - let len = String.length msg in - let written = Unixext.send_fd control_fd msg 0 len [] s in - debug "Sent fd"; - if written <> len then begin - error "Failed to transfer fd to %s" path; - failwith "foo" - end) - (fun () -> - Unix.close control_fd))); - tapdev - | None -> - failwith "Not attached" - in - debug "Adding to active local mirrors: id=%s" id; - let alm = State.Send_state.({ - url; - dest_sr=dest; - remote_dp=mirror_dp; - local_dp=dp; - mirror_vdi=result.Mirror.mirror_vdi.vdi; - remote_url=url; - tapdev; - failed=false; - watchdog=None}) in - State.add id (State.Send_op alm); - debug "Added"; - - debug "About to snapshot VDI = %s" (string_of_vdi_info local_vdi); - let local_vdi = add_to_sm_config local_vdi "mirror" ("nbd:" ^ dp) in - let local_vdi = add_to_sm_config local_vdi "base_mirror" id in - let snapshot = Local.VDI.snapshot ~dbg ~sr ~vdi_info:local_vdi in - debug "Done!"; - - SMPERF.debug "mirror.start: snapshot created, mirror initiated vdi:%s snapshot_of:%s" - snapshot.vdi local_vdi.vdi ; - - on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi) :: !on_fail; - - begin - let rec inner () = - debug "tapdisk watchdog"; - let alm_opt = State.find_active_local_mirror id in - match alm_opt with - | Some alm -> - let stats = Tapctl.stats (Tapctl.create ()) tapdev in - if stats.Tapctl.Stats.nbd_mirror_failed = 1 then - Updates.add (Dynamic.Mirror id) updates; - alm.State.Send_state.watchdog <- Some (Updates.Scheduler.one_shot (Updates.Scheduler.Delta 5) "tapdisk_watchdog" inner) - | None -> () - in inner () - end; - - on_fail := (fun () -> stop ~dbg ~id) :: !on_fail; - (* Copy the snapshot to the remote *) - let new_parent = Storage_task.with_subtask task "copy" (fun () -> - 
copy' ~task ~dbg ~sr ~vdi:snapshot.vdi ~url ~dest ~dest_vdi:result.Mirror.copy_diffs_to) |> vdi_info in - debug "Local VDI %s == remote VDI %s" snapshot.vdi new_parent.vdi; - Remote.VDI.compose ~dbg ~sr:dest ~vdi1:result.Mirror.copy_diffs_to ~vdi2:result.Mirror.mirror_vdi.vdi; - Remote.VDI.remove_from_sm_config ~dbg ~sr:dest ~vdi:result.Mirror.mirror_vdi.vdi ~key:"base_mirror"; - debug "Local VDI %s now mirrored to remote VDI: %s" local_vdi.vdi result.Mirror.mirror_vdi.vdi; - - debug "Destroying dummy VDI %s on remote" result.Mirror.dummy_vdi; - Remote.VDI.destroy ~dbg ~sr:dest ~vdi:result.Mirror.dummy_vdi; - debug "Destroying snapshot %s on src" snapshot.vdi; - Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi; - - Some (Mirror_id id) - with - | Sr_not_attached(sr_uuid) -> - error " Caught exception %s:%s. Performing cleanup." Api_errors.sr_not_attached sr_uuid; - perform_cleanup_actions !on_fail; - raise (Api_errors.Server_error(Api_errors.sr_not_attached,[sr_uuid])) - | e -> - error "Caught %s: performing cleanup actions" (Printexc.to_string e); - perform_cleanup_actions !on_fail; - raise e + debug "Mirror.start sr:%s vdi:%s url:%s dest:%s" sr vdi url dest; + SMPERF.debug "mirror.start called sr:%s vdi:%s url:%s dest:%s" sr vdi url dest; + let remote_url = Http.Url.of_string url in + let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + + (* Find the local VDI *) + let vdis = Local.SR.scan ~dbg ~sr in + let local_vdi = + try List.find (fun x -> x.vdi = vdi) vdis + with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in + + let id = State.mirror_id_of (sr,local_vdi.vdi) in + + (* A list of cleanup actions to perform if the operation should fail. 
*) + let on_fail : (unit -> unit) list ref = ref [] in + try + let similar_vdis = Local.VDI.similar_content ~dbg ~sr ~vdi in + let similars = List.filter (fun x -> x <> "") (List.map (fun vdi -> vdi.content_id) similar_vdis) in + debug "Similar VDIs to %s = [ %s ]" vdi (String.concat "; " (List.map (fun x -> Printf.sprintf "(vdi=%s,content_id=%s)" x.vdi x.content_id) vdis)); + let result_ty = Remote.DATA.MIRROR.receive_start ~dbg ~sr:dest ~vdi_info:local_vdi ~id ~similar:similars in + let result = match result_ty with + Mirror.Vhd_mirror x -> x + in + + (* Enable mirroring on the local machine *) + let mirror_dp = result.Mirror.mirror_datapath in + + let uri = (Printf.sprintf "/services/SM/nbd/%s/%s/%s" dest result.Mirror.mirror_vdi.vdi mirror_dp) in + let dest_url = Http.Url.set_uri remote_url uri in + let request = Http.Request.make ~query:(Http.Url.get_query_params dest_url) ~version:"1.0" ~user_agent:"smapiv2" Http.Put uri in + let transport = Xmlrpc_client.transport_of_url dest_url in + debug "Searching for data path: %s" dp; + let attach_info = Local.DP.attach_info ~dbg:"nbd" ~sr ~vdi ~dp in + debug "Got it!"; + + let tapdev = match tapdisk_of_attach_info attach_info with + | Some tapdev -> + debug "Got tapdev"; + let pid = Tapctl.get_tapdisk_pid tapdev in + let path = Printf.sprintf "/var/run/blktap-control/nbdclient%d" pid in + with_transport transport (with_http request (fun (response, s) -> + debug "Here inside the with_transport"; + let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in + finally + (fun () -> + debug "Connecting to path: %s" path; + Unix.connect control_fd (Unix.ADDR_UNIX path); + let msg = dp in + let len = String.length msg in + let written = Unixext.send_fd control_fd msg 0 len [] s in + debug "Sent fd"; + if written <> len then begin + error "Failed to transfer fd to %s" path; + failwith "foo" + end) + (fun () -> + Unix.close control_fd))); + tapdev + | None -> + failwith "Not attached" + in + debug "Adding to active local 
mirrors: id=%s" id; + let alm = State.Send_state.({ + url; + dest_sr=dest; + remote_dp=mirror_dp; + local_dp=dp; + mirror_vdi=result.Mirror.mirror_vdi.vdi; + remote_url=url; + tapdev; + failed=false; + watchdog=None}) in + State.add id (State.Send_op alm); + debug "Added"; + + debug "About to snapshot VDI = %s" (string_of_vdi_info local_vdi); + let local_vdi = add_to_sm_config local_vdi "mirror" ("nbd:" ^ dp) in + let local_vdi = add_to_sm_config local_vdi "base_mirror" id in + let snapshot = Local.VDI.snapshot ~dbg ~sr ~vdi_info:local_vdi in + debug "Done!"; + + SMPERF.debug "mirror.start: snapshot created, mirror initiated vdi:%s snapshot_of:%s" + snapshot.vdi local_vdi.vdi ; + + on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi) :: !on_fail; + + begin + let rec inner () = + debug "tapdisk watchdog"; + let alm_opt = State.find_active_local_mirror id in + match alm_opt with + | Some alm -> + let stats = Tapctl.stats (Tapctl.create ()) tapdev in + if stats.Tapctl.Stats.nbd_mirror_failed = 1 then + Updates.add (Dynamic.Mirror id) updates; + alm.State.Send_state.watchdog <- Some (Updates.Scheduler.one_shot (Updates.Scheduler.Delta 5) "tapdisk_watchdog" inner) + | None -> () + in inner () + end; + + on_fail := (fun () -> stop ~dbg ~id) :: !on_fail; + (* Copy the snapshot to the remote *) + let new_parent = Storage_task.with_subtask task "copy" (fun () -> + copy' ~task ~dbg ~sr ~vdi:snapshot.vdi ~url ~dest ~dest_vdi:result.Mirror.copy_diffs_to) |> vdi_info in + debug "Local VDI %s == remote VDI %s" snapshot.vdi new_parent.vdi; + Remote.VDI.compose ~dbg ~sr:dest ~vdi1:result.Mirror.copy_diffs_to ~vdi2:result.Mirror.mirror_vdi.vdi; + Remote.VDI.remove_from_sm_config ~dbg ~sr:dest ~vdi:result.Mirror.mirror_vdi.vdi ~key:"base_mirror"; + debug "Local VDI %s now mirrored to remote VDI: %s" local_vdi.vdi result.Mirror.mirror_vdi.vdi; + + debug "Destroying dummy VDI %s on remote" result.Mirror.dummy_vdi; + Remote.VDI.destroy ~dbg ~sr:dest 
~vdi:result.Mirror.dummy_vdi; + debug "Destroying snapshot %s on src" snapshot.vdi; + Local.VDI.destroy ~dbg ~sr ~vdi:snapshot.vdi; + + Some (Mirror_id id) + with + | Sr_not_attached(sr_uuid) -> + error " Caught exception %s:%s. Performing cleanup." Api_errors.sr_not_attached sr_uuid; + perform_cleanup_actions !on_fail; + raise (Api_errors.Server_error(Api_errors.sr_not_attached,[sr_uuid])) + | e -> + error "Caught %s: performing cleanup actions" (Printexc.to_string e); + perform_cleanup_actions !on_fail; + raise e (* XXX: PR-1255: copy the xenopsd 'raise Exception' pattern *) let stop ~dbg ~id = - try - stop ~dbg ~id - with - | Backend_error(code, params) - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - raise e + try + stop ~dbg ~id + with + | Backend_error(code, params) + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + raise e let stat ~dbg ~id = - let recv_opt = State.find_active_receive_mirror id in - let send_opt = State.find_active_local_mirror id in - let copy_opt = State.find_active_copy id in - let open State in - let failed = match send_opt with - | Some send_state -> - let failed = - try - let stats = Tapctl.stats (Tapctl.create ()) send_state.Send_state.tapdev in - stats.Tapctl.Stats.nbd_mirror_failed = 1 - with e -> - debug "Using cached copy of failure status"; - send_state.Send_state.failed - in - send_state.Send_state.failed <- failed; - failed - | None -> false - in - let open Mirror in - let state = - (match recv_opt with Some _ -> [Receiving] | None -> []) @ - (match send_opt with Some _ -> [Sending] | None -> []) @ - (match copy_opt with Some _ -> [Copying] | None -> []) - in - if state = [] then raise (Does_not_exist ("mirror", id)); - let src, dst = match (recv_opt, send_opt, copy_opt) with - | (Some receive_state, _, _) -> - receive_state.Receive_state.remote_vdi, receive_state.Receive_state.leaf_vdi - | (_, Some send_state, _) -> - snd (of_mirror_id id), 
send_state.Send_state.mirror_vdi - | (_, _, Some copy_state) -> - snd (of_copy_id id), copy_state.Copy_state.copy_vdi - | _ -> failwith "Invalid" in - { Mirror.source_vdi = src; dest_vdi = dst; state; failed; } + let recv_opt = State.find_active_receive_mirror id in + let send_opt = State.find_active_local_mirror id in + let copy_opt = State.find_active_copy id in + let open State in + let failed = match send_opt with + | Some send_state -> + let failed = + try + let stats = Tapctl.stats (Tapctl.create ()) send_state.Send_state.tapdev in + stats.Tapctl.Stats.nbd_mirror_failed = 1 + with e -> + debug "Using cached copy of failure status"; + send_state.Send_state.failed + in + send_state.Send_state.failed <- failed; + failed + | None -> false + in + let open Mirror in + let state = + (match recv_opt with Some _ -> [Receiving] | None -> []) @ + (match send_opt with Some _ -> [Sending] | None -> []) @ + (match copy_opt with Some _ -> [Copying] | None -> []) + in + if state = [] then raise (Does_not_exist ("mirror", id)); + let src, dst = match (recv_opt, send_opt, copy_opt) with + | (Some receive_state, _, _) -> + receive_state.Receive_state.remote_vdi, receive_state.Receive_state.leaf_vdi + | (_, Some send_state, _) -> + snd (of_mirror_id id), send_state.Send_state.mirror_vdi + | (_, _, Some copy_state) -> + snd (of_copy_id id), copy_state.Copy_state.copy_vdi + | _ -> failwith "Invalid" in + { Mirror.source_vdi = src; dest_vdi = dst; state; failed; } let list ~dbg = - let send_ops, recv_ops, copy_ops = State.map_of () in - let get_ids map = List.map fst map in - let ids = - (get_ids send_ops) @ (get_ids recv_ops) @ (get_ids copy_ops) - |> Listext.List.setify - in - List.map (fun id -> - (id,stat dbg id)) ids + let send_ops, recv_ops, copy_ops = State.map_of () in + let get_ids map = List.map fst map in + let ids = + (get_ids send_ops) @ (get_ids recv_ops) @ (get_ids copy_ops) + |> Listext.List.setify + in + List.map (fun id -> + (id,stat dbg id)) ids let killall ~dbg 
= - let send_ops, recv_ops, copy_ops = State.map_of () in - List.iter - (fun (id, send_state) -> - begin - debug "Send in progress: %s" id; - List.iter log_and_ignore_exn - [ (fun () -> stop dbg id); - (fun () -> Local.DP.destroy ~dbg ~dp:send_state.State.Send_state.local_dp ~allow_leak:true) ] - end) - send_ops; - List.iter - (fun (id, copy_state) -> - begin - debug "Copy in progress: %s" id; - List.iter log_and_ignore_exn - [ (fun () -> Local.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.leaf_dp ~allow_leak:true); - (fun () -> Local.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.base_dp ~allow_leak:true) ]; - let remote_url = Http.Url.of_string copy_state.State.Copy_state.remote_url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - List.iter log_and_ignore_exn - [ (fun () -> Remote.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.remote_dp ~allow_leak:true); - (fun () -> Remote.VDI.destroy ~dbg ~sr:copy_state.State.Copy_state.dest_sr ~vdi:copy_state.State.Copy_state.copy_vdi) ] - end) - copy_ops; - List.iter - (fun (id, recv_state) -> - begin - debug "Receive in progress: %s" id; - log_and_ignore_exn (fun () -> Local.DATA.MIRROR.receive_cancel ~dbg ~id) - end) - recv_ops; - State.clear () + let send_ops, recv_ops, copy_ops = State.map_of () in + List.iter + (fun (id, send_state) -> + begin + debug "Send in progress: %s" id; + List.iter log_and_ignore_exn + [ (fun () -> stop dbg id); + (fun () -> Local.DP.destroy ~dbg ~dp:send_state.State.Send_state.local_dp ~allow_leak:true) ] + end) + send_ops; + List.iter + (fun (id, copy_state) -> + begin + debug "Copy in progress: %s" id; + List.iter log_and_ignore_exn + [ (fun () -> Local.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.leaf_dp ~allow_leak:true); + (fun () -> Local.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.base_dp ~allow_leak:true) ]; + let remote_url = Http.Url.of_string copy_state.State.Copy_state.remote_url in + let module Remote = 
Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + List.iter log_and_ignore_exn + [ (fun () -> Remote.DP.destroy ~dbg ~dp:copy_state.State.Copy_state.remote_dp ~allow_leak:true); + (fun () -> Remote.VDI.destroy ~dbg ~sr:copy_state.State.Copy_state.dest_sr ~vdi:copy_state.State.Copy_state.copy_vdi) ] + end) + copy_ops; + List.iter + (fun (id, recv_state) -> + begin + debug "Receive in progress: %s" id; + log_and_ignore_exn (fun () -> Local.DATA.MIRROR.receive_cancel ~dbg ~id) + end) + recv_ops; + State.clear () let receive_start ~dbg ~sr ~vdi_info ~id ~similar = - let on_fail : (unit -> unit) list ref = ref [] in - - let vdis = Local.SR.scan ~dbg ~sr in - - let leaf_dp = Local.DP.create ~dbg ~id:(Uuid.string_of_uuid (Uuid.make_uuid ())) in - - try - let vdi_info = { vdi_info with sm_config = ["base_mirror", id] } in - let leaf = Local.VDI.create ~dbg ~sr ~vdi_info in - info "Created leaf VDI for mirror receive: %s" (string_of_vdi_info leaf); - on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:leaf.vdi) :: !on_fail; - let dummy = Local.VDI.snapshot ~dbg ~sr ~vdi_info:leaf in - on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:dummy.vdi) :: !on_fail; - debug "Created dummy snapshot for mirror receive: %s" (string_of_vdi_info dummy); - - let _ = Local.VDI.attach ~dbg ~dp:leaf_dp ~sr ~vdi:leaf.vdi ~read_write:true in - Local.VDI.activate ~dbg ~dp:leaf_dp ~sr ~vdi:leaf.vdi; - - let nearest = List.fold_left - (fun acc content_id -> match acc with - | Some x -> acc - | None -> - try Some (List.find (fun vdi -> vdi.content_id = content_id) vdis) - with Not_found -> None) None similar in - - debug "Nearest VDI: content_id=%s vdi=%s" - (Opt.default "None" (Opt.map (fun x -> x.content_id) nearest)) - (Opt.default "None" (Opt.map (fun x -> x.vdi) nearest)); - - let parent = match nearest with - | Some vdi -> - debug "Cloning VDI %s" vdi.vdi; - let vdi = add_to_sm_config vdi "base_mirror" id in - let vdi_clone = Local.VDI.clone ~dbg 
~sr ~vdi_info:vdi in - if vdi_clone.virtual_size <> vdi_info.virtual_size then begin - let new_size = Local.VDI.resize ~dbg ~sr ~vdi:vdi_clone.vdi ~new_size:vdi_info.virtual_size in - debug "Resize local VDI %s to %Ld: result %Ld" vdi_clone.vdi vdi_info.virtual_size new_size; - end; - vdi_clone - | None -> - debug "Creating a blank remote VDI"; - Local.VDI.create ~dbg ~sr ~vdi_info - in - - debug "Parent disk content_id=%s" parent.content_id; - - State.add id State.(Recv_op Receive_state.({ - sr; - dummy_vdi=dummy.vdi; - leaf_vdi=leaf.vdi; - leaf_dp; - parent_vdi=parent.vdi; - remote_vdi=vdi_info.vdi})); - - let nearest_content_id = Opt.map (fun x -> x.content_id) nearest in - - Mirror.Vhd_mirror { - Mirror.mirror_vdi = leaf; - mirror_datapath = leaf_dp; - copy_diffs_from = nearest_content_id; - copy_diffs_to = parent.vdi; - dummy_vdi = dummy.vdi } - with e -> - List.iter (fun op -> try op () with e -> debug "Caught exception in on_fail: %s" (Printexc.to_string e)) !on_fail; - raise e + let on_fail : (unit -> unit) list ref = ref [] in + + let vdis = Local.SR.scan ~dbg ~sr in + + let leaf_dp = Local.DP.create ~dbg ~id:(Uuid.string_of_uuid (Uuid.make_uuid ())) in + + try + let vdi_info = { vdi_info with sm_config = ["base_mirror", id] } in + let leaf = Local.VDI.create ~dbg ~sr ~vdi_info in + info "Created leaf VDI for mirror receive: %s" (string_of_vdi_info leaf); + on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:leaf.vdi) :: !on_fail; + let dummy = Local.VDI.snapshot ~dbg ~sr ~vdi_info:leaf in + on_fail := (fun () -> Local.VDI.destroy ~dbg ~sr ~vdi:dummy.vdi) :: !on_fail; + debug "Created dummy snapshot for mirror receive: %s" (string_of_vdi_info dummy); + + let _ = Local.VDI.attach ~dbg ~dp:leaf_dp ~sr ~vdi:leaf.vdi ~read_write:true in + Local.VDI.activate ~dbg ~dp:leaf_dp ~sr ~vdi:leaf.vdi; + + let nearest = List.fold_left + (fun acc content_id -> match acc with + | Some x -> acc + | None -> + try Some (List.find (fun vdi -> vdi.content_id = content_id) 
vdis) + with Not_found -> None) None similar in + + debug "Nearest VDI: content_id=%s vdi=%s" + (Opt.default "None" (Opt.map (fun x -> x.content_id) nearest)) + (Opt.default "None" (Opt.map (fun x -> x.vdi) nearest)); + + let parent = match nearest with + | Some vdi -> + debug "Cloning VDI %s" vdi.vdi; + let vdi = add_to_sm_config vdi "base_mirror" id in + let vdi_clone = Local.VDI.clone ~dbg ~sr ~vdi_info:vdi in + if vdi_clone.virtual_size <> vdi_info.virtual_size then begin + let new_size = Local.VDI.resize ~dbg ~sr ~vdi:vdi_clone.vdi ~new_size:vdi_info.virtual_size in + debug "Resize local VDI %s to %Ld: result %Ld" vdi_clone.vdi vdi_info.virtual_size new_size; + end; + vdi_clone + | None -> + debug "Creating a blank remote VDI"; + Local.VDI.create ~dbg ~sr ~vdi_info + in + + debug "Parent disk content_id=%s" parent.content_id; + + State.add id State.(Recv_op Receive_state.({ + sr; + dummy_vdi=dummy.vdi; + leaf_vdi=leaf.vdi; + leaf_dp; + parent_vdi=parent.vdi; + remote_vdi=vdi_info.vdi})); + + let nearest_content_id = Opt.map (fun x -> x.content_id) nearest in + + Mirror.Vhd_mirror { + Mirror.mirror_vdi = leaf; + mirror_datapath = leaf_dp; + copy_diffs_from = nearest_content_id; + copy_diffs_to = parent.vdi; + dummy_vdi = dummy.vdi } + with e -> + List.iter (fun op -> try op () with e -> debug "Caught exception in on_fail: %s" (Printexc.to_string e)) !on_fail; + raise e let receive_finalize ~dbg ~id = - let recv_state = State.find_active_receive_mirror id in - let open State.Receive_state in Opt.iter (fun r -> Local.DP.destroy ~dbg ~dp:r.leaf_dp ~allow_leak:false) recv_state; - State.remove_receive_mirror id + let recv_state = State.find_active_receive_mirror id in + let open State.Receive_state in Opt.iter (fun r -> Local.DP.destroy ~dbg ~dp:r.leaf_dp ~allow_leak:false) recv_state; + State.remove_receive_mirror id let receive_cancel ~dbg ~id = - let receive_state = State.find_active_receive_mirror id in - let open State.Receive_state in Opt.iter (fun r -> - 
log_and_ignore_exn (fun () -> Local.DP.destroy ~dbg ~dp:r.leaf_dp ~allow_leak:false); - List.iter (fun v -> - log_and_ignore_exn (fun () -> Local.VDI.destroy ~dbg ~sr:r.sr ~vdi:v) - ) [r.dummy_vdi; r.leaf_vdi; r.parent_vdi] - ) receive_state; - State.remove_receive_mirror id + let receive_state = State.find_active_receive_mirror id in + let open State.Receive_state in Opt.iter (fun r -> + log_and_ignore_exn (fun () -> Local.DP.destroy ~dbg ~dp:r.leaf_dp ~allow_leak:false); + List.iter (fun v -> + log_and_ignore_exn (fun () -> Local.VDI.destroy ~dbg ~sr:r.sr ~vdi:v) + ) [r.dummy_vdi; r.leaf_vdi; r.parent_vdi] + ) receive_state; + State.remove_receive_mirror id exception Timeout let reqs_outstanding_timeout = 150.0 (* Tapdisk should time out after 2 mins. We can wait a little longer *) let pre_deactivate_hook ~dbg ~dp ~sr ~vdi = - let open State.Send_state in - let id = State.mirror_id_of (sr,vdi) in - let start_time = Oclock.gettime Oclock.monotonic in - let get_delta () = (Int64.to_float (Int64.sub (Oclock.gettime Oclock.monotonic) start_time)) /. 1.0e9 in - State.find_active_local_mirror id |> - Opt.iter (fun s -> - try - (* We used to pause here and then check the nbd_mirror_failed key. Now, we poll - until the number of outstanding requests has gone to zero, then check the - status. 
This avoids confusing the backend (CA-128460) *) - let open Tapctl in - let ctx = create () in - let rec wait () = - if get_delta () > reqs_outstanding_timeout then raise Timeout; - let st = stats ctx s.tapdev in - if st.Stats.reqs_outstanding > 0 - then (Thread.delay 1.0; wait ()) - else st - in - let st = wait () in - debug "Got final stats after waiting %f seconds" (get_delta ()); - if st.Stats.nbd_mirror_failed = 1 - then begin - error "tapdisk reports mirroring failed"; - s.failed <- true - end; - with - | Timeout -> - error "Timeout out after %f seconds waiting for tapdisk to complete all outstanding requests" (get_delta ()); - s.failed <- true - | e -> - error "Caught exception while finally checking mirror state: %s" - (Printexc.to_string e); - s.failed <- true - ) - -let post_detach_hook ~sr ~vdi ~dp = - let open State.Send_state in - let id = State.mirror_id_of (sr,vdi) in - State.find_active_local_mirror id |> - Opt.iter (fun r -> - let remote_url = Http.Url.of_string r.url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - let t = Thread.create (fun () -> - debug "Calling receive_finalize"; - log_and_ignore_exn - (fun () -> Remote.DATA.MIRROR.receive_finalize ~dbg:"Mirror-cleanup" ~id); - debug "Finished calling receive_finalize"; - State.remove_local_mirror id; - debug "Removed active local mirror: %s" id - ) () in - Opt.iter (fun id -> Updates.Scheduler.cancel id) r.watchdog; - debug "Created thread %d to call receive finalize and dp destroy" (Thread.id t)) + let open State.Send_state in + let id = State.mirror_id_of (sr,vdi) in + let start_time = Oclock.gettime Oclock.monotonic in + let get_delta () = (Int64.to_float (Int64.sub (Oclock.gettime Oclock.monotonic) start_time)) /. 1.0e9 in + State.find_active_local_mirror id |> + Opt.iter (fun s -> + try + (* We used to pause here and then check the nbd_mirror_failed key. 
Now, we poll + until the number of outstanding requests has gone to zero, then check the + status. This avoids confusing the backend (CA-128460) *) + let open Tapctl in + let ctx = create () in + let rec wait () = + if get_delta () > reqs_outstanding_timeout then raise Timeout; + let st = stats ctx s.tapdev in + if st.Stats.reqs_outstanding > 0 + then (Thread.delay 1.0; wait ()) + else st + in + let st = wait () in + debug "Got final stats after waiting %f seconds" (get_delta ()); + if st.Stats.nbd_mirror_failed = 1 + then begin + error "tapdisk reports mirroring failed"; + s.failed <- true + end; + with + | Timeout -> + error "Timeout out after %f seconds waiting for tapdisk to complete all outstanding requests" (get_delta ()); + s.failed <- true + | e -> + error "Caught exception while finally checking mirror state: %s" + (Printexc.to_string e); + s.failed <- true + ) + +let post_detach_hook ~sr ~vdi ~dp = + let open State.Send_state in + let id = State.mirror_id_of (sr,vdi) in + State.find_active_local_mirror id |> + Opt.iter (fun r -> + let remote_url = Http.Url.of_string r.url in + let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + let t = Thread.create (fun () -> + debug "Calling receive_finalize"; + log_and_ignore_exn + (fun () -> Remote.DATA.MIRROR.receive_finalize ~dbg:"Mirror-cleanup" ~id); + debug "Finished calling receive_finalize"; + State.remove_local_mirror id; + debug "Removed active local mirror: %s" id + ) () in + Opt.iter (fun id -> Updates.Scheduler.cancel id) r.watchdog; + debug "Created thread %d to call receive finalize and dp destroy" (Thread.id t)) let nbd_handler req s sr vdi dp = - debug "sr=%s vdi=%s dp=%s" sr vdi dp; - let attach_info = Local.DP.attach_info ~dbg:"nbd" ~sr ~vdi ~dp in - req.Http.Request.close <- true; - match tapdisk_of_attach_info attach_info with - | Some tapdev -> - let minor = Tapctl.get_minor tapdev in - let pid = Tapctl.get_tapdisk_pid tapdev in - let path = 
Printf.sprintf "/var/run/blktap-control/nbdserver%d.%d" pid minor in - Http_svr.headers s (Http.http_200_ok () @ ["Transfer-encoding: nbd"]); - let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in - finally - (fun () -> - Unix.connect control_fd (Unix.ADDR_UNIX path); - let msg = dp in - let len = String.length msg in - let written = Unixext.send_fd control_fd msg 0 len [] s in - if written <> len then begin - error "Failed to transfer fd to %s" path; - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true - end; - ) - (fun () -> Unix.close control_fd) - | None -> - () + debug "sr=%s vdi=%s dp=%s" sr vdi dp; + let attach_info = Local.DP.attach_info ~dbg:"nbd" ~sr ~vdi ~dp in + req.Http.Request.close <- true; + match tapdisk_of_attach_info attach_info with + | Some tapdev -> + let minor = Tapctl.get_minor tapdev in + let pid = Tapctl.get_tapdisk_pid tapdev in + let path = Printf.sprintf "/var/run/blktap-control/nbdserver%d.%d" pid minor in + Http_svr.headers s (Http.http_200_ok () @ ["Transfer-encoding: nbd"]); + let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in + finally + (fun () -> + Unix.connect control_fd (Unix.ADDR_UNIX path); + let msg = dp in + let len = String.length msg in + let written = Unixext.send_fd control_fd msg 0 len [] s in + if written <> len then begin + error "Failed to transfer fd to %s" path; + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true + end; + ) + (fun () -> Unix.close control_fd) + | None -> + () let copy ~task ~dbg ~sr ~vdi ~dp ~url ~dest = - debug "copy sr:%s vdi:%s url:%s dest:%s" sr vdi url dest; - let remote_url = Http.Url.of_string url in - let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in - try - (* Find the local VDI *) - let vdis = Local.SR.scan ~dbg ~sr in - let local_vdi = - try List.find (fun x -> x.vdi = vdi) vdis - with Not_found -> failwith 
(Printf.sprintf "Local VDI %s not found" vdi) in - try - let similar_vdis = Local.VDI.similar_content ~dbg ~sr ~vdi in - let similars = List.map (fun vdi -> vdi.content_id) similar_vdis in - debug "Similar VDIs to %s = [ %s ]" vdi (String.concat "; " (List.map (fun x -> Printf.sprintf "(vdi=%s,content_id=%s)" x.vdi x.content_id) vdis)); - let remote_vdis = Remote.SR.scan ~dbg ~sr:dest in - let nearest = List.fold_left - (fun acc content_id -> match acc with - | Some x -> acc - | None -> - try Some (List.find (fun vdi -> vdi.content_id = content_id) remote_vdis) - with Not_found -> None) None similars in - - debug "Nearest VDI: content_id=%s vdi=%s" - (Opt.default "None" (Opt.map (fun x -> x.content_id) nearest)) - (Opt.default "None" (Opt.map (fun x -> x.vdi) nearest)); - let remote_base = match nearest with - | Some vdi -> - debug "Cloning VDI %s" vdi.vdi; - let vdi_clone = Remote.VDI.clone ~dbg ~sr:dest ~vdi_info:vdi in - if vdi_clone.virtual_size <> local_vdi.virtual_size then begin - let new_size = Remote.VDI.resize ~dbg ~sr:dest ~vdi:vdi_clone.vdi ~new_size:local_vdi.virtual_size in - debug "Resize remote VDI %s to %Ld: result %Ld" vdi_clone.vdi local_vdi.virtual_size new_size; - end; - vdi_clone - | None -> - debug "Creating a blank remote VDI"; - Remote.VDI.create ~dbg ~sr:dest ~vdi_info:{ local_vdi with sm_config = [] } in - let remote_copy = copy' ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi:remote_base.vdi |> vdi_info in - let snapshot = Remote.VDI.snapshot ~dbg ~sr:dest ~vdi_info:remote_copy in - Remote.VDI.destroy ~dbg ~sr:dest ~vdi:remote_copy.vdi; - Some (Vdi_info snapshot) - with e -> - error "Caught %s: copying snapshots vdi" (Printexc.to_string e); - raise (Internal_error (Printexc.to_string e)) - with - | Backend_error(code, params) - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | e -> - raise (Internal_error(Printexc.to_string e)) + debug "copy sr:%s vdi:%s url:%s dest:%s" sr vdi url dest; + let remote_url = 
Http.Url.of_string url in + let module Remote = Client(struct let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url end) in + try + (* Find the local VDI *) + let vdis = Local.SR.scan ~dbg ~sr in + let local_vdi = + try List.find (fun x -> x.vdi = vdi) vdis + with Not_found -> failwith (Printf.sprintf "Local VDI %s not found" vdi) in + try + let similar_vdis = Local.VDI.similar_content ~dbg ~sr ~vdi in + let similars = List.map (fun vdi -> vdi.content_id) similar_vdis in + debug "Similar VDIs to %s = [ %s ]" vdi (String.concat "; " (List.map (fun x -> Printf.sprintf "(vdi=%s,content_id=%s)" x.vdi x.content_id) vdis)); + let remote_vdis = Remote.SR.scan ~dbg ~sr:dest in + let nearest = List.fold_left + (fun acc content_id -> match acc with + | Some x -> acc + | None -> + try Some (List.find (fun vdi -> vdi.content_id = content_id) remote_vdis) + with Not_found -> None) None similars in + + debug "Nearest VDI: content_id=%s vdi=%s" + (Opt.default "None" (Opt.map (fun x -> x.content_id) nearest)) + (Opt.default "None" (Opt.map (fun x -> x.vdi) nearest)); + let remote_base = match nearest with + | Some vdi -> + debug "Cloning VDI %s" vdi.vdi; + let vdi_clone = Remote.VDI.clone ~dbg ~sr:dest ~vdi_info:vdi in + if vdi_clone.virtual_size <> local_vdi.virtual_size then begin + let new_size = Remote.VDI.resize ~dbg ~sr:dest ~vdi:vdi_clone.vdi ~new_size:local_vdi.virtual_size in + debug "Resize remote VDI %s to %Ld: result %Ld" vdi_clone.vdi local_vdi.virtual_size new_size; + end; + vdi_clone + | None -> + debug "Creating a blank remote VDI"; + Remote.VDI.create ~dbg ~sr:dest ~vdi_info:{ local_vdi with sm_config = [] } in + let remote_copy = copy' ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi:remote_base.vdi |> vdi_info in + let snapshot = Remote.VDI.snapshot ~dbg ~sr:dest ~vdi_info:remote_copy in + Remote.VDI.destroy ~dbg ~sr:dest ~vdi:remote_copy.vdi; + Some (Vdi_info snapshot) + with e -> + error "Caught %s: copying snapshots vdi" (Printexc.to_string e); + raise 
(Internal_error (Printexc.to_string e)) + with + | Backend_error(code, params) + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | e -> + raise (Internal_error(Printexc.to_string e)) let wrap ~dbg f = - let task = Storage_task.add tasks dbg (fun task -> - try - f task - with - | Backend_error(code, params) - | Api_errors.Server_error(code, params) -> - raise (Backend_error(code, params)) - | Unimplemented msg -> raise (Unimplemented msg) - | e -> - raise (Internal_error(Printexc.to_string e))) in - let _ = Thread.create - (Debug.with_thread_associated dbg (fun () -> - Storage_task.run task; - signal task.Storage_task.id - )) () in - task.Storage_task.id - -let start ~dbg ~sr ~vdi ~dp ~url ~dest = - wrap ~dbg (fun task -> start' ~task ~dbg ~sr ~vdi ~dp ~url ~dest) + let task = Storage_task.add tasks dbg (fun task -> + try + f task + with + | Backend_error(code, params) + | Api_errors.Server_error(code, params) -> + raise (Backend_error(code, params)) + | Unimplemented msg -> raise (Unimplemented msg) + | e -> + raise (Internal_error(Printexc.to_string e))) in + let _ = Thread.create + (Debug.with_thread_associated dbg (fun () -> + Storage_task.run task; + signal task.Storage_task.id + )) () in + task.Storage_task.id + +let start ~dbg ~sr ~vdi ~dp ~url ~dest = + wrap ~dbg (fun task -> start' ~task ~dbg ~sr ~vdi ~dp ~url ~dest) let copy ~dbg ~sr ~vdi ~dp ~url ~dest = - wrap ~dbg (fun task -> copy ~task ~dbg ~sr ~vdi ~dp ~url ~dest) + wrap ~dbg (fun task -> copy ~task ~dbg ~sr ~vdi ~dp ~url ~dest) -let copy_into ~dbg ~sr ~vdi ~url ~dest ~dest_vdi = - wrap ~dbg (fun task -> copy_into ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi) +let copy_into ~dbg ~sr ~vdi ~url ~dest ~dest_vdi = + wrap ~dbg (fun task -> copy_into ~task ~dbg ~sr ~vdi ~url ~dest ~dest_vdi) (* The remote end of this call, SR.update_snapshot_info_dest, is implemented in * the SMAPIv1 section of storage_migrate.ml. 
It needs to access the setters * for snapshot_of, snapshot_time and is_a_snapshot, which we don't want to add * to SMAPI. *) let update_snapshot_info_src ~dbg ~sr ~vdi ~url ~dest ~dest_vdi ~snapshot_pairs = - let remote_url = Http.Url.of_string url in - let module Remote = - Client(struct - let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url - end) - in - let local_vdis = Local.SR.scan ~dbg ~sr in - let find_vdi ~vdi ~vdi_info_list = - try List.find (fun x -> x.vdi = vdi) vdi_info_list - with Not_found -> raise (Vdi_does_not_exist vdi) - in - let snapshot_pairs_for_remote = - List.map - (fun (local_snapshot, remote_snapshot) -> - (remote_snapshot, - find_vdi ~vdi:local_snapshot ~vdi_info_list:local_vdis)) - snapshot_pairs - in - Remote.SR.update_snapshot_info_dest ~dbg ~sr:dest ~vdi:dest_vdi - ~src_vdi:(find_vdi ~vdi ~vdi_info_list:local_vdis) - ~snapshot_pairs:snapshot_pairs_for_remote + let remote_url = Http.Url.of_string url in + let module Remote = + Client(struct + let rpc = rpc ~srcstr:"smapiv2" ~dststr:"dst_smapiv2" remote_url + end) + in + let local_vdis = Local.SR.scan ~dbg ~sr in + let find_vdi ~vdi ~vdi_info_list = + try List.find (fun x -> x.vdi = vdi) vdi_info_list + with Not_found -> raise (Vdi_does_not_exist vdi) + in + let snapshot_pairs_for_remote = + List.map + (fun (local_snapshot, remote_snapshot) -> + (remote_snapshot, + find_vdi ~vdi:local_snapshot ~vdi_info_list:local_vdis)) + snapshot_pairs + in + Remote.SR.update_snapshot_info_dest ~dbg ~sr:dest ~vdi:dest_vdi + ~src_vdi:(find_vdi ~vdi ~vdi_info_list:local_vdis) + ~snapshot_pairs:snapshot_pairs_for_remote diff --git a/ocaml/xapi/storage_mux.ml b/ocaml/xapi/storage_mux.ml index 5f4ce139e93..94b29b59e91 100644 --- a/ocaml/xapi/storage_mux.ml +++ b/ocaml/xapi/storage_mux.ml @@ -22,52 +22,52 @@ open Threadext open Storage_interface type plugin = { - processor: processor; - backend_domain: string; - query_result: query_result; + processor: processor; + backend_domain: string; + 
query_result: query_result; } let plugins : (sr, plugin) Hashtbl.t = Hashtbl.create 10 let m = Mutex.create () let debug_printer rpc call = - (* debug "Rpc.call = %s" (Xmlrpc.string_of_call call); *) - let result = rpc call in - (* debug "Rpc.response = %s" (Xmlrpc.string_of_response result); *) - result + (* debug "Rpc.call = %s" (Xmlrpc.string_of_call call); *) + let result = rpc call in + (* debug "Rpc.response = %s" (Xmlrpc.string_of_response result); *) + result let register sr rpc d info = - Mutex.execute m - (fun () -> - Hashtbl.replace plugins sr { processor = debug_printer rpc; backend_domain = d; query_result = info }; - debug "register SR %s (currently-registered = [ %s ])" sr (String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])) - ) + Mutex.execute m + (fun () -> + Hashtbl.replace plugins sr { processor = debug_printer rpc; backend_domain = d; query_result = info }; + debug "register SR %s (currently-registered = [ %s ])" sr (String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])) + ) let unregister sr = - Mutex.execute m - (fun () -> - Hashtbl.remove plugins sr; - debug "unregister SR %s (currently-registered = [ %s ])" sr (String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])) - ) + Mutex.execute m + (fun () -> + Hashtbl.remove plugins sr; + debug "unregister SR %s (currently-registered = [ %s ])" sr (String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])) + ) let query_result_of_sr sr = - try - Mutex.execute m - (fun () -> - Some (Hashtbl.find plugins sr).query_result - ) - with _ -> None + try + Mutex.execute m + (fun () -> + Some (Hashtbl.find plugins sr).query_result + ) + with _ -> None let features_of_sr sr = Opt.default [] (Opt.map (fun x -> x.features) (query_result_of_sr sr)) (* This is the policy: *) let of_sr sr = - Mutex.execute m - (fun () -> - if not (Hashtbl.mem plugins sr) then begin - error "No storage plugin for SR: %s (currently-registered = [ %s ])" sr 
(String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])); - raise (No_storage_plugin_for_sr sr) - end else (Hashtbl.find plugins sr).processor - ) + Mutex.execute m + (fun () -> + if not (Hashtbl.mem plugins sr) then begin + error "No storage plugin for SR: %s (currently-registered = [ %s ])" sr (String.concat ", " (Hashtbl.fold (fun sr _ acc -> sr :: acc) plugins [])); + raise (No_storage_plugin_for_sr sr) + end else (Hashtbl.find plugins sr).processor + ) open Fun @@ -77,238 +77,238 @@ let multicast f = Hashtbl.fold (fun sr plugin acc -> (sr, try SMSuccess (f sr pl let success = function | SMSuccess _ -> true | _ -> false -let string_of_sm_result f = function - | SMSuccess x -> Printf.sprintf "Success: %s" (f x) - | SMFailure e -> Printf.sprintf "Failure: %s" (Printexc.to_string e) +let string_of_sm_result f = function + | SMSuccess x -> Printf.sprintf "Success: %s" (f x) + | SMFailure e -> Printf.sprintf "Failure: %s" (Printexc.to_string e) let partition l = List.partition (success ++ snd) l let choose x = snd(List.hd x) let fail_or f results = - let successes, errors = partition results in - if errors <> [] then choose errors else f successes + let successes, errors = partition results in + if errors <> [] then choose errors else f successes let success_or f results = - let successes, errors = partition results in - if successes <> [] then f successes else f errors + let successes, errors = partition results in + if successes <> [] then f successes else f errors module Mux = struct - type context = Smint.request - - let forall f = - let combine results = - let all = List.fold_left (fun acc (sr, result) -> - (Printf.sprintf "For SR: %s" sr :: (string_of_sm_result (fun s -> s) result) :: acc)) [] results in - SMSuccess (String.concat "\n" all) in - match fail_or combine (multicast f) with - | SMSuccess x -> x - | SMFailure e -> raise e - - module Query = struct - let query context ~dbg = { - driver = "mux"; - name = "storage multiplexor"; - 
description = "forwards calls to other plugins"; - vendor = "XCP"; - copyright = "see the source code"; - version = "2.0"; - required_api_version = "2.0"; - features = []; - configuration = []; - required_cluster_stack = []; - } - let diagnostics context ~dbg = - forall (fun sr rpc -> - let module C = Client(struct let rpc = of_sr sr end) in - C.Query.diagnostics dbg - ) - end - module DP = struct - let create context ~dbg ~id = id (* XXX: is this pointless? *) - let destroy context ~dbg ~dp ~allow_leak = - (* Tell each plugin about this *) - match fail_or choose (multicast (fun sr rpc -> - let module C = Client(struct let rpc = of_sr sr end) in - C.DP.destroy ~dbg ~dp ~allow_leak)) with - | SMSuccess x -> x - | SMFailure e -> raise e - - let diagnostics context () = - forall (fun sr rpc -> - let module C = Client(struct let rpc = of_sr sr end) in - C.DP.diagnostics () - ) - - let attach_info context ~dbg ~sr ~vdi ~dp = - let module C = Client(struct let rpc = of_sr sr end) in - C.DP.attach_info ~dbg ~sr ~vdi ~dp - - let stat_vdi context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.DP.stat_vdi ~dbg ~sr ~vdi - - end - module SR = struct - include Storage_skeleton.SR - let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.create ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size - let attach context ~dbg ~sr = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.attach ~dbg ~sr - let set_name_label context ~dbg ~sr ~new_name_label = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.set_name_label ~dbg ~sr ~new_name_label - let set_name_description context ~dbg ~sr ~new_name_description = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.set_name_description ~dbg ~sr ~new_name_description - let detach context ~dbg ~sr = - let module C = Client(struct let rpc = of_sr sr end) in 
- C.SR.detach ~dbg ~sr - let destroy context ~dbg ~sr = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.destroy ~dbg ~sr - let stat context ~dbg ~sr = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.stat ~dbg ~sr - let scan context ~dbg ~sr = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.scan ~dbg ~sr - let list context ~dbg = - List.fold_left (fun acc (sr, list) -> match list with SMSuccess l -> l @ acc | x -> acc) [] (multicast (fun sr rpc -> - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.list ~dbg)) - let reset context ~dbg ~sr = assert false - let update_snapshot_info_src context = Storage_migrate.update_snapshot_info_src - let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = - let module C = Client(struct let rpc = of_sr sr end) in - C.SR.update_snapshot_info_dest ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs - end - module VDI = struct - let create context ~dbg ~sr ~vdi_info = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.create ~dbg ~sr ~vdi_info - let set_name_label context ~dbg ~sr ~vdi ~new_name_label = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.set_name_label ~dbg ~sr ~vdi ~new_name_label - let set_name_description context ~dbg ~sr ~vdi ~new_name_description = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.set_name_description ~dbg ~sr ~vdi ~new_name_description - let snapshot context ~dbg ~sr ~vdi_info = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.snapshot ~dbg ~sr ~vdi_info - let clone context ~dbg ~sr ~vdi_info = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.clone ~dbg ~sr ~vdi_info - let resize context ~dbg ~sr ~vdi ~new_size = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.resize ~dbg ~sr ~vdi ~new_size - let destroy context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.destroy ~dbg ~sr ~vdi - let stat 
context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.stat ~dbg ~sr ~vdi - let introduce context ~dbg ~sr ~uuid ~sm_config ~location = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.introduce ~dbg ~sr ~uuid ~sm_config ~location - let set_persistent context ~dbg ~sr ~vdi ~persistent = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.set_persistent ~dbg ~sr ~vdi ~persistent - let epoch_begin context ~dbg ~sr ~vdi ~persistent = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.epoch_begin ~dbg ~sr ~vdi ~persistent - let attach context ~dbg ~dp ~sr ~vdi ~read_write = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write - let activate context ~dbg ~dp ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.activate ~dbg ~dp ~sr ~vdi - let deactivate context ~dbg ~dp ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.deactivate ~dbg ~dp ~sr ~vdi - let detach context ~dbg ~dp ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.detach ~dbg ~dp ~sr ~vdi - let epoch_end context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.epoch_end ~dbg ~sr ~vdi - let get_by_name context ~dbg ~sr ~name = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.get_by_name ~dbg ~sr ~name - let set_content_id context ~dbg ~sr ~vdi ~content_id = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.set_content_id ~dbg ~sr ~vdi ~content_id - let similar_content context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.similar_content ~dbg ~sr ~vdi - let compose context ~dbg ~sr ~vdi1 ~vdi2 = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.compose ~dbg ~sr ~vdi1 ~vdi2 - let add_to_sm_config context ~dbg ~sr ~vdi ~key = - let module C = Client(struct let rpc = of_sr sr end) in - 
C.VDI.add_to_sm_config ~dbg ~sr ~vdi ~key - let remove_from_sm_config context ~dbg ~sr ~vdi ~key = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.remove_from_sm_config ~dbg ~sr ~vdi ~key - let get_url context ~dbg ~sr ~vdi = - let module C = Client(struct let rpc = of_sr sr end) in - C.VDI.get_url ~dbg ~sr ~vdi - - end - - let get_by_name context ~dbg ~name = - (* Assume it has either the format: - SR/VDI -- for a particular SR and VDI - content_id -- for a particular content *) - let open Xstringext in - match List.filter (fun x -> x <> "") (String.split ~limit:2 '/' name) with - | [ sr; name ] -> - let module C = Client(struct let rpc = of_sr sr end) in - sr, C.VDI.get_by_name ~dbg ~sr ~name - | [ name ] -> - (match success_or choose (multicast (fun sr rpc -> - let module C = Client(struct let rpc = of_sr sr end) in - sr, C.VDI.get_by_name ~dbg ~sr ~name - )) with SMSuccess x -> x - | SMFailure e -> raise e) - | _ -> - raise (Vdi_does_not_exist name) - - module DATA = struct - let copy context = Storage_migrate.copy - let copy_into context = Storage_migrate.copy_into - - module MIRROR = struct - let start context = Storage_migrate.start - let stop context = Storage_migrate.stop - let list context = Storage_migrate.list - let stat context = Storage_migrate.stat - let receive_start context = Storage_migrate.receive_start - let receive_finalize context = Storage_migrate.receive_finalize - let receive_cancel context = Storage_migrate.receive_cancel - end - end - - module Policy = struct - let get_backend_vm context ~dbg ~vm ~sr ~vdi = - if not(Hashtbl.mem plugins sr) then begin - error "No registered plugin for sr = %s" sr; - raise (No_storage_plugin_for_sr sr) - end else (Hashtbl.find plugins sr).backend_domain - end - - module TASK = struct - let stat context ~dbg ~task = assert false - let cancel context ~dbg ~task = assert false - let destroy context ~dbg ~task = assert false - let list context ~dbg = assert false + type context = Smint.request 
+ + let forall f = + let combine results = + let all = List.fold_left (fun acc (sr, result) -> + (Printf.sprintf "For SR: %s" sr :: (string_of_sm_result (fun s -> s) result) :: acc)) [] results in + SMSuccess (String.concat "\n" all) in + match fail_or combine (multicast f) with + | SMSuccess x -> x + | SMFailure e -> raise e + + module Query = struct + let query context ~dbg = { + driver = "mux"; + name = "storage multiplexor"; + description = "forwards calls to other plugins"; + vendor = "XCP"; + copyright = "see the source code"; + version = "2.0"; + required_api_version = "2.0"; + features = []; + configuration = []; + required_cluster_stack = []; + } + let diagnostics context ~dbg = + forall (fun sr rpc -> + let module C = Client(struct let rpc = of_sr sr end) in + C.Query.diagnostics dbg + ) + end + module DP = struct + let create context ~dbg ~id = id (* XXX: is this pointless? *) + let destroy context ~dbg ~dp ~allow_leak = + (* Tell each plugin about this *) + match fail_or choose (multicast (fun sr rpc -> + let module C = Client(struct let rpc = of_sr sr end) in + C.DP.destroy ~dbg ~dp ~allow_leak)) with + | SMSuccess x -> x + | SMFailure e -> raise e + + let diagnostics context () = + forall (fun sr rpc -> + let module C = Client(struct let rpc = of_sr sr end) in + C.DP.diagnostics () + ) + + let attach_info context ~dbg ~sr ~vdi ~dp = + let module C = Client(struct let rpc = of_sr sr end) in + C.DP.attach_info ~dbg ~sr ~vdi ~dp + + let stat_vdi context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.DP.stat_vdi ~dbg ~sr ~vdi + + end + module SR = struct + include Storage_skeleton.SR + let create context ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.create ~dbg ~sr ~name_label ~name_description ~device_config ~physical_size + let attach context ~dbg ~sr = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.attach ~dbg ~sr + 
let set_name_label context ~dbg ~sr ~new_name_label = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.set_name_label ~dbg ~sr ~new_name_label + let set_name_description context ~dbg ~sr ~new_name_description = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.set_name_description ~dbg ~sr ~new_name_description + let detach context ~dbg ~sr = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.detach ~dbg ~sr + let destroy context ~dbg ~sr = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.destroy ~dbg ~sr + let stat context ~dbg ~sr = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.stat ~dbg ~sr + let scan context ~dbg ~sr = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.scan ~dbg ~sr + let list context ~dbg = + List.fold_left (fun acc (sr, list) -> match list with SMSuccess l -> l @ acc | x -> acc) [] (multicast (fun sr rpc -> + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.list ~dbg)) + let reset context ~dbg ~sr = assert false + let update_snapshot_info_src context = Storage_migrate.update_snapshot_info_src + let update_snapshot_info_dest context ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs = + let module C = Client(struct let rpc = of_sr sr end) in + C.SR.update_snapshot_info_dest ~dbg ~sr ~vdi ~src_vdi ~snapshot_pairs + end + module VDI = struct + let create context ~dbg ~sr ~vdi_info = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.create ~dbg ~sr ~vdi_info + let set_name_label context ~dbg ~sr ~vdi ~new_name_label = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.set_name_label ~dbg ~sr ~vdi ~new_name_label + let set_name_description context ~dbg ~sr ~vdi ~new_name_description = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.set_name_description ~dbg ~sr ~vdi ~new_name_description + let snapshot context ~dbg ~sr ~vdi_info = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.snapshot 
~dbg ~sr ~vdi_info + let clone context ~dbg ~sr ~vdi_info = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.clone ~dbg ~sr ~vdi_info + let resize context ~dbg ~sr ~vdi ~new_size = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.resize ~dbg ~sr ~vdi ~new_size + let destroy context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.destroy ~dbg ~sr ~vdi + let stat context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.stat ~dbg ~sr ~vdi + let introduce context ~dbg ~sr ~uuid ~sm_config ~location = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.introduce ~dbg ~sr ~uuid ~sm_config ~location + let set_persistent context ~dbg ~sr ~vdi ~persistent = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.set_persistent ~dbg ~sr ~vdi ~persistent + let epoch_begin context ~dbg ~sr ~vdi ~persistent = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.epoch_begin ~dbg ~sr ~vdi ~persistent + let attach context ~dbg ~dp ~sr ~vdi ~read_write = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.attach ~dbg ~dp ~sr ~vdi ~read_write + let activate context ~dbg ~dp ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.activate ~dbg ~dp ~sr ~vdi + let deactivate context ~dbg ~dp ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.deactivate ~dbg ~dp ~sr ~vdi + let detach context ~dbg ~dp ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.detach ~dbg ~dp ~sr ~vdi + let epoch_end context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.epoch_end ~dbg ~sr ~vdi + let get_by_name context ~dbg ~sr ~name = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.get_by_name ~dbg ~sr ~name + let set_content_id context ~dbg ~sr ~vdi ~content_id = + let module C = Client(struct let rpc = of_sr sr end) in + 
C.VDI.set_content_id ~dbg ~sr ~vdi ~content_id + let similar_content context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.similar_content ~dbg ~sr ~vdi + let compose context ~dbg ~sr ~vdi1 ~vdi2 = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.compose ~dbg ~sr ~vdi1 ~vdi2 + let add_to_sm_config context ~dbg ~sr ~vdi ~key = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.add_to_sm_config ~dbg ~sr ~vdi ~key + let remove_from_sm_config context ~dbg ~sr ~vdi ~key = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.remove_from_sm_config ~dbg ~sr ~vdi ~key + let get_url context ~dbg ~sr ~vdi = + let module C = Client(struct let rpc = of_sr sr end) in + C.VDI.get_url ~dbg ~sr ~vdi + + end + + let get_by_name context ~dbg ~name = + (* Assume it has either the format: + SR/VDI -- for a particular SR and VDI + content_id -- for a particular content *) + let open Xstringext in + match List.filter (fun x -> x <> "") (String.split ~limit:2 '/' name) with + | [ sr; name ] -> + let module C = Client(struct let rpc = of_sr sr end) in + sr, C.VDI.get_by_name ~dbg ~sr ~name + | [ name ] -> + (match success_or choose (multicast (fun sr rpc -> + let module C = Client(struct let rpc = of_sr sr end) in + sr, C.VDI.get_by_name ~dbg ~sr ~name + )) with SMSuccess x -> x + | SMFailure e -> raise e) + | _ -> + raise (Vdi_does_not_exist name) + + module DATA = struct + let copy context = Storage_migrate.copy + let copy_into context = Storage_migrate.copy_into + + module MIRROR = struct + let start context = Storage_migrate.start + let stop context = Storage_migrate.stop + let list context = Storage_migrate.list + let stat context = Storage_migrate.stat + let receive_start context = Storage_migrate.receive_start + let receive_finalize context = Storage_migrate.receive_finalize + let receive_cancel context = Storage_migrate.receive_cancel end - - module UPDATES = struct - let get context ~dbg ~from 
~timeout = assert false - end + end + + module Policy = struct + let get_backend_vm context ~dbg ~vm ~sr ~vdi = + if not(Hashtbl.mem plugins sr) then begin + error "No registered plugin for sr = %s" sr; + raise (No_storage_plugin_for_sr sr) + end else (Hashtbl.find plugins sr).backend_domain + end + + module TASK = struct + let stat context ~dbg ~task = assert false + let cancel context ~dbg ~task = assert false + let destroy context ~dbg ~task = assert false + let list context ~dbg = assert false + end + + module UPDATES = struct + let get context ~dbg ~from ~timeout = assert false + end end diff --git a/ocaml/xapi/storage_proxy.ml b/ocaml/xapi/storage_proxy.ml index ccee8057b82..6121f83f7e3 100644 --- a/ocaml/xapi/storage_proxy.ml +++ b/ocaml/xapi/storage_proxy.ml @@ -18,95 +18,95 @@ open Storage_interface module type RPC = sig - val rpc : Rpc.call -> Rpc.response + val rpc : Rpc.call -> Rpc.response end module Proxy = functor(RPC: RPC) -> struct - type context = Smint.request + type context = Smint.request - module Client = Client(RPC) + module Client = Client(RPC) - module Query = struct - let query _ = Client.Query.query - let diagnostics _ = Client.Query.diagnostics - end - module DP = struct - let create _ = Client.DP.create - let destroy _ = Client.DP.destroy - let diagnostics _ = Client.DP.diagnostics - let attach_info _ = Client.DP.attach_info - let stat_vdi _ = Client.DP.stat_vdi - end - module SR = struct - let probe _ = Client.SR.probe - let create _ = Client.SR.create - let set_name_label _ = Client.SR.set_name_label - let set_name_description _ = Client.SR.set_name_description - let attach _ = Client.SR.attach - let detach _ = Client.SR.detach - let reset _ = Client.SR.reset - let destroy _ = Client.SR.destroy - let scan _ = Client.SR.scan - let stat _ = Client.SR.stat - let list _ = Client.SR.list - let update_snapshot_info_src _ = Client.SR.update_snapshot_info_src - let update_snapshot_info_dest _ = Client.SR.update_snapshot_info_dest - end - 
module VDI = struct - let epoch_begin _ = Client.VDI.epoch_begin - let attach _ = Client.VDI.attach - let activate _ = Client.VDI.activate - let deactivate _ = Client.VDI.deactivate - let detach _ = Client.VDI.detach - let epoch_end _ = Client.VDI.epoch_end + module Query = struct + let query _ = Client.Query.query + let diagnostics _ = Client.Query.diagnostics + end + module DP = struct + let create _ = Client.DP.create + let destroy _ = Client.DP.destroy + let diagnostics _ = Client.DP.diagnostics + let attach_info _ = Client.DP.attach_info + let stat_vdi _ = Client.DP.stat_vdi + end + module SR = struct + let probe _ = Client.SR.probe + let create _ = Client.SR.create + let set_name_label _ = Client.SR.set_name_label + let set_name_description _ = Client.SR.set_name_description + let attach _ = Client.SR.attach + let detach _ = Client.SR.detach + let reset _ = Client.SR.reset + let destroy _ = Client.SR.destroy + let scan _ = Client.SR.scan + let stat _ = Client.SR.stat + let list _ = Client.SR.list + let update_snapshot_info_src _ = Client.SR.update_snapshot_info_src + let update_snapshot_info_dest _ = Client.SR.update_snapshot_info_dest + end + module VDI = struct + let epoch_begin _ = Client.VDI.epoch_begin + let attach _ = Client.VDI.attach + let activate _ = Client.VDI.activate + let deactivate _ = Client.VDI.deactivate + let detach _ = Client.VDI.detach + let epoch_end _ = Client.VDI.epoch_end - let create _ = Client.VDI.create - let set_name_label _ = Client.VDI.set_name_label - let set_name_description _ = Client.VDI.set_name_description - let snapshot _ = Client.VDI.snapshot - let clone _ = Client.VDI.clone - let destroy _ = Client.VDI.destroy - let resize _ = Client.VDI.resize - let stat _ = Client.VDI.stat - let introduce _ = Client.VDI.introduce - let set_persistent _ = Client.VDI.set_persistent - let get_by_name _ = Client.VDI.get_by_name - let set_content_id _ = Client.VDI.set_content_id - let similar_content _ = Client.VDI.similar_content - let 
compose _ = Client.VDI.compose - let add_to_sm_config _ = Client.VDI.add_to_sm_config - let remove_from_sm_config _ = Client.VDI.remove_from_sm_config - let get_url _ = Client.VDI.get_url - end + let create _ = Client.VDI.create + let set_name_label _ = Client.VDI.set_name_label + let set_name_description _ = Client.VDI.set_name_description + let snapshot _ = Client.VDI.snapshot + let clone _ = Client.VDI.clone + let destroy _ = Client.VDI.destroy + let resize _ = Client.VDI.resize + let stat _ = Client.VDI.stat + let introduce _ = Client.VDI.introduce + let set_persistent _ = Client.VDI.set_persistent + let get_by_name _ = Client.VDI.get_by_name + let set_content_id _ = Client.VDI.set_content_id + let similar_content _ = Client.VDI.similar_content + let compose _ = Client.VDI.compose + let add_to_sm_config _ = Client.VDI.add_to_sm_config + let remove_from_sm_config _ = Client.VDI.remove_from_sm_config + let get_url _ = Client.VDI.get_url + end - let get_by_name _ = Client.get_by_name + let get_by_name _ = Client.get_by_name - module Policy = struct - let get_backend_vm _ = Client.Policy.get_backend_vm - end + module Policy = struct + let get_backend_vm _ = Client.Policy.get_backend_vm + end - module DATA = struct - let copy_into _ = Client.DATA.copy_into - let copy _ = Client.DATA.copy - module MIRROR = struct - let start _ = Client.DATA.MIRROR.start - let stop _ = Client.DATA.MIRROR.stop - let stat _ = Client.DATA.MIRROR.stat - let receive_start _ = Client.DATA.MIRROR.receive_start - let receive_finalize _ = Client.DATA.MIRROR.receive_finalize - let receive_cancel _ = Client.DATA.MIRROR.receive_cancel - let list _ = Client.DATA.MIRROR.list - end - end + module DATA = struct + let copy_into _ = Client.DATA.copy_into + let copy _ = Client.DATA.copy + module MIRROR = struct + let start _ = Client.DATA.MIRROR.start + let stop _ = Client.DATA.MIRROR.stop + let stat _ = Client.DATA.MIRROR.stat + let receive_start _ = Client.DATA.MIRROR.receive_start + let 
receive_finalize _ = Client.DATA.MIRROR.receive_finalize + let receive_cancel _ = Client.DATA.MIRROR.receive_cancel + let list _ = Client.DATA.MIRROR.list + end + end - module TASK = struct - let stat _ = Client.TASK.stat - let cancel _ = Client.TASK.cancel - let destroy _ = Client.TASK.destroy - let list _ = Client.TASK.list - end + module TASK = struct + let stat _ = Client.TASK.stat + let cancel _ = Client.TASK.cancel + let destroy _ = Client.TASK.destroy + let list _ = Client.TASK.list + end - module UPDATES = struct - let get _ = Client.UPDATES.get - end + module UPDATES = struct + let get _ = Client.UPDATES.get + end end diff --git a/ocaml/xapi/storage_task.ml b/ocaml/xapi/storage_task.ml index ccace4943fd..4689d956b09 100644 --- a/ocaml/xapi/storage_task.ml +++ b/ocaml/xapi/storage_task.ml @@ -8,14 +8,14 @@ module Updates = Updates.Updates(Storage_interface) let updates = Updates.empty () let tasks = Storage_task.empty () - + let signal task = - let open Storage_task in - Stdext.Threadext.Mutex.execute tasks.m - (fun () -> - if exists_locked tasks task then begin - debug "TASK.signal %s = %s" task ((find_locked tasks task).state |> Task.rpc_of_state |> Jsonrpc.to_string); - Updates.add (Dynamic.Task task) updates; - end else debug "TASK.signal %s (object deleted)" task - ) - + let open Storage_task in + Stdext.Threadext.Mutex.execute tasks.m + (fun () -> + if exists_locked tasks task then begin + debug "TASK.signal %s = %s" task ((find_locked tasks task).state |> Task.rpc_of_state |> Jsonrpc.to_string); + Updates.add (Dynamic.Task task) updates; + end else debug "TASK.signal %s (object deleted)" task + ) + diff --git a/ocaml/xapi/storage_utils.ml b/ocaml/xapi/storage_utils.ml index 04907765ce2..4837ca9227c 100644 --- a/ocaml/xapi/storage_utils.ml +++ b/ocaml/xapi/storage_utils.ml @@ -13,7 +13,7 @@ *) let string_of_vdi_type vdi_type = - Rpc.string_of_rpc (API.rpc_of_vdi_type vdi_type) + Rpc.string_of_rpc (API.rpc_of_vdi_type vdi_type) let vdi_type_of_string 
str = - API.vdi_type_of_rpc (Rpc.String str) + API.vdi_type_of_rpc (Rpc.String str) diff --git a/ocaml/xapi/stream_vdi.ml b/ocaml/xapi/stream_vdi.ml index f5bd95d7706..72d76870362 100644 --- a/ocaml/xapi/stream_vdi.ml +++ b/ocaml/xapi/stream_vdi.ml @@ -13,7 +13,7 @@ *) (** Utility functions for streaming VDI images * @group Storage - *) +*) open Stdext open Xstringext @@ -28,24 +28,24 @@ module D = Debug.Make(struct let name="stream_vdi" end) open D (** Inside the tar we divide each VDI into small 'chunk_size' blocks: *) -let chunk_size = Int64.mul 1024L 1024L (* 1 MiB *) +let chunk_size = Int64.mul 1024L 1024L (* 1 MiB *) let checksum_extension = ".checksum" (** Helper function to prevent double-closes of file descriptors *) -let close to_close fd = +let close to_close fd = if List.mem fd !to_close then Unix.close fd; - to_close := List.filter (fun x -> fd <> x) !to_close + to_close := List.filter (fun x -> fd <> x) !to_close type vdi = string (* directory prefix in tar file *) * API.ref_VDI * Int64.t (* size to send/recieve *) -(** Open the device corresponding to a VDI with and and apply the +(** Open the device corresponding to a VDI with and and apply the resulting file descriptor to . Guarantees to close the file descriptor afterwards. *) let with_open_vdi __context rpc session_id vdi_ref mode flags perms f = Sm_fs_ops.with_block_attached_device __context rpc session_id vdi_ref mode (fun dom0_path -> debug "with_open_vdi opening: %s" dom0_path; - let ofd = Unix.openfile dom0_path flags perms in + let ofd = Unix.openfile dom0_path flags perms in Pervasiveext.finally (fun () -> f ofd) (fun () -> Unix.close ofd)) (** Used to sort VDI prefixes into a canonical order for streaming. 
Currently lexicographic @@ -59,15 +59,15 @@ let for_each_vdi __context f prefix_vdis = List.iter f sorted_prefix_vdis (** Represent the progress made streaming a set of disks *) -type progress = - { total_size: int64; - mutable transmitted_so_far: int64; - mutable time_of_last_update: float; - __context: Context.t - } +type progress = + { total_size: int64; + mutable transmitted_so_far: int64; + mutable time_of_last_update: float; + __context: Context.t + } (** Create a fresh progress record from a set of VDIs *) -let new_progress_record __context (prefix_vdis: vdi list) = +let new_progress_record __context (prefix_vdis: vdi list) = { total_size = List.fold_left (fun tot (_,_,s) -> Int64.add tot s) 0L prefix_vdis; transmitted_so_far = 0L; time_of_last_update = 0.0; @@ -75,7 +75,7 @@ let new_progress_record __context (prefix_vdis: vdi list) = (** Called every time (uncompressed) bytes have been read or written. Updates the task record in the database if no update has been sent for 10 seconds. 
*) -let made_progress __context progress n = +let made_progress __context progress n = let total_size_MiB = Int64.div progress.total_size 1048576L in progress.transmitted_so_far <- Int64.add progress.transmitted_so_far n; let so_far_MiB = Int64.div progress.transmitted_so_far 1048576L in @@ -90,26 +90,26 @@ let made_progress __context progress n = (** Write a block of checksummed data of length [len] with name [filename] to [ofd] *) -let write_block ~__context filename buffer ofd len = +let write_block ~__context filename buffer ofd len = let hdr = Tar_unix.Header.make filename (Int64.of_int len) in try - let csum = Sha1.to_hex (Sha1.string buffer) in - Tar_unix.write_block hdr (fun ofd -> Tar_unix.Archive.multicast_n_string buffer [ ofd ] len ) ofd; - (* Write the checksum as a separate file *) - let hdr' = Tar_unix.Header.make (filename ^ checksum_extension) (Int64.of_int (String.length csum)) in - Tar_unix.write_block hdr' (fun ofd -> ignore(Unix.write ofd csum 0 (String.length csum))) ofd + let csum = Sha1.to_hex (Sha1.string buffer) in + Tar_unix.write_block hdr (fun ofd -> Tar_unix.Archive.multicast_n_string buffer [ ofd ] len ) ofd; + (* Write the checksum as a separate file *) + let hdr' = Tar_unix.Header.make (filename ^ checksum_extension) (Int64.of_int (String.length csum)) in + Tar_unix.write_block hdr' (fun ofd -> ignore(Unix.write ofd csum 0 (String.length csum))) ofd with - Unix.Unix_error (a,b,c) as e -> - TaskHelper.exn_if_cancelling ~__context; - if b="write" - then raise (Api_errors.Server_error (Api_errors.client_error, [ExnHelper.string_of_exn e])) - else raise e + Unix.Unix_error (a,b,c) as e -> + TaskHelper.exn_if_cancelling ~__context; + if b="write" + then raise (Api_errors.Server_error (Api_errors.client_error, [ExnHelper.string_of_exn e])) + else raise e (** Stream a set of VDIs split into chunks in a tar format in a defined order. 
Return an association list mapping tar filename -> string (containing the SHA1 checksums) *) -let send_all refresh_session ofd ~__context rpc session_id (prefix_vdis: vdi list) = +let send_all refresh_session ofd ~__context rpc session_id (prefix_vdis: vdi list) = TaskHelper.set_cancellable ~__context; let progress = new_progress_record __context prefix_vdis in @@ -117,55 +117,55 @@ let send_all refresh_session ofd ~__context rpc session_id (prefix_vdis: vdi lis (* Remember when we last wrote something so that we can work around firewalls which close 'idle' connections *) let last_transmission_time = ref 0. in - let send_one ofd (__context:Context.t) (prefix, vdi_ref, size) = + let send_one ofd (__context:Context.t) (prefix, vdi_ref, size) = let size = Db.VDI.get_virtual_size ~__context ~self:vdi_ref in with_open_vdi __context rpc session_id vdi_ref `RO [Unix.O_RDONLY] 0o644 (fun ifd -> - let reusable_buffer = String.make (Int64.to_int chunk_size) '\000' in - - (* NB. It used to be that chunks could be larger than a native int *) - (* could handle, but this is no longer the case! Ensure all chunks *) - (* are strictly less than 2^30 bytes *) - let rec stream_from (chunk_no: int) (offset: int64) = - refresh_session (); - let remaining = Int64.sub size offset in - if remaining > 0L - then - begin - let this_chunk = (min remaining chunk_size) in - let last_chunk = this_chunk = remaining in - let this_chunk = Int64.to_int this_chunk in - let filename = Printf.sprintf "%s/%08d" prefix chunk_no in - - let now = Unix.gettimeofday () in - let time_since_transmission = now -. !last_transmission_time in - - (* We always include the first and last blocks *) - let first_or_last = chunk_no = 0 || last_chunk in - - if time_since_transmission > 5. 
&& not first_or_last then begin - last_transmission_time := now; - write_block ~__context filename "" ofd 0; - (* no progress has been made *) - stream_from (chunk_no + 1) offset - end else begin - let buffer = if (Int64.of_int this_chunk) = chunk_size - then reusable_buffer - else String.make this_chunk '\000' - in - Unixext.really_read ifd buffer 0 this_chunk; - if not (Zerocheck.is_all_zeros buffer this_chunk) || first_or_last then begin - last_transmission_time := now; - write_block ~__context filename buffer ofd this_chunk; - end; - made_progress __context progress (Int64.of_int this_chunk); - stream_from (chunk_no + 1) (Int64.add offset chunk_size); - end - end - in - stream_from 0 0L); + let reusable_buffer = String.make (Int64.to_int chunk_size) '\000' in + + (* NB. It used to be that chunks could be larger than a native int *) + (* could handle, but this is no longer the case! Ensure all chunks *) + (* are strictly less than 2^30 bytes *) + let rec stream_from (chunk_no: int) (offset: int64) = + refresh_session (); + let remaining = Int64.sub size offset in + if remaining > 0L + then + begin + let this_chunk = (min remaining chunk_size) in + let last_chunk = this_chunk = remaining in + let this_chunk = Int64.to_int this_chunk in + let filename = Printf.sprintf "%s/%08d" prefix chunk_no in + + let now = Unix.gettimeofday () in + let time_since_transmission = now -. !last_transmission_time in + + (* We always include the first and last blocks *) + let first_or_last = chunk_no = 0 || last_chunk in + + if time_since_transmission > 5. 
&& not first_or_last then begin + last_transmission_time := now; + write_block ~__context filename "" ofd 0; + (* no progress has been made *) + stream_from (chunk_no + 1) offset + end else begin + let buffer = if (Int64.of_int this_chunk) = chunk_size + then reusable_buffer + else String.make this_chunk '\000' + in + Unixext.really_read ifd buffer 0 this_chunk; + if not (Zerocheck.is_all_zeros buffer this_chunk) || first_or_last then begin + last_transmission_time := now; + write_block ~__context filename buffer ofd this_chunk; + end; + made_progress __context progress (Int64.of_int this_chunk); + stream_from (chunk_no + 1) (Int64.add offset chunk_size); + end + end + in + stream_from 0 0L); debug "Finished streaming VDI" in for_each_vdi __context (send_one ofd __context) prefix_vdis @@ -173,32 +173,32 @@ exception Invalid_checksum of string (* Rio GA and later only *) let verify_inline_checksum ifd checksum_table = - let hdr = Tar_unix.Header.get_next_header ifd in - let file_name = hdr.Tar_unix.Header.file_name in - let length = hdr.Tar_unix.Header.file_size in - if not(String.endswith checksum_extension file_name) then begin - let msg = Printf.sprintf "Expected to find an inline checksum, found file called: %s" file_name in - error "%s" msg; - raise (Failure msg) - end; - try - let length' = Int64.to_int length in - let csum = String.make length' ' ' in - Unixext.really_read ifd csum 0 length'; - Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); - (* Look up the relevant file_name in the checksum_table *) - let original_file_name = String.sub file_name 0 (String.length file_name - (String.length checksum_extension)) in - let csum' = List.assoc original_file_name !checksum_table in - if csum <> csum' then begin - error "File %s checksum mismatch (%s <> %s)" original_file_name csum csum'; - raise (Invalid_checksum (Printf.sprintf "Block %s checksum failed: original = %s; recomputed = %s" original_file_name csum csum')); - end - with e 
-> - error "Error validating checksums on import: %s" (ExnHelper.string_of_exn e); - raise e + let hdr = Tar_unix.Header.get_next_header ifd in + let file_name = hdr.Tar_unix.Header.file_name in + let length = hdr.Tar_unix.Header.file_size in + if not(String.endswith checksum_extension file_name) then begin + let msg = Printf.sprintf "Expected to find an inline checksum, found file called: %s" file_name in + error "%s" msg; + raise (Failure msg) + end; + try + let length' = Int64.to_int length in + let csum = String.make length' ' ' in + Unixext.really_read ifd csum 0 length'; + Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); + (* Look up the relevant file_name in the checksum_table *) + let original_file_name = String.sub file_name 0 (String.length file_name - (String.length checksum_extension)) in + let csum' = List.assoc original_file_name !checksum_table in + if csum <> csum' then begin + error "File %s checksum mismatch (%s <> %s)" original_file_name csum csum'; + raise (Invalid_checksum (Printf.sprintf "Block %s checksum failed: original = %s; recomputed = %s" original_file_name csum csum')); + end + with e -> + error "Error validating checksums on import: %s" (ExnHelper.string_of_exn e); + raise e (** Receive a set of VDIs split into chunks in a tar format in a defined order *) -let recv_all refresh_session ifd (__context:Context.t) rpc session_id vsn force prefix_vdis = +let recv_all refresh_session ifd (__context:Context.t) rpc session_id vsn force prefix_vdis = TaskHelper.set_cancellable ~__context; let progress = new_progress_record __context prefix_vdis in @@ -207,101 +207,101 @@ let recv_all refresh_session ifd (__context:Context.t) rpc session_id vsn force let firstchunklength = ref (-1) in let zerochunkstring = ref "" in - + let recv_one ifd (__context:Context.t) (prefix, vdi_ref, size) = let vdi_skip_zeros = not (Sm_fs_ops.must_write_zeroes_into_new_vdi ~__context vdi_ref) in (* If this is true, we skip writing zeros. 
Only for sparse files (vhd only atm) *) debug "begun import of VDI%s preserving sparseness" (if vdi_skip_zeros then "" else " NOT"); - + with_open_vdi __context rpc session_id vdi_ref `RW [Unix.O_WRONLY] 0o644 (fun ofd -> - let reusable_buffer = String.make (Int64.to_int chunk_size) '\000' in - - let rec stream_from (last_suffix: string) (offset: int64) = - refresh_session (); - - let remaining = Int64.sub size offset in - if remaining > 0L - then begin - let hdr = Tar_unix.Header.get_next_header ifd in - let file_name = hdr.Tar_unix.Header.file_name in - let length = hdr.Tar_unix.Header.file_size in - - (* First chunk will always be there *) - if !firstchunklength < 0 - then - begin - firstchunklength := (Int64.to_int length); - zerochunkstring := String.make !firstchunklength '\000' - end; - - if not(String.startswith prefix file_name) then begin - error "Expected VDI chunk prefixed %s; got %s" prefix file_name; - raise (Failure "Invalid XVA file"); - end; - - (* add one to strip off the '/' from the filename *) - let suffix = String.sub file_name (1 + String.length prefix) (String.length file_name - (String.length prefix) - 1) in - - if suffix <= last_suffix then begin - error "Expected VDI chunk suffix to have increased under lexicograpic ordering; last = %s; this = %s" last_suffix suffix; - raise (Failure "Invalid XVA file") - end; - - (* Here we find the number of skipped blocks *) - debug "suffix=%s last_suffix=%s" suffix last_suffix; - let num_zero_blocks = (int_of_string suffix) - (int_of_string last_suffix) - 1 in - let skipped_size = Int64.mul (Int64.of_int !firstchunklength) (Int64.of_int num_zero_blocks) in - if (num_zero_blocks > 0) then - begin - if vdi_skip_zeros then - (* If we're skipping zeros, seek to the correct place *) - ignore(Unix.LargeFile.lseek ofd skipped_size Unix.SEEK_CUR) - else - (* Write some blocks of zeros *) - for i=1 to num_zero_blocks do - ignore(Unix.write ofd !zerochunkstring 0 (!firstchunklength)) - done - end; - - let 
buffer = if length = chunk_size - then reusable_buffer - else String.make (Int64.to_int length) '\000' - in - Unixext.really_read ifd buffer 0 (Int64.to_int length); - Tar_unix.Archive.multicast_n_string buffer [ ofd ] (Int64.to_int length); - let csum = Sha1.to_hex (Sha1.string buffer) in - - checksum_table := (file_name, csum) :: !checksum_table; - - Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); - made_progress __context progress (Int64.add skipped_size length); - - - if vsn.Importexport.export_vsn > 0 then - begin - try - verify_inline_checksum ifd checksum_table; - with - | Invalid_checksum s as e -> - if not(force) then raise e - end; - - stream_from suffix (Int64.add skipped_size (Int64.add offset length)) - end in - stream_from "-1" 0L; - Unixext.fsync ofd) in + let reusable_buffer = String.make (Int64.to_int chunk_size) '\000' in + + let rec stream_from (last_suffix: string) (offset: int64) = + refresh_session (); + + let remaining = Int64.sub size offset in + if remaining > 0L + then begin + let hdr = Tar_unix.Header.get_next_header ifd in + let file_name = hdr.Tar_unix.Header.file_name in + let length = hdr.Tar_unix.Header.file_size in + + (* First chunk will always be there *) + if !firstchunklength < 0 + then + begin + firstchunklength := (Int64.to_int length); + zerochunkstring := String.make !firstchunklength '\000' + end; + + if not(String.startswith prefix file_name) then begin + error "Expected VDI chunk prefixed %s; got %s" prefix file_name; + raise (Failure "Invalid XVA file"); + end; + + (* add one to strip off the '/' from the filename *) + let suffix = String.sub file_name (1 + String.length prefix) (String.length file_name - (String.length prefix) - 1) in + + if suffix <= last_suffix then begin + error "Expected VDI chunk suffix to have increased under lexicograpic ordering; last = %s; this = %s" last_suffix suffix; + raise (Failure "Invalid XVA file") + end; + + (* Here we find the number of skipped blocks *) + 
debug "suffix=%s last_suffix=%s" suffix last_suffix; + let num_zero_blocks = (int_of_string suffix) - (int_of_string last_suffix) - 1 in + let skipped_size = Int64.mul (Int64.of_int !firstchunklength) (Int64.of_int num_zero_blocks) in + if (num_zero_blocks > 0) then + begin + if vdi_skip_zeros then + (* If we're skipping zeros, seek to the correct place *) + ignore(Unix.LargeFile.lseek ofd skipped_size Unix.SEEK_CUR) + else + (* Write some blocks of zeros *) + for i=1 to num_zero_blocks do + ignore(Unix.write ofd !zerochunkstring 0 (!firstchunklength)) + done + end; + + let buffer = if length = chunk_size + then reusable_buffer + else String.make (Int64.to_int length) '\000' + in + Unixext.really_read ifd buffer 0 (Int64.to_int length); + Tar_unix.Archive.multicast_n_string buffer [ ofd ] (Int64.to_int length); + let csum = Sha1.to_hex (Sha1.string buffer) in + + checksum_table := (file_name, csum) :: !checksum_table; + + Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); + made_progress __context progress (Int64.add skipped_size length); + + + if vsn.Importexport.export_vsn > 0 then + begin + try + verify_inline_checksum ifd checksum_table; + with + | Invalid_checksum s as e -> + if not(force) then raise e + end; + + stream_from suffix (Int64.add skipped_size (Int64.add offset length)) + end in + stream_from "-1" 0L; + Unixext.fsync ofd) in begin try - for_each_vdi __context (recv_one ifd __context) prefix_vdis; - with Unix.Unix_error(Unix.EIO, _, _) -> - raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"])) + for_each_vdi __context (recv_one ifd __context) prefix_vdis; + with Unix.Unix_error(Unix.EIO, _, _) -> + raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"])) end; !checksum_table (** Receive a set of VDIs split into chunks in a tar format created out of a Zurich/Geneva exported VM. 
Each chunk has been independently compressed.*) -let recv_all_zurich refresh_session ifd (__context:Context.t) rpc session_id prefix_vdis = +let recv_all_zurich refresh_session ifd (__context:Context.t) rpc session_id prefix_vdis = TaskHelper.set_cancellable ~__context; TaskHelper.set_description ~__context "Importing Virtual Machine"; @@ -311,46 +311,46 @@ let recv_all_zurich refresh_session ifd (__context:Context.t) rpc session_id pre let hdr = ref None in let next () = hdr := (try Some(Tar_unix.Header.get_next_header ifd) with Tar_unix.Header.End_of_stream -> None | e -> raise e) in next(); - + let recv_one ifd (__context:Context.t) (prefix, vdi_ref, size) = (* Open this VDI and stream in all the blocks. Return when hdr represents a chunk which is not part of this VDI or the end of stream is reached. *) with_open_vdi __context rpc session_id vdi_ref `RW [Unix.O_WRONLY] 0o644 (fun ofd -> - let rec stream_from (last_suffix: string) = match !hdr with - | Some hdr -> - refresh_session (); - - let file_name = hdr.Tar_unix.Header.file_name in - let length = hdr.Tar_unix.Header.file_size in - if String.startswith prefix file_name then begin - let suffix = String.sub file_name (String.length prefix) (String.length file_name - (String.length prefix)) in - if suffix <= last_suffix then begin - error "Expected VDI chunk suffix to have increased under lexicograpic ordering; last = %s; this = %s" last_suffix suffix; - raise (Failure "Invalid XVA file") - end; - debug "Decompressing %Ld bytes from %s\n" length file_name; - Gzip.decompress ofd (fun zcat_in -> Tar_unix.Archive.copy_n ifd zcat_in length); - Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); - (* XXX: this is totally wrong: *) - made_progress __context progress length; - next (); - stream_from suffix - end - | None -> - (* Since we don't count uncompressed bytes we aren't sure if we've - really finished unfortunately. We can at least check to see if we - were cancelled... 
*) - TaskHelper.exn_if_cancelling ~__context; - () in - stream_from ""; - Unixext.fsync ofd) in + let rec stream_from (last_suffix: string) = match !hdr with + | Some hdr -> + refresh_session (); + + let file_name = hdr.Tar_unix.Header.file_name in + let length = hdr.Tar_unix.Header.file_size in + if String.startswith prefix file_name then begin + let suffix = String.sub file_name (String.length prefix) (String.length file_name - (String.length prefix)) in + if suffix <= last_suffix then begin + error "Expected VDI chunk suffix to have increased under lexicograpic ordering; last = %s; this = %s" last_suffix suffix; + raise (Failure "Invalid XVA file") + end; + debug "Decompressing %Ld bytes from %s\n" length file_name; + Gzip.decompress ofd (fun zcat_in -> Tar_unix.Archive.copy_n ifd zcat_in length); + Tar_unix.Archive.skip ifd (Tar_unix.Header.compute_zero_padding_length hdr); + (* XXX: this is totally wrong: *) + made_progress __context progress length; + next (); + stream_from suffix + end + | None -> + (* Since we don't count uncompressed bytes we aren't sure if we've + really finished unfortunately. We can at least check to see if we + were cancelled... 
*) + TaskHelper.exn_if_cancelling ~__context; + () in + stream_from ""; + Unixext.fsync ofd) in begin try - for_each_vdi __context (recv_one ifd __context) prefix_vdis; - with Unix.Unix_error(Unix.EIO, _, _) -> - raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"])) + for_each_vdi __context (recv_one ifd __context) prefix_vdis; + with Unix.Unix_error(Unix.EIO, _, _) -> + raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O error"])) end; if !hdr <> None then begin - error "Failed to import XVA; some chunks were not processed."; - raise (Failure "Some XVA data not processed") - end + error "Failed to import XVA; some chunks were not processed."; + raise (Failure "Some XVA data not processed") + end diff --git a/ocaml/xapi/sync_networking.ml b/ocaml/xapi/sync_networking.ml index a1cd9371d57..80e9e0f0548 100644 --- a/ocaml/xapi/sync_networking.ml +++ b/ocaml/xapi/sync_networking.ml @@ -24,140 +24,140 @@ open D (* This, and the associated startup item in xapi.ml, can be removed as soon as upgrades from anything * pre-Boston are no longer supported. 
*) let fix_bonds ~__context () = - let me = !Xapi_globs.localhost_ref in - let my_slave_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of me)), - Not (Eq (Field "bond_slave_of", Literal (Ref.string_of Ref.null))) - )) in - (* Fix incorrect PIF.bond_slave_of fields *) - List.iter (fun (rf, rc) -> - if not (Db.is_valid_ref __context rc.API.pIF_bond_slave_of) then - Db.PIF.set_bond_slave_of ~__context ~self:rf ~value:Ref.null - ) my_slave_pifs; + let me = !Xapi_globs.localhost_ref in + let my_slave_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of me)), + Not (Eq (Field "bond_slave_of", Literal (Ref.string_of Ref.null))) + )) in + (* Fix incorrect PIF.bond_slave_of fields *) + List.iter (fun (rf, rc) -> + if not (Db.is_valid_ref __context rc.API.pIF_bond_slave_of) then + Db.PIF.set_bond_slave_of ~__context ~self:rf ~value:Ref.null + ) my_slave_pifs; - let my_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of me)), - Not (Eq (Field "bond_master_of", Literal "()")) - )) in - let my_bonds = List.map (fun (_, pif) -> List.hd pif.API.pIF_bond_master_of) my_bond_pifs in - List.iter (fun bond -> Xapi_bond.fix_bond ~__context ~bond) my_bonds + let my_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of me)), + Not (Eq (Field "bond_master_of", Literal "()")) + )) in + let my_bonds = List.map (fun (_, pif) -> List.hd pif.API.pIF_bond_master_of) my_bond_pifs in + List.iter (fun bond -> Xapi_bond.fix_bond ~__context ~bond) my_bonds (** Copy Bonds from master *) let copy_bonds_from_master ~__context () = - (* if slave: then inherit network config (bonds and vlans) from master (if we don't already have them) *) - let me = !Xapi_globs.localhost_ref in - let master = Helpers.get_master ~__context in + (* if slave: then inherit network config (bonds and vlans) from master (if we don't 
already have them) *) + let me = !Xapi_globs.localhost_ref in + let master = Helpers.get_master ~__context in - let master_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of master)), - Not (Eq (Field "bond_master_of", Literal "()")) - )) in - let master_bonds = List.map (fun (_, pif) -> - Db.Bond.get_record ~__context ~self:(List.hd pif.API.pIF_bond_master_of)) master_bond_pifs in + let master_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of master)), + Not (Eq (Field "bond_master_of", Literal "()")) + )) in + let master_bonds = List.map (fun (_, pif) -> + Db.Bond.get_record ~__context ~self:(List.hd pif.API.pIF_bond_master_of)) master_bond_pifs in - let my_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of me)), - Not (Eq (Field "bond_master_of", Literal "()")) - )) in - let my_phy_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of me)), - Eq (Field "physical", Literal "true") - )) in + let my_bond_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of me)), + Not (Eq (Field "bond_master_of", Literal "()")) + )) in + let my_phy_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of me)), + Eq (Field "physical", Literal "true") + )) in - (* Consider Bonds *) - debug "Resynchronising bonds"; - let maybe_create_bond_for_me bond = - let network = Db.PIF.get_network ~__context ~self:bond.API.bond_master in - let slaves_to_mac_and_device_map = - List.map (fun self -> self, Db.PIF.get_MAC ~__context ~self, Db.PIF.get_device ~__context ~self) - bond.API.bond_slaves in - (* Take the MAC addr of the bond and figure out whether this is the MAC address of any of the - * slaves. 
If it is then we will use this to ensure that we inherit the MAC address from the _same_ - * slave when we re-create on the slave *) - let master_bond_mac = Db.PIF.get_MAC ~__context ~self:bond.API.bond_master in - (* The bond mode used on the master. We will use the same mode on the slave, when creating a new bond. *) - let bond_mode = bond.API.bond_mode in - let bond_properties = bond.API.bond_properties in - let master_slaves_with_same_mac_as_bond (* expecting a list of at most 1 here *) = - List.filter (fun (pifref,mac,device) -> mac=master_bond_mac) slaves_to_mac_and_device_map in - (* This tells us the device that the master used to inherit the bond's MAC address - * (if indeed that is what it did; we set it to None if we think it didn't do this) *) - let device_of_primary_slave = - match master_slaves_with_same_mac_as_bond with - | [] -> None - | [_,_,device] -> - debug "Master bond has MAC address derived from %s" device; - (* found single slave with mac matching bond master => - * this was one that we inherited mac from *) - Some device - | _ -> None - in - (* Look at the master's slaves and find the corresponding slave PIFs. Note that the slave - * might not have the necessary devices: in this case we'll try to make partial bonds *) - let slave_devices = List.map (fun (_,_,device)->device) slaves_to_mac_and_device_map in - let my_slave_pifs = List.filter (fun (_, pif) -> List.mem pif.API.pIF_device slave_devices) my_phy_pifs in - let my_slave_pif_refs = List.map fst my_slave_pifs in - (* Do I have a pif that I should treat as a primary pif - - * i.e. the one to inherit the MAC address from on my bond create? 
*) - let my_primary_slave = - match device_of_primary_slave with - | None -> None (* don't care cos we couldn't even figure out who master's primary slave was *) - | Some master_primary -> - begin - match List.filter (fun (_,pif) -> pif.API.pIF_device=master_primary) my_slave_pifs with - | [] -> None - | [pifref,_] -> - debug "I have found a PIF to use as primary bond slave (will inherit MAC address of bond from this PIF)."; - Some pifref (* this is my pif corresponding to the master's primary slave *) - | _ -> None - end - in - (* If I do have a pif that I need to treat as my primary slave then I need to put it - * first in the list so the bond master will inherit it's MAC address *) - let my_slave_pif_refs = - match my_primary_slave with - | None -> my_slave_pif_refs (* no change *) - | Some primary_pif -> primary_pif :: (List.filter (fun x-> x<>primary_pif) my_slave_pif_refs) (* remove primary pif ref and stick it on the front *) - in - match List.filter (fun (_, pif) -> pif.API.pIF_network = network) my_bond_pifs, my_slave_pifs with - | [], [] -> - (* No bond currently exists but neither do any slave interfaces -> do nothing *) - warn "Cannot create bond %s at all: no PIFs exist on slave" bond.API.bond_uuid - | [], _ -> - (* No bond currently exists but some slave interfaces do -> create a (partial?) bond *) - (* CA-56957: changed the following from Client.Bond.... to Xapi_bond.... 
*) - let (_: API.ref_Bond) = Xapi_bond.create ~__context ~network ~members:my_slave_pif_refs ~mAC:"" ~mode:bond_mode ~properties:bond_properties in () - | [ _, { API.pIF_bond_master_of = [ slave_bond ] } ], _ -> - (* Some bond exists, check whether the existing set of slaves is the same as the potential set *) - let current_slave_pifs = Db.Bond.get_slaves ~__context ~self:slave_bond in - if not (List.set_equiv (List.setify current_slave_pifs) (List.setify my_slave_pif_refs)) then - begin - debug "Partial bond exists; recreating"; - (* CA-56957: changed the following from Client.Bond.... to Xapi_bond.... *) - Xapi_bond.destroy ~__context ~self:slave_bond; - let (_: API.ref_Bond) = Xapi_bond.create ~__context ~network ~members:my_slave_pif_refs ~mAC:"" ~mode:bond_mode ~properties:bond_properties in () - end - | [ _, { API.pIF_uuid = uuid } ], _ -> - warn "Couldn't create bond on slave because PIF %s already on network %s" - uuid (Db.Network.get_uuid ~__context ~self:network) - | _ -> warn "Unexpected bond configuration" - in - List.iter (Helpers.log_exn_continue "resynchronising bonds on slave" maybe_create_bond_for_me) master_bonds + (* Consider Bonds *) + debug "Resynchronising bonds"; + let maybe_create_bond_for_me bond = + let network = Db.PIF.get_network ~__context ~self:bond.API.bond_master in + let slaves_to_mac_and_device_map = + List.map (fun self -> self, Db.PIF.get_MAC ~__context ~self, Db.PIF.get_device ~__context ~self) + bond.API.bond_slaves in + (* Take the MAC addr of the bond and figure out whether this is the MAC address of any of the + * slaves. If it is then we will use this to ensure that we inherit the MAC address from the _same_ + * slave when we re-create on the slave *) + let master_bond_mac = Db.PIF.get_MAC ~__context ~self:bond.API.bond_master in + (* The bond mode used on the master. We will use the same mode on the slave, when creating a new bond. 
*) + let bond_mode = bond.API.bond_mode in + let bond_properties = bond.API.bond_properties in + let master_slaves_with_same_mac_as_bond (* expecting a list of at most 1 here *) = + List.filter (fun (pifref,mac,device) -> mac=master_bond_mac) slaves_to_mac_and_device_map in + (* This tells us the device that the master used to inherit the bond's MAC address + * (if indeed that is what it did; we set it to None if we think it didn't do this) *) + let device_of_primary_slave = + match master_slaves_with_same_mac_as_bond with + | [] -> None + | [_,_,device] -> + debug "Master bond has MAC address derived from %s" device; + (* found single slave with mac matching bond master => + * this was one that we inherited mac from *) + Some device + | _ -> None + in + (* Look at the master's slaves and find the corresponding slave PIFs. Note that the slave + * might not have the necessary devices: in this case we'll try to make partial bonds *) + let slave_devices = List.map (fun (_,_,device)->device) slaves_to_mac_and_device_map in + let my_slave_pifs = List.filter (fun (_, pif) -> List.mem pif.API.pIF_device slave_devices) my_phy_pifs in + let my_slave_pif_refs = List.map fst my_slave_pifs in + (* Do I have a pif that I should treat as a primary pif - + * i.e. the one to inherit the MAC address from on my bond create? 
*) + let my_primary_slave = + match device_of_primary_slave with + | None -> None (* don't care cos we couldn't even figure out who master's primary slave was *) + | Some master_primary -> + begin + match List.filter (fun (_,pif) -> pif.API.pIF_device=master_primary) my_slave_pifs with + | [] -> None + | [pifref,_] -> + debug "I have found a PIF to use as primary bond slave (will inherit MAC address of bond from this PIF)."; + Some pifref (* this is my pif corresponding to the master's primary slave *) + | _ -> None + end + in + (* If I do have a pif that I need to treat as my primary slave then I need to put it + * first in the list so the bond master will inherit it's MAC address *) + let my_slave_pif_refs = + match my_primary_slave with + | None -> my_slave_pif_refs (* no change *) + | Some primary_pif -> primary_pif :: (List.filter (fun x-> x<>primary_pif) my_slave_pif_refs) (* remove primary pif ref and stick it on the front *) + in + match List.filter (fun (_, pif) -> pif.API.pIF_network = network) my_bond_pifs, my_slave_pifs with + | [], [] -> + (* No bond currently exists but neither do any slave interfaces -> do nothing *) + warn "Cannot create bond %s at all: no PIFs exist on slave" bond.API.bond_uuid + | [], _ -> + (* No bond currently exists but some slave interfaces do -> create a (partial?) bond *) + (* CA-56957: changed the following from Client.Bond.... to Xapi_bond.... 
*) + let (_: API.ref_Bond) = Xapi_bond.create ~__context ~network ~members:my_slave_pif_refs ~mAC:"" ~mode:bond_mode ~properties:bond_properties in () + | [ _, { API.pIF_bond_master_of = [ slave_bond ] } ], _ -> + (* Some bond exists, check whether the existing set of slaves is the same as the potential set *) + let current_slave_pifs = Db.Bond.get_slaves ~__context ~self:slave_bond in + if not (List.set_equiv (List.setify current_slave_pifs) (List.setify my_slave_pif_refs)) then + begin + debug "Partial bond exists; recreating"; + (* CA-56957: changed the following from Client.Bond.... to Xapi_bond.... *) + Xapi_bond.destroy ~__context ~self:slave_bond; + let (_: API.ref_Bond) = Xapi_bond.create ~__context ~network ~members:my_slave_pif_refs ~mAC:"" ~mode:bond_mode ~properties:bond_properties in () + end + | [ _, { API.pIF_uuid = uuid } ], _ -> + warn "Couldn't create bond on slave because PIF %s already on network %s" + uuid (Db.Network.get_uuid ~__context ~self:network) + | _ -> warn "Unexpected bond configuration" + in + List.iter (Helpers.log_exn_continue "resynchronising bonds on slave" maybe_create_bond_for_me) master_bonds (** Copy VLANs from master *) (* This is now executed fully on the master, once asked by the slave when the slave's Xapi starts up *) let copy_vlans_from_master ~__context () = - let host = !Xapi_globs.localhost_ref in - let oc = Db.Host.get_other_config ~__context ~self:host in - if not (List.mem_assoc Xapi_globs.sync_vlans oc && - List.assoc Xapi_globs.sync_vlans oc = Xapi_globs.sync_switch_off) then begin - debug "Resynchronising VLANs"; - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Host.sync_vlans ~rpc ~session_id ~host) - end + let host = !Xapi_globs.localhost_ref in + let oc = Db.Host.get_other_config ~__context ~self:host in + if not (List.mem_assoc Xapi_globs.sync_vlans oc && + List.assoc Xapi_globs.sync_vlans oc = Xapi_globs.sync_switch_off) then begin + debug "Resynchronising VLANs"; + 
Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Host.sync_vlans ~rpc ~session_id ~host) + end (** Copy tunnels from master *) let copy_tunnels_from_master ~__context () = - debug "Resynchronising tunnels"; - let host = !Xapi_globs.localhost_ref in - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Host.sync_tunnels ~rpc ~session_id ~host) + debug "Resynchronising tunnels"; + let host = !Xapi_globs.localhost_ref in + Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Host.sync_tunnels ~rpc ~session_id ~host) diff --git a/ocaml/xapi/sync_networking.mli b/ocaml/xapi/sync_networking.mli index 28f42cf9cbc..27b3e8a38c5 100644 --- a/ocaml/xapi/sync_networking.mli +++ b/ocaml/xapi/sync_networking.mli @@ -13,7 +13,7 @@ *) (** * @group Main Loop and Start-up - *) +*) val fix_bonds : __context:Context.t -> unit -> unit val copy_bonds_from_master : __context:Context.t -> unit -> unit diff --git a/ocaml/xapi/system_domains.ml b/ocaml/xapi/system_domains.ml index cdc36d6f46f..879bcfebcbf 100644 --- a/ocaml/xapi/system_domains.ml +++ b/ocaml/xapi/system_domains.ml @@ -13,7 +13,7 @@ *) (** * @group Helper functions for handling system domains - *) +*) open Stdext.Threadext @@ -27,12 +27,12 @@ let system_domain_key = "is_system_domain" let bool_of_string x = try bool_of_string x with _ -> false let is_system_domain snapshot = - snapshot.API.vM_is_control_domain - || (let oc = snapshot.API.vM_other_config in - List.mem_assoc system_domain_key oc && (bool_of_string (List.assoc system_domain_key oc))) + snapshot.API.vM_is_control_domain + || (let oc = snapshot.API.vM_other_config in + List.mem_assoc system_domain_key oc && (bool_of_string (List.assoc system_domain_key oc))) let get_is_system_domain ~__context ~self = - is_system_domain (Db.VM.get_record ~__context ~self) + is_system_domain (Db.VM.get_record ~__context ~self) (* Notes on other_config keys: in the future these should become first-class fields. 
For now note that although two threads may attempt to update these keys in parallel, @@ -40,159 +40,159 @@ let get_is_system_domain ~__context ~self = It's therefore safe to throw away exceptions. *) let set_is_system_domain ~__context ~self ~value = - Helpers.log_exn_continue (Printf.sprintf "set_is_system_domain self = %s" (Ref.string_of self)) - (fun () -> - Db.VM.remove_from_other_config ~__context ~self ~key:system_domain_key; - Db.VM.add_to_other_config ~__context ~self ~key:system_domain_key ~value - ) () + Helpers.log_exn_continue (Printf.sprintf "set_is_system_domain self = %s" (Ref.string_of self)) + (fun () -> + Db.VM.remove_from_other_config ~__context ~self ~key:system_domain_key; + Db.VM.add_to_other_config ~__context ~self ~key:system_domain_key ~value + ) () (** If a VM is a driver domain then it hosts backends for either disk or network - devices. We link PBD.other_config:storage_driver_domain_key to + devices. We link PBD.other_config:storage_driver_domain_key to VM.other_config:storage_driver_domain_key and we ensure the VM is marked as a system domain. 
*) let storage_driver_domain_key = "storage_driver_domain" let pbd_set_storage_driver_domain ~__context ~self ~value = - Helpers.log_exn_continue (Printf.sprintf "pbd_set_storage_driver_domain self = %s" (Ref.string_of self)) - (fun () -> - Db.PBD.remove_from_other_config ~__context ~self ~key:storage_driver_domain_key; - Db.PBD.add_to_other_config ~__context ~self ~key:storage_driver_domain_key ~value - ) () + Helpers.log_exn_continue (Printf.sprintf "pbd_set_storage_driver_domain self = %s" (Ref.string_of self)) + (fun () -> + Db.PBD.remove_from_other_config ~__context ~self ~key:storage_driver_domain_key; + Db.PBD.add_to_other_config ~__context ~self ~key:storage_driver_domain_key ~value + ) () let vm_set_storage_driver_domain ~__context ~self ~value = - Helpers.log_exn_continue (Printf.sprintf "vm_set_storage_driver_domain self = %s" (Ref.string_of self)) - (fun () -> - Db.VM.remove_from_other_config ~__context ~self ~key:storage_driver_domain_key; - Db.VM.add_to_other_config ~__context ~self ~key:storage_driver_domain_key ~value - ) () + Helpers.log_exn_continue (Printf.sprintf "vm_set_storage_driver_domain self = %s" (Ref.string_of self)) + (fun () -> + Db.VM.remove_from_other_config ~__context ~self ~key:storage_driver_domain_key; + Db.VM.add_to_other_config ~__context ~self ~key:storage_driver_domain_key ~value + ) () let record_pbd_storage_driver_domain ~__context ~pbd ~domain = - set_is_system_domain ~__context ~self:domain ~value:"true"; - pbd_set_storage_driver_domain ~__context ~self:pbd ~value:(Ref.string_of domain); - vm_set_storage_driver_domain ~__context ~self:domain ~value:(Ref.string_of pbd) + set_is_system_domain ~__context ~self:domain ~value:"true"; + pbd_set_storage_driver_domain ~__context ~self:pbd ~value:(Ref.string_of domain); + vm_set_storage_driver_domain ~__context ~self:domain ~value:(Ref.string_of pbd) let pbd_of_vm ~__context ~vm = - let other_config = Db.VM.get_other_config ~__context ~self:vm in - if List.mem_assoc 
storage_driver_domain_key other_config - then Some(Ref.of_string (List.assoc storage_driver_domain_key other_config)) - else None + let other_config = Db.VM.get_other_config ~__context ~self:vm in + if List.mem_assoc storage_driver_domain_key other_config + then Some(Ref.of_string (List.assoc storage_driver_domain_key other_config)) + else None let storage_driver_domain_of_pbd ~__context ~pbd = - let other_config = Db.PBD.get_other_config ~__context ~self:pbd in - let dom0 = Helpers.get_domain_zero ~__context in - if List.mem_assoc storage_driver_domain_key other_config then begin - let v = List.assoc storage_driver_domain_key other_config in - if Db.is_valid_ref __context (Ref.of_string v) - then Ref.of_string v - else - try - Db.VM.get_by_uuid ~__context ~uuid:v - with _ -> - error "PBD %s has invalid %s key: falling back to dom0" (Ref.string_of pbd) storage_driver_domain_key; - dom0 - end else dom0 + let other_config = Db.PBD.get_other_config ~__context ~self:pbd in + let dom0 = Helpers.get_domain_zero ~__context in + if List.mem_assoc storage_driver_domain_key other_config then begin + let v = List.assoc storage_driver_domain_key other_config in + if Db.is_valid_ref __context (Ref.of_string v) + then Ref.of_string v + else + try + Db.VM.get_by_uuid ~__context ~uuid:v + with _ -> + error "PBD %s has invalid %s key: falling back to dom0" (Ref.string_of pbd) storage_driver_domain_key; + dom0 + end else dom0 let storage_driver_domain_of_pbd ~__context ~pbd = - let domain = storage_driver_domain_of_pbd ~__context ~pbd in - set_is_system_domain ~__context ~self:domain ~value:"true"; - pbd_set_storage_driver_domain ~__context ~self:pbd ~value:(Ref.string_of domain); - vm_set_storage_driver_domain ~__context ~self:domain ~value:(Ref.string_of pbd); - domain + let domain = storage_driver_domain_of_pbd ~__context ~pbd in + set_is_system_domain ~__context ~self:domain ~value:"true"; + pbd_set_storage_driver_domain ~__context ~self:pbd ~value:(Ref.string_of domain); + 
vm_set_storage_driver_domain ~__context ~self:domain ~value:(Ref.string_of pbd); + domain let storage_driver_domain_of_vbd ~__context ~vbd = - let dom0 = Helpers.get_domain_zero ~__context in - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - if Db.is_valid_ref __context vdi - then - let sr = Db.VDI.get_SR ~__context ~self:vdi in - let sr_pbds = Db.SR.get_PBDs ~__context ~self:sr in - let my_pbds = List.map fst (Helpers.get_my_pbds __context) in - match Stdext.Listext.List.intersect sr_pbds my_pbds with - | pbd :: _ -> - storage_driver_domain_of_pbd ~__context ~pbd - | _ -> - dom0 - else dom0 + let dom0 = Helpers.get_domain_zero ~__context in + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + if Db.is_valid_ref __context vdi + then + let sr = Db.VDI.get_SR ~__context ~self:vdi in + let sr_pbds = Db.SR.get_PBDs ~__context ~self:sr in + let my_pbds = List.map fst (Helpers.get_my_pbds __context) in + match Stdext.Listext.List.intersect sr_pbds my_pbds with + | pbd :: _ -> + storage_driver_domain_of_pbd ~__context ~pbd + | _ -> + dom0 + else dom0 let storage_driver_domain_of_sr_type ~__context ~_type = - let dom0 = Helpers.get_domain_zero ~__context in - dom0 + let dom0 = Helpers.get_domain_zero ~__context in + dom0 let is_in_use ~__context ~self = - let other_config = Db.VM.get_other_config ~__context ~self in - List.mem_assoc storage_driver_domain_key other_config - && ( - let pbd = Ref.of_string (List.assoc storage_driver_domain_key other_config) in - if Db.is_valid_ref __context pbd - then Db.PBD.get_currently_attached ~__context ~self:pbd - else false - ) + let other_config = Db.VM.get_other_config ~__context ~self in + List.mem_assoc storage_driver_domain_key other_config + && ( + let pbd = Ref.of_string (List.assoc storage_driver_domain_key other_config) in + if Db.is_valid_ref __context pbd + then Db.PBD.get_currently_attached ~__context ~self:pbd + else false + ) (* [wait_for ?timeout f] returns true if [f()] (called at 1Hz) returns true within the 
[timeout] period and false otherwise *) let wait_for ?(timeout=120.) f = - let start = Unix.gettimeofday () in - let finished = ref false in - let success = ref false in - while not(!finished) do - let remaining = timeout -. (Unix.gettimeofday () -. start) in - if remaining < 0. - then finished := true - else - try - if f () then begin - success := true; - finished := true - end else Thread.delay 1. - with _ -> - Thread.delay 1. - done; - !success + let start = Unix.gettimeofday () in + let finished = ref false in + let success = ref false in + while not(!finished) do + let remaining = timeout -. (Unix.gettimeofday () -. start) in + if remaining < 0. + then finished := true + else + try + if f () then begin + success := true; + finished := true + end else Thread.delay 1. + with _ -> + Thread.delay 1. + done; + !success let pingable ip () = - try - let (_: string * string) = Forkhelpers.execute_command_get_output "/bin/ping" [ "-c"; "1"; "-w"; "1"; ip ] in - true - with _ -> false + try + let (_: string * string) = Forkhelpers.execute_command_get_output "/bin/ping" [ "-c"; "1"; "-w"; "1"; ip ] in + true + with _ -> false let queryable ~__context transport () = - let open Xmlrpc_client in - let rpc = XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"remote_smapiv2" ~transport ~http:(xmlrpc ~version:"1.0" "/") in - let listMethods = Rpc.call "system.listMethods" [] in - try - let _ = rpc listMethods in - info "XMLRPC service found at %s" (string_of_transport transport); - true - with e -> - debug "Temporary failure querying storage service on %s: %s" (string_of_transport transport) (Printexc.to_string e); - false + let open Xmlrpc_client in + let rpc = XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"remote_smapiv2" ~transport ~http:(xmlrpc ~version:"1.0" "/") in + let listMethods = Rpc.call "system.listMethods" [] in + try + let _ = rpc listMethods in + info "XMLRPC service found at %s" (string_of_transport transport); + true + with e -> + debug "Temporary failure querying 
storage service on %s: %s" (string_of_transport transport) (Printexc.to_string e); + false let ip_of ~__context driver = - (* Find the VIF on the Host internal management network *) - let vifs = Db.VM.get_VIFs ~__context ~self:driver in - let hin = Helpers.get_host_internal_management_network ~__context in - let ip = - let vif = - try - List.find (fun vif -> Db.VIF.get_network ~__context ~self:vif = hin) vifs - with Not_found -> failwith (Printf.sprintf "driver domain %s has no VIF on host internal management network" (Ref.string_of driver)) in - match Xapi_udhcpd.get_ip ~__context vif with - | Some (a, b, c, d) -> Printf.sprintf "%d.%d.%d.%d" a b c d - | None -> failwith (Printf.sprintf "driver domain %s has no IP on the host internal management network" (Ref.string_of driver)) in - - info "driver domain uuid:%s ip:%s" (Db.VM.get_uuid ~__context ~self:driver) ip; - if not(wait_for (pingable ip)) - then failwith (Printf.sprintf "driver domain %s is not responding to IP ping" (Ref.string_of driver)); - if not(wait_for (queryable ~__context (Xmlrpc_client.TCP(ip, 80)))) - then failwith (Printf.sprintf "driver domain %s is not responding to XMLRPC query" (Ref.string_of driver)); - ip + (* Find the VIF on the Host internal management network *) + let vifs = Db.VM.get_VIFs ~__context ~self:driver in + let hin = Helpers.get_host_internal_management_network ~__context in + let ip = + let vif = + try + List.find (fun vif -> Db.VIF.get_network ~__context ~self:vif = hin) vifs + with Not_found -> failwith (Printf.sprintf "driver domain %s has no VIF on host internal management network" (Ref.string_of driver)) in + match Xapi_udhcpd.get_ip ~__context vif with + | Some (a, b, c, d) -> Printf.sprintf "%d.%d.%d.%d" a b c d + | None -> failwith (Printf.sprintf "driver domain %s has no IP on the host internal management network" (Ref.string_of driver)) in + + info "driver domain uuid:%s ip:%s" (Db.VM.get_uuid ~__context ~self:driver) ip; + if not(wait_for (pingable ip)) + then 
failwith (Printf.sprintf "driver domain %s is not responding to IP ping" (Ref.string_of driver)); + if not(wait_for (queryable ~__context (Xmlrpc_client.TCP(ip, 80)))) + then failwith (Printf.sprintf "driver domain %s is not responding to XMLRPC query" (Ref.string_of driver)); + ip type service = { - uuid: string; - ty: string; - instance: string; - url: string; + uuid: string; + ty: string; + instance: string; + url: string; } with rpc type services = service list with rpc @@ -201,24 +201,24 @@ let service_to_queue = Hashtbl.create 10 let service_to_queue_m = Mutex.create () let register_service service queue = - Mutex.execute service_to_queue_m - (fun () -> - Hashtbl.replace service_to_queue service queue - ) + Mutex.execute service_to_queue_m + (fun () -> + Hashtbl.replace service_to_queue service queue + ) let unregister_service service = - Mutex.execute service_to_queue_m - (fun () -> - Hashtbl.remove service_to_queue service - ) + Mutex.execute service_to_queue_m + (fun () -> + Hashtbl.remove service_to_queue service + ) let get_service service = - Mutex.execute service_to_queue_m - (fun () -> - try Some(Hashtbl.find service_to_queue service) with Not_found -> None - ) + Mutex.execute service_to_queue_m + (fun () -> + try Some(Hashtbl.find service_to_queue service) with Not_found -> None + ) let list_services () = - Mutex.execute service_to_queue_m - (fun () -> - Hashtbl.fold (fun service _ acc -> service :: acc) service_to_queue [] - ) + Mutex.execute service_to_queue_m + (fun () -> + Hashtbl.fold (fun service _ acc -> service :: acc) service_to_queue [] + ) diff --git a/ocaml/xapi/system_domains.mli b/ocaml/xapi/system_domains.mli index ec6292101d6..ebe8b47b8b9 100644 --- a/ocaml/xapi/system_domains.mli +++ b/ocaml/xapi/system_domains.mli @@ -13,7 +13,7 @@ *) (** * @group Helper functions for handling system domains - *) +*) (** [is_system_domain vm] returns true if [vm] is a special system domain *) @@ -39,7 +39,7 @@ val record_pbd_storage_driver_domain: 
__context:Context.t -> pbd:API.ref_PBD -> val storage_driver_domain_of_sr_type: __context:Context.t -> _type:string -> API.ref_VM (** [pbd_of_vm __context vm] returns (Some pbd) if [vm] is a driver domain - for [pbd] and None otherwise. *) + for [pbd] and None otherwise. *) val pbd_of_vm: __context:Context.t -> vm:API.ref_VM -> API.ref_PBD option (** [is_in_use __context self] returns true if [self] is in use as a system domain *) @@ -53,10 +53,10 @@ val ip_of: __context:Context.t -> API.ref_VM -> string (** One of many service running in a driver domain *) type service = { - uuid: string; - ty: string; - instance: string; - url: string; + uuid: string; + ty: string; + instance: string; + url: string; } val rpc_of_service: service -> Rpc.t val service_of_rpc: Rpc.t -> service diff --git a/ocaml/xapi/system_status.ml b/ocaml/xapi/system_status.ml index b6324cf00f1..17a0a6431de 100644 --- a/ocaml/xapi/system_status.ml +++ b/ocaml/xapi/system_status.ml @@ -34,68 +34,68 @@ let get_capabilities () = for tar output. 
It should work on embedded edition *) let send_via_fd __context s entries output = let s_uuid = Uuid.to_string (Uuid.make_uuid ()) in - - let params = + + let params = [sprintf "--entries=%s" entries; "--silent"; "--yestoall"; sprintf "--output=%s" output; "--outfd="^s_uuid] in - let cmd = + let cmd = sprintf "%s %s" xen_bugtool (String.concat " " params) in debug "running %s" cmd; try - let headers = + let headers = Http.http_200_ok ~keep_alive:false ~version:"1.0" () @ - [ "Server: "^Xapi_globs.xapi_user_agent; - Http.Hdr.content_type ^": " ^ content_type; - "Content-Disposition: attachment; filename=\"system_status.tgz\""] + [ "Server: "^Xapi_globs.xapi_user_agent; + Http.Hdr.content_type ^": " ^ content_type; + "Content-Disposition: attachment; filename=\"system_status.tgz\""] in Http_svr.headers s headers; - + let result = with_logfile_fd "get-system-status" - (fun log_fd -> - let pid = - safe_close_and_exec None (Some log_fd) (Some log_fd) [(s_uuid,s)] xen_bugtool params - in - waitpid_fail_if_bad_exit pid - ) + (fun log_fd -> + let pid = + safe_close_and_exec None (Some log_fd) (Some log_fd) [(s_uuid,s)] xen_bugtool params + in + waitpid_fail_if_bad_exit pid + ) in match result with - | Success _ -> debug "xen-bugtool exited successfully" - - | Failure (log, exn) -> - debug "xen-bugtool failed with output: %s" log; - raise exn + | Success _ -> debug "xen-bugtool exited successfully" + + | Failure (log, exn) -> + debug "xen-bugtool failed with output: %s" log; + raise exn with e -> let msg = "xen-bugtool failed: " ^ (Printexc.to_string e) in error "%s" msg; raise (Api_errors.Server_error (Api_errors.system_status_retrieval_failed, [msg])) - -(* This fn outputs xen-bugtool into a file and then write the - file out to the socket, to deal with zipped bugtool outputs + +(* This fn outputs xen-bugtool into a file and then write the + file out to the socket, to deal with zipped bugtool outputs It will not work on embedded edition *) let send_via_cp __context s 
entries output = let cmd = sprintf "%s --entries=%s --silent --yestoall --output=%s" - xen_bugtool entries output + xen_bugtool entries output in let () = debug "running %s" cmd in - try - let filename = String.rtrim (Helpers.get_process_output cmd) in - finally - (fun () -> - debug "bugball path: %s" filename; - Http_svr.response_file ~mime_content_type:content_type s filename - ) - (fun () -> Helpers.log_exn_continue "deleting xen-bugtool output" Unix.unlink filename) - with e -> - let msg = "xen-bugtool failed: " ^ (ExnHelper.string_of_exn e) in - error "%s" msg; - raise (Api_errors.Server_error (Api_errors.system_status_retrieval_failed, [msg])) - + try + let filename = String.rtrim (Helpers.get_process_output cmd) in + finally + (fun () -> + debug "bugball path: %s" filename; + Http_svr.response_file ~mime_content_type:content_type s filename + ) + (fun () -> Helpers.log_exn_continue "deleting xen-bugtool output" Unix.unlink filename) + with e -> + let msg = "xen-bugtool failed: " ^ (ExnHelper.string_of_exn e) in + error "%s" msg; + raise (Api_errors.Server_error (Api_errors.system_status_retrieval_failed, [msg])) + let handler (req: Request.t) s _ = debug "In system status http handler..."; req.Request.close <- true; @@ -108,9 +108,9 @@ let handler (req: Request.t) s _ = let () = debug "session_id: %s" (get_param "session_id") in Xapi_http.with_context task_label req s (fun __context -> - if Helpers.on_oem __context && output <> "tar" - then raise (Api_errors.Server_error (Api_errors.system_status_must_use_tar_on_oem, [])) - else if output = "tar" - then send_via_fd __context s entries output - else send_via_cp __context s entries output + if Helpers.on_oem __context && output <> "tar" + then raise (Api_errors.Server_error (Api_errors.system_status_must_use_tar_on_oem, [])) + else if output = "tar" + then send_via_fd __context s entries output + else send_via_cp __context s entries output ) diff --git a/ocaml/xapi/task_server.ml b/ocaml/xapi/task_server.ml 
index efe22d4320d..e7e4557a4c3 100644 --- a/ocaml/xapi/task_server.ml +++ b/ocaml/xapi/task_server.ml @@ -13,7 +13,7 @@ *) (** * @group Xenops - *) +*) open Stdext open Threadext @@ -23,211 +23,211 @@ open Fun type stringpair = string * string module type INTERFACE = sig - val service_name : string + val service_name : string + + exception Does_not_exist of stringpair + exception Cancelled of string + + module Task : sig + type id = string - exception Does_not_exist of stringpair - exception Cancelled of string + type async_result - module Task : sig - type id = string + val rpc_of_async_result : async_result -> Rpc.t + val async_result_of_rpc : Rpc.t -> async_result - type async_result + type completion_t = { + duration : float; + result : async_result option + } - val rpc_of_async_result : async_result -> Rpc.t - val async_result_of_rpc : Rpc.t -> async_result + type state = + | Pending of float + | Completed of completion_t + | Failed of Rpc.t - type completion_t = { - duration : float; - result : async_result option - } - - type state = - | Pending of float - | Completed of completion_t - | Failed of Rpc.t + end - end + (* The following stuff comes from rpc-light.idl *) - (* The following stuff comes from rpc-light.idl *) + module Exception : sig + type exnty + val rpc_of_exnty : exnty -> Rpc.t + end - module Exception : sig - type exnty - val rpc_of_exnty : exnty -> Rpc.t - end + val exnty_of_exn : exn -> Exception.exnty + val exn_of_exnty : Exception.exnty -> exn - val exnty_of_exn : exn -> Exception.exnty - val exn_of_exnty : Exception.exnty -> exn + exception Internal_error of string - exception Internal_error of string - end -module Task = functor (Interface : INTERFACE) -> struct +module Task = functor (Interface : INTERFACE) -> struct -module D = Debug.Make(struct let name = Interface.service_name end) -open D + module D = Debug.Make(struct let name = Interface.service_name end) + open D -module SMap = Map.Make(struct type t = string let compare = 
compare end) + module SMap = Map.Make(struct type t = string let compare = compare end) -(* Tasks are stored in an id -> t map *) + (* Tasks are stored in an id -> t map *) -(* A task is associated with every running operation *) -type t = { - id: string; (* unique task id *) - ctime: float; (* created timestamp *) - dbg: string; (* token sent by client *) - mutable state: Interface.Task.state; (* current completion state *) - mutable subtasks: (string * Interface.Task.state) list; (* one level of "subtasks" *) - f: t -> Interface.Task.async_result option; (* body of the function *) - tm: Mutex.t; (* protects cancelling state: *) + (* A task is associated with every running operation *) + type t = { + id: string; (* unique task id *) + ctime: float; (* created timestamp *) + dbg: string; (* token sent by client *) + mutable state: Interface.Task.state; (* current completion state *) + mutable subtasks: (string * Interface.Task.state) list; (* one level of "subtasks" *) + f: t -> Interface.Task.async_result option; (* body of the function *) + tm: Mutex.t; (* protects cancelling state: *) mutable cancelling: bool; (* set by cancel *) - mutable cancel: (unit -> unit) list; (* attempt to cancel [f] *) - mutable cancel_points_seen: int; (* incremented every time we pass a cancellation point *) - test_cancel_at: int option; (* index of the cancel point to trigger *) -} - -type tasks = { - tasks : t SMap.t ref; - mutable test_cancel_trigger : (string * int) option; - m : Mutex.t; - c : Condition.t; -} - -let empty () = - let tasks = ref SMap.empty in - let m = Mutex.create () in - let c = Condition.create () in - { tasks; test_cancel_trigger = None; m; c } - -(* [next_task_id ()] returns a fresh task id *) -let next_task_id = - let counter = ref 0 in - fun () -> - let result = string_of_int !counter in - incr counter; - result - -let set_cancel_trigger tasks dbg n = - Mutex.execute tasks.m - (fun () -> - tasks.test_cancel_trigger <- Some (dbg, n) - ) - -let 
clear_cancel_trigger tasks = - Mutex.execute tasks.m - (fun () -> - tasks.test_cancel_trigger <- None - ) - -(* [add dbg f] creates a fresh [t], registers and returns it *) -let add tasks dbg (f: t -> Interface.Task.async_result option) = - let t = { - id = next_task_id (); - ctime = Unix.gettimeofday (); - dbg = dbg; - state = Interface.Task.Pending 0.; - subtasks = []; - f = f; - tm = Mutex.create (); - cancelling = false; - cancel = []; - cancel_points_seen = 0; - test_cancel_at = match tasks.test_cancel_trigger with - | Some (dbg', n) when dbg = dbg' -> - clear_cancel_trigger tasks; (* one shot *) - Some n - | _ -> None - } in - Mutex.execute tasks.m - (fun () -> - tasks.tasks := SMap.add t.id t !(tasks.tasks) - ); - t - -(* [run t] executes the task body, updating the fields of [t] *) -let run item = - try - let start = Unix.gettimeofday () in - let result = item.f item in - let duration = Unix.gettimeofday () -. start in - item.state <- Interface.Task.Completed { Interface.Task.duration; result }; - debug "Task %s completed; duration = %.0f" item.id duration - with - | e -> - let e = e |> Interface.exnty_of_exn |> Interface.Exception.rpc_of_exnty in - debug "Task %s failed; exception = %s" item.id (e |> Jsonrpc.to_string); - debug "%s" (Printexc.get_backtrace ()); - item.state <- Interface.Task.Failed e - -let exists_locked tasks id = SMap.mem id !(tasks.tasks) - -let find_locked tasks id = - if not (exists_locked tasks id) then raise (Interface.Does_not_exist("task", id)); - SMap.find id !(tasks.tasks) - -let with_subtask t name f = - let start = Unix.gettimeofday () in - try - t.subtasks <- (name, Interface.Task.Pending 0.) :: t.subtasks; - let result = f () in - let duration = Unix.gettimeofday () -. 
start in - t.subtasks <- List.replace_assoc name (Interface.Task.Completed {Interface.Task.duration; result=None}) t.subtasks; - result - with e -> - t.subtasks <- List.replace_assoc name (Interface.Task.Failed (Interface.Exception.rpc_of_exnty (Interface.exnty_of_exn (Interface.Internal_error (Printexc.to_string e))))) t.subtasks; - raise e - -let list tasks = - Mutex.execute tasks.m - (fun () -> - SMap.bindings !(tasks.tasks) |> List.map snd - ) - -(* Remove the task from the id -> task mapping. NB any active thread will still continue. *) -let destroy tasks id = - Mutex.execute tasks.m - (fun () -> - tasks.tasks := SMap.remove id !(tasks.tasks) - ) - -let cancel tasks id = - let t = Mutex.execute tasks.m (fun () -> find_locked tasks id) in - let callbacks = Mutex.execute t.tm - (fun () -> - t.cancelling <- true; - t.cancel - ) in - List.iter - (fun f -> - try - f () - with e -> - debug "Task.cancel %s: ignore exception %s" id (Printexc.to_string e) - ) callbacks - -let raise_cancelled t = - info "Task %s has been cancelled: raising Cancelled exception" t.id; - raise (Interface.Cancelled(t.id)) - -let check_cancelling t = - Mutex.execute t.tm - (fun () -> - t.cancel_points_seen <- t.cancel_points_seen + 1; - if t.cancelling then raise_cancelled t; - Opt.iter (fun x -> if t.cancel_points_seen = x then begin - info "Task %s has been triggered by the test-case (cancel_point = %d)" t.id t.cancel_points_seen; - raise_cancelled t - end) t.test_cancel_at - ) - -let with_cancel t cancel_fn f = - Mutex.execute t.tm (fun () -> t.cancel <- cancel_fn :: t.cancel); - Stdext.Pervasiveext.finally - (fun () -> - check_cancelling t; - f () - ) - (fun () -> Mutex.execute t.tm (fun () -> t.cancel <- List.tl t.cancel)) + mutable cancel: (unit -> unit) list; (* attempt to cancel [f] *) + mutable cancel_points_seen: int; (* incremented every time we pass a cancellation point *) + test_cancel_at: int option; (* index of the cancel point to trigger *) + } + + type tasks = { + tasks : t 
SMap.t ref; + mutable test_cancel_trigger : (string * int) option; + m : Mutex.t; + c : Condition.t; + } + + let empty () = + let tasks = ref SMap.empty in + let m = Mutex.create () in + let c = Condition.create () in + { tasks; test_cancel_trigger = None; m; c } + + (* [next_task_id ()] returns a fresh task id *) + let next_task_id = + let counter = ref 0 in + fun () -> + let result = string_of_int !counter in + incr counter; + result + + let set_cancel_trigger tasks dbg n = + Mutex.execute tasks.m + (fun () -> + tasks.test_cancel_trigger <- Some (dbg, n) + ) + + let clear_cancel_trigger tasks = + Mutex.execute tasks.m + (fun () -> + tasks.test_cancel_trigger <- None + ) + + (* [add dbg f] creates a fresh [t], registers and returns it *) + let add tasks dbg (f: t -> Interface.Task.async_result option) = + let t = { + id = next_task_id (); + ctime = Unix.gettimeofday (); + dbg = dbg; + state = Interface.Task.Pending 0.; + subtasks = []; + f = f; + tm = Mutex.create (); + cancelling = false; + cancel = []; + cancel_points_seen = 0; + test_cancel_at = match tasks.test_cancel_trigger with + | Some (dbg', n) when dbg = dbg' -> + clear_cancel_trigger tasks; (* one shot *) + Some n + | _ -> None + } in + Mutex.execute tasks.m + (fun () -> + tasks.tasks := SMap.add t.id t !(tasks.tasks) + ); + t + + (* [run t] executes the task body, updating the fields of [t] *) + let run item = + try + let start = Unix.gettimeofday () in + let result = item.f item in + let duration = Unix.gettimeofday () -. 
start in + item.state <- Interface.Task.Completed { Interface.Task.duration; result }; + debug "Task %s completed; duration = %.0f" item.id duration + with + | e -> + let e = e |> Interface.exnty_of_exn |> Interface.Exception.rpc_of_exnty in + debug "Task %s failed; exception = %s" item.id (e |> Jsonrpc.to_string); + debug "%s" (Printexc.get_backtrace ()); + item.state <- Interface.Task.Failed e + + let exists_locked tasks id = SMap.mem id !(tasks.tasks) + + let find_locked tasks id = + if not (exists_locked tasks id) then raise (Interface.Does_not_exist("task", id)); + SMap.find id !(tasks.tasks) + + let with_subtask t name f = + let start = Unix.gettimeofday () in + try + t.subtasks <- (name, Interface.Task.Pending 0.) :: t.subtasks; + let result = f () in + let duration = Unix.gettimeofday () -. start in + t.subtasks <- List.replace_assoc name (Interface.Task.Completed {Interface.Task.duration; result=None}) t.subtasks; + result + with e -> + t.subtasks <- List.replace_assoc name (Interface.Task.Failed (Interface.Exception.rpc_of_exnty (Interface.exnty_of_exn (Interface.Internal_error (Printexc.to_string e))))) t.subtasks; + raise e + + let list tasks = + Mutex.execute tasks.m + (fun () -> + SMap.bindings !(tasks.tasks) |> List.map snd + ) + + (* Remove the task from the id -> task mapping. NB any active thread will still continue. 
*) + let destroy tasks id = + Mutex.execute tasks.m + (fun () -> + tasks.tasks := SMap.remove id !(tasks.tasks) + ) + + let cancel tasks id = + let t = Mutex.execute tasks.m (fun () -> find_locked tasks id) in + let callbacks = Mutex.execute t.tm + (fun () -> + t.cancelling <- true; + t.cancel + ) in + List.iter + (fun f -> + try + f () + with e -> + debug "Task.cancel %s: ignore exception %s" id (Printexc.to_string e) + ) callbacks + + let raise_cancelled t = + info "Task %s has been cancelled: raising Cancelled exception" t.id; + raise (Interface.Cancelled(t.id)) + + let check_cancelling t = + Mutex.execute t.tm + (fun () -> + t.cancel_points_seen <- t.cancel_points_seen + 1; + if t.cancelling then raise_cancelled t; + Opt.iter (fun x -> if t.cancel_points_seen = x then begin + info "Task %s has been triggered by the test-case (cancel_point = %d)" t.id t.cancel_points_seen; + raise_cancelled t + end) t.test_cancel_at + ) + + let with_cancel t cancel_fn f = + Mutex.execute t.tm (fun () -> t.cancel <- cancel_fn :: t.cancel); + Stdext.Pervasiveext.finally + (fun () -> + check_cancelling t; + f () + ) + (fun () -> Mutex.execute t.tm (fun () -> t.cancel <- List.tl t.cancel)) end diff --git a/ocaml/xapi/thread_queue.ml b/ocaml/xapi/thread_queue.ml index 92d27f1879c..f8707ba95e0 100644 --- a/ocaml/xapi/thread_queue.ml +++ b/ocaml/xapi/thread_queue.ml @@ -29,69 +29,69 @@ type 'a process_fn = 'a -> unit type 'a push_fn = string -> 'a -> bool type 'a t = { - push_fn: 'a push_fn; - name: string; + push_fn: 'a push_fn; + name: string; } -(** Given an optional maximum queue length and a function for processing elements (which will be called in a +(** Given an optional maximum queue length and a function for processing elements (which will be called in a single background thread), return a function which pushes items onto the queue. 
*) -let make ?max_q_length ?(name="unknown") (process_fn: 'a process_fn) : 'a t = +let make ?max_q_length ?(name="unknown") (process_fn: 'a process_fn) : 'a t = let q = Queue.create () in let c = Condition.create () in let m = Mutex.create () in - let string_of_queue q = + let string_of_queue q = let items = List.rev (Queue.fold (fun acc (description, _) -> description::acc) [] q) in Printf.sprintf "[ %s ](%d)" (String.concat "; " items) (List.length items) in (** The background thread *) let t = ref None in - - let thread_body () = + + let thread_body () = Mutex.execute m (fun () -> - while true do - (* Wait until there is work to do *) - while Queue.length q = 0 do Condition.wait c m done; - (* Make a copy of the items in the q so we can drop the lock and process them *) - let local_q = Queue.copy q in - Queue.clear q; + while true do + (* Wait until there is work to do *) + while Queue.length q = 0 do Condition.wait c m done; + (* Make a copy of the items in the q so we can drop the lock and process them *) + let local_q = Queue.copy q in + Queue.clear q; - Mutex.unlock m; - (* Process the items dropping any exceptions (process function should do whatever logging it wants) *) - finally - (fun () -> - Queue.iter - (fun (description, x) -> - debug "pop(%s) = %s" name description; - try process_fn x with _ -> ()) - local_q - ) - (fun () -> Mutex.lock m); - debug "%s: completed processing %d items: queue = %s" name (Queue.length local_q) (string_of_queue q); - done + Mutex.unlock m; + (* Process the items dropping any exceptions (process function should do whatever logging it wants) *) + finally + (fun () -> + Queue.iter + (fun (description, x) -> + debug "pop(%s) = %s" name description; + try process_fn x with _ -> ()) + local_q + ) + (fun () -> Mutex.lock m); + debug "%s: completed processing %d items: queue = %s" name (Queue.length local_q) (string_of_queue q); + done ) in - + (* Called with lock already held *) - let maybe_start_thread () = + let 
maybe_start_thread () = match !t with | Some _ -> () | None -> t := Some (Thread.create thread_body ()) in - - let push description x = + + let push description x = Mutex.execute m - (fun () -> - let q_length = Queue.length q in - match max_q_length with - | Some max when q_length > max -> - warn "%s: Maximum length exceeded (%d): dropping item" name max; - false - | _ -> - Queue.push (description, x) q; - debug "push(%s, %s): queue = %s" name description (string_of_queue q); - Condition.signal c; - maybe_start_thread (); - true + (fun () -> + let q_length = Queue.length q in + match max_q_length with + | Some max when q_length > max -> + warn "%s: Maximum length exceeded (%d): dropping item" name max; + false + | _ -> + Queue.push (description, x) q; + debug "push(%s, %s): queue = %s" name description (string_of_queue q); + Condition.signal c; + maybe_start_thread (); + true ) in { push_fn = push; name = name } diff --git a/ocaml/xapi/updates.ml b/ocaml/xapi/updates.ml index fc2709cc486..67e7cb68f44 100644 --- a/ocaml/xapi/updates.ml +++ b/ocaml/xapi/updates.ml @@ -3,335 +3,335 @@ module type INTERFACE = sig - val service_name : string + val service_name : string - module Dynamic : sig - type id - val rpc_of_id : id -> Rpc.t - val id_of_rpc : Rpc.t -> id - end + module Dynamic : sig + type id + val rpc_of_id : id -> Rpc.t + val id_of_rpc : Rpc.t -> id + end end module Updates = functor(Interface : INTERFACE) -> struct -module D = Debug.Make(struct let name = Interface.service_name end) -open D - -module Int64Map = Map.Make(struct type t = int64 let compare = compare end) - -module Scheduler = struct - open Stdext.Threadext - type item = { - id: int; - name: string; - fn: unit -> unit - } - let schedule = ref Int64Map.empty - let delay = Delay.make () - let next_id = ref 0 - let m = Mutex.create () - - type time = - | Absolute of int64 - | Delta of int with rpc - - type t = int64 * int with rpc - - let now () = Unix.gettimeofday () |> ceil |> Int64.of_float - - 
module Dump = struct - type u = { - time: int64; - thing: string; - } with rpc - type t = u list with rpc - let make () = - let now = now () in - Mutex.execute m - (fun () -> - Int64Map.fold (fun time xs acc -> List.map (fun i -> { time = Int64.sub time now; thing = i.name }) xs @ acc) !schedule [] - ) - end - - let one_shot time (name: string) f = - let time = match time with - | Absolute x -> x - | Delta x -> Int64.(add (of_int x) (now ())) in - let id = Mutex.execute m - (fun () -> - let existing = - if Int64Map.mem time !schedule - then Int64Map.find time !schedule - else [] in - let id = !next_id in - incr next_id; - let item = { - id = id; - name = name; - fn = f - } in - schedule := Int64Map.add time (item :: existing) !schedule; - Delay.signal delay; - id - ) in - (time, id) - - let cancel (time, id) = - Mutex.execute m - (fun () -> - let existing = - if Int64Map.mem time !schedule - then Int64Map.find time !schedule - else [] in - schedule := Int64Map.add time (List.filter (fun i -> i.id <> id) existing) !schedule - ) - - let process_expired () = - let t = now () in - let expired = - Mutex.execute m - (fun () -> - let expired, unexpired = Int64Map.partition (fun t' _ -> t' <= t) !schedule in - schedule := unexpired; - Int64Map.fold (fun _ stuff acc -> acc @ stuff) expired [] |> List.rev) in - (* This might take a while *) - List.iter - (fun i -> - try - i.fn () - with e -> - debug "Scheduler ignoring exception: %s" (Printexc.to_string e) - ) expired; - expired <> [] (* true if work was done *) - - let rec main_loop () = - while process_expired () do () done; - let sleep_until = - Mutex.execute m - (fun () -> - try - Int64Map.min_binding !schedule |> fst - with Not_found -> - Int64.add 3600L (now ()) - ) in - let seconds = Int64.sub sleep_until (now ()) in - debug "Scheduler sleep until %Ld (another %Ld seconds)" sleep_until seconds; - let (_: bool) = Delay.wait delay (Int64.to_float seconds) in - main_loop () - - let start () = - let (_: Thread.t) = 
Thread.create main_loop () in - () -end - -module UpdateRecorder = functor(Ord: Map.OrderedType) -> struct - (* Map of thing -> last update counter *) - module M = Map.Make(struct - type t = Ord.t - let compare = compare - end) - - type id = int - - type t = { - map: int M.t; - barriers: (int * (int M.t)) list; - next: id - } - - let initial = 0 - - let empty = { - map = M.empty; - barriers = []; - next = initial + 1; - } - - let add x t = { - map = M.add x t.next t.map; - barriers = t.barriers; - next = t.next + 1 - }, t.next + 1 - - let remove x t = { - map = M.remove x t.map; - barriers = t.barriers; - next = t.next + 1 - }, t.next + 1 - - let filter f t = { - map = M.filter f t.map; - barriers = t.barriers; - next = t.next + 1 - }, t.next + 1 - - let inject_barrier id filterfn t = { - map = t.map; - barriers = (id,M.filter filterfn t.map)::t.barriers; - next = t.next + 1 - }, t.next + 1 - - let remove_barrier id t = { - map = t.map; - barriers = List.filter (fun x -> fst x <> id) t.barriers; - next = t.next + 1 - }, t.next + 1 - - let get from t = - (* [from] is the id of the most recent event already seen *) - let get_from_map map = - let before, after = M.partition (fun _ time -> time <= from) map in - let xs, last = M.fold (fun key v (acc, m) -> (key, v) :: acc, max m v) after ([], from) in - let xs = List.sort (fun (_, v1) (_, v2) -> compare v1 v2) xs - |> List.map fst - in - xs, last - in - let barriers = List.map (fun (id,map) -> (id,get_from_map map |> fst)) t.barriers in - let rest,last = get_from_map t.map in - (barriers,rest,last) - - let last_id t = t.next - 1 - - let fold f t init = M.fold f t.map init -end - -open Stdext.Threadext - -module U = UpdateRecorder(struct type t = Interface.Dynamic.id let compare = compare end) - -type id = U.id - -type t = { - mutable u: U.t; - c: Condition.t; - m: Mutex.t; -} - -let empty () = { - u = U.empty; - c = Condition.create (); - m = Mutex.create (); -} - -type rpcable_t = { - u' : (Interface.Dynamic.id * int) 
list; - b : (int * (Interface.Dynamic.id * int) list) list; - next : int; -} with rpc - -let rpc_of_t t = - let get_u u = U.M.fold (fun x y acc -> (x,y)::acc) u [] in - let b = List.map (fun (id,t) -> (id,get_u t)) t.u.U.barriers in - rpc_of_rpcable_t { u'=get_u t.u.U.map; b=b; next = t.u.U.next } - -let t_of_rpc rpc = - let u' = rpcable_t_of_rpc rpc in - let map_of u = - let map = U.M.empty in - List.fold_left (fun map (x,y) -> U.M.add x y map) map u - in - let map = map_of u'.u' in - let barriers = List.map (fun (id,u) -> (id,map_of u)) u'.b in - { u = { U.map = map; next=u'.next; barriers }; - c = Condition.create (); - m = Mutex.create (); -} - -let get dbg ?(with_cancel=(fun _ f -> f ())) from timeout t = - let from = Stdext.Opt.default U.initial from in - let cancel = ref false in - let cancel_fn () = - debug "Cancelling: Update.get"; - Mutex.execute t.m - (fun () -> - cancel := true; - Condition.broadcast t.c - ) - in - let id = Stdext.Opt.map (fun timeout -> - Scheduler.one_shot (Scheduler.Delta timeout) dbg cancel_fn - ) timeout in - with_cancel cancel_fn (fun () -> - Stdext.Pervasiveext.finally (fun () -> - Mutex.execute t.m (fun () -> - let is_empty (x,y,_) = x=[] && y=[] in - - let rec wait () = - let result = U.get from t.u in - if is_empty result && not (!cancel) then - begin Condition.wait t.c t.m; wait () end - else result - in - wait () - ) - ) (fun () -> Stdext.Opt.iter Scheduler.cancel id)) - -let last_id dbg t = - Mutex.execute t.m - (fun () -> - U.last_id t.u - ) - -let add x t = - Mutex.execute t.m - (fun () -> - let result, id = U.add x t.u in - t.u <- result; - Condition.broadcast t.c - ) - -let remove x t = - Mutex.execute t.m - (fun () -> - let result, id = U.remove x t.u in - t.u <- result; - Condition.broadcast t.c - ) - -let filter f t = - Mutex.execute t.m - (fun () -> - let result, id = U.filter (fun x y -> f x) t.u in - t.u <- result; - Condition.broadcast t.c - ) - -let inject_barrier id filter t = - Mutex.execute t.m - (fun () -> - 
let result, id = U.inject_barrier id filter t.u in - t.u <- result; - Condition.broadcast t.c) - -let remove_barrier id t = - Mutex.execute t.m - (fun () -> - let result, id = U.remove_barrier id t.u in - t.u <- result; - Condition.broadcast t.c) - -module Dump = struct - type u = { - id: int; - v: string; - } with rpc - type t = { - updates: u list; - barriers : (int * (u list)) list; - } with rpc - let make_list updates = - U.M.fold (fun key v acc -> { id = v; v = (key |> Interface.Dynamic.rpc_of_id |> Jsonrpc.to_string) } :: acc) updates [] - let make_raw u = - { updates = make_list u.U.map; - barriers = List.map (fun (id,map) -> (id, make_list map)) u.U.barriers; - } - let make t = - Mutex.execute t.m - (fun () -> - make_raw t.u - ) -end + module D = Debug.Make(struct let name = Interface.service_name end) + open D + + module Int64Map = Map.Make(struct type t = int64 let compare = compare end) + + module Scheduler = struct + open Stdext.Threadext + type item = { + id: int; + name: string; + fn: unit -> unit + } + let schedule = ref Int64Map.empty + let delay = Delay.make () + let next_id = ref 0 + let m = Mutex.create () + + type time = + | Absolute of int64 + | Delta of int with rpc + + type t = int64 * int with rpc + + let now () = Unix.gettimeofday () |> ceil |> Int64.of_float + + module Dump = struct + type u = { + time: int64; + thing: string; + } with rpc + type t = u list with rpc + let make () = + let now = now () in + Mutex.execute m + (fun () -> + Int64Map.fold (fun time xs acc -> List.map (fun i -> { time = Int64.sub time now; thing = i.name }) xs @ acc) !schedule [] + ) + end + + let one_shot time (name: string) f = + let time = match time with + | Absolute x -> x + | Delta x -> Int64.(add (of_int x) (now ())) in + let id = Mutex.execute m + (fun () -> + let existing = + if Int64Map.mem time !schedule + then Int64Map.find time !schedule + else [] in + let id = !next_id in + incr next_id; + let item = { + id = id; + name = name; + fn = f + } in + 
schedule := Int64Map.add time (item :: existing) !schedule; + Delay.signal delay; + id + ) in + (time, id) + + let cancel (time, id) = + Mutex.execute m + (fun () -> + let existing = + if Int64Map.mem time !schedule + then Int64Map.find time !schedule + else [] in + schedule := Int64Map.add time (List.filter (fun i -> i.id <> id) existing) !schedule + ) + + let process_expired () = + let t = now () in + let expired = + Mutex.execute m + (fun () -> + let expired, unexpired = Int64Map.partition (fun t' _ -> t' <= t) !schedule in + schedule := unexpired; + Int64Map.fold (fun _ stuff acc -> acc @ stuff) expired [] |> List.rev) in + (* This might take a while *) + List.iter + (fun i -> + try + i.fn () + with e -> + debug "Scheduler ignoring exception: %s" (Printexc.to_string e) + ) expired; + expired <> [] (* true if work was done *) + + let rec main_loop () = + while process_expired () do () done; + let sleep_until = + Mutex.execute m + (fun () -> + try + Int64Map.min_binding !schedule |> fst + with Not_found -> + Int64.add 3600L (now ()) + ) in + let seconds = Int64.sub sleep_until (now ()) in + debug "Scheduler sleep until %Ld (another %Ld seconds)" sleep_until seconds; + let (_: bool) = Delay.wait delay (Int64.to_float seconds) in + main_loop () + + let start () = + let (_: Thread.t) = Thread.create main_loop () in + () + end + + module UpdateRecorder = functor(Ord: Map.OrderedType) -> struct + (* Map of thing -> last update counter *) + module M = Map.Make(struct + type t = Ord.t + let compare = compare + end) + + type id = int + + type t = { + map: int M.t; + barriers: (int * (int M.t)) list; + next: id + } + + let initial = 0 + + let empty = { + map = M.empty; + barriers = []; + next = initial + 1; + } + + let add x t = { + map = M.add x t.next t.map; + barriers = t.barriers; + next = t.next + 1 + }, t.next + 1 + + let remove x t = { + map = M.remove x t.map; + barriers = t.barriers; + next = t.next + 1 + }, t.next + 1 + + let filter f t = { + map = M.filter f 
t.map; + barriers = t.barriers; + next = t.next + 1 + }, t.next + 1 + + let inject_barrier id filterfn t = { + map = t.map; + barriers = (id,M.filter filterfn t.map)::t.barriers; + next = t.next + 1 + }, t.next + 1 + + let remove_barrier id t = { + map = t.map; + barriers = List.filter (fun x -> fst x <> id) t.barriers; + next = t.next + 1 + }, t.next + 1 + + let get from t = + (* [from] is the id of the most recent event already seen *) + let get_from_map map = + let before, after = M.partition (fun _ time -> time <= from) map in + let xs, last = M.fold (fun key v (acc, m) -> (key, v) :: acc, max m v) after ([], from) in + let xs = List.sort (fun (_, v1) (_, v2) -> compare v1 v2) xs + |> List.map fst + in + xs, last + in + let barriers = List.map (fun (id,map) -> (id,get_from_map map |> fst)) t.barriers in + let rest,last = get_from_map t.map in + (barriers,rest,last) + + let last_id t = t.next - 1 + + let fold f t init = M.fold f t.map init + end + + open Stdext.Threadext + + module U = UpdateRecorder(struct type t = Interface.Dynamic.id let compare = compare end) + + type id = U.id + + type t = { + mutable u: U.t; + c: Condition.t; + m: Mutex.t; + } + + let empty () = { + u = U.empty; + c = Condition.create (); + m = Mutex.create (); + } + + type rpcable_t = { + u' : (Interface.Dynamic.id * int) list; + b : (int * (Interface.Dynamic.id * int) list) list; + next : int; + } with rpc + + let rpc_of_t t = + let get_u u = U.M.fold (fun x y acc -> (x,y)::acc) u [] in + let b = List.map (fun (id,t) -> (id,get_u t)) t.u.U.barriers in + rpc_of_rpcable_t { u'=get_u t.u.U.map; b=b; next = t.u.U.next } + + let t_of_rpc rpc = + let u' = rpcable_t_of_rpc rpc in + let map_of u = + let map = U.M.empty in + List.fold_left (fun map (x,y) -> U.M.add x y map) map u + in + let map = map_of u'.u' in + let barriers = List.map (fun (id,u) -> (id,map_of u)) u'.b in + { u = { U.map = map; next=u'.next; barriers }; + c = Condition.create (); + m = Mutex.create (); + } + + let get dbg 
?(with_cancel=(fun _ f -> f ())) from timeout t = + let from = Stdext.Opt.default U.initial from in + let cancel = ref false in + let cancel_fn () = + debug "Cancelling: Update.get"; + Mutex.execute t.m + (fun () -> + cancel := true; + Condition.broadcast t.c + ) + in + let id = Stdext.Opt.map (fun timeout -> + Scheduler.one_shot (Scheduler.Delta timeout) dbg cancel_fn + ) timeout in + with_cancel cancel_fn (fun () -> + Stdext.Pervasiveext.finally (fun () -> + Mutex.execute t.m (fun () -> + let is_empty (x,y,_) = x=[] && y=[] in + + let rec wait () = + let result = U.get from t.u in + if is_empty result && not (!cancel) then + begin Condition.wait t.c t.m; wait () end + else result + in + wait () + ) + ) (fun () -> Stdext.Opt.iter Scheduler.cancel id)) + + let last_id dbg t = + Mutex.execute t.m + (fun () -> + U.last_id t.u + ) + + let add x t = + Mutex.execute t.m + (fun () -> + let result, id = U.add x t.u in + t.u <- result; + Condition.broadcast t.c + ) + + let remove x t = + Mutex.execute t.m + (fun () -> + let result, id = U.remove x t.u in + t.u <- result; + Condition.broadcast t.c + ) + + let filter f t = + Mutex.execute t.m + (fun () -> + let result, id = U.filter (fun x y -> f x) t.u in + t.u <- result; + Condition.broadcast t.c + ) + + let inject_barrier id filter t = + Mutex.execute t.m + (fun () -> + let result, id = U.inject_barrier id filter t.u in + t.u <- result; + Condition.broadcast t.c) + + let remove_barrier id t = + Mutex.execute t.m + (fun () -> + let result, id = U.remove_barrier id t.u in + t.u <- result; + Condition.broadcast t.c) + + module Dump = struct + type u = { + id: int; + v: string; + } with rpc + type t = { + updates: u list; + barriers : (int * (u list)) list; + } with rpc + let make_list updates = + U.M.fold (fun key v acc -> { id = v; v = (key |> Interface.Dynamic.rpc_of_id |> Jsonrpc.to_string) } :: acc) updates [] + let make_raw u = + { updates = make_list u.U.map; + barriers = List.map (fun (id,map) -> (id, make_list map)) 
u.U.barriers; + } + let make t = + Mutex.execute t.m + (fun () -> + make_raw t.u + ) + end end diff --git a/ocaml/xapi/upload_receive.ml b/ocaml/xapi/upload_receive.ml index 8bd5187171d..83eec3a6651 100644 --- a/ocaml/xapi/upload_receive.ml +++ b/ocaml/xapi/upload_receive.ml @@ -16,28 +16,28 @@ open Threadext (** Start the XML-RPC server. *) let _ = - let http_port = ref Xapi_globs.default_cleartext_port in - Arg.parse ([ - "-log", Arg.String (fun s -> - if s = "all" then - Logs.set_default Log.Debug [ "stderr" ] - else - Logs.add s [ "stderr" ]), - "open a logger to stderr to the argument key name"; - "-http-port", Arg.Set_int http_port, "set http port"; - ] @ Debug.args )(fun x -> printf "Warning, ignoring unknown argument: %s" x) - "Receive file uploads by HTTP"; + let http_port = ref Xapi_globs.default_cleartext_port in + Arg.parse ([ + "-log", Arg.String (fun s -> + if s = "all" then + Logs.set_default Log.Debug [ "stderr" ] + else + Logs.add s [ "stderr" ]), + "open a logger to stderr to the argument key name"; + "-http-port", Arg.Set_int http_port, "set http port"; + ] @ Debug.args )(fun x -> printf "Warning, ignoring unknown argument: %s" x) + "Receive file uploads by HTTP"; - printf "Starting server on port %d\n%!" !http_port; - let server = Http_svr.Server.empty in - try - Http_svr.add_handler server Put "/upload" (Http_svr.BufIO Fileupload.upload_file); - let sockaddr = Unix.ADDR_INET(Unix.inet_addr_of_string Xapi_globs.ips_to_listen_on, !http_port) in - let inet_sock = Http_svr.bind sockaddr "inet_rpc" in - Http_svr.start server inet_sock; - print_endline "Receiving upload requests on:"; - Printf.printf "http://%s:%d/upload\n" (Helpers.get_main_ip_address ()) !http_port; - flush stdout; - with - | exn -> (eprintf "Caught exception: %s\n!" - (ExnHelper.string_of_exn exn)) + printf "Starting server on port %d\n%!" 
!http_port; + let server = Http_svr.Server.empty in + try + Http_svr.add_handler server Put "/upload" (Http_svr.BufIO Fileupload.upload_file); + let sockaddr = Unix.ADDR_INET(Unix.inet_addr_of_string Xapi_globs.ips_to_listen_on, !http_port) in + let inet_sock = Http_svr.bind sockaddr "inet_rpc" in + Http_svr.start server inet_sock; + print_endline "Receiving upload requests on:"; + Printf.printf "http://%s:%d/upload\n" (Helpers.get_main_ip_address ()) !http_port; + flush stdout; + with + | exn -> (eprintf "Caught exception: %s\n!" + (ExnHelper.string_of_exn exn)) diff --git a/ocaml/xapi/vbdops.ml b/ocaml/xapi/vbdops.ml index f2850a85457..c4bf4eefe59 100644 --- a/ocaml/xapi/vbdops.ml +++ b/ocaml/xapi/vbdops.ml @@ -13,8 +13,8 @@ *) (** * @group Storage - *) - +*) + module D = Debug.Make(struct let name="xapi" end) open D @@ -26,15 +26,15 @@ exception Only_CD_VBDs_may_be_empty let translate_vbd_device vbd_ref name is_hvm = - try - let i = Device_number.of_string is_hvm name in - debug "VBD device name %s interpreted as %s (hvm = %b)" name (Device_number.to_debug_string i) is_hvm; - i - with _ -> - raise (Api_errors.Server_error(Api_errors.illegal_vbd_device, [ Ref.string_of vbd_ref; name ])) + try + let i = Device_number.of_string is_hvm name in + debug "VBD device name %s interpreted as %s (hvm = %b)" name (Device_number.to_debug_string i) is_hvm; + i + with _ -> + raise (Api_errors.Server_error(Api_errors.illegal_vbd_device, [ Ref.string_of vbd_ref; name ])) (** Create a debug-friendly string from a VBD *) -let string_of_vbd ~__context ~vbd = +let string_of_vbd ~__context ~vbd = let r = Db.VBD.get_record ~__context ~self:vbd in let name = r.API.vBD_userdevice ^ "/" ^ r.API.vBD_device in let vdi = if r.API.vBD_empty then "empty" else try Db.VDI.get_uuid ~__context ~self:r.API.vBD_VDI with _ -> "missing" in diff --git a/ocaml/xapi/vgpuops.ml b/ocaml/xapi/vgpuops.ml index f72a375bd3a..a2a00957b53 100644 --- a/ocaml/xapi/vgpuops.ml +++ b/ocaml/xapi/vgpuops.ml @@ -19,153 
+19,153 @@ open Listext open Xstringext type vgpu = { - vgpu_ref: API.ref_VGPU; - gpu_group_ref: API.ref_GPU_group; - devid: int; - other_config: (string * string) list; - type_ref: API.ref_VGPU_type; + vgpu_ref: API.ref_VGPU; + gpu_group_ref: API.ref_GPU_group; + devid: int; + other_config: (string * string) list; + type_ref: API.ref_VGPU_type; } let vgpu_of_vgpu ~__context vm_r vgpu = - let vgpu_r = Db.VGPU.get_record ~__context ~self:vgpu in - { - vgpu_ref = vgpu; - gpu_group_ref = vgpu_r.API.vGPU_GPU_group; - devid = int_of_string vgpu_r.API.vGPU_device; - other_config = vgpu_r.API.vGPU_other_config; - type_ref = vgpu_r.API.vGPU_type; - } + let vgpu_r = Db.VGPU.get_record ~__context ~self:vgpu in + { + vgpu_ref = vgpu; + gpu_group_ref = vgpu_r.API.vGPU_GPU_group; + devid = int_of_string vgpu_r.API.vGPU_device; + other_config = vgpu_r.API.vGPU_other_config; + type_ref = vgpu_r.API.vGPU_type; + } let vgpus_of_vm ~__context vm_r = - List.map (vgpu_of_vgpu ~__context vm_r) vm_r.API.vM_VGPUs + List.map (vgpu_of_vgpu ~__context vm_r) vm_r.API.vM_VGPUs let create_passthrough_vgpu ~__context ~vm vgpu available_pgpus pcis = - debug "Creating passthrough VGPUs"; - let compatible_pgpus = Db.GPU_group.get_PGPUs ~__context ~self:vgpu.gpu_group_ref in - let pgpus = List.intersect compatible_pgpus available_pgpus in - let rec choose_pgpu = function - | [] -> None - | pgpu :: remaining -> - try - Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type ~__context - ~self:pgpu ~vgpu_type:vgpu.type_ref; - Some (pgpu, Db.PGPU.get_PCI ~__context ~self:pgpu) - with _ -> choose_pgpu remaining - in - match choose_pgpu pgpus with - | None -> - raise (Api_errors.Server_error (Api_errors.vm_requires_gpu, [ - Ref.string_of vm; - Ref.string_of vgpu.gpu_group_ref - ])) - | Some (pgpu, pci) -> - Db.VGPU.set_scheduled_to_be_resident_on ~__context - ~self:vgpu.vgpu_ref ~value:pgpu; - List.filter (fun g -> g <> pgpu) available_pgpus, - pci :: pcis + debug "Creating passthrough VGPUs"; + let 
compatible_pgpus = Db.GPU_group.get_PGPUs ~__context ~self:vgpu.gpu_group_ref in + let pgpus = List.intersect compatible_pgpus available_pgpus in + let rec choose_pgpu = function + | [] -> None + | pgpu :: remaining -> + try + Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type ~__context + ~self:pgpu ~vgpu_type:vgpu.type_ref; + Some (pgpu, Db.PGPU.get_PCI ~__context ~self:pgpu) + with _ -> choose_pgpu remaining + in + match choose_pgpu pgpus with + | None -> + raise (Api_errors.Server_error (Api_errors.vm_requires_gpu, [ + Ref.string_of vm; + Ref.string_of vgpu.gpu_group_ref + ])) + | Some (pgpu, pci) -> + Db.VGPU.set_scheduled_to_be_resident_on ~__context + ~self:vgpu.vgpu_ref ~value:pgpu; + List.filter (fun g -> g <> pgpu) available_pgpus, + pci :: pcis let add_pcis_to_vm ~__context host vm passthru_vgpus = - let pcis = - if passthru_vgpus <> [] then begin - let pgpus = Db.Host.get_PGPUs ~__context ~self:host in - let _, pcis = - List.fold_left - (fun (pgpus, pcis) passthru_vgpu -> - create_passthrough_vgpu ~__context ~vm passthru_vgpu pgpus pcis) - (pgpus, []) passthru_vgpus - in - pcis - end else - [] in - (* Add a platform key to the VM if any of the PCIs are integrated GPUs; - * otherwise remove the key. 
*) - Db.VM.remove_from_platform ~__context - ~self:vm ~key:Xapi_globs.igd_passthru_key; - if List.exists - (fun pci -> - let (_, pci_bus, _, _) = Pciops.pcidev_of_pci ~__context pci in - (pci_bus = 0) && (Xapi_pci_helpers.igd_is_whitelisted ~__context pci)) - pcis - then Db.VM.add_to_platform ~__context ~self:vm ~key:Xapi_globs.igd_passthru_key ~value:"true"; - (* The GPU PCI devices which xapi manages may have dependencies: *) - let dependent_pcis = List.setify (List.flatten - (List.map (fun pci -> Db.PCI.get_dependencies ~__context ~self:pci) pcis)) in - let devs : (int * int * int * int) list = List.sort compare (List.map (Pciops.pcidev_of_pci ~__context) (pcis @ dependent_pcis)) in - (* Add a hotplug ordering (see pcidevs_of_pci) *) - let devs : ((int * (int * int * int * int))) list = List.rev (snd (List.fold_left (fun (i, acc) pci -> i + 1, (i, pci) :: acc) (0, []) devs)) in - (* Update VM other_config for PCI passthrough *) - (try Db.VM.remove_from_other_config ~__context ~self:vm ~key:Xapi_globs.vgpu_pci with _ -> ()); - let value = String.concat "," (List.map Pciops.to_string devs) in - Db.VM.add_to_other_config ~__context ~self:vm ~key:Xapi_globs.vgpu_pci ~value + let pcis = + if passthru_vgpus <> [] then begin + let pgpus = Db.Host.get_PGPUs ~__context ~self:host in + let _, pcis = + List.fold_left + (fun (pgpus, pcis) passthru_vgpu -> + create_passthrough_vgpu ~__context ~vm passthru_vgpu pgpus pcis) + (pgpus, []) passthru_vgpus + in + pcis + end else + [] in + (* Add a platform key to the VM if any of the PCIs are integrated GPUs; + * otherwise remove the key. 
*) + Db.VM.remove_from_platform ~__context + ~self:vm ~key:Xapi_globs.igd_passthru_key; + if List.exists + (fun pci -> + let (_, pci_bus, _, _) = Pciops.pcidev_of_pci ~__context pci in + (pci_bus = 0) && (Xapi_pci_helpers.igd_is_whitelisted ~__context pci)) + pcis + then Db.VM.add_to_platform ~__context ~self:vm ~key:Xapi_globs.igd_passthru_key ~value:"true"; + (* The GPU PCI devices which xapi manages may have dependencies: *) + let dependent_pcis = List.setify (List.flatten + (List.map (fun pci -> Db.PCI.get_dependencies ~__context ~self:pci) pcis)) in + let devs : (int * int * int * int) list = List.sort compare (List.map (Pciops.pcidev_of_pci ~__context) (pcis @ dependent_pcis)) in + (* Add a hotplug ordering (see pcidevs_of_pci) *) + let devs : ((int * (int * int * int * int))) list = List.rev (snd (List.fold_left (fun (i, acc) pci -> i + 1, (i, pci) :: acc) (0, []) devs)) in + (* Update VM other_config for PCI passthrough *) + (try Db.VM.remove_from_other_config ~__context ~self:vm ~key:Xapi_globs.vgpu_pci with _ -> ()); + let value = String.concat "," (List.map Pciops.to_string devs) in + Db.VM.add_to_other_config ~__context ~self:vm ~key:Xapi_globs.vgpu_pci ~value let create_virtual_vgpu ~__context host vm vgpu = - debug "Creating virtual VGPUs"; - let available_pgpus = Db.Host.get_PGPUs ~__context ~self:host in - let compatible_pgpus = Db.GPU_group.get_PGPUs ~__context ~self:vgpu.gpu_group_ref in - let pgpus = List.intersect compatible_pgpus available_pgpus in - (* Sort the pgpus in lists of equal optimality for vGPU placement based on - * the GPU groups allocation algorithm *) - let sort_desc = - match Db.GPU_group.get_allocation_algorithm ~__context ~self:vgpu.gpu_group_ref with - | `depth_first -> false - | `breadth_first -> true - in - let rec allocate_vgpu vgpu_type = function - | [] -> None - | pgpu :: remaining_pgpus -> - try - Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type - ~__context ~self:pgpu ~vgpu_type; - Some pgpu - with _ -> 
allocate_vgpu vgpu_type remaining_pgpus - in - let sorted_pgpus = Helpers.sort_by_schwarzian ~descending:sort_desc - (fun pgpu -> - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Client.PGPU.get_remaining_capacity ~rpc ~session_id - ~self:pgpu ~vgpu_type:vgpu.type_ref)) - pgpus - in - match allocate_vgpu vgpu.type_ref sorted_pgpus with - | None -> - raise (Api_errors.Server_error (Api_errors.vm_requires_vgpu, [ - Ref.string_of vm; - Ref.string_of vgpu.gpu_group_ref; - Ref.string_of vgpu.type_ref - ])) - | Some pgpu -> - Db.VGPU.set_scheduled_to_be_resident_on ~__context - ~self:vgpu.vgpu_ref ~value:pgpu + debug "Creating virtual VGPUs"; + let available_pgpus = Db.Host.get_PGPUs ~__context ~self:host in + let compatible_pgpus = Db.GPU_group.get_PGPUs ~__context ~self:vgpu.gpu_group_ref in + let pgpus = List.intersect compatible_pgpus available_pgpus in + (* Sort the pgpus in lists of equal optimality for vGPU placement based on + * the GPU groups allocation algorithm *) + let sort_desc = + match Db.GPU_group.get_allocation_algorithm ~__context ~self:vgpu.gpu_group_ref with + | `depth_first -> false + | `breadth_first -> true + in + let rec allocate_vgpu vgpu_type = function + | [] -> None + | pgpu :: remaining_pgpus -> + try + Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type + ~__context ~self:pgpu ~vgpu_type; + Some pgpu + with _ -> allocate_vgpu vgpu_type remaining_pgpus + in + let sorted_pgpus = Helpers.sort_by_schwarzian ~descending:sort_desc + (fun pgpu -> + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Client.PGPU.get_remaining_capacity ~rpc ~session_id + ~self:pgpu ~vgpu_type:vgpu.type_ref)) + pgpus + in + match allocate_vgpu vgpu.type_ref sorted_pgpus with + | None -> + raise (Api_errors.Server_error (Api_errors.vm_requires_vgpu, [ + Ref.string_of vm; + Ref.string_of vgpu.gpu_group_ref; + Ref.string_of vgpu.type_ref + ])) + | Some pgpu -> + Db.VGPU.set_scheduled_to_be_resident_on ~__context + 
~self:vgpu.vgpu_ref ~value:pgpu let add_vgpus_to_vm ~__context host vm vgpus = - (* Only support a maximum of one virtual GPU per VM for now. *) - match vgpus with - | [] -> () - | vgpu :: _ -> create_virtual_vgpu ~__context host vm vgpu + (* Only support a maximum of one virtual GPU per VM for now. *) + match vgpus with + | [] -> () + | vgpu :: _ -> create_virtual_vgpu ~__context host vm vgpu let vgpu_manual_setup_of_vm vm_r = - List.mem_assoc Xapi_globs.vgpu_manual_setup_key vm_r.API.vM_platform && - (List.assoc Xapi_globs.vgpu_manual_setup_key vm_r.API.vM_platform = "true") + List.mem_assoc Xapi_globs.vgpu_manual_setup_key vm_r.API.vM_platform && + (List.assoc Xapi_globs.vgpu_manual_setup_key vm_r.API.vM_platform = "true") let create_vgpus ~__context host (vm, vm_r) hvm = - let vgpus = vgpus_of_vm ~__context vm_r in - if vgpus <> [] then begin - if not hvm then - raise (Api_errors.Server_error (Api_errors.feature_requires_hvm, ["vGPU- and GPU-passthrough needs HVM"])) - end; - let (passthru_vgpus, virtual_vgpus) = - List.partition - (fun v -> Xapi_vgpu.requires_passthrough ~__context ~self:v.vgpu_ref) - vgpus - in - if virtual_vgpus <> [] && not (Pool_features.is_enabled ~__context Features.VGPU) then - raise (Api_errors.Server_error (Api_errors.feature_restricted, [])); - add_pcis_to_vm ~__context host vm passthru_vgpus; - if not (vgpu_manual_setup_of_vm vm_r) - then add_vgpus_to_vm ~__context host vm virtual_vgpus + let vgpus = vgpus_of_vm ~__context vm_r in + if vgpus <> [] then begin + if not hvm then + raise (Api_errors.Server_error (Api_errors.feature_requires_hvm, ["vGPU- and GPU-passthrough needs HVM"])) + end; + let (passthru_vgpus, virtual_vgpus) = + List.partition + (fun v -> Xapi_vgpu.requires_passthrough ~__context ~self:v.vgpu_ref) + vgpus + in + if virtual_vgpus <> [] && not (Pool_features.is_enabled ~__context Features.VGPU) then + raise (Api_errors.Server_error (Api_errors.feature_restricted, [])); + add_pcis_to_vm ~__context host vm 
passthru_vgpus; + if not (vgpu_manual_setup_of_vm vm_r) + then add_vgpus_to_vm ~__context host vm virtual_vgpus let list_pcis_for_passthrough ~__context ~vm = - try - let value = List.assoc Xapi_globs.vgpu_pci (Db.VM.get_other_config ~__context ~self:vm) in - List.map Pciops.of_string (String.split ',' value) - with _ -> [] + try + let value = List.assoc Xapi_globs.vgpu_pci (Db.VM.get_other_config ~__context ~self:vm) in + List.map Pciops.of_string (String.split ',' value) + with _ -> [] diff --git a/ocaml/xapi/vgpuops.mli b/ocaml/xapi/vgpuops.mli index 8a28431fb84..6e403fce658 100644 --- a/ocaml/xapi/vgpuops.mli +++ b/ocaml/xapi/vgpuops.mli @@ -11,14 +11,14 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) - + (** Module that handles assigning vGPUs to VMs. * @group Virtual-Machine Management - *) +*) (** Assign a list of PCI devices to a VM for GPU passthrough, store them in - other_config:vgpu_pci *) + other_config:vgpu_pci *) val create_vgpus : __context:Context.t -> (API.ref_host) ->(API.ref_VM * API.vM_t) -> bool -> unit diff --git a/ocaml/xapi/vhd_tool_wrapper.ml b/ocaml/xapi/vhd_tool_wrapper.ml index 6685b4baccd..0a4b7c33868 100644 --- a/ocaml/xapi/vhd_tool_wrapper.ml +++ b/ocaml/xapi/vhd_tool_wrapper.ml @@ -13,7 +13,7 @@ *) (** HTTP handler for importing a raw VDI. 
* @group Import and Export - *) +*) module D=Debug.Make(struct let name="vhd_tool_wrapper" end) open D @@ -35,31 +35,31 @@ let run_vhd_tool progress_cb args s s' path = let to_close = ref [ pipe_read; pipe_write ] in let close x = if List.mem x !to_close then (Unix.close x; to_close := List.filter (fun y -> y <> x) !to_close) in Stdext.Pervasiveext.finally - (fun () -> - match with_logfile_fd "vhd-tool" - (fun log_fd -> - let pid = safe_close_and_exec None (Some pipe_write) (Some log_fd) [ s', s ] vhd_tool args in - close pipe_write; - begin - try - let buf = String.make 3 '\000' in - while true do - Stdext.Unixext.really_read pipe_read buf 0 (String.length buf); - progress_cb (int_of_string buf) - done - with End_of_file -> () - | e -> - warn "unexpected error reading progress from vhd-tool: %s" (Printexc.to_string e) - end; - let (_, status) = waitpid pid in - if status <> Unix.WEXITED 0 then begin - error "vhd-tool failed, returning VDI_IO_ERROR"; - raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O errors"])) - end - ) with - | Success(out, _) -> debug "%s" out - | Failure(out, e) -> error "vhd-tool output: %s" out; raise e - ) (fun () -> close pipe_read; close pipe_write) + (fun () -> + match with_logfile_fd "vhd-tool" + (fun log_fd -> + let pid = safe_close_and_exec None (Some pipe_write) (Some log_fd) [ s', s ] vhd_tool args in + close pipe_write; + begin + try + let buf = String.make 3 '\000' in + while true do + Stdext.Unixext.really_read pipe_read buf 0 (String.length buf); + progress_cb (int_of_string buf) + done + with End_of_file -> () + | e -> + warn "unexpected error reading progress from vhd-tool: %s" (Printexc.to_string e) + end; + let (_, status) = waitpid pid in + if status <> Unix.WEXITED 0 then begin + error "vhd-tool failed, returning VDI_IO_ERROR"; + raise (Api_errors.Server_error (Api_errors.vdi_io_error, ["Device I/O errors"])) + end + ) with + | Success(out, _) -> debug "%s" out + | Failure(out, e) -> error "vhd-tool 
output: %s" out; raise e + ) (fun () -> close pipe_read; close pipe_write) let receive progress_cb format protocol (s: Unix.file_descr) (length: int64 option) (path: string) (prefix: string) (prezeroed: bool) = let s' = Uuidm.to_string (Uuidm.create `V4) in @@ -83,54 +83,54 @@ let receive progress_cb format protocol (s: Unix.file_descr) (length: int64 opti open Stdext.Fun let startswith prefix x = - let prefix' = String.length prefix - and x' = String.length x in - prefix' <= x' && (String.sub x 0 prefix' = prefix) + let prefix' = String.length prefix + and x' = String.length x in + prefix' <= x' && (String.sub x 0 prefix' = prefix) (** [find_backend_device path] returns [Some path'] where [path'] is the backend path in the driver domain corresponding to the frontend device [path] in this domain. *) let find_backend_device path = - try - let open Xenstore in - (* If we're looking at a xen frontend device, see if the backend - is in the same domain. If so check if it looks like a .vhd *) - let rdev = (Unix.stat path).Unix.st_rdev in - let major = rdev / 256 and minor = rdev mod 256 in - let link = Unix.readlink (Printf.sprintf "/sys/dev/block/%d:%d/device" major minor) in - match List.rev (String.split '/' link) with - | id :: "xen" :: "devices" :: _ when startswith "vbd-" id -> - let id = int_of_string (String.sub id 4 (String.length id - 4)) in - with_xs (fun xs -> - let self = xs.Xs.read "domid" in - let backend = xs.Xs.read (Printf.sprintf "device/vbd/%d/backend" id) in - let params = xs.Xs.read (Printf.sprintf "%s/params" backend) in - match String.split '/' backend with - | "local" :: "domain" :: bedomid :: _ -> - assert (self = bedomid); - Some params - | _ -> raise Not_found - ) - | _ -> raise Not_found - with _ -> None + try + let open Xenstore in + (* If we're looking at a xen frontend device, see if the backend + is in the same domain. 
If so check if it looks like a .vhd *) + let rdev = (Unix.stat path).Unix.st_rdev in + let major = rdev / 256 and minor = rdev mod 256 in + let link = Unix.readlink (Printf.sprintf "/sys/dev/block/%d:%d/device" major minor) in + match List.rev (String.split '/' link) with + | id :: "xen" :: "devices" :: _ when startswith "vbd-" id -> + let id = int_of_string (String.sub id 4 (String.length id - 4)) in + with_xs (fun xs -> + let self = xs.Xs.read "domid" in + let backend = xs.Xs.read (Printf.sprintf "device/vbd/%d/backend" id) in + let params = xs.Xs.read (Printf.sprintf "%s/params" backend) in + match String.split '/' backend with + | "local" :: "domain" :: bedomid :: _ -> + assert (self = bedomid); + Some params + | _ -> raise Not_found + ) + | _ -> raise Not_found + with _ -> None (** [vhd_of_device path] returns (Some vhd) where 'vhd' is the vhd leaf backing a particular device [path] or None. [path] may either be a blktap2 device *or* a blkfront device backed by a blktap2 device. If the latter then the script must be run in the same domain as blkback. 
*) let vhd_of_device path = - let tapdisk_of_path path = - try - match Tapctl.of_device (Tapctl.create ()) path with - | _, _, (Some ("vhd", vhd)) -> Some vhd - | _, _, _ -> raise Not_found - with Tapctl.Not_blktap -> - debug "Device %s is not controlled by blktap" path; - None - | Tapctl.Not_a_device -> - debug "%s is not a device" path; - None - | _ -> - debug "Device %s has an unknown driver" path; - None in - find_backend_device path |> Stdext.Opt.default path |> tapdisk_of_path + let tapdisk_of_path path = + try + match Tapctl.of_device (Tapctl.create ()) path with + | _, _, (Some ("vhd", vhd)) -> Some vhd + | _, _, _ -> raise Not_found + with Tapctl.Not_blktap -> + debug "Device %s is not controlled by blktap" path; + None + | Tapctl.Not_a_device -> + debug "%s is not a device" path; + None + | _ -> + debug "Device %s has an unknown driver" path; + None in + find_backend_device path |> Stdext.Opt.default path |> tapdisk_of_path let send progress_cb ?relative_to (protocol: string) (dest_format: string) (s: Unix.file_descr) (path: string) (prefix: string) = let s' = Uuidm.to_string (Uuidm.create `V4) in @@ -140,11 +140,11 @@ let send progress_cb ?relative_to (protocol: string) (dest_format: string) (s: U let relative_to = match relative_to with | Some path -> begin match vhd_of_device path with - | Some vhd -> Some vhd - | None -> - error "base VDI is not a vhd; cannot compute differences"; - failwith "base VDI is not a vhd; cannot compute differences" - end + | Some vhd -> Some vhd + | None -> + error "base VDI is not a vhd; cannot compute differences"; + failwith "base VDI is not a vhd; cannot compute differences" + end | None -> None in let args = [ "stream"; "--source-protocol"; "none"; @@ -159,7 +159,7 @@ let send progress_cb ?relative_to (protocol: string) (dest_format: string) (s: U "--direct"; "--path"; vhd_search_path; ] @ (match relative_to with - | None -> [] - | Some x -> [ "--relative-to"; x ]) in + | None -> [] + | Some x -> [ "--relative-to"; x 
]) in run_vhd_tool progress_cb args s s' path diff --git a/ocaml/xapi/vm_placement.ml b/ocaml/xapi/vm_placement.ml index 818ef222fcb..545b42d3c12 100644 --- a/ocaml/xapi/vm_placement.ml +++ b/ocaml/xapi/vm_placement.ml @@ -13,7 +13,7 @@ *) (** * @group Virtual-Machine Management - *) +*) let ( ++ ) = Int64.add let ( -- ) = Int64.sub @@ -23,29 +23,29 @@ let ( ** ) = Int64.mul (* === Snapshot types ======================================================= *) module Guest_snapshot = struct type t = - { id : string - ; memory_overhead : int64 - ; memory_static_min : int64 - ; memory_dynamic_min : int64 - ; memory_dynamic_max : int64 - ; memory_static_max : int64 - } + { id : string + ; memory_overhead : int64 + ; memory_static_min : int64 + ; memory_dynamic_min : int64 + ; memory_dynamic_max : int64 + ; memory_static_max : int64 + } end module Host_snapshot = struct type t = - { id : string - ; is_pool_master : bool - ; guests_resident : Guest_snapshot.t list - ; guests_scheduled : Guest_snapshot.t list - ; memory_overhead : int64 - ; memory_total : int64 -} + { id : string + ; is_pool_master : bool + ; guests_resident : Guest_snapshot.t list + ; guests_scheduled : Guest_snapshot.t list + ; memory_overhead : int64 + ; memory_total : int64 + } end module Pool_snapshot = struct type t = - { id : string - ; hosts : Host_snapshot.t list - } + { id : string + ; hosts : Host_snapshot.t list + } end module GS = Guest_snapshot @@ -55,20 +55,20 @@ module PS = Pool_snapshot (* === Snapshot summary types =============================================== *) module Host_snapshot_summary = struct type t = - { id : string - ; is_pool_master : bool - ; memory_static_min_sum : int64 - ; memory_dynamic_min_sum : int64 - ; memory_dynamic_max_sum : int64 - ; memory_static_max_sum : int64 - ; memory_available_sum : int64 - } + { id : string + ; is_pool_master : bool + ; memory_static_min_sum : int64 + ; memory_dynamic_min_sum : int64 + ; memory_dynamic_max_sum : int64 + ; memory_static_max_sum 
: int64 + ; memory_available_sum : int64 + } end module Pool_snapshot_summary = struct type t = - { id : string - ; hosts : Host_snapshot_summary.t list - } + { id : string + ; hosts : Host_snapshot_summary.t list + } end module HSS = Host_snapshot_summary @@ -77,106 +77,106 @@ module PSS = Pool_snapshot_summary (* === Snapshot summary constructors ======================================== *) let summarise_host_snapshot extra_guests host = - let guests = host.HS.guests_resident @ host.HS.guests_scheduled @ - extra_guests in - let sum host_value guest_value = - (List.fold_left (++) host_value (List.map guest_value guests)) in - { HSS.id = host.HS.id - ; HSS.is_pool_master = host.HS.is_pool_master - ; HSS.memory_static_min_sum = sum 0L (fun g -> g.GS.memory_static_min) - ; HSS.memory_dynamic_min_sum = sum 0L (fun g -> g.GS.memory_dynamic_min) - ; HSS.memory_dynamic_max_sum = sum 0L (fun g -> g.GS.memory_dynamic_max) - ; HSS.memory_static_max_sum = sum 0L (fun g -> g.GS.memory_static_max) - ; HSS.memory_available_sum = host.HS.memory_total -- - (sum host.HS.memory_overhead (fun g -> g.GS.memory_overhead)) - } + let guests = host.HS.guests_resident @ host.HS.guests_scheduled @ + extra_guests in + let sum host_value guest_value = + (List.fold_left (++) host_value (List.map guest_value guests)) in + { HSS.id = host.HS.id + ; HSS.is_pool_master = host.HS.is_pool_master + ; HSS.memory_static_min_sum = sum 0L (fun g -> g.GS.memory_static_min) + ; HSS.memory_dynamic_min_sum = sum 0L (fun g -> g.GS.memory_dynamic_min) + ; HSS.memory_dynamic_max_sum = sum 0L (fun g -> g.GS.memory_dynamic_max) + ; HSS.memory_static_max_sum = sum 0L (fun g -> g.GS.memory_static_max) + ; HSS.memory_available_sum = host.HS.memory_total -- + (sum host.HS.memory_overhead (fun g -> g.GS.memory_overhead)) + } let summarise_pool_snapshot extra_guests pool = - { PSS.id = pool.PS.id - ; PSS.hosts = List.map (summarise_host_snapshot extra_guests) pool.PS.hosts - } + { PSS.id = pool.PS.id + ; PSS.hosts = 
List.map (summarise_host_snapshot extra_guests) pool.PS.hosts + } (* === Generic list functions =============================================== *) (** Drops the first [n] elements from the given [list] and returns a new list -containing the remaining elements. @raise Invalid_argument if [n] is negative or -greater than the length of [list]. *) + containing the remaining elements. @raise Invalid_argument if [n] is negative or + greater than the length of [list]. *) let drop n list = - if (n < 0 || n > (List.length list)) then raise (Invalid_argument "n"); - let rec drop n list = - if (n = 0) then list - else drop (n - 1) (List.tl list) in - drop n list + if (n < 0 || n > (List.length list)) then raise (Invalid_argument "n"); + let rec drop n list = + if (n = 0) then list + else drop (n - 1) (List.tl list) in + drop n list (** Takes the first [n] elements from the given [list] and returns a new list -containing the taken elements. @raise Invalid_argument if [n] is negative or -greater than the length of [list]. *) + containing the taken elements. @raise Invalid_argument if [n] is negative or + greater than the length of [list]. *) let take n list = - if (n < 0 || n > (List.length list)) then raise (Invalid_argument "n"); - let rec take n list acc = - if (n = 0) then (List.rev acc) - else take (n - 1) (List.tl list) ((List.hd list) :: acc) in - take n list [] + if (n < 0 || n > (List.length list)) then raise (Invalid_argument "n"); + let rec take n list acc = + if (n = 0) then (List.rev acc) + else take (n - 1) (List.tl list) ((List.hd list) :: acc) in + take n list [] (** Takes the element at index [n] from the given [list] and returns a pair -containing the taken element and the remaining list. @raise Invalid_argument -if [n] is negative or greater than or equal to the length of [list].*) + containing the taken element and the remaining list. 
@raise Invalid_argument + if [n] is negative or greater than or equal to the length of [list].*) let take_nth n list = - if (n < 0 || n >= (List.length list)) then raise (Invalid_argument "n"); - let rec take_nth n list1 list2 = - if (n = 0) then (List.hd list2), ((List.rev list1) @ List.tl list2) - else take_nth (n - 1) ((List.hd list2) :: list1) (List.tl list2) - in - take_nth n [] list + if (n < 0 || n >= (List.length list)) then raise (Invalid_argument "n"); + let rec take_nth n list1 list2 = + if (n = 0) then (List.hd list2), ((List.rev list1) @ List.tl list2) + else take_nth (n - 1) ((List.hd list2) :: list1) (List.tl list2) + in + take_nth n [] list (** Evaluates the given function [generate_value], capable of generating a value -r in the range 0 ≤ r < 1, and linearly scales the result to generate an index i -into the given [list] where 0 ≤ i < [length list]. @raise Invalid_argument if -the [list] is empty or if the given [generate_value] function generates a value -r outside the range 0 ≤ r < 1. *) + r in the range 0 ≤ r < 1, and linearly scales the result to generate an index i + into the given [list] where 0 ≤ i < [length list]. @raise Invalid_argument if + the [list] is empty or if the given [generate_value] function generates a value + r outside the range 0 ≤ r < 1. *) let generate_list_index generate_value list = - let length = List.length list in - if (length = 0) then - raise (Invalid_argument "list"); - let value = generate_value () in - if (value < 0.0 || value >= 1.0) then - raise (Invalid_argument "generate_value"); - int_of_float (float_of_int length *. value) + let length = List.length list in + if (length = 0) then + raise (Invalid_argument "list"); + let value = generate_value () in + if (value < 0.0 || value >= 1.0) then + raise (Invalid_argument "generate_value"); + int_of_float (float_of_int length *. 
value) (** Evaluates the given function [generate_random_value], capable of generating -a random value r in the range 0 ≤ r < 1, and uses the result to select and take -a random element from the given [list]. Returns a pair containing the taken -element and the remaining list. @raise Invalid_argument if the [list] is empty -or if [generate_random_value] generates a value r outside the range 0 ≤ r < 1.*) + a random value r in the range 0 ≤ r < 1, and uses the result to select and take + a random element from the given [list]. Returns a pair containing the taken + element and the remaining list. @raise Invalid_argument if the [list] is empty + or if [generate_random_value] generates a value r outside the range 0 ≤ r < 1.*) let take_random_element_from_list generate_random_value list = - let index = generate_list_index generate_random_value list in - take_nth index list + let index = generate_list_index generate_random_value list in + take_nth index list (** A derivative of the Schwartzian transform. *) let evaluate_sort_partition evaluate sort partition list = - let list_evaluated = List.map (fun x -> (x, evaluate (x))) list in - let list_sorted = List.sort - (fun (a, av) (b, bv) -> sort av bv) list_evaluated in - (* TODO: Use a more efficient partition. *) - let list_selected, list_unselected = List.partition - (fun (a, av) -> partition av) list_sorted in - (List.map (fun (a, av) -> a) list_selected), - (List.map (fun (a, av) -> a) list_unselected) + let list_evaluated = List.map (fun x -> (x, evaluate (x))) list in + let list_sorted = List.sort + (fun (a, av) (b, bv) -> sort av bv) list_evaluated in + (* TODO: Use a more efficient partition. 
*) + let list_selected, list_unselected = List.partition + (fun (a, av) -> partition av) list_sorted in + (List.map (fun (a, av) -> a) list_selected), + (List.map (fun (a, av) -> a) list_unselected) (* === Host categories======================================================= *) (** A host category defines a subset of hosts that match a set of criteria. -Each host category function acts as: -{ol -{- an indicator function for membership of the set, returning values: - {ul - {- ≥ 0 for hosts {i inside } the set.} - {- < 0 for hosts {i outside} the set.}}} -{- a valuation function to enable comparison between members of the set, where: - {ul - {- {i higher} values indicate {i more } desirable hosts.} - {- {i lower } values indicate {i less } desirable hosts.} - {- {i equal } values indicate {i equally} desirable hosts.}}}} + Each host category function acts as: + {ol + {- an indicator function for membership of the set, returning values: + {ul + {- ≥ 0 for hosts {i inside } the set.} + {- < 0 for hosts {i outside} the set.}}} + {- a valuation function to enable comparison between members of the set, where: + {ul + {- {i higher} values indicate {i more } desirable hosts.} + {- {i lower } values indicate {i less } desirable hosts.} + {- {i equal } values indicate {i equally} desirable hosts.}}}} *) type host_category = Host_snapshot_summary.t -> int64 @@ -184,129 +184,129 @@ type host_category = Host_snapshot_summary.t -> int64 let compression_ratio_resolution = 1000L (** Transforms the given host category into a derived host category with bias -against the pool master. The derived category function assigns the pool master -a value v' = (v - 1) / 2, where v is the value assigned by the original category -function. *) + against the pool master. The derived category function assigns the pool master + a value v' = (v - 1) / 2, where v is the value assigned by the original category + function. 
*) let bias_away_from_pool_master : host_category -> host_category = - fun host_category host -> - let value = host_category host in - if host.HSS.is_pool_master then (value --1L) // 2L else value + fun host_category host -> + let value = host_category host in + if host.HSS.is_pool_master then (value --1L) // 2L else value (** The {b definite} host category. Includes: -{ul - {- hosts that don't need to compress their guests.}} -This function values each host according to: -{ul - {- slaves: (available_memory - Σ memory_static_max)} - {- master: (available_memory - Σ memory_static_max - 1) / 2}} + {ul + {- hosts that don't need to compress their guests.}} + This function values each host according to: + {ul + {- slaves: (available_memory - Σ memory_static_max)} + {- master: (available_memory - Σ memory_static_max - 1) / 2}} *) let definite_host_category : host_category = - let unbiased_category host = - (host.HSS.memory_available_sum -- host.HSS.memory_static_max_sum) in - bias_away_from_pool_master unbiased_category + let unbiased_category host = + (host.HSS.memory_available_sum -- host.HSS.memory_static_max_sum) in + bias_away_from_pool_master unbiased_category (** The {b probable} host category. 
Includes the union of: -{ul - {- hosts that may need to compress their guests.} - {- hosts included in the {b definite} category.} -} -This function values each host according to: -{ul - {- slaves: (available_memory - Σ memory_dynamic_max)} - {- master: (available_memory - Σ memory_dynamic_max - 1) / 2}} + {ul + {- hosts that may need to compress their guests.} + {- hosts included in the {b definite} category.} + } + This function values each host according to: + {ul + {- slaves: (available_memory - Σ memory_dynamic_max)} + {- master: (available_memory - Σ memory_dynamic_max - 1) / 2}} *) let probable_host_category : host_category = - let unbiased_category host = - (host.HSS.memory_available_sum -- host.HSS.memory_dynamic_max_sum) in - bias_away_from_pool_master unbiased_category + let unbiased_category host = + (host.HSS.memory_available_sum -- host.HSS.memory_dynamic_max_sum) in + bias_away_from_pool_master unbiased_category (** The {b possible} host category. Includes the union of: -{ul - {- hosts that do need to compress their guests.} - {- hosts included in the {b probable} category.} -} -This function values masters and slaves identically: in proportion to their -projected memory compression ratios. *) + {ul + {- hosts that do need to compress their guests.} + {- hosts included in the {b probable} category.} + } + This function values masters and slaves identically: in proportion to their + projected memory compression ratios. 
*) let possible_host_category : host_category = - fun host -> - let ceiling = compression_ratio_resolution in - let available = host.HSS.memory_available_sum in - let minimum = host.HSS.memory_dynamic_min_sum in - let maximum = host.HSS.memory_dynamic_max_sum in - if available >= maximum then ceiling else - if available < minimum then -1L else - (* at this point we know that: *) - (* Σ memory_dynamic_min <= memory_available *) - (* Σ memory_dynamic_max > memory_available *) - (* which implies that: *) - (* Σ memory_dynamic_max > Σ memory_dynamic_min *) - (* which rules out division by zero and implies that: *) - (* 0 <= result < ceiling *) - (ceiling ** (available -- minimum)) // (maximum -- minimum) + fun host -> + let ceiling = compression_ratio_resolution in + let available = host.HSS.memory_available_sum in + let minimum = host.HSS.memory_dynamic_min_sum in + let maximum = host.HSS.memory_dynamic_max_sum in + if available >= maximum then ceiling else + if available < minimum then -1L else + (* at this point we know that: *) + (* Σ memory_dynamic_min <= memory_available *) + (* Σ memory_dynamic_max > memory_available *) + (* which implies that: *) + (* Σ memory_dynamic_max > Σ memory_dynamic_min *) + (* which rules out division by zero and implies that: *) + (* 0 <= result < ceiling *) + (ceiling ** (available -- minimum)) // (maximum -- minimum) (** The {b affinity} host category. Includes the intersection of: -{ul - {- hosts with identifiers in the given host identifier list.} - {- hosts included in the {b possible} category.} -} -This function values masters and slaves identically: in proportion to their -projected memory compression ratios. *) + {ul + {- hosts with identifiers in the given host identifier list.} + {- hosts included in the {b possible} category.} + } + This function values masters and slaves identically: in proportion to their + projected memory compression ratios. 
*) let affinity_host_category affinity_host_ids : host_category = - fun host -> - if List.mem host.HSS.id affinity_host_ids - then possible_host_category host else -1L + fun host -> + if List.mem host.HSS.id affinity_host_ids + then possible_host_category host else -1L (* === Selection functions ================================================== *) let select_host_from_category (category : host_category) hosts - validate_host generate_random_value = - let hosts_within_category, hosts_outside_category = - evaluate_sort_partition - category (fun x y -> compare y x) ((<=) 0L) hosts in - let rec select hosts = - if hosts = [] then None else - let (host, hosts_remaining) = - take_random_element_from_list generate_random_value hosts in - if (validate_host host.HSS.id) - then Some (host.HSS.id) - else select hosts_remaining - in - (select hosts_within_category, hosts_outside_category) + validate_host generate_random_value = + let hosts_within_category, hosts_outside_category = + evaluate_sort_partition + category (fun x y -> compare y x) ((<=) 0L) hosts in + let rec select hosts = + if hosts = [] then None else + let (host, hosts_remaining) = + take_random_element_from_list generate_random_value hosts in + if (validate_host host.HSS.id) + then Some (host.HSS.id) + else select hosts_remaining + in + (select hosts_within_category, hosts_outside_category) let select_host_from_categories categories hosts - validate_host generate_random_value = - let rec select hosts categories = - match hosts, categories with - | [], xx -> None - | xx, [] -> None - | hosts, (category :: categories_remaining) -> - begin - let host, hosts_remaining = select_host_from_category - category hosts validate_host generate_random_value in - if host != None then host else - select hosts_remaining categories_remaining - end - in - select hosts categories + validate_host generate_random_value = + let rec select hosts categories = + match hosts, categories with + | [], xx -> None + | xx, [] -> None + | 
hosts, (category :: categories_remaining) -> + begin + let host, hosts_remaining = select_host_from_category + category hosts validate_host generate_random_value in + if host != None then host else + select hosts_remaining categories_remaining + end + in + select hosts categories let select_host_from_summary pool affinity_host_ids - validate_host generate_random_value = - select_host_from_categories - [ affinity_host_category affinity_host_ids - ; definite_host_category - ; probable_host_category - ; possible_host_category - ] - pool.PSS.hosts validate_host generate_random_value + validate_host generate_random_value = + select_host_from_categories + [ affinity_host_category affinity_host_ids + ; definite_host_category + ; probable_host_category + ; possible_host_category + ] + pool.PSS.hosts validate_host generate_random_value (* === Random number generators ============================================= *) (** Generates random numbers within the range 0 ≤ r < 1 according to the -standard uniform random distribution. *) + standard uniform random distribution. *) let uniform_random_fn () = Random.float 1. (** Generates random numbers within the range 0 ≤ r < 1, biased towards 0 by -squaring the output of [uniform_random_fn]. *) + squaring the output of [uniform_random_fn]. *) let biased_random_fn () = let x = uniform_random_fn () in x *. x (** Generates zeros. 
*) diff --git a/ocaml/xapi/vm_platform.ml b/ocaml/xapi/vm_platform.ml index 0d3dabe05ac..8a9e8d125eb 100644 --- a/ocaml/xapi/vm_platform.ml +++ b/ocaml/xapi/vm_platform.ml @@ -84,8 +84,8 @@ let generation_id = "generation-id" let is_valid ~key ~platformdata = (not (List.mem_assoc key platformdata)) || (match List.assoc key platformdata |> String.lowercase with - | "true" | "1" | "false" | "0" -> true - | v -> false) + | "true" | "1" | "false" | "0" -> true + | v -> false) let is_true ~key ~platformdata ~default = try @@ -111,19 +111,19 @@ let sanity_check ~platformdata ~vcpu_max ~vcpu_at_startup ~hvm ~filter_out_unkno in (* Sanity check for HVM domains with invalid VCPU configuration*) if hvm && (List.mem_assoc "cores-per-socket" platformdata) then - begin - try - let cores_per_socket = int_of_string(List.assoc "cores-per-socket" platformdata) in - (* cores per socket has to be in multiples of VCPUs_max and VCPUs_at_startup *) - if (((Int64.to_int(vcpu_max) mod cores_per_socket) <> 0) - || ((Int64.to_int(vcpu_at_startup) mod cores_per_socket) <> 0)) then - raise (Api_errors.Server_error(Api_errors.invalid_value, - ["platform:cores-per-socket"; - "VCPUs_max/VCPUs_at_startup must be a multiple of this field"])) - with Failure msg -> - raise (Api_errors.Server_error(Api_errors.invalid_value, ["platform:cores-per-socket"; - Printf.sprintf "value = %s is not a valid int" (List.assoc "cores-per-socket" platformdata)])) - end; + begin + try + let cores_per_socket = int_of_string(List.assoc "cores-per-socket" platformdata) in + (* cores per socket has to be in multiples of VCPUs_max and VCPUs_at_startup *) + if (((Int64.to_int(vcpu_max) mod cores_per_socket) <> 0) + || ((Int64.to_int(vcpu_at_startup) mod cores_per_socket) <> 0)) then + raise (Api_errors.Server_error(Api_errors.invalid_value, + ["platform:cores-per-socket"; + "VCPUs_max/VCPUs_at_startup must be a multiple of this field"])) + with Failure msg -> + raise (Api_errors.Server_error(Api_errors.invalid_value, 
["platform:cores-per-socket"; + Printf.sprintf "value = %s is not a valid int" (List.assoc "cores-per-socket" platformdata)])) + end; (* Add usb emulation flags. Make sure we don't send usb=false and usb_tablet=true, as that wouldn't make sense. *) @@ -137,7 +137,7 @@ let sanity_check ~platformdata ~vcpu_max ~vcpu_at_startup ~hvm ~filter_out_unkno let platformdata = List.update_assoc [(usb, string_of_bool usb_enabled); - (usb_tablet, string_of_bool usb_tablet_enabled)] + (usb_tablet, string_of_bool usb_tablet_enabled)] platformdata in (* Filter out invalid values for the "parallel" key. We don't want to give @@ -157,9 +157,9 @@ let sanity_check ~platformdata ~vcpu_max ~vcpu_at_startup ~hvm ~filter_out_unkno let check_restricted_flags ~__context platform = if not (is_valid nested_virt platform) then raise (Api_errors.Server_error - (Api_errors.invalid_value, - [Printf.sprintf "platform:%s" nested_virt; - List.assoc nested_virt platform])); + (Api_errors.invalid_value, + [Printf.sprintf "platform:%s" nested_virt; + List.assoc nested_virt platform])); if is_true nested_virt platform false then Pool_features.assert_enabled ~__context ~f:Features.Nested_virt diff --git a/ocaml/xapi/vpx.ml b/ocaml/xapi/vpx.ml index 2000eaed950..48df1706e8b 100644 --- a/ocaml/xapi/vpx.ml +++ b/ocaml/xapi/vpx.ml @@ -9,247 +9,247 @@ type importInfo = { sRuuid:string } type jobInfo = {source:serverInfo; sourceVmUUID:string; sourceVmName:string; importInfo:importInfo} type dateTime = Stdext.Date.iso8601 type jobInstance = { - id:string; jobName:string; jobDesc:string ; xenServerName:string; - sRName:string; createdTime:dateTime; startTime:dateTime; - completedTime:dateTime; errorString:string; compressedBytesRead:int64; - uncompressedBytesWritten:int64; stateDesc:string; - percentComplete:int64; state:jobState; clientIpEndPoint:string; jobInfo:jobInfo + id:string; jobName:string; jobDesc:string ; xenServerName:string; + sRName:string; createdTime:dateTime; startTime:dateTime; + 
completedTime:dateTime; errorString:string; compressedBytesRead:int64; + uncompressedBytesWritten:int64; stateDesc:string; + percentComplete:int64; state:jobState; clientIpEndPoint:string; jobInfo:jobInfo } type vmInstance = { - uUID:string; name:string; powerState:int32; oSType :string; - committedStorage:int64; uncommittedStorage:int64; template:bool + uUID:string; name:string; powerState:int32; oSType :string; + committedStorage:int64; uncommittedStorage:int64; template:bool } let rpc_type_error x structname expected = - let msg = Printf.sprintf "'%s_of_rpc:got '%s' when '%s' was expected" - structname (Rpc.to_string x) expected in - raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; msg ])) + let msg = Printf.sprintf "'%s_of_rpc:got '%s' when '%s' was expected" + structname (Rpc.to_string x) expected in + raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; msg ])) let get_dict dict key structname = - let error_key x = - let msg = Printf.sprintf "'%s_of_rpc:caught exception '%s' while looking for key '%s' in '%s'\n" - structname (Printexc.to_string x) key (Rpc.to_string dict) in - raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; msg ])) - in - match dict with - | Rpc.Dict assoc -> (try List.assoc key assoc with x -> error_key x) - | x -> rpc_type_error x structname "Dict" + let error_key x = + let msg = Printf.sprintf "'%s_of_rpc:caught exception '%s' while looking for key '%s' in '%s'\n" + structname (Printexc.to_string x) key (Rpc.to_string dict) in + raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; msg ])) + in + match dict with + | Rpc.Dict assoc -> (try List.assoc key assoc with x -> error_key x) + | x -> rpc_type_error x structname "Dict" let get_string_dict dict key structname = - let value = get_dict dict key structname in - match value with - | Rpc.String s -> s - | x -> 
rpc_type_error x structname "String(string)" + let value = get_dict dict key structname in + match value with + | Rpc.String s -> s + | x -> rpc_type_error x structname "String(string)" let get_int64_dict dict key structname = - let value = get_dict dict key structname in - match value with - | Rpc.Int v -> v - | x -> rpc_type_error x structname "Int(int64)" + let value = get_dict dict key structname in + match value with + | Rpc.Int v -> v + | x -> rpc_type_error x structname "Int(int64)" let get_bool_dict dict key structname = - let value = get_dict dict key structname in - match value with - | Rpc.Bool v -> v - | x -> rpc_type_error x structname "Bool(bool)" + let value = get_dict dict key structname in + match value with + | Rpc.Bool v -> v + | x -> rpc_type_error x structname "Bool(bool)" let string_of_serverType = function - | XenServer -> "XenServer" - | ESXServer -> "ESXServer" - | VirtualCenter -> "VirtualCenter" - | HyperVServer -> "HyperVServer" + | XenServer -> "XenServer" + | ESXServer -> "ESXServer" + | VirtualCenter -> "VirtualCenter" + | HyperVServer -> "HyperVServer" let serverType_of_string = function - | "default" - | "XenServer" -> XenServer - | "ESXServer" -> ESXServer - | "VirtualCenter" -> VirtualCenter - | "HyperVServer" -> HyperVServer - | x -> raise (Api_errors.Server_error (Api_errors.invalid_value, [x])) + | "default" + | "XenServer" -> XenServer + | "ESXServer" -> ESXServer + | "VirtualCenter" -> VirtualCenter + | "HyperVServer" -> HyperVServer + | x -> raise (Api_errors.Server_error (Api_errors.invalid_value, [x])) let serverType_of_rpc x = - match x with - | Rpc.Int n -> (match (Int64.to_int n) with - | 0 -> XenServer - | 1 -> ESXServer - | 2 -> VirtualCenter - | 3 -> HyperVServer - | _ -> rpc_type_error x "ServerType" "Int(int64)") - | y -> rpc_type_error y "ServerType" "Int(int64)" + match x with + | Rpc.Int n -> (match (Int64.to_int n) with + | 0 -> XenServer + | 1 -> ESXServer + | 2 -> VirtualCenter + | 3 -> HyperVServer + | _ -> 
rpc_type_error x "ServerType" "Int(int64)") + | y -> rpc_type_error y "ServerType" "Int(int64)" let rpc_of_serverType = function - | XenServer -> Rpc.rpc_of_int 0 - | ESXServer -> Rpc.rpc_of_int 1 - | VirtualCenter -> Rpc.rpc_of_int 2 - | HyperVServer -> Rpc.rpc_of_int 3 + | XenServer -> Rpc.rpc_of_int 0 + | ESXServer -> Rpc.rpc_of_int 1 + | VirtualCenter -> Rpc.rpc_of_int 2 + | HyperVServer -> Rpc.rpc_of_int 3 let string_of_jobState = function - | Created -> "Created" - | Queued -> "Queued" - | Running -> "Running" - | Completed -> "Completed" - | Aborted -> "Aborted" - | UserAborted -> "UserAborted" + | Created -> "Created" + | Queued -> "Queued" + | Running -> "Running" + | Completed -> "Completed" + | Aborted -> "Aborted" + | UserAborted -> "UserAborted" let jobState_of_rpc x = - match x with - | Rpc.Int n -> - (match (Int64.to_int n) with - | 0 -> Created - | 1 -> Queued - | 2 -> Running - | 3 -> Completed - | 4 -> Aborted - | 5 -> UserAborted - | _ -> rpc_type_error x "JobState" "Int(int64)") - | e -> rpc_type_error e "JobState" "Int(int64)" + match x with + | Rpc.Int n -> + (match (Int64.to_int n) with + | 0 -> Created + | 1 -> Queued + | 2 -> Running + | 3 -> Completed + | 4 -> Aborted + | 5 -> UserAborted + | _ -> rpc_type_error x "JobState" "Int(int64)") + | e -> rpc_type_error e "JobState" "Int(int64)" let rpc_of_jobState = function - | Created -> Rpc.rpc_of_int 0 - | Queued -> Rpc.rpc_of_int 1 - | Running -> Rpc.rpc_of_int 2 - | Completed -> Rpc.rpc_of_int 3 - | Aborted -> Rpc.rpc_of_int 4 - | UserAborted -> Rpc.rpc_of_int 5 + | Created -> Rpc.rpc_of_int 0 + | Queued -> Rpc.rpc_of_int 1 + | Running -> Rpc.rpc_of_int 2 + | Completed -> Rpc.rpc_of_int 3 + | Aborted -> Rpc.rpc_of_int 4 + | UserAborted -> Rpc.rpc_of_int 5 let rpc_of_serviceCred v = - Rpc.Dict [ - ("Username", (Rpc.String v.username)); - ("Password", (Rpc.String v.password)) - ] + Rpc.Dict [ + ("Username", (Rpc.String v.username)); + ("Password", (Rpc.String v.password)) + ] let 
serviceCred_of_rpc r = - let sn = "ServiceCred" in - { - username = (get_string_dict r "Username" sn); - password = (get_string_dict r "Password" sn) - } + let sn = "ServiceCred" in + { + username = (get_string_dict r "Username" sn); + password = (get_string_dict r "Password" sn) + } let rpc_of_serverInfo v = - Rpc.Dict [ - "ServerType", (rpc_of_serverType v.serverType); - "Hostname", (Rpc.String v.hostname); "Username", (Rpc.String v.cred.username); - "Password", (Rpc.String v.cred.password) - ] + Rpc.Dict [ + "ServerType", (rpc_of_serverType v.serverType); + "Hostname", (Rpc.String v.hostname); "Username", (Rpc.String v.cred.username); + "Password", (Rpc.String v.cred.password) + ] let serverInfo_of_rpc r = - let sn = "ServerInfo" in - { - serverType = serverType_of_rpc (get_dict r "ServerType" sn); - hostname = get_string_dict r "Hostname" sn; - cred = { - username = get_string_dict r "Username" sn; - password = get_string_dict r "Password" sn; - } - } + let sn = "ServerInfo" in + { + serverType = serverType_of_rpc (get_dict r "ServerType" sn); + hostname = get_string_dict r "Hostname" sn; + cred = { + username = get_string_dict r "Username" sn; + password = get_string_dict r "Password" sn; + } + } let rpc_of_dateTime v = Rpc.DateTime (Stdext.Date.to_string v) let dateTime_of_rpc r = - match r with - | Rpc.DateTime v -> Stdext.Date.of_string v - | x -> rpc_type_error x "DateTime" "DateTime(datetime)" + match r with + | Rpc.DateTime v -> Stdext.Date.of_string v + | x -> rpc_type_error x "DateTime" "DateTime(datetime)" let rpc_of_importInfo v = Rpc.Dict[ "SRuuid", (Rpc.String v.sRuuid) ] let importInfo_of_rpc r = {sRuuid = get_string_dict r "SRuuid" "ImportInfo"} let rpc_of_jobInfo v = - Rpc.Dict [ - "Source", (rpc_of_serverInfo v.source); - "SourceVmUUID", (Rpc.String v.sourceVmUUID); - "SourceVmName", (Rpc.String v.sourceVmName); - "ImportInfo", (rpc_of_importInfo v.importInfo) - ] + Rpc.Dict [ + "Source", (rpc_of_serverInfo v.source); + "SourceVmUUID", 
(Rpc.String v.sourceVmUUID); + "SourceVmName", (Rpc.String v.sourceVmName); + "ImportInfo", (rpc_of_importInfo v.importInfo) + ] let jobInfo_of_rpc r = - let sn = "JobInfo" in - { - source = serverInfo_of_rpc (get_dict r "Source" sn); - sourceVmUUID = get_string_dict r "SourceVmUUID" sn; - sourceVmName = get_string_dict r "SourceVmName" sn; - importInfo = importInfo_of_rpc (get_dict r "ImportInfo" sn) - } + let sn = "JobInfo" in + { + source = serverInfo_of_rpc (get_dict r "Source" sn); + sourceVmUUID = get_string_dict r "SourceVmUUID" sn; + sourceVmName = get_string_dict r "SourceVmName" sn; + importInfo = importInfo_of_rpc (get_dict r "ImportInfo" sn) + } let rpc_of_jobInstance v = - Rpc.Dict [ - "Id", (Rpc.String v.id); - "JobName", (Rpc.String v.jobName); - "JobDesc", (Rpc.String v.jobDesc); - "XenServerName", (Rpc.String v.xenServerName); - "SRName", (Rpc.String v.sRName); - "CreatedTime", (rpc_of_dateTime v.createdTime); - "StartTime", (rpc_of_dateTime v.startTime); - "CompletedTime", (rpc_of_dateTime v.completedTime); - "ErrorString", (Rpc.String v.errorString); - "CompressedBytesRead", (Rpc.Int v.compressedBytesRead); - "UncompressedBytesWritten", (Rpc.Int v.uncompressedBytesWritten); - "StateDesc", (Rpc.String v.stateDesc); - "PercentComplete", (Rpc.Int v.percentComplete); - "State", (rpc_of_jobState v.state); - "ClientIpEndPoint", (Rpc.String v.clientIpEndPoint); - "JobInfo", (rpc_of_jobInfo v.jobInfo) - ] + Rpc.Dict [ + "Id", (Rpc.String v.id); + "JobName", (Rpc.String v.jobName); + "JobDesc", (Rpc.String v.jobDesc); + "XenServerName", (Rpc.String v.xenServerName); + "SRName", (Rpc.String v.sRName); + "CreatedTime", (rpc_of_dateTime v.createdTime); + "StartTime", (rpc_of_dateTime v.startTime); + "CompletedTime", (rpc_of_dateTime v.completedTime); + "ErrorString", (Rpc.String v.errorString); + "CompressedBytesRead", (Rpc.Int v.compressedBytesRead); + "UncompressedBytesWritten", (Rpc.Int v.uncompressedBytesWritten); + "StateDesc", (Rpc.String v.stateDesc); 
+ "PercentComplete", (Rpc.Int v.percentComplete); + "State", (rpc_of_jobState v.state); + "ClientIpEndPoint", (Rpc.String v.clientIpEndPoint); + "JobInfo", (rpc_of_jobInfo v.jobInfo) + ] let jobInstance_of_rpc r = - let sn = "JobInstance" in - { - id = get_string_dict r "Id" sn; - jobName = get_string_dict r "JobName" sn; - jobDesc = get_string_dict r "JobDesc" sn; - xenServerName = get_string_dict r "XenServerName" sn; - sRName = get_string_dict r "SRName" sn; - createdTime = dateTime_of_rpc (get_dict r "CreatedTime" sn); - startTime = dateTime_of_rpc (get_dict r "StartTime" sn); - completedTime = dateTime_of_rpc (get_dict r "CompletedTime" sn); - errorString = get_string_dict r "ErrorString" sn; - compressedBytesRead = get_int64_dict r "CompressedBytesRead" sn; - uncompressedBytesWritten = get_int64_dict r "UncompressedBytesWritten" sn; - stateDesc = get_string_dict r "StateDesc" sn; - percentComplete = get_int64_dict r "PercentComplete" sn; - state = jobState_of_rpc (get_dict r "State" sn); - clientIpEndPoint = get_string_dict r "ClientIpEndPoint" sn; - jobInfo = jobInfo_of_rpc (get_dict r "JobInfo" sn) - } + let sn = "JobInstance" in + { + id = get_string_dict r "Id" sn; + jobName = get_string_dict r "JobName" sn; + jobDesc = get_string_dict r "JobDesc" sn; + xenServerName = get_string_dict r "XenServerName" sn; + sRName = get_string_dict r "SRName" sn; + createdTime = dateTime_of_rpc (get_dict r "CreatedTime" sn); + startTime = dateTime_of_rpc (get_dict r "StartTime" sn); + completedTime = dateTime_of_rpc (get_dict r "CompletedTime" sn); + errorString = get_string_dict r "ErrorString" sn; + compressedBytesRead = get_int64_dict r "CompressedBytesRead" sn; + uncompressedBytesWritten = get_int64_dict r "UncompressedBytesWritten" sn; + stateDesc = get_string_dict r "StateDesc" sn; + percentComplete = get_int64_dict r "PercentComplete" sn; + state = jobState_of_rpc (get_dict r "State" sn); + clientIpEndPoint = get_string_dict r "ClientIpEndPoint" sn; + jobInfo = 
jobInfo_of_rpc (get_dict r "JobInfo" sn) + } let rpc_of_vmInstance v = - Rpc.Dict [ - "UUID", (Rpc.String v.uUID); - "Name", (Rpc.String v.name); - "PowerState", (Rpc.Int (Int64.of_int32 v.powerState)); - "OSType", (Rpc.String v.oSType); - "CommittedStorage", (Rpc.Int v.committedStorage); - "UncommittedStorage", (Rpc.Int v.uncommittedStorage); - "Template", (Rpc.Bool v.template) - ] + Rpc.Dict [ + "UUID", (Rpc.String v.uUID); + "Name", (Rpc.String v.name); + "PowerState", (Rpc.Int (Int64.of_int32 v.powerState)); + "OSType", (Rpc.String v.oSType); + "CommittedStorage", (Rpc.Int v.committedStorage); + "UncommittedStorage", (Rpc.Int v.uncommittedStorage); + "Template", (Rpc.Bool v.template) + ] let vmInstance_of_rpc r = - let sn = "VmInstance" in - { - uUID = get_string_dict r "UUID" sn; - name = get_string_dict r "Name" sn; - powerState = Int64.to_int32 (get_int64_dict r "PowerState" sn); - oSType = get_string_dict r "OSType" sn; - committedStorage = get_int64_dict r "CommittedStorage" sn; - uncommittedStorage = get_int64_dict r "UncommittedStorage" sn; - template = get_bool_dict r "Template" sn - } + let sn = "VmInstance" in + { + uUID = get_string_dict r "UUID" sn; + name = get_string_dict r "Name" sn; + powerState = Int64.to_int32 (get_int64_dict r "PowerState" sn); + oSType = get_string_dict r "OSType" sn; + committedStorage = get_int64_dict r "CommittedStorage" sn; + uncommittedStorage = get_int64_dict r "UncommittedStorage" sn; + template = get_bool_dict r "Template" sn + } let array_of_rpc lr typename = - match lr with - | Rpc.Enum l -> l - | x -> rpc_type_error x ("[" ^ typename ^ "]") "Enum(t)" + match lr with + | Rpc.Enum l -> l + | x -> rpc_type_error x ("[" ^ typename ^ "]") "Enum(t)" let array_of_vmInstances_of_rpc lr = - let l = array_of_rpc lr "VmInstance" in - List.map (fun r -> vmInstance_of_rpc r) l + let l = array_of_rpc lr "VmInstance" in + List.map (fun r -> vmInstance_of_rpc r) l let array_of_jobInstances_of_rpc lr = - let l = array_of_rpc lr 
"JobInstance" in - List.map (fun r -> jobInstance_of_rpc r) l - -let vpxrpc ip call = - let open Xmlrpc_client in - let transport = SSL(SSL.make (), ip, 443) in -(* debug "call = %s" (Xmlrpc.string_of_call call); *) - XMLRPC_protocol.rpc ~transport:transport - ~http:(xmlrpc ~version:"1.0" "/") call + let l = array_of_rpc lr "JobInstance" in + List.map (fun r -> jobInstance_of_rpc r) l + +let vpxrpc ip call = + let open Xmlrpc_client in + let transport = SSL(SSL.make (), ip, 443) in + (* debug "call = %s" (Xmlrpc.string_of_call call); *) + XMLRPC_protocol.rpc ~transport:transport + ~http:(xmlrpc ~version:"1.0" "/") call diff --git a/ocaml/xapi/wlb_reports.ml b/ocaml/xapi/wlb_reports.ml index 2099bffcd69..b97331c1008 100644 --- a/ocaml/xapi/wlb_reports.ml +++ b/ocaml/xapi/wlb_reports.ml @@ -13,43 +13,43 @@ *) (** Workload Balancing Reports and Diagnostics. * @group Workload Balancing - *) +*) (** - This module serves the /wlb_report and /wlb_diagnostics HTTP requests. - In the former case, we receive some basic parameters (report name, report - params) and pass those to the WLB server as a SOAP request. The latter - takes no parameters, but is also a SOAP request. + This module serves the /wlb_report and /wlb_diagnostics HTTP requests. + In the former case, we receive some basic parameters (report name, report + params) and pass those to the WLB server as a SOAP request. The latter + takes no parameters, but is also a SOAP request. - What comes back is a SOAP response, containing the report data as the - result. The result itself is an XML string, so this ends up as XML - escaped inside an envelope of SOAP/XML cruft. + What comes back is a SOAP response, containing the report data as the + result. The result itself is an XML string, so this ends up as XML + escaped inside an envelope of SOAP/XML cruft. - The response could potentially be large (megabytes), so we have to stream to - avoid OCaml's 16MB string limit. 
We can't use Xmlm, even in streaming mode, - because it's just one very large node, so we hit the same limit. + The response could potentially be large (megabytes), so we have to stream to + avoid OCaml's 16MB string limit. We can't use Xmlm, even in streaming mode, + because it's just one very large node, so we hit the same limit. - What we do instead is have a receive-side state machine, which passes - through three states: + What we do instead is have a receive-side state machine, which passes + through three states: + Looking for the tag, and discarding data. + Looking for the tag, and sending data. + Discarding data until EOF. - When sending, we have a separate two-state machine for entity decode: + When sending, we have a separate two-state machine for entity decode: + Looking for an ampersand, and sending data. + Found an ampersand, so looking for the ending semicolon. - If the response does not contain an node, then it's most - likely a WLB error response. We parse these using the normal XML parser, - through the routines in Workload_balancing. (Error responses are never - large.) + If the response does not contain an node, then it's most + likely a WLB error response. We parse these using the normal XML parser, + through the routines in Workload_balancing. (Error responses are never + large.) - If it parses through neither method, then it's malformed, and we raise an - appropriate exception. + If it parses through neither method, then it's malformed, and we raise an + appropriate exception. - The GetDiagnostics message is identical, except we look for different - start and end tags. - *) + The GetDiagnostics message is identical, except we look for different + start and end tags. 
+*) (* @@ -87,7 +87,7 @@ - + *) open Printf @@ -117,107 +117,107 @@ let trim_and_send method_name (start_str, end_str) recv_sock send_sock = let fill () = let s = String.create bufsize in let n = Unix.read recv_sock s 0 bufsize in - if n > 0 then - Buffer.add_string recv_buf (String.sub s 0 n); - n + if n > 0 then + Buffer.add_string recv_buf (String.sub s 0 n); + n in let send s = let s_len = String.length s in let rec send' i = let c = s.[i] in - (* debug "%c" c; *) - if !send_state = 1 then - begin - if c = '&' then - send_state := 2 - else - Buffer.add_char send_buf c - end - else - begin - if c = ';' then - let e = !entity in - Buffer.add_char send_buf - (if e = "lt" then - '<' - else if e = "gt" then - '>' - else if e = "amp" then - '&' - else if e = "apos" then - '\'' - else if e = "quot" then - '"' - else - hex_entity e); - send_state := 1; - entity := "" - else - entity := !entity ^ (String.of_char c) - end; - if i < s_len - 1 then - send' (i + 1) - else - () + (* debug "%c" c; *) + if !send_state = 1 then + begin + if c = '&' then + send_state := 2 + else + Buffer.add_char send_buf c + end + else + begin + if c = ';' then + let e = !entity in + Buffer.add_char send_buf + (if e = "lt" then + '<' + else if e = "gt" then + '>' + else if e = "amp" then + '&' + else if e = "apos" then + '\'' + else if e = "quot" then + '"' + else + hex_entity e); + send_state := 1; + entity := "" + else + entity := !entity ^ (String.of_char c) + end; + if i < s_len - 1 then + send' (i + 1) + else + () in - send' 0; - ignore (Unix.write send_sock (Buffer.contents send_buf) 0 - (Buffer.length send_buf)); - Buffer.clear send_buf + send' 0; + ignore (Unix.write send_sock (Buffer.contents send_buf) 0 + (Buffer.length send_buf)); + Buffer.clear send_buf in let rec pump () = - let n = fill() in - if Buffer.length recv_buf > 0 then - begin - let s = Buffer.contents recv_buf in - (* debug "%s %d" s !recv_state; *) - if !recv_state = 1 then - match String.find_all start_str s with - | n 
:: _ -> - Buffer.clear recv_buf; - let i = n + String.length start_str in - Buffer.add_substring recv_buf s i (String.length s - i); - recv_state := 2 - | [] -> - () - else if !recv_state = 2 then - match String.find_all end_str s with - | n :: _ -> - send (String.sub s 0 n); - Buffer.clear recv_buf; - recv_state := 3 - | [] -> - send s; - Buffer.clear recv_buf - else + let n = fill() in + if Buffer.length recv_buf > 0 then + begin + let s = Buffer.contents recv_buf in + (* debug "%s %d" s !recv_state; *) + if !recv_state = 1 then + match String.find_all start_str s with + | n :: _ -> Buffer.clear recv_buf; - if n > 0 then - pump() - else if !recv_state != 3 then - (* if in state 1 we are still looking for the opening tag of the data set, expect xml to be valid - if in state 2 we are still looking for the closing tag of the data set, expect xml to be truncated *) - let rec_data = (Buffer.contents recv_buf) in - if !recv_state = 1 then - begin - try - let xml_data = Xml.parse_string rec_data in - Workload_balancing.parse_result_code - method_name - (Workload_balancing.retrieve_inner_xml method_name xml_data true) - "Failed to detect end of XML, data could be truncated" - rec_data - true - with - | Xml.Error err -> - Workload_balancing.raise_malformed_response' method_name (Xml.error err) rec_data - end - else - Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." 
rec_data - end + let i = n + String.length start_str in + Buffer.add_substring recv_buf s i (String.length s - i); + recv_state := 2 + | [] -> + () + else if !recv_state = 2 then + match String.find_all end_str s with + | n :: _ -> + send (String.sub s 0 n); + Buffer.clear recv_buf; + recv_state := 3 + | [] -> + send s; + Buffer.clear recv_buf + else + Buffer.clear recv_buf; + if n > 0 then + pump() + else if !recv_state != 3 then + (* if in state 1 we are still looking for the opening tag of the data set, expect xml to be valid + if in state 2 we are still looking for the closing tag of the data set, expect xml to be truncated *) + let rec_data = (Buffer.contents recv_buf) in + if !recv_state = 1 then + begin + try + let xml_data = Xml.parse_string rec_data in + Workload_balancing.parse_result_code + method_name + (Workload_balancing.retrieve_inner_xml method_name xml_data true) + "Failed to detect end of XML, data could be truncated" + rec_data + true + with + | Xml.Error err -> + Workload_balancing.raise_malformed_response' method_name (Xml.error err) rec_data + end + else + Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." 
rec_data + end in - pump() + pump() let handle req bio method_name tokens (method_name, request_func) = @@ -238,15 +238,15 @@ let handle req bio method_name tokens (method_name, request_func) = Http_svr.headers client_sock (Http.http_200_ok ()); trim_and_send method_name tokens wlb_sock client_sock in - try - request_func ~__context ~handler:parse - with - | Api_errors.Server_error (_, _) as exn -> - raise exn - | exn -> - warn "WLB %s request failed: %s" method_name - (ExnHelper.string_of_exn exn); - raise (Api_errors.Server_error (Api_errors.internal_error, [])) + try + request_func ~__context ~handler:parse + with + | Api_errors.Server_error (_, _) as exn -> + raise exn + | exn -> + warn "WLB %s request failed: %s" method_name + (ExnHelper.string_of_exn exn); + raise (Api_errors.Server_error (Api_errors.internal_error, [])) ) diff --git a/ocaml/xapi/workload_balancing.ml b/ocaml/xapi/workload_balancing.ml index a5e856763a6..b0300a31a6e 100644 --- a/ocaml/xapi/workload_balancing.ml +++ b/ocaml/xapi/workload_balancing.ml @@ -13,7 +13,7 @@ *) (** Workload Balancing * @group Workload Balancing - *) +*) open Stdext open Printf @@ -64,35 +64,35 @@ let raise_unknown_host () = raise (Api_errors.Server_error (Api_errors.wlb_unknown_host, [])) let raise_connection_reset () = - raise (Api_errors.Server_error (Api_errors.wlb_connection_reset, [])) + raise (Api_errors.Server_error (Api_errors.wlb_connection_reset, [])) let raise_internal_error args = - raise (Api_errors.Server_error (Api_errors.wlb_internal_error, args)) + raise (Api_errors.Server_error (Api_errors.wlb_internal_error, args)) let split_host_port url = try if url.[0] = '[' then (* IPv6 *) begin - let host_end = String.rindex url ']' in - if url.[host_end + 1] <> ':' then - raise_url_invalid url; - let host = String.sub url 1 (host_end - 1) in - let port = String.sub url (host_end + 2) (String.length url - host_end - 2) in - (host, int_of_string port) + let host_end = String.rindex url ']' in + if url.[host_end 
+ 1] <> ':' then + raise_url_invalid url; + let host = String.sub url 1 (host_end - 1) in + let port = String.sub url (host_end + 2) (String.length url - host_end - 2) in + (host, int_of_string port) end else match String.split_f (fun a -> a = ':') url with - | [host; port] -> - (host, int_of_string port) - | _ -> - raise_url_invalid url + | [host; port] -> + (host, int_of_string port) + | _ -> + raise_url_invalid url with - | _ -> raise_url_invalid url + | _ -> raise_url_invalid url let wlb_host_port ~__context = let pool = Helpers.get_pool ~__context in let url = Db.Pool.get_wlb_url ~__context ~self:pool in - split_host_port url + split_host_port url let assert_wlb_licensed ~__context = Pool_features.assert_enabled ~__context ~f:Features.WLB @@ -144,16 +144,16 @@ let unexpected_data meth tag xml = xml (* Function walks down the xml tree matching nodes in a path defined by tag_names , returning the element at the end of the path. - Throws Xml_parse_failure to be handled in context of the calling function *) + Throws Xml_parse_failure to be handled in context of the calling function *) let rec descend_and_match tag_names xml = match tag_names, xml with | [], elem -> elem (* have reached end of the list, with all correct matches so return this element*) | hd_tag::tail, Xml.Element ( _, _, [Xml.PCData data]) -> (*we have a leaf node, check that we are at the end of the path and return it *) - raise_malformed_response "unknown" - (sprintf "Method descend_and_match failed. Found leaf node with tag\ -%s, but expected path to continue with: %s->%s" - hd_tag hd_tag (String.concat "->" tag_names)) xml + raise_malformed_response "unknown" + (sprintf "Method descend_and_match failed. 
Found leaf node with tag\ + %s, but expected path to continue with: %s->%s" + hd_tag hd_tag (String.concat "->" tag_names)) xml | hd_tag::tail, Xml.Element ( _, _, xml_elements) -> (* take the tag off the head of the list and search the children of this element for it *) begin @@ -163,7 +163,7 @@ let rec descend_and_match tag_names xml = with | Not_found -> raise (Xml_parse_failure - (sprintf "Descend_and_match failed. Node %s not found." hd_tag)) + (sprintf "Descend_and_match failed. Node %s not found." hd_tag)) end | _, Xml.PCData _ -> assert false (*This should never happen as a leaf node is detected in an earlier match and returned *) @@ -172,9 +172,9 @@ let data_from_leaf element = match element with | Xml.Element ( _, _, [Xml.PCData data]) ->data | Xml.Element ( _, _, _) -> "" - | _ -> - raise_malformed_response "unknown" "Expected element to be leaf node" - element + | _ -> + raise_malformed_response "unknown" "Expected element to be leaf node" + element let path_to_exception = ["Body"; "Fault"; "Reason"; "Text"] @@ -183,7 +183,7 @@ let path_to_inner meth = let pool_uuid_param ~__context = let pool = Helpers.get_pool ~__context in - sprintf "%s" (Db.Pool.get_uuid ~__context ~self:pool) + sprintf "%s" (Db.Pool.get_uuid ~__context ~self:pool) let wlb_body meth params = Printf.sprintf @@ -209,14 +209,14 @@ let wlb_request host meth body encoded_auth = let filtered_headers headers = List.map (fun s -> - if String.startswith "Authorization:" s then - "Authorization: Basic " - else - s) + if String.startswith "Authorization:" s then + "Authorization: Basic " + else + s) headers let encoded_auth un pw = - Base64.encode (Printf.sprintf "%s:%s" un pw) + Base64.encode (Printf.sprintf "%s:%s" un pw) let wlb_encoded_auth ~__context = let pool = Helpers.get_pool ~__context in @@ -237,15 +237,15 @@ let parse_result_code meth xml_data response initial_error enable_log = | Xml_parse_failure error -> raise_malformed_response' meth (sprintf "After failing to retrieve valid 
response, an error code\ -could not be found. Some data is missing or corrupt. + could not be found. Some data is missing or corrupt. Attempt retrieve valid response: (%s) Attempt to retrieve error code: (%s)" - initial_error error) + initial_error error) (if enable_log - then - response - else - "Logging output disabled for this call.") + then + response + else + "Logging output disabled for this call.") in let message = try @@ -262,7 +262,7 @@ let retrieve_inner_xml meth response enable_log= | Xml_parse_failure error -> try raise_internal_error ["Exception:"; - data_from_leaf (descend_and_match path_to_exception response)] + data_from_leaf (descend_and_match path_to_exception response)] with | Xml_parse_failure msg -> if enable_log @@ -289,57 +289,57 @@ let wlb_request ~__context ~host ~port ~auth ~meth ~params ~handler ~enable_log else timeout_default with - | _ -> - timeout_default + | _ -> + timeout_default in if enable_log then debug "%s\n%s" (String.concat "\n" (filtered_headers (Http.Request.to_header_list request))) body; try Remote_requests.perform_request ~__context ~timeout ~verify_cert - ~host ~port ~request ~handler ~enable_log + ~host ~port ~request ~handler ~enable_log with - | Remote_requests.Timed_out -> - raise_timeout timeout - | Http_client.Http_request_rejected _ | Http_client.Http_error _ -> - raise_authentication_failed () - | Xmlrpc_client.Connection_reset -> - raise_connection_reset () - | Stunnel.Stunnel_verify_error reason -> - raise_verify_error reason - | Stunnel.Stunnel_error error_msg as exc-> - begin - match error_msg with - | "Connection refused" -> - raise_connection_refused () - | "No route to host" - | "No host resolved" -> - raise_unknown_host () - | "Invalid argument" -> - raise_url_invalid (sprintf "%s:%s" host (string_of_int port)) - | "" -> raise_connection_reset() - | _ -> - raise exc - end - | Unix.Unix_error(Unix.ECONNREFUSED, _, _) -> + | Remote_requests.Timed_out -> + raise_timeout timeout + | 
Http_client.Http_request_rejected _ | Http_client.Http_error _ -> + raise_authentication_failed () + | Xmlrpc_client.Connection_reset -> + raise_connection_reset () + | Stunnel.Stunnel_verify_error reason -> + raise_verify_error reason + | Stunnel.Stunnel_error error_msg as exc-> + begin + match error_msg with + | "Connection refused" -> raise_connection_refused () + | "No route to host" + | "No host resolved" -> + raise_unknown_host () + | "Invalid argument" -> + raise_url_invalid (sprintf "%s:%s" host (string_of_int port)) + | "" -> raise_connection_reset() + | _ -> + raise exc + end + | Unix.Unix_error(Unix.ECONNREFUSED, _, _) -> + raise_connection_refused () let perform_wlb_request ?auth ?url ?enable_log ~meth ~params - ~handle_response ~__context () = + ~handle_response ~__context () = (* now assumes naming policy of xml repsonses is uniform Envelope->Body-> Response-> Result where is method name *) let enable_log = match enable_log with - | Some b -> b - | None -> true + | Some b -> b + | None -> true in let host, port = match url with - | Some u -> split_host_port u - | None -> wlb_host_port ~__context + | Some u -> split_host_port u + | None -> wlb_host_port ~__context in let auth' = match auth with - | Some x -> x - | None -> wlb_encoded_auth ~__context + | Some x -> x + | None -> wlb_encoded_auth ~__context in let result = ref None in (* this function attempts to parse the result into xml , and pass the 'result' section through to the handler*) @@ -348,17 +348,17 @@ let perform_wlb_request ?auth ?url ?enable_log ~meth ~params try Xmlrpc_client.XML_protocol.read_response response s with - | Xml.Error err -> - raise_malformed_response' meth (Xml.error err) "" + | Xml.Error err -> + raise_malformed_response' meth (Xml.error err) "" in debug "\n\n%s\n\n" (Xml.to_string response); let inner_xml = retrieve_inner_xml meth response enable_log in result := Some ( - let code = - try - data_from_leaf (descend_and_match ["ResultCode"] inner_xml) - with - | 
Xml_parse_failure error -> "0" (* If it failed trying to get ResultCode, assume the call was successful *) + let code = + try + data_from_leaf (descend_and_match ["ResultCode"] inner_xml) + with + | Xml_parse_failure error -> "0" (* If it failed trying to get ResultCode, assume the call was successful *) in if code <> "0" then (* Call failed, get error message and raise internal error *) @@ -367,7 +367,7 @@ let perform_wlb_request ?auth ?url ?enable_log ~meth ~params data_from_leaf (descend_and_match ["ErrorMessage"] inner_xml) with | Xml_parse_failure error -> "" - in raise_internal_error [code; message] + in raise_internal_error [code; message] else (* Call was successful, parse inner xml *) try @@ -404,29 +404,29 @@ let retrieve_vm_recommendations ~__context ~vm = let extract_data place_recommendation = try let h = Db.Host.get_by_uuid ~__context - ~uuid:(data_from_leaf - (descend_and_match ["HostUuid"] place_recommendation)) in + ~uuid:(data_from_leaf + (descend_and_match ["HostUuid"] place_recommendation)) in if (is_parent_to place_recommendation "Stars") then (h, ["WLB"; - val_num (data_from_leaf (descend_and_match - ["Stars"] place_recommendation)); - val_num (data_from_leaf (descend_and_match - ["RecommendationId"] place_recommendation))]) + val_num (data_from_leaf (descend_and_match + ["Stars"] place_recommendation)); + val_num (data_from_leaf (descend_and_match + ["RecommendationId"] place_recommendation))]) else - (h, ["WLB"; "0.0"; - val_num (data_from_leaf (descend_and_match - ["RecommendationId"] place_recommendation)); - data_from_leaf (descend_and_match - ["ZeroScoreReason"] place_recommendation)]) + (h, ["WLB"; "0.0"; + val_num (data_from_leaf (descend_and_match + ["RecommendationId"] place_recommendation)); + data_from_leaf (descend_and_match + ["ZeroScoreReason"] place_recommendation)]) with - | Xml_parse_failure error -> - (* let this parse error carry on upwards, perform_wlb_request will catch it and check the rest of the xml for an error code 
*) - raise (Xml_parse_failure error) - | Db_exn.Read_missing_uuid (_,_,_) - | Db_exn.Too_many_values (_,_,_) -> - raise_malformed_response' "VMGetRecommendations" - "Invalid VM or host UUID" "unknown" + | Xml_parse_failure error -> + (* let this parse error carry on upwards, perform_wlb_request will catch it and check the rest of the xml for an error code *) + raise (Xml_parse_failure error) + | Db_exn.Read_missing_uuid (_,_,_) + | Db_exn.Too_many_values (_,_,_) -> + raise_malformed_response' "VMGetRecommendations" + "Invalid VM or host UUID" "unknown" in let recs = descend_and_match ["Recommendations"] inner_xml in if (is_childless recs) @@ -452,17 +452,17 @@ let init_wlb ~__context ~wlb_url ~wlb_username ~wlb_password ~xenserver_username let pool = Helpers.get_pool ~__context in let master = Db.Pool.get_master ~__context ~self:pool in let params = (sprintf "%s\n%s\n%s\n%s\n" - (generate_safe_param "Password" xenserver_password) - (pool_uuid_param ~__context) - (generate_safe_param "UserName" xenserver_username) - (generate_safe_param "XenServerUrl" - (let address_type = Record_util.primary_address_type_of_string (Xapi_inventory.lookup Xapi_inventory._management_address_type ~default:"ipv4") in - let master_address = Db.Host.get_address ~__context ~self:master in - if address_type = `IPv4 then - sprintf "http://%s:80/" master_address - else - (*This is an ipv6 address, put [] around the address so that WLB can properly parse the url*) - sprintf "http://[%s]:80/" master_address))) + (generate_safe_param "Password" xenserver_password) + (pool_uuid_param ~__context) + (generate_safe_param "UserName" xenserver_username) + (generate_safe_param "XenServerUrl" + (let address_type = Record_util.primary_address_type_of_string (Xapi_inventory.lookup Xapi_inventory._management_address_type ~default:"ipv4") in + let master_address = Db.Host.get_address ~__context ~self:master in + if address_type = `IPv4 then + sprintf "http://%s:80/" master_address + else + (*This is an ipv6 
address, put [] around the address so that WLB can properly parse the url*) + sprintf "http://[%s]:80/" master_address))) in let handle_response inner_xml = (*A succesful result has an ID inside the addxenserverresult *) @@ -477,9 +477,9 @@ let init_wlb ~__context ~wlb_url ~wlb_username ~wlb_password ~xenserver_username Pervasiveext.ignore_exn (fun _ -> Db.Secret.destroy ~__context ~self:old_secret_ref); in Locking_helpers.Named_mutex.execute request_mutex (perform_wlb_request ~enable_log:false - ~meth:"AddXenServer" ~params - ~auth:(encoded_auth wlb_username wlb_password) ~url:wlb_url - ~handle_response ~__context) + ~meth:"AddXenServer" ~params + ~auth:(encoded_auth wlb_username wlb_password) ~url:wlb_url + ~handle_response ~__context) let decon_wlb ~__context = let clear_wlb_config ~__context ~pool = @@ -506,7 +506,7 @@ let decon_wlb ~__context = else let params = pool_uuid_param ~__context in try Locking_helpers.Named_mutex.execute request_mutex (perform_wlb_request ~meth:"RemoveXenServer" - ~params ~handle_response ~__context) with + ~params ~handle_response ~__context) with (*Based on CA-60147,CA-93312 and CA-137044 - XAPI is designed to handle the error *) | _ -> clear_wlb_config ~__context ~pool @@ -536,9 +536,9 @@ xmlns:b=\"http://schemas.microsoft.com/2003/10/Serialization/Arrays\"> then () else - (*child elements are errors. Raise an exception to force an error check *) + (*child elements are errors. 
Raise an exception to force an error check *) raise (Xml_parse_failure - "Expected blank result from a send_wlb_config") + "Expected blank result from a send_wlb_config") in perform_wlb_request ~meth:"SetXenPoolConfiguration" ~params ~handle_response ~__context () @@ -552,7 +552,7 @@ let retrieve_wlb_config ~__context = match key_value_parents with | Xml.Element (_, _, _) as key_value_parent :: tl -> (data_from_leaf (descend_and_match ["Key"] key_value_parent), - data_from_leaf (descend_and_match ["Value"] key_value_parent)) + data_from_leaf (descend_and_match ["Value"] key_value_parent)) :: gen_map tl | Xml.PCData _ :: tl -> unexpected_data "GetXenPoolConfiguration" "Configuration" @@ -564,13 +564,13 @@ let retrieve_wlb_config ~__context = gen_map children | _ -> raise_malformed_response "GetXenPoolConfiguration" - "Expected children to OptimizationParms node" inner_xml + "Expected children to OptimizationParms node" inner_xml in perform_wlb_request ~meth:"GetXenPoolConfiguration" ~params ~handle_response ~__context () - let get_dom0_vm ~__context host = - Db.Host.get_control_domain ~__context ~self:(Db.Host.get_by_uuid ~__context ~uuid:host) +let get_dom0_vm ~__context host = + Db.Host.get_control_domain ~__context ~self:(Db.Host.get_by_uuid ~__context ~uuid:host) let get_opt_recommendations ~__context = assert_wlb_enabled ~__context; @@ -578,21 +578,21 @@ let get_opt_recommendations ~__context = let handle_response inner_xml = let rec gen_map key_value_parents = match key_value_parents with - | Xml.Element (_, _, kvalues) :: tl -> - List.map ( - fun elem -> match elem with + | Xml.Element (_, _, kvalues) :: tl -> + List.map ( + fun elem -> match elem with | (Xml.Element (key, _, _)) as leaf -> (key, data_from_leaf leaf) | Xml.PCData _ -> unexpected_data "GetOptimizationRecommendations" "PoolOptimizationRecommendation" inner_xml - ) kvalues :: gen_map tl - | Xml.PCData _ :: tl -> - unexpected_data "GetOptimizationRecommendations" - "Recommendations node" - 
inner_xml - | [] -> []; + ) kvalues :: gen_map tl + | Xml.PCData _ :: tl -> + unexpected_data "GetOptimizationRecommendations" + "Recommendations node" + inner_xml + | [] -> []; in if (is_childless inner_xml) then @@ -601,7 +601,7 @@ let get_opt_recommendations ~__context = match (descend_and_match ["Recommendations"] inner_xml) with | Xml.Element (_, _, children) -> (gen_map children, - data_from_leaf (descend_and_match ["OptimizationId"] inner_xml)) + data_from_leaf (descend_and_match ["OptimizationId"] inner_xml)) | _ -> debug "IS CHILDLESS"; assert false (*is_childless should prevent this case *) in let result_map = @@ -627,12 +627,12 @@ let get_opt_recommendations ~__context = | [] -> begin match (vm, hostfrom, hostto, reason, rec_id, opt_id) with - | (Some vm', _, Some hostto', Some reason', Some rec_id', opt_id') -> (vm', ["WLB"; hostto'; val_num (opt_id'); val_num (rec_id'); reason']) - | (None, Some hostfrom',_, Some reason', Some rec_id', opt_id') -> ( (get_dom0_vm ~__context hostfrom'), ["WLB"; hostfrom'; val_num (opt_id'); val_num (rec_id'); reason']) - | _ -> + | (Some vm', _, Some hostto', Some reason', Some rec_id', opt_id') -> (vm', ["WLB"; hostto'; val_num (opt_id'); val_num (rec_id'); reason']) + | (None, Some hostfrom',_, Some reason', Some rec_id', opt_id') -> ( (get_dom0_vm ~__context hostfrom'), ["WLB"; hostfrom'; val_num (opt_id'); val_num (rec_id'); reason']) + | _ -> raise_malformed_response' "GetOptimizationRecommendations" "Missing VmToMoveUuid, RecID, MoveToHostUuid, or Reason" "unknown" - end + end in try match result_map with (map, opt_id) -> @@ -656,17 +656,17 @@ let get_evacuation_recoms ~__context ~uuid = | Xml.Element (_, _, kvalues) :: tl -> List.map ( fun elem -> match elem with - | (Xml.Element (key, _, _)) as leaf -> - (key, data_from_leaf leaf) - | Xml.PCData _ -> + | (Xml.Element (key, _, _)) as leaf -> + (key, data_from_leaf leaf) + | Xml.PCData _ -> unexpected_data "HostGetRecommendations" "HostEvacuationRecommendation" 
inner_xml ) kvalues :: gen_map tl | Xml.PCData _ :: tl -> unexpected_data "HostGetRecommendations" - "Recommendations" - inner_xml + "Recommendations" + inner_xml | [] -> [] in if (is_childless inner_xml) @@ -675,8 +675,8 @@ let get_evacuation_recoms ~__context ~uuid = else match inner_xml with | (Xml.Element ( _, _, [Xml.Element("CanPlaceAllVMs",_,_)])) -> - (*CanPlaceAllVMs tag, No recommendations to give. *) - [] + (*CanPlaceAllVMs tag, No recommendations to give. *) + [] | (Xml.Element ( _, _, _)) -> begin match (descend_and_match ["Recommendations"] inner_xml) with @@ -685,31 +685,31 @@ let get_evacuation_recoms ~__context ~uuid = | _ -> [] (* just data, which we are treating as an empty response *) end | Xml.PCData _ -> unexpected_data "HostGetRecommendations" - "HostGetRecommendationsResult" inner_xml + "HostGetRecommendationsResult" inner_xml in let result_map = perform_wlb_request ~meth:"HostGetRecommendations" ~params ~handle_response ~__context () in let rec remap vm host rec_id = function - | (k, v) :: vs -> - if k = "VmUuid" && v<> "" then - remap (Some (Db.VM.get_by_uuid ~__context ~uuid:v)) host rec_id vs - else if k = "HostUuid" then - remap vm (Some v) rec_id vs - else if k = "RecommendationId" then - remap vm host (Some v) vs - else - remap vm host rec_id vs - | [] -> - begin - match (vm, host, rec_id) with - | (Some vm', Some host', Some rec_id') -> (vm', ["WLB"; host'; val_num (rec_id')]) - | (None, Some host', Some rec_id') -> ((get_dom0_vm ~__context host'), ["WLB"; host'; val_num (rec_id')]) - | _ -> - raise_malformed_response' "HostGetRecommendations" - "Missing VmUuid, RecID or HostUuid" "unknown" - end + | (k, v) :: vs -> + if k = "VmUuid" && v<> "" then + remap (Some (Db.VM.get_by_uuid ~__context ~uuid:v)) host rec_id vs + else if k = "HostUuid" then + remap vm (Some v) rec_id vs + else if k = "RecommendationId" then + remap vm host (Some v) vs + else + remap vm host rec_id vs + | [] -> + begin + match (vm, host, rec_id) with + | (Some 
vm', Some host', Some rec_id') -> (vm', ["WLB"; host'; val_num (rec_id')]) + | (None, Some host', Some rec_id') -> ((get_dom0_vm ~__context host'), ["WLB"; host'; val_num (rec_id')]) + | _ -> + raise_malformed_response' "HostGetRecommendations" + "Missing VmUuid, RecID or HostUuid" "unknown" + end in try List.map (fun kvs -> remap None None None kvs) result_map @@ -722,8 +722,8 @@ let get_evacuation_recoms ~__context ~uuid = let make_param = function | (n, v) -> Xml.Element("ReportParameter", [], - [Xml.Element("ParameterName", [], [Xml.PCData n]); - Xml.Element("ParameterValue", [], [Xml.PCData v])]) + [Xml.Element("ParameterName", [], [Xml.PCData n]); + Xml.Element("ParameterValue", [], [Xml.PCData v])]) let wlb_context_request meth params ~__context ~handler = assert_wlb_licensed ~__context; @@ -740,7 +740,7 @@ let wlb_report_request report params = let p = ((Xml.to_string (Xml.Element("ReportName", [], [Xml.PCData report]))) ^ (Xml.to_string (Xml.Element("ReportParms", [], - List.map make_param params)))) + List.map make_param params)))) in debug "%s" p; (meth, wlb_context_request meth p) diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index 597d80cbae1..e1f5d309bba 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -13,7 +13,7 @@ *) (** * @group Main Loop and Start-up - *) +*) open Stdext open Printf @@ -36,7 +36,7 @@ module W=Debug.Make(struct let name="watchdog" end) instead we rely on the init.d scripts to start other services. *) let startup_check () = Sanitycheck.check_for_bad_link () - + (* Parse db conf file from disk and use this to initialise database connections. This is done on both master and slave. On masters the parsed data is used to flush databases to and to populate cache; on the slave the parsed data is used to determine where to put backups. @@ -57,37 +57,37 @@ open Db_cache_types (if it is present). Perform an initial flush to the database connections which were already setup, then delete the restore file. 
*) let populate_db backend = - let schema = Datamodel_schema.of_datamodel () in - - let output_connections = Db_conn_store.read_db_connections () in - (* If the temporary restore file is present then we must populate from that *) - let restoring = Sys.file_exists Xapi_globs.db_temporary_restore_path in - let input_connections = - if restoring - then [ Parse_db_conf.make Xapi_globs.db_temporary_restore_path ] - else output_connections - in - debug "Attempting to populate database from one of these locations: [%s]" - (String.concat "; " - (List.map (fun conn -> conn.Parse_db_conf.path) input_connections)); - Db_cache_impl.make backend input_connections schema; - Db_cache_impl.sync output_connections (Db_ref.get_database backend); - (* Delete the temporary restore file so that we don't revert to it again at next startup. *) - if restoring then begin - Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; - Unixext.unlink_safe (Xapi_globs.db_temporary_restore_path ^ ".generation") - end - -(** Starts the main database engine: this should be done only on the master node. + let schema = Datamodel_schema.of_datamodel () in + + let output_connections = Db_conn_store.read_db_connections () in + (* If the temporary restore file is present then we must populate from that *) + let restoring = Sys.file_exists Xapi_globs.db_temporary_restore_path in + let input_connections = + if restoring + then [ Parse_db_conf.make Xapi_globs.db_temporary_restore_path ] + else output_connections + in + debug "Attempting to populate database from one of these locations: [%s]" + (String.concat "; " + (List.map (fun conn -> conn.Parse_db_conf.path) input_connections)); + Db_cache_impl.make backend input_connections schema; + Db_cache_impl.sync output_connections (Db_ref.get_database backend); + (* Delete the temporary restore file so that we don't revert to it again at next startup. 
*) + if restoring then begin + Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; + Unixext.unlink_safe (Xapi_globs.db_temporary_restore_path ^ ".generation") + end + +(** Starts the main database engine: this should be done only on the master node. The db connections must have been parsed from db.conf file and initialised before this fn is called. Also this function depends on being able to call API functions through the external interface. *) let start_database_engine () = - let t = Db_backend.make () in - populate_db t; + let t = Db_backend.make () in + populate_db t; - Db_ref.update_database t (Database.register_callback "redo_log" Redo_log.database_callback); - Db_ref.update_database t (Database.register_callback "events" Eventgen.database_callback); + Db_ref.update_database t (Database.register_callback "redo_log" Redo_log.database_callback); + Db_ref.update_database t (Database.register_callback "events" Eventgen.database_callback); debug "Performing initial DB GC"; Db_gc.single_pass (); @@ -110,56 +110,56 @@ let start_database_engine () = ) (* Block premature incoming client requests until the database engine is ready *) -let wait_until_database_is_ready_for_clients () = - Mutex.execute database_ready_for_clients_m - (fun () -> +let wait_until_database_is_ready_for_clients () = + Mutex.execute database_ready_for_clients_m + (fun () -> while not !database_ready_for_clients do Condition.wait database_ready_for_clients_c database_ready_for_clients_m done) (** Handler for the remote database access URL *) let remote_database_access_handler req bio c = - wait_until_database_is_ready_for_clients (); - Db_remote_cache_access_v1.handler req bio c + wait_until_database_is_ready_for_clients (); + Db_remote_cache_access_v1.handler req bio c (** Handler for the remote database access URL *) let remote_database_access_handler_v2 req bio c = - wait_until_database_is_ready_for_clients (); - Db_remote_cache_access_v2.handler req bio c + 
wait_until_database_is_ready_for_clients (); + Db_remote_cache_access_v2.handler req bio c (** Handler for the legacy remote stats URL *) let remote_stats_handler req bio _ = wait_until_database_is_ready_for_clients (); - let fd = Buf_io.fd_of bio in (* fd only used for writing *) + let fd = Buf_io.fd_of bio in (* fd only used for writing *) (* CA-20487: need to authenticate this URL, but only when we're not in pool rolling-upgrade mode; this URL is depricated and should be removed ASAP.. *) let auth_failed() = - raise (Http.Unauthorised "remote stats") in + raise (Http.Unauthorised "remote stats") in let rolling_upgrade_in_progress = Server_helpers.exec_with_new_task "performance_monitor_auth" ~task_in_database:false (fun __context -> Helpers.rolling_upgrade_in_progress ~__context) in if not rolling_upgrade_in_progress then begin try - let pool_secret = List.assoc "pool_secret" req.Http.Request.cookie in - if pool_secret <> !Xapi_globs.pool_secret then auth_failed(); + let pool_secret = List.assoc "pool_secret" req.Http.Request.cookie in + if pool_secret <> !Xapi_globs.pool_secret then auth_failed(); with _ -> - auth_failed() + auth_failed() end; let body = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_rpc_size req bio in - let body_xml = Xml.parse_string body in + let body_xml = Xml.parse_string body in Stats.time_this "remote_stats" (fun () -> let stats = Monitor_transfer.unmarshall body_xml in Server_helpers.exec_with_new_task "performance monitor" - (fun __context -> Monitor_master.update_all ~__context stats); + (fun __context -> Monitor_master.update_all ~__context stats); let response = Xml.to_string (Db_rpc_common_v1.marshall_unit ()) in Http_svr.response_str req fd response ) let cleanup_handler i = debug "Executing cleanup handler"; -(* Monitor_rrds.cleanup ();*) + (* Monitor_rrds.cleanup ();*) Db_connections.exit_on_next_flush := true; if not(Pool_role.is_master ()) then exit 0; debug "cleanup handler exiting" @@ -186,22 +186,22 @@ let 
random_setup () = Random.full_init (Array.init n (fun i -> Char.code s.[i])) let register_callback_fns() = - let fake_rpc req sock xml : Rpc.response = - Api_server.callback1 false req sock None xml in - Helpers.rpc_fun := Some fake_rpc; - let set_stunnelpid task_opt pid = - Locking_helpers.Thread_state.acquired (Locking_helpers.Process("stunnel", pid)) in - let unset_stunnelpid task_opt pid = - Locking_helpers.Thread_state.released (Locking_helpers.Process("stunnel", pid)) in - Xmlrpc_client.Internal.set_stunnelpid_callback := Some set_stunnelpid; - Xmlrpc_client.Internal.unset_stunnelpid_callback := Some unset_stunnelpid; - TaskHelper.init () + let fake_rpc req sock xml : Rpc.response = + Api_server.callback1 false req sock None xml in + Helpers.rpc_fun := Some fake_rpc; + let set_stunnelpid task_opt pid = + Locking_helpers.Thread_state.acquired (Locking_helpers.Process("stunnel", pid)) in + let unset_stunnelpid task_opt pid = + Locking_helpers.Thread_state.released (Locking_helpers.Process("stunnel", pid)) in + Xmlrpc_client.Internal.set_stunnelpid_callback := Some set_stunnelpid; + Xmlrpc_client.Internal.unset_stunnelpid_callback := Some unset_stunnelpid; + TaskHelper.init () let noevents = ref false let debug_dummy_data = ref false -let show_version () = +let show_version () = List.iter (fun (x, y) -> printf "%s=%s\n" x y) [ "git_id", Version.git_id; "hostname", Version.hostname; @@ -221,7 +221,7 @@ let init_args() = Xapi_globs.xenopsd_queues := [ "xenopsd" ] end - + let wait_to_die() = (* don't call Thread.join cos this interacts strangely with OCAML runtime and stops @@ -236,25 +236,25 @@ let check_no_other_masters() = Server_helpers.exec_with_new_task "checking no other known hosts are masters" (fun __context -> let assert_is_slave href = - try - if not (Xapi_host.ask_host_if_it_is_a_slave ~__context ~host:href) then - begin - let master_address = Db.Host.get_address ~self:href ~__context in - error "Detected another master in my database of known hosts. 
Aborting xapi startup and restarting as slave of host '%s' (%s)" - (Db.Host.get_uuid ~self:href ~__context) master_address; - (* transition to slave and restart *) - begin - try - (* now become a slave of the new master we found... *) - Pool_role.set_role (Pool_role.Slave master_address); - with - e -> (error "Could not transition to slave '%s': xapi will abort completely and not start" (Printexc.to_string e); exit 1) - end; - exit Xapi_globs.restart_return_code; - end - with e -> (* if we couldn't contact slave then carry on regardless - --- this is just a sanity check, not a guarantee... *) - debug "Couldn't contact slave on startup check: %s" (Printexc.to_string e) + try + if not (Xapi_host.ask_host_if_it_is_a_slave ~__context ~host:href) then + begin + let master_address = Db.Host.get_address ~self:href ~__context in + error "Detected another master in my database of known hosts. Aborting xapi startup and restarting as slave of host '%s' (%s)" + (Db.Host.get_uuid ~self:href ~__context) master_address; + (* transition to slave and restart *) + begin + try + (* now become a slave of the new master we found... *) + Pool_role.set_role (Pool_role.Slave master_address); + with + e -> (error "Could not transition to slave '%s': xapi will abort completely and not start" (Printexc.to_string e); exit 1) + end; + exit Xapi_globs.restart_return_code; + end + with e -> (* if we couldn't contact slave then carry on regardless + --- this is just a sanity check, not a guarantee... 
*) + debug "Couldn't contact slave on startup check: %s" (Printexc.to_string e) in let hosts = Db.Host.get_all ~__context in let me = Helpers.get_localhost ~__context in @@ -279,9 +279,9 @@ let on_master_restart ~__context = try let host = Helpers.get_localhost ~__context in let metrics = Db.Host.get_metrics ~__context ~self:host in - let shutting_down = + let shutting_down = Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m - (fun () -> List.exists (fun x -> x=host) !Xapi_globs.hosts_which_are_shutting_down) in + (fun () -> List.exists (fun x -> x=host) !Xapi_globs.hosts_which_are_shutting_down) in if not shutting_down then Db.Host_metrics.set_live ~__context ~self:metrics ~value:true with e -> @@ -291,7 +291,7 @@ let on_master_restart ~__context = let init_local_database () = (try let (_: string) = Localdb.get Constants.ha_armed in - () + () with Localdb.Missing_key _ -> Localdb.put Constants.ha_armed "false"; debug "%s = 'false' (by default)" Constants.ha_armed); @@ -303,163 +303,163 @@ let init_local_database () = if !Xapi_globs.on_system_boot then Localdb.put Constants.host_disabled_until_reboot "false" let bring_up_management_if ~__context () = - try - let management_if = Xapi_inventory.lookup Xapi_inventory._management_interface in - let management_address_type = Record_util.primary_address_type_of_string - (Xapi_inventory.lookup Xapi_inventory._management_address_type) in - if management_if = "" then begin - debug "No management interface defined (management is disabled)"; - Xapi_mgmt_iface.run ~__context ~mgmt_enabled:false; - end else begin - Xapi_mgmt_iface.change management_if management_address_type; - Xapi_mgmt_iface.run ~__context ~mgmt_enabled:true; - match Helpers.get_management_ip_addr ~__context with - | Some "127.0.0.1" -> - debug "Received 127.0.0.1 as a management IP address; ignoring" - | Some ip -> - debug "Management IP address is: %s" ip; - (* Make sure everyone is up to speed *) - ignore (Thread.create (fun () -> 
Server_helpers.exec_with_new_task "dom0 networking update" - ~subtask_of:(Context.get_task_id __context) - (fun __context -> Xapi_mgmt_iface.on_dom0_networking_change ~__context)) ()) - | None -> - warn "Failed to acquire a management IP address" - end; - (* Start the Host Internal Management Network, if needed. *) - Xapi_network.check_himn ~__context - with e -> - debug "Caught exception bringing up management interface: %s" (ExnHelper.string_of_exn e) + try + let management_if = Xapi_inventory.lookup Xapi_inventory._management_interface in + let management_address_type = Record_util.primary_address_type_of_string + (Xapi_inventory.lookup Xapi_inventory._management_address_type) in + if management_if = "" then begin + debug "No management interface defined (management is disabled)"; + Xapi_mgmt_iface.run ~__context ~mgmt_enabled:false; + end else begin + Xapi_mgmt_iface.change management_if management_address_type; + Xapi_mgmt_iface.run ~__context ~mgmt_enabled:true; + match Helpers.get_management_ip_addr ~__context with + | Some "127.0.0.1" -> + debug "Received 127.0.0.1 as a management IP address; ignoring" + | Some ip -> + debug "Management IP address is: %s" ip; + (* Make sure everyone is up to speed *) + ignore (Thread.create (fun () -> Server_helpers.exec_with_new_task "dom0 networking update" + ~subtask_of:(Context.get_task_id __context) + (fun __context -> Xapi_mgmt_iface.on_dom0_networking_change ~__context)) ()) + | None -> + warn "Failed to acquire a management IP address" + end; + (* Start the Host Internal Management Network, if needed. *) + Xapi_network.check_himn ~__context + with e -> + debug "Caught exception bringing up management interface: %s" (ExnHelper.string_of_exn e) (** Assuming a management interface is defined, return the IP address. Note this - call may block for a long time. *) + call may block for a long time. 
*) let wait_for_management_ip_address ~__context = - debug "Attempting to acquire a management IP address"; - Xapi_host.set_emergency_mode_error Api_errors.host_has_no_management_ip []; - let ip = Xapi_mgmt_iface.wait_for_management_ip ~__context in - debug "Acquired management IP address: %s" ip; - Xapi_host.set_emergency_mode_error Api_errors.host_still_booting []; - (* Check whether I am my own slave. *) - begin match Pool_role.get_role () with - | Pool_role.Slave masters_ip -> - if masters_ip = "127.0.0.1" || masters_ip = ip then begin - debug "Realised that I am my own slave!"; - Xapi_host.set_emergency_mode_error Api_errors.host_its_own_slave []; - end - | Pool_role.Master | Pool_role.Broken -> () - end; - ip + debug "Attempting to acquire a management IP address"; + Xapi_host.set_emergency_mode_error Api_errors.host_has_no_management_ip []; + let ip = Xapi_mgmt_iface.wait_for_management_ip ~__context in + debug "Acquired management IP address: %s" ip; + Xapi_host.set_emergency_mode_error Api_errors.host_still_booting []; + (* Check whether I am my own slave. *) + begin match Pool_role.get_role () with + | Pool_role.Slave masters_ip -> + if masters_ip = "127.0.0.1" || masters_ip = ip then begin + debug "Realised that I am my own slave!"; + Xapi_host.set_emergency_mode_error Api_errors.host_its_own_slave []; + end + | Pool_role.Master | Pool_role.Broken -> () + end; + ip type hello_error = | Permanent (* e.g. the pool secret is wrong i.e. 
wrong master *) | Temporary (* some glitch or other *) - + (** Attempt a Pool.hello, return None if ok or Some hello_error otherwise *) -let attempt_pool_hello my_ip = +let attempt_pool_hello my_ip = let localhost_uuid = Helpers.get_localhost_uuid () in try Helpers.call_emergency_mode_functions (Pool_role.get_master_address ()) (fun rpc session_id -> - match Client.Client.Pool.hello rpc session_id localhost_uuid my_ip with - | `cannot_talk_back -> - error "Master claims he cannot talk back to us on IP: %s" my_ip; - Xapi_host.set_emergency_mode_error Api_errors.host_master_cannot_talk_back [ my_ip ]; - Some Temporary - | `unknown_host -> - debug "Master claims he has no record of us being a slave"; - Xapi_host.set_emergency_mode_error Api_errors.host_unknown_to_master [ localhost_uuid ]; - Some Permanent - | `ok -> - None + match Client.Client.Pool.hello rpc session_id localhost_uuid my_ip with + | `cannot_talk_back -> + error "Master claims he cannot talk back to us on IP: %s" my_ip; + Xapi_host.set_emergency_mode_error Api_errors.host_master_cannot_talk_back [ my_ip ]; + Some Temporary + | `unknown_host -> + debug "Master claims he has no record of us being a slave"; + Xapi_host.set_emergency_mode_error Api_errors.host_unknown_to_master [ localhost_uuid ]; + Some Permanent + | `ok -> + None ) - with + with | Api_errors.Server_error(code, params) when code = Api_errors.session_authentication_failed -> - debug "Master did not recognise our pool secret: we must be pointing at the wrong master."; - Xapi_host.set_emergency_mode_error Api_errors.host_unknown_to_master [ localhost_uuid ]; - Some Permanent + debug "Master did not recognise our pool secret: we must be pointing at the wrong master."; + Xapi_host.set_emergency_mode_error Api_errors.host_unknown_to_master [ localhost_uuid ]; + Some Permanent | Api_errors.Server_error(code, params) as exn -> - debug "Caught exception: %s during Pool.hello" (ExnHelper.string_of_exn exn); - Xapi_host.set_emergency_mode_error 
code params; - Some Temporary + debug "Caught exception: %s during Pool.hello" (ExnHelper.string_of_exn exn); + Xapi_host.set_emergency_mode_error code params; + Some Temporary | exn -> - debug "Caught exception: %s during Pool.hello" (ExnHelper.string_of_exn exn); - Xapi_host.set_emergency_mode_error Api_errors.internal_error [ ExnHelper.string_of_exn exn ]; - Some Temporary - + debug "Caught exception: %s during Pool.hello" (ExnHelper.string_of_exn exn); + Xapi_host.set_emergency_mode_error Api_errors.internal_error [ ExnHelper.string_of_exn exn ]; + Some Temporary + (** Bring up the HA system if configured *) -let start_ha () = +let start_ha () = try Xapi_ha.on_server_restart () with e -> (* Critical that we don't continue as a master and use shared resources *) debug "Caught exception starting HA system: %s" (ExnHelper.string_of_exn e) - + (** Enable and load the redo log if we are the master, the local-DB flag is set * and HA is disabled *) let start_redo_log () = try if Pool_role.is_master () && - bool_of_string (Localdb.get_with_default Constants.redo_log_enabled "false") && - not (bool_of_string (Localdb.get Constants.ha_armed)) then - begin - debug "Redo log was enabled when shutting down, so restarting it"; - (* enable the use of the redo log *) - Redo_log.enable Xapi_ha.ha_redo_log Xapi_globs.gen_metadata_vdi_reason; - debug "Attempting to extract a database from a metadata VDI"; - (* read from redo log and store results in a staging file for use in the - * next step; best effort only: does not raise any exceptions *) - let db_ref = Db_backend.make () in - Redo_log_usage.read_from_redo_log Xapi_ha.ha_redo_log Xapi_globs.gen_metadata_db db_ref - end + bool_of_string (Localdb.get_with_default Constants.redo_log_enabled "false") && + not (bool_of_string (Localdb.get Constants.ha_armed)) then + begin + debug "Redo log was enabled when shutting down, so restarting it"; + (* enable the use of the redo log *) + Redo_log.enable Xapi_ha.ha_redo_log 
Xapi_globs.gen_metadata_vdi_reason; + debug "Attempting to extract a database from a metadata VDI"; + (* read from redo log and store results in a staging file for use in the + * next step; best effort only: does not raise any exceptions *) + let db_ref = Db_backend.make () in + Redo_log_usage.read_from_redo_log Xapi_ha.ha_redo_log Xapi_globs.gen_metadata_db db_ref + end with e -> debug "Caught exception starting non-HA redo log: %s" (ExnHelper.string_of_exn e) (* Attempt to start DR redo logs on all SRs which contain metadata VDIs for this pool. *) let start_dr_redo_logs () = - Server_helpers.exec_with_new_task "start_dr_redo_logs" - (fun __context -> - (* Find all SRs with metadata VDIs for this pool. *) - let pool = Helpers.get_pool ~__context in - let metadata_vdis = List.filter - (fun vdi -> - (Db.VDI.get_type ~__context ~self:vdi = `metadata) && - (Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool)) - (Db.VDI.get_all ~__context) - in - let metadata_srs = List.setify - (List.map (fun vdi -> Db.VDI.get_SR ~__context ~self:vdi) metadata_vdis) - in - (* Attempt to enable database replication to each SR. *) - List.iter - (fun sr -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - try - Xapi_sr.enable_database_replication ~__context ~sr; - debug "Re-enabled database replication to SR %s" sr_uuid - with e -> - (* Best-effort only. *) - debug "Could not re-enable database replication to SR %s - caught %s" - sr_uuid (Printexc.to_string e)) - metadata_srs) + Server_helpers.exec_with_new_task "start_dr_redo_logs" + (fun __context -> + (* Find all SRs with metadata VDIs for this pool. 
*) + let pool = Helpers.get_pool ~__context in + let metadata_vdis = List.filter + (fun vdi -> + (Db.VDI.get_type ~__context ~self:vdi = `metadata) && + (Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool)) + (Db.VDI.get_all ~__context) + in + let metadata_srs = List.setify + (List.map (fun vdi -> Db.VDI.get_SR ~__context ~self:vdi) metadata_vdis) + in + (* Attempt to enable database replication to each SR. *) + List.iter + (fun sr -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + try + Xapi_sr.enable_database_replication ~__context ~sr; + debug "Re-enabled database replication to SR %s" sr_uuid + with e -> + (* Best-effort only. *) + debug "Could not re-enable database replication to SR %s - caught %s" + sr_uuid (Printexc.to_string e)) + metadata_srs) (* Attempt to cache all metadata VDIs created by foreign pools *) let cache_metadata_vdis () = - Server_helpers.exec_with_new_task "cache_metadata_vdis" - (fun __context -> - let pool = Helpers.get_pool ~__context in - let metadata_vdis = List.filter - (fun vdi -> - (Db.VDI.get_type ~__context ~self:vdi = `metadata) && - (Db.VDI.get_metadata_of_pool ~__context ~self:vdi <> pool)) - (Db.VDI.get_all ~__context) - in - Xapi_dr.add_vdis_to_cache ~__context ~vdis:metadata_vdis) + Server_helpers.exec_with_new_task "cache_metadata_vdis" + (fun __context -> + let pool = Helpers.get_pool ~__context in + let metadata_vdis = List.filter + (fun vdi -> + (Db.VDI.get_type ~__context ~self:vdi = `metadata) && + (Db.VDI.get_metadata_of_pool ~__context ~self:vdi <> pool)) + (Db.VDI.get_all ~__context) + in + Xapi_dr.add_vdis_to_cache ~__context ~vdis:metadata_vdis) (* Called if we cannot contact master at init time *) let server_run_in_emergency_mode () = info "Cannot contact master: running in slave emergency mode"; Xapi_globs.slave_emergency_mode := true; (* signal the init script that it should succeed even though we're bust *) - Helpers.touch_file !Xapi_globs.ready_file; + Helpers.touch_file !Xapi_globs.ready_file; 
let emergency_reboot_delay = !Xapi_globs.emergency_reboot_delay_base +. Random.float !Xapi_globs.emergency_reboot_delay_extra in info "Will restart management software in %.1f seconds" emergency_reboot_delay; @@ -471,41 +471,41 @@ let server_run_in_emergency_mode () = (** Once the database is online we make sure our local ha.armed flag is in sync with the master's Pool.ha_enabled flag. *) let resynchronise_ha_state () = - try - Server_helpers.exec_with_new_task "resynchronise_ha_state" - (fun __context -> - (* Make sure the control domain is marked as "running" - in the case of *) - (* HA failover it will have been marked as "halted". *) - let control_domain_uuid = Inventory.lookup Inventory._control_domain_uuid in - let control_domain = Db.VM.get_by_uuid ~__context ~uuid:control_domain_uuid in - Db.VM.set_power_state ~__context ~self:control_domain ~value:`Running; - - let pool = Helpers.get_pool ~__context in - let pool_ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:pool in - let local_ha_enabled = bool_of_string (Localdb.get Constants.ha_armed) in - match local_ha_enabled, pool_ha_enabled with - | true, true -> - info "HA is enabled on both localhost and the Pool" - | false, false -> - info "HA is disabled on both localhost and the Pool" - | true, false -> - info "HA has been disabled on the Pool while we were offline; disarming HA locally"; - Localdb.put Constants.ha_armed "false"; - Xapi_ha.Monitor.stop () - | false, true -> - info "HA has been disabled on localhost but not the Pool."; - if Pool_role.is_master () then begin - info "We are the master: disabling HA on the Pool."; - Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; - end else begin - info "We are a slave: we cannot join an HA-enabled Pool after being locally disarmed. 
Entering emergency mode."; - Xapi_host.set_emergency_mode_error Api_errors.ha_pool_is_enabled_but_host_is_disabled []; - server_run_in_emergency_mode() - end - ) - with e -> - (* Critical that we don't continue as a master and use shared resources *) - error "Caught exception resynchronising state of HA system: %s" (ExnHelper.string_of_exn e) + try + Server_helpers.exec_with_new_task "resynchronise_ha_state" + (fun __context -> + (* Make sure the control domain is marked as "running" - in the case of *) + (* HA failover it will have been marked as "halted". *) + let control_domain_uuid = Inventory.lookup Inventory._control_domain_uuid in + let control_domain = Db.VM.get_by_uuid ~__context ~uuid:control_domain_uuid in + Db.VM.set_power_state ~__context ~self:control_domain ~value:`Running; + + let pool = Helpers.get_pool ~__context in + let pool_ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:pool in + let local_ha_enabled = bool_of_string (Localdb.get Constants.ha_armed) in + match local_ha_enabled, pool_ha_enabled with + | true, true -> + info "HA is enabled on both localhost and the Pool" + | false, false -> + info "HA is disabled on both localhost and the Pool" + | true, false -> + info "HA has been disabled on the Pool while we were offline; disarming HA locally"; + Localdb.put Constants.ha_armed "false"; + Xapi_ha.Monitor.stop () + | false, true -> + info "HA has been disabled on localhost but not the Pool."; + if Pool_role.is_master () then begin + info "We are the master: disabling HA on the Pool."; + Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; + end else begin + info "We are a slave: we cannot join an HA-enabled Pool after being locally disarmed. 
Entering emergency mode."; + Xapi_host.set_emergency_mode_error Api_errors.ha_pool_is_enabled_but_host_is_disabled []; + server_run_in_emergency_mode() + end + ) + with e -> + (* Critical that we don't continue as a master and use shared resources *) + error "Caught exception resynchronising state of HA system: %s" (ExnHelper.string_of_exn e) (* Calculates the amount of free memory on the host at boot time. *) (* Returns a result that is equivalent to (T - X), where: *) @@ -521,97 +521,97 @@ let resynchronise_ha_state () = (* 1. Domain 0 is currently in an unballooned state. *) (* 2. No other domains have been started. *) let calculate_boot_time_host_free_memory () = - let ( + ) = Nativeint.add in - let open Xenctrl in - let host_info = with_intf (fun xc -> physinfo xc) in - let host_free_pages = host_info.free_pages in - let host_scrub_pages = host_info.scrub_pages in - match Create_misc.read_dom0_memory_usage () with - | None -> failwith "can't query balloon driver" - | Some domain0_bytes -> - let domain0_total_pages = XenopsMemory.pages_of_bytes_used domain0_bytes in - let boot_time_host_free_pages = - host_free_pages + host_scrub_pages + (Int64.to_nativeint domain0_total_pages) in - let boot_time_host_free_kib = - pages_to_kib (Int64.of_nativeint boot_time_host_free_pages) in - Int64.mul 1024L boot_time_host_free_kib + let ( + ) = Nativeint.add in + let open Xenctrl in + let host_info = with_intf (fun xc -> physinfo xc) in + let host_free_pages = host_info.free_pages in + let host_scrub_pages = host_info.scrub_pages in + match Create_misc.read_dom0_memory_usage () with + | None -> failwith "can't query balloon driver" + | Some domain0_bytes -> + let domain0_total_pages = XenopsMemory.pages_of_bytes_used domain0_bytes in + let boot_time_host_free_pages = + host_free_pages + host_scrub_pages + (Int64.to_nativeint domain0_total_pages) in + let boot_time_host_free_kib = + pages_to_kib (Int64.of_nativeint boot_time_host_free_pages) in + Int64.mul 1024L 
boot_time_host_free_kib (* Read the free memory on the host and record this in the db. This is used *) (* as the baseline for memory calculations in the message forwarding layer. *) let record_boot_time_host_free_memory () = - if not (Unixext.file_exists Xapi_globs.initial_host_free_memory_file) then begin - try - let free_memory = calculate_boot_time_host_free_memory () in - Unixext.mkdir_rec (Filename.dirname Xapi_globs.initial_host_free_memory_file) 0o700; - Unixext.write_string_to_file - Xapi_globs.initial_host_free_memory_file - (Int64.to_string free_memory) - with e -> - error "Could not record host free memory. This may prevent VMs from being started on this host. (%s)" - (Printexc.to_string e) - end + if not (Unixext.file_exists Xapi_globs.initial_host_free_memory_file) then begin + try + let free_memory = calculate_boot_time_host_free_memory () in + Unixext.mkdir_rec (Filename.dirname Xapi_globs.initial_host_free_memory_file) 0o700; + Unixext.write_string_to_file + Xapi_globs.initial_host_free_memory_file + (Int64.to_string free_memory) + with e -> + error "Could not record host free memory. This may prevent VMs from being started on this host. (%s)" + (Printexc.to_string e) + end (** Reset the networking-related metadata for this host if the command [xe-reset-networking] * was executed before the restart. 
*) let check_network_reset () = - try - (* Raises exception if the file is not there and no reset is required *) - let reset_file = Unixext.string_of_file (Xapi_globs.network_reset_trigger) in - Server_helpers.exec_with_new_task "Performing emergency network reset" - (fun __context -> - let host = Helpers.get_localhost ~__context in - (* Parse reset file *) - let args = String.split '\n' reset_file in - let args = List.map (fun s -> match (String.split '=' s) with k :: [v] -> k, v | _ -> "", "") args in - let device = List.assoc "DEVICE" args in - let mode = match List.assoc "MODE" args with - | "static" -> `Static - | "dhcp" | _ -> `DHCP - in - let iP = if List.mem_assoc "IP" args then List.assoc "IP" args else "" in - let netmask = if List.mem_assoc "NETMASK" args then List.assoc "NETMASK" args else "" in - let gateway = if List.mem_assoc "GATEWAY" args then List.assoc "GATEWAY" args else "" in - let dNS = if List.mem_assoc "DNS" args then List.assoc "DNS" args else "" in - - (* Erase networking database objects for this host *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.Host.reset_networking rpc session_id host - ); - - (* Introduce PIFs for remaining interfaces *) - Xapi_pif.scan ~__context ~host; - - (* Introduce and configure the management PIF *) - let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "device", Literal device) - )) in - match pifs with - | [] -> error "management PIF %s not found" device - | pif :: _ -> - Xapi_pif.reconfigure_ip ~__context ~self:pif ~mode ~iP ~netmask ~gateway ~dNS; - Xapi_host.management_reconfigure ~__context ~pif; - ); - (* Remove trigger file *) - Unix.unlink("/tmp/network-reset") - with _ -> () (* TODO: catch specific exception for missing fields in reset_file and inform user *) - + try + (* Raises exception if the file is not there and no reset is required *) + let reset_file = Unixext.string_of_file 
(Xapi_globs.network_reset_trigger) in + Server_helpers.exec_with_new_task "Performing emergency network reset" + (fun __context -> + let host = Helpers.get_localhost ~__context in + (* Parse reset file *) + let args = String.split '\n' reset_file in + let args = List.map (fun s -> match (String.split '=' s) with k :: [v] -> k, v | _ -> "", "") args in + let device = List.assoc "DEVICE" args in + let mode = match List.assoc "MODE" args with + | "static" -> `Static + | "dhcp" | _ -> `DHCP + in + let iP = if List.mem_assoc "IP" args then List.assoc "IP" args else "" in + let netmask = if List.mem_assoc "NETMASK" args then List.assoc "NETMASK" args else "" in + let gateway = if List.mem_assoc "GATEWAY" args then List.assoc "GATEWAY" args else "" in + let dNS = if List.mem_assoc "DNS" args then List.assoc "DNS" args else "" in + + (* Erase networking database objects for this host *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Client.Host.reset_networking rpc session_id host + ); + + (* Introduce PIFs for remaining interfaces *) + Xapi_pif.scan ~__context ~host; + + (* Introduce and configure the management PIF *) + let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "device", Literal device) + )) in + match pifs with + | [] -> error "management PIF %s not found" device + | pif :: _ -> + Xapi_pif.reconfigure_ip ~__context ~self:pif ~mode ~iP ~netmask ~gateway ~dNS; + Xapi_host.management_reconfigure ~__context ~pif; + ); + (* Remove trigger file *) + Unix.unlink("/tmp/network-reset") + with _ -> () (* TODO: catch specific exception for missing fields in reset_file and inform user *) + (** Make sure our license is set correctly *) -let handle_licensing () = - Server_helpers.exec_with_new_task "Licensing host" - (fun __context -> - let host = Helpers.get_localhost ~__context in - License_init.initialise ~__context ~host - ) +let handle_licensing () = + 
Server_helpers.exec_with_new_task "Licensing host" + (fun __context -> + let host = Helpers.get_localhost ~__context in + License_init.initialise ~__context ~host + ) let startup_script () = - let startup_script_hook = !Xapi_globs.startup_script_hook in - if (try Unix.access startup_script_hook [ Unix.X_OK ]; true with _ -> false) then begin - debug "Executing startup script: %s" startup_script_hook; - ignore(Forkhelpers.execute_command_get_output startup_script_hook []) - end + let startup_script_hook = !Xapi_globs.startup_script_hook in + if (try Unix.access startup_script_hook [ Unix.X_OK ]; true with _ -> false) then begin + debug "Executing startup script: %s" startup_script_hook; + ignore(Forkhelpers.execute_command_get_output startup_script_hook []) + end let master_only_http_handlers = [ (* CA-26044: don't let people DoS random slaves *) @@ -675,11 +675,11 @@ let common_http_handlers = [ ] let listen_unix_socket sock_path = - (* Always listen on the Unix domain socket first *) - Unixext.mkdir_safe (Filename.dirname sock_path) 0o700; - Unixext.unlink_safe sock_path; - let domain_sock = Xapi_http.bind (Unix.ADDR_UNIX(sock_path)) in - ignore(Http_svr.start Xapi_http.server domain_sock) + (* Always listen on the Unix domain socket first *) + Unixext.mkdir_safe (Filename.dirname sock_path) 0o700; + Unixext.unlink_safe sock_path; + let domain_sock = Xapi_http.bind (Unix.ADDR_UNIX(sock_path)) in + ignore(Http_svr.start Xapi_http.server domain_sock) let set_stunnel_timeout () = try @@ -693,16 +693,16 @@ let set_stunnel_timeout () = * master, and to do that we need to start an outgoing stunnel. 
*) let set_stunnel_legacy_inv ~__context () = Stunnel.set_good_ciphersuites (match !Xapi_globs.ciphersuites_good_outbound with - | None -> raise (Api_errors.Server_error (Api_errors.internal_error,["Configuration file does not specify ciphersuites-good-outbound."])) - | Some s -> s - ); + | None -> raise (Api_errors.Server_error (Api_errors.internal_error,["Configuration file does not specify ciphersuites-good-outbound."])) + | Some s -> s + ); Stunnel.set_legacy_ciphersuites !Xapi_globs.ciphersuites_legacy_outbound; let s = Xapi_inventory.lookup Xapi_inventory._stunnel_legacy ~default:"true" in let legacy = try - bool_of_string s - with e -> - error "Invalid inventory value for %s: expected a Boolean; found %s" Xapi_inventory._stunnel_legacy s; - raise e + bool_of_string s + with e -> + error "Invalid inventory value for %s: expected a Boolean; found %s" Xapi_inventory._stunnel_legacy s; + raise e in Xapi_host.set_stunnel_legacy ~__context legacy @@ -733,11 +733,11 @@ let server_init() = let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in (* only tries to call a hook script if there's some external auth_type defined, otherwise it is a useless operation *) if auth_type <> "" then - try - (* the extauth script runs mutually-exclusively with host-{enable,disable}-extauth on this host *) - (* if host-extauth is already disabled then the script will just return *) - ignore (Extauth.call_extauth_hook_script_in_host ~__context host Extauth.event_name_after_xapi_initialize); - with e -> () (* we ignore errors on the extauth_hook calls *) + try + (* the extauth script runs mutually-exclusively with host-{enable,disable}-extauth on this host *) + (* if host-extauth is already disabled then the script will just return *) + ignore (Extauth.call_extauth_hook_script_in_host ~__context host Extauth.event_name_after_xapi_initialize); + with e -> () (* we ignore errors on the extauth_hook calls *) in let call_extauth_hook_script_before_xapi_initialize 
~__context = (* CP-709 *) (* 1. Try to immediately synchronize xapi's subject-list with any external user list -- e.g. in pam.d/sshd *) @@ -755,258 +755,258 @@ let server_init() = in let event_hook_auth_on_xapi_initialize_async ~__context = (* CP-695 *) (* this function should be called asynchronously because it can take a long time to complete *) - (* 1. we should already have synchronously called hook_script_before_xapi_initialize *) + (* 1. we should already have synchronously called hook_script_before_xapi_initialize *) let host = Helpers.get_localhost ~__context in let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in (* only tries to initialize external authentication if there's some external auth_type defined *) if auth_type <> "" then - begin - (* 2. Then, we start extauth initialization procedures *) - let service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in - let last_error = ref None in - (* watchdog to indicate that on_xapi_initialize wasn't successful after 2 min initializing *) - let (_: Thread.t) = Thread.create (fun ()-> Thread.delay (2.0 *. 
60.0); (* wait 2min before testing for success *) - if not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded then - begin (* no success after 2 min *) - let obj_uuid = Helpers.get_localhost_uuid () in - (* CP-729: alert to notify client if internal event hook ext_auth.on_xapi_initialize fails *) - ignore (Helpers.call_api_functions ~__context (fun rpc session_id -> - (* we need to create the alert on the *master* so that XenCenter will be able to pick it up *) - let (name, priority) = Api_messages.auth_external_init_failed in - Client.Client.Message.create ~rpc ~session_id ~name ~priority ~cls:`Host ~obj_uuid ~body:( - "host_external_auth_type="^auth_type^ - ", host_external_auth_service_name="^service_name^ - ", error="^ (match !last_error with None -> "timeout" | Some e -> - (match e with - | Auth_signature.Auth_service_error (errtag,errmsg) -> errmsg (* this is the expected error msg *) - | e -> (ExnHelper.string_of_exn e) (* unknown error msg *) - )) - ); - )); - end - ) () in (); (* ignore Thread.t *) - (* persistent loop trying to initialize the external authentication service *) - (* obs: this loop will also end after a host.disable_external_auth call *) - while (not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded) do - (try - (* try to initialize external authentication service *) - (Ext_auth.d()).on_xapi_initialize !Xapi_globs.on_system_boot; - (* tell everybody the service initialized successfully *) - Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true; - (* 3. Now that we are sure that the external authentication service is working,*) - (* we synchronize xapi's subject-list with any external user list -- e.g. 
in pam.d/sshd *) - (* That implements a secure No-Access-By-Default Policy for sshd and other Dom0 login services, because: *) - (* a) if sync succeeds, we are fine, that's the expected behavior *) - (* b) if sync fails, we are fine: the hook script should have wiped any external user lists due to the *) - (* inacessibility of the external authentication service, but (b) is unexpected because we have just*) - (* confirmed its accessibility *) - (* Call the hook script *after* extauth initialization, so that any access from outside xapi (e.g. in sshd) *) - (* will only include those users in xapi's current subject-list *) - (try call_extauth_hook_script_after_xapi_initialize ~__context with e-> ()) (* CP-709 *) - with e -> (* something failed during initialization of the external authentication subsystem *) - begin - debug "Failed initializing external authentication system auth_type=%s, service_name=%s: %s" auth_type service_name (ExnHelper.string_of_exn e); - last_error := Some e; (* store some error information so that the watchdog can report it later *) - (* do not bubble exception up, we (1) need to loop and (2) don't want xapi server_init to die *) - Thread.delay (5.0 *. 60.0) (* wait 5 mins before trying again *) - end - ); - done; - debug "Leaving loop" - end + begin + (* 2. Then, we start extauth initialization procedures *) + let service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in + let last_error = ref None in + (* watchdog to indicate that on_xapi_initialize wasn't successful after 2 min initializing *) + let (_: Thread.t) = Thread.create (fun ()-> Thread.delay (2.0 *. 
60.0); (* wait 2min before testing for success *) + if not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded then + begin (* no success after 2 min *) + let obj_uuid = Helpers.get_localhost_uuid () in + (* CP-729: alert to notify client if internal event hook ext_auth.on_xapi_initialize fails *) + ignore (Helpers.call_api_functions ~__context (fun rpc session_id -> + (* we need to create the alert on the *master* so that XenCenter will be able to pick it up *) + let (name, priority) = Api_messages.auth_external_init_failed in + Client.Client.Message.create ~rpc ~session_id ~name ~priority ~cls:`Host ~obj_uuid ~body:( + "host_external_auth_type="^auth_type^ + ", host_external_auth_service_name="^service_name^ + ", error="^ (match !last_error with None -> "timeout" | Some e -> + (match e with + | Auth_signature.Auth_service_error (errtag,errmsg) -> errmsg (* this is the expected error msg *) + | e -> (ExnHelper.string_of_exn e) (* unknown error msg *) + )) + ); + )); + end + ) () in (); (* ignore Thread.t *) + (* persistent loop trying to initialize the external authentication service *) + (* obs: this loop will also end after a host.disable_external_auth call *) + while (not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded) do + (try + (* try to initialize external authentication service *) + (Ext_auth.d()).on_xapi_initialize !Xapi_globs.on_system_boot; + (* tell everybody the service initialized successfully *) + Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true; + (* 3. Now that we are sure that the external authentication service is working,*) + (* we synchronize xapi's subject-list with any external user list -- e.g. 
in pam.d/sshd *) + (* That implements a secure No-Access-By-Default Policy for sshd and other Dom0 login services, because: *) + (* a) if sync succeeds, we are fine, that's the expected behavior *) + (* b) if sync fails, we are fine: the hook script should have wiped any external user lists due to the *) + (* inacessibility of the external authentication service, but (b) is unexpected because we have just*) + (* confirmed its accessibility *) + (* Call the hook script *after* extauth initialization, so that any access from outside xapi (e.g. in sshd) *) + (* will only include those users in xapi's current subject-list *) + (try call_extauth_hook_script_after_xapi_initialize ~__context with e-> ()) (* CP-709 *) + with e -> (* something failed during initialization of the external authentication subsystem *) + begin + debug "Failed initializing external authentication system auth_type=%s, service_name=%s: %s" auth_type service_name (ExnHelper.string_of_exn e); + last_error := Some e; (* store some error information so that the watchdog can report it later *) + (* do not bubble exception up, we (1) need to loop and (2) don't want xapi server_init to die *) + Thread.delay (5.0 *. 
60.0) (* wait 5 mins before trying again *) + end + ); + done; + debug "Leaving loop" + end in try Server_helpers.exec_with_new_task "server_init" (fun __context -> - Startup.run ~__context [ - "XAPI SERVER STARTING", [], print_server_starting_message; - "Parsing inventory file", [], Xapi_inventory.read_inventory; - "Config (from file) for incoming/outgoing stunnel instances", [], set_stunnel_legacy_inv ~__context; - "Setting stunnel timeout", [], set_stunnel_timeout; - "Initialising local database", [], init_local_database; - "Loading DHCP leases", [], Xapi_udhcpd.init; - "Reading pool secret", [], Helpers.get_pool_secret; - "Logging xapi version info", [], Xapi_config.dump_config; - "Setting signal handlers", [], signals_handling; - "Initialising random number generator", [], random_setup; - "Running startup check", [], startup_check; - "Registering SMAPIv1 plugins", [Startup.OnlyMaster], Sm.register; - "Starting SMAPIv1 proxies", [Startup.OnlyMaster], Storage_access.start_smapiv1_servers; - "Initialising SM state", [], Storage_impl.initialise; - "Starting SM internal event service", [], Storage_task.Updates.Scheduler.start; - "Starting SM service", [], Storage_access.start; - "Starting SM xapi event service", [], Storage_access.events_from_sm; - "Killing stray sparse_dd processes", [], Sparse_dd_wrapper.killall; - "Registering http handlers", [], (fun () -> List.iter Xapi_http.add_handler common_http_handlers); - "Registering master-only http handlers", [ Startup.OnlyMaster ], (fun () -> List.iter Xapi_http.add_handler master_only_http_handlers); - "Listening unix socket", [], (fun () -> listen_unix_socket Xapi_globs.unix_domain_socket); - "Metadata VDI liveness monitor", [ Startup.OnlyMaster; Startup.OnThread ], (fun () -> Redo_log_alert.loop ()); - "Checking HA configuration", [], start_ha; - "Checking for non-HA redo-log", [], start_redo_log; - (* It is a pre-requisite for starting db engine *) - "Setup DB configuration", [], setup_db_conf; - (* Start up 
database engine if we're a master. - NOTE: We have to start up the database engine before attempting to bring up network etc. because - the database engine start may attempt a schema upgrade + restart xapi. The last thing we want - is to have xapi half way through setting up networking, get restarted after a db schema upgrade and - then try and bring up networking again (now racing with itself since dhclient will already be - running etc.) -- see CA-11087 *) - "starting up database engine", [ Startup.OnlyMaster ], start_database_engine; - "hi-level database upgrade", [ Startup.OnlyMaster ], Xapi_db_upgrade.hi_level_db_upgrade_rules ~__context; - "bringing up management interface", [], bring_up_management_if ~__context; - "Starting periodic scheduler", [Startup.OnThread], Xapi_periodic_scheduler.loop; - "Remote requests", [Startup.OnThread], Remote_requests.handle_requests; - ]; - begin match Pool_role.get_role () with - | Pool_role.Master -> - () - | Pool_role.Broken -> - info "This node is broken; moving straight to emergency mode"; - Xapi_host.set_emergency_mode_error Api_errors.host_broken []; - - (* XXX: consider not restarting here *) - server_run_in_emergency_mode () - | Pool_role.Slave _ -> - info "Running in 'Pool Slave' mode"; - (* Set emergency mode until we actually talk to the master *) - Xapi_globs.slave_emergency_mode := true; - (* signal the init script that it should succeed even though we're bust *) - Helpers.touch_file !Xapi_globs.ready_file; - - (* Keep trying to log into master *) - let finished = ref false in - while not(!finished) do - (* Grab the management IP address (wait forever for it if necessary) *) - let ip = wait_for_management_ip_address ~__context in - - debug "Start master_connection watchdog"; - ignore (Master_connection.start_master_connection_watchdog ()); - - debug "Attempting to communicate with master"; - (* Try to say hello to the pool *) - begin match attempt_pool_hello ip with - | None -> finished := true - | Some 
Temporary -> - debug "I think the error is a temporary one, retrying in 5s"; - Thread.delay 5.; - | Some Permanent -> - error "Permanent error in Pool.hello, will retry after %.0fs just in case" !Xapi_globs.permanent_master_failure_retry_interval; - Thread.delay !Xapi_globs.permanent_master_failure_retry_interval - end; - done; - debug "Startup successful"; - Xapi_globs.slave_emergency_mode := false; - Master_connection.connection_timeout := initial_connection_timeout; - - begin - try - (* We can't tolerate an exception in db synchronization so fall back into emergency mode - if this happens and try again later.. *) - Master_connection.restart_on_connection_timeout := false; - Master_connection.connection_timeout := 10.; (* give up retrying after 10s *) - Db_cache_impl.initialise (); - Sm.register (); - Startup.run ~__context [ - "Starting SMAPIv1 proxies", [Startup.OnlySlave], Storage_access.start_smapiv1_servers; - ]; - Dbsync.setup () - with e -> + Startup.run ~__context [ + "XAPI SERVER STARTING", [], print_server_starting_message; + "Parsing inventory file", [], Xapi_inventory.read_inventory; + "Config (from file) for incoming/outgoing stunnel instances", [], set_stunnel_legacy_inv ~__context; + "Setting stunnel timeout", [], set_stunnel_timeout; + "Initialising local database", [], init_local_database; + "Loading DHCP leases", [], Xapi_udhcpd.init; + "Reading pool secret", [], Helpers.get_pool_secret; + "Logging xapi version info", [], Xapi_config.dump_config; + "Setting signal handlers", [], signals_handling; + "Initialising random number generator", [], random_setup; + "Running startup check", [], startup_check; + "Registering SMAPIv1 plugins", [Startup.OnlyMaster], Sm.register; + "Starting SMAPIv1 proxies", [Startup.OnlyMaster], Storage_access.start_smapiv1_servers; + "Initialising SM state", [], Storage_impl.initialise; + "Starting SM internal event service", [], Storage_task.Updates.Scheduler.start; + "Starting SM service", [], Storage_access.start; + 
"Starting SM xapi event service", [], Storage_access.events_from_sm; + "Killing stray sparse_dd processes", [], Sparse_dd_wrapper.killall; + "Registering http handlers", [], (fun () -> List.iter Xapi_http.add_handler common_http_handlers); + "Registering master-only http handlers", [ Startup.OnlyMaster ], (fun () -> List.iter Xapi_http.add_handler master_only_http_handlers); + "Listening unix socket", [], (fun () -> listen_unix_socket Xapi_globs.unix_domain_socket); + "Metadata VDI liveness monitor", [ Startup.OnlyMaster; Startup.OnThread ], (fun () -> Redo_log_alert.loop ()); + "Checking HA configuration", [], start_ha; + "Checking for non-HA redo-log", [], start_redo_log; + (* It is a pre-requisite for starting db engine *) + "Setup DB configuration", [], setup_db_conf; + (* Start up database engine if we're a master. + NOTE: We have to start up the database engine before attempting to bring up network etc. because + the database engine start may attempt a schema upgrade + restart xapi. The last thing we want + is to have xapi half way through setting up networking, get restarted after a db schema upgrade and + then try and bring up networking again (now racing with itself since dhclient will already be + running etc.) 
-- see CA-11087 *) + "starting up database engine", [ Startup.OnlyMaster ], start_database_engine; + "hi-level database upgrade", [ Startup.OnlyMaster ], Xapi_db_upgrade.hi_level_db_upgrade_rules ~__context; + "bringing up management interface", [], bring_up_management_if ~__context; + "Starting periodic scheduler", [Startup.OnThread], Xapi_periodic_scheduler.loop; + "Remote requests", [Startup.OnThread], Remote_requests.handle_requests; + ]; + begin match Pool_role.get_role () with + | Pool_role.Master -> + () + | Pool_role.Broken -> + info "This node is broken; moving straight to emergency mode"; + Xapi_host.set_emergency_mode_error Api_errors.host_broken []; + + (* XXX: consider not restarting here *) + server_run_in_emergency_mode () + | Pool_role.Slave _ -> + info "Running in 'Pool Slave' mode"; + (* Set emergency mode until we actually talk to the master *) + Xapi_globs.slave_emergency_mode := true; + (* signal the init script that it should succeed even though we're bust *) + Helpers.touch_file !Xapi_globs.ready_file; + + (* Keep trying to log into master *) + let finished = ref false in + while not(!finished) do + (* Grab the management IP address (wait forever for it if necessary) *) + let ip = wait_for_management_ip_address ~__context in + + debug "Start master_connection watchdog"; + ignore (Master_connection.start_master_connection_watchdog ()); + + debug "Attempting to communicate with master"; + (* Try to say hello to the pool *) + begin match attempt_pool_hello ip with + | None -> finished := true + | Some Temporary -> + debug "I think the error is a temporary one, retrying in 5s"; + Thread.delay 5.; + | Some Permanent -> + error "Permanent error in Pool.hello, will retry after %.0fs just in case" !Xapi_globs.permanent_master_failure_retry_interval; + Thread.delay !Xapi_globs.permanent_master_failure_retry_interval + end; + done; + debug "Startup successful"; + Xapi_globs.slave_emergency_mode := false; + Master_connection.connection_timeout := 
initial_connection_timeout; + begin - debug "Failure in slave dbsync; slave will pause and then restart to try again. Entering emergency mode."; - server_run_in_emergency_mode() - end - end; - Master_connection.connection_timeout := !Xapi_globs.master_connection_retry_timeout; - Master_connection.restart_on_connection_timeout := true; - Master_connection.on_database_connection_established := (fun () -> on_master_restart ~__context); - end); - - Server_helpers.exec_with_new_task "server_init" ~task_in_database:true (fun __context -> - Startup.run ~__context [ - "Checking emergency network reset", [], check_network_reset; - "Upgrade bonds to Boston", [Startup.NoExnRaising], Sync_networking.fix_bonds ~__context; - "Reconfig (from DB) for incoming/outgoing stunnel instances", [], set_stunnel_legacy_db ~__context; - "Initialise monitor configuration", [], Monitor_master.update_configuration_from_master; - "Initialising licensing", [], handle_licensing; - "message_hook_thread", [ Startup.NoExnRaising ], (Xapi_message.start_message_hook_thread ~__context); - "heartbeat thread", [ Startup.NoExnRaising; Startup.OnThread ], Db_gc.start_heartbeat_thread; - "resynchronising HA state", [ Startup.NoExnRaising ], resynchronise_ha_state; - "pool db backup", [ Startup.OnlyMaster; Startup.OnThread ], Pool_db_backup.pool_db_backup_thread; - "monitor_dbcalls", [Startup.OnThread], Monitor_dbcalls.monitor_dbcall_thread; - "touching ready file", [], (fun () -> Helpers.touch_file !Xapi_globs.ready_file); - (* -- CRITICAL: this check must be performed before touching shared storage *) - "Performing no-other-masters check", [ Startup.OnlyMaster ], check_no_other_masters; - "Registering periodic functions", [], Xapi_periodic_scheduler_init.register; - "executing startup scripts", [ Startup.NoExnRaising], startup_script; - - "considering executing on-master-start script", [], - (fun () -> Xapi_pool_transition.run_external_scripts (Pool_role.is_master ())); - "creating networks", [ 
Startup.OnlyMaster ], Create_networks.create_networks_localhost; - (* CA-22417: bring up all non-bond slaves so that the SM backends can use storage NIC IP addresses (if the routing - table happens to be right) *) - "Best-effort bring up of physical NICs", [ Startup.NoExnRaising ], Xapi_pif.start_of_day_best_effort_bring_up; - "updating the vswitch controller", [], (fun () -> Helpers.update_vswitch_controller ~__context ~host:(Helpers.get_localhost ~__context)); - "initialising storage", [ Startup.NoExnRaising ], - (fun () -> Helpers.call_api_functions ~__context Create_storage.create_storage_localhost); - (* CA-13878: make sure PBD plugging has happened before attempting to reboot any VMs *) - "resynchronising VM state", [], (fun () -> Xapi_xenops.on_xapi_restart ~__context); - "listening to events from xapi", [], (fun () -> if not (!noevents) then ignore (Thread.create Xapi_xenops.events_from_xapi ())); - (* CA-175353: moving VIFs between networks requires VMs to be resynced *) - "Synchronising bonds on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_bonds_from_master ~__context; - "Synchronising VLANs on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_vlans_from_master ~__context; - "Synchronising tunnels on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_tunnels_from_master ~__context; - - "SR scanning", [ Startup.OnlyMaster; Startup.OnThread ], Xapi_sr.scanning_thread; - "Updating pool cpu_info", [], (fun () -> Create_misc.create_pool_cpuinfo ~__context); - "writing init complete", [], (fun () -> Helpers.touch_file !Xapi_globs.init_complete); -(* "Synchronising HA state with Pool", [ Startup.NoExnRaising ], Xapi_ha.synchronise_ha_state_with_pool; *) - "Starting DR redo-logs", [ Startup.OnlyMaster; ], start_dr_redo_logs; - "Starting SR physical utilisation scanning", [Startup.OnThread], (Xapi_sr.physical_utilisation_thread ~__context); - "Caching metadata 
VDIs created by foreign pools.", [ Startup.OnlyMaster; ], cache_metadata_vdis; - "Stats reporting thread", [], Xapi_stats.start; - ]; - - if !debug_dummy_data then ( - Startup.run ~__context [ "populating db with dummy data", [ Startup.OnlyMaster; Startup.NoExnRaising ], - (fun () -> Debug_populate.do_populate ~vms:1000 ~vdis_per_vm:3 ~networks:10 ~srs:10 ~tasks:1000) ] - ); - - let wait_management_interface () = - let management_if = Xapi_inventory.lookup Xapi_inventory._management_interface in - if management_if <> "" then ( - debug "Waiting forever for the management interface to gain an IP address"; - let ip = wait_for_management_ip_address ~__context in - debug "Management interface got IP address: %s; attempting to re-plug any unplugged PBDs" ip; - Helpers.call_api_functions ~__context (fun rpc session_id -> - Create_storage.plug_unplugged_pbds __context) - ) - in - - Startup.run ~__context [ - "fetching database backup", [ Startup.OnlySlave; Startup.NoExnRaising ], - (fun () -> Pool_db_backup.fetch_database_backup ~master_address:(Pool_role.get_master_address()) - ~pool_secret:!Xapi_globs.pool_secret ~force:None); - "wait management interface to come up", [ Startup.NoExnRaising ], wait_management_interface; - "considering sending a master transition alert", [ Startup.NoExnRaising; Startup.OnlyMaster ], + try + (* We can't tolerate an exception in db synchronization so fall back into emergency mode + if this happens and try again later.. *) + Master_connection.restart_on_connection_timeout := false; + Master_connection.connection_timeout := 10.; (* give up retrying after 10s *) + Db_cache_impl.initialise (); + Sm.register (); + Startup.run ~__context [ + "Starting SMAPIv1 proxies", [Startup.OnlySlave], Storage_access.start_smapiv1_servers; + ]; + Dbsync.setup () + with e -> + begin + debug "Failure in slave dbsync; slave will pause and then restart to try again. 
Entering emergency mode."; + server_run_in_emergency_mode() + end + end; + Master_connection.connection_timeout := !Xapi_globs.master_connection_retry_timeout; + Master_connection.restart_on_connection_timeout := true; + Master_connection.on_database_connection_established := (fun () -> on_master_restart ~__context); + end); + + Server_helpers.exec_with_new_task "server_init" ~task_in_database:true (fun __context -> + Startup.run ~__context [ + "Checking emergency network reset", [], check_network_reset; + "Upgrade bonds to Boston", [Startup.NoExnRaising], Sync_networking.fix_bonds ~__context; + "Reconfig (from DB) for incoming/outgoing stunnel instances", [], set_stunnel_legacy_db ~__context; + "Initialise monitor configuration", [], Monitor_master.update_configuration_from_master; + "Initialising licensing", [], handle_licensing; + "message_hook_thread", [ Startup.NoExnRaising ], (Xapi_message.start_message_hook_thread ~__context); + "heartbeat thread", [ Startup.NoExnRaising; Startup.OnThread ], Db_gc.start_heartbeat_thread; + "resynchronising HA state", [ Startup.NoExnRaising ], resynchronise_ha_state; + "pool db backup", [ Startup.OnlyMaster; Startup.OnThread ], Pool_db_backup.pool_db_backup_thread; + "monitor_dbcalls", [Startup.OnThread], Monitor_dbcalls.monitor_dbcall_thread; + "touching ready file", [], (fun () -> Helpers.touch_file !Xapi_globs.ready_file); + (* -- CRITICAL: this check must be performed before touching shared storage *) + "Performing no-other-masters check", [ Startup.OnlyMaster ], check_no_other_masters; + "Registering periodic functions", [], Xapi_periodic_scheduler_init.register; + "executing startup scripts", [ Startup.NoExnRaising], startup_script; + + "considering executing on-master-start script", [], + (fun () -> Xapi_pool_transition.run_external_scripts (Pool_role.is_master ())); + "creating networks", [ Startup.OnlyMaster ], Create_networks.create_networks_localhost; + (* CA-22417: bring up all non-bond slaves so that the SM 
backends can use storage NIC IP addresses (if the routing + table happens to be right) *) + "Best-effort bring up of physical NICs", [ Startup.NoExnRaising ], Xapi_pif.start_of_day_best_effort_bring_up; + "updating the vswitch controller", [], (fun () -> Helpers.update_vswitch_controller ~__context ~host:(Helpers.get_localhost ~__context)); + "initialising storage", [ Startup.NoExnRaising ], + (fun () -> Helpers.call_api_functions ~__context Create_storage.create_storage_localhost); + (* CA-13878: make sure PBD plugging has happened before attempting to reboot any VMs *) + "resynchronising VM state", [], (fun () -> Xapi_xenops.on_xapi_restart ~__context); + "listening to events from xapi", [], (fun () -> if not (!noevents) then ignore (Thread.create Xapi_xenops.events_from_xapi ())); + (* CA-175353: moving VIFs between networks requires VMs to be resynced *) + "Synchronising bonds on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_bonds_from_master ~__context; + "Synchronising VLANs on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_vlans_from_master ~__context; + "Synchronising tunnels on slave with master", [Startup.OnlySlave; Startup.NoExnRaising], Sync_networking.copy_tunnels_from_master ~__context; + + "SR scanning", [ Startup.OnlyMaster; Startup.OnThread ], Xapi_sr.scanning_thread; + "Updating pool cpu_info", [], (fun () -> Create_misc.create_pool_cpuinfo ~__context); + "writing init complete", [], (fun () -> Helpers.touch_file !Xapi_globs.init_complete); + (* "Synchronising HA state with Pool", [ Startup.NoExnRaising ], Xapi_ha.synchronise_ha_state_with_pool; *) + "Starting DR redo-logs", [ Startup.OnlyMaster; ], start_dr_redo_logs; + "Starting SR physical utilisation scanning", [Startup.OnThread], (Xapi_sr.physical_utilisation_thread ~__context); + "Caching metadata VDIs created by foreign pools.", [ Startup.OnlyMaster; ], cache_metadata_vdis; + "Stats reporting thread", [], 
Xapi_stats.start; + ]; + + if !debug_dummy_data then ( + Startup.run ~__context [ "populating db with dummy data", [ Startup.OnlyMaster; Startup.NoExnRaising ], + (fun () -> Debug_populate.do_populate ~vms:1000 ~vdis_per_vm:3 ~networks:10 ~srs:10 ~tasks:1000) ] + ); + + let wait_management_interface () = + let management_if = Xapi_inventory.lookup Xapi_inventory._management_interface in + if management_if <> "" then ( + debug "Waiting forever for the management interface to gain an IP address"; + let ip = wait_for_management_ip_address ~__context in + debug "Management interface got IP address: %s; attempting to re-plug any unplugged PBDs" ip; + Helpers.call_api_functions ~__context (fun rpc session_id -> + Create_storage.plug_unplugged_pbds __context) + ) + in + + Startup.run ~__context [ + "fetching database backup", [ Startup.OnlySlave; Startup.NoExnRaising ], + (fun () -> Pool_db_backup.fetch_database_backup ~master_address:(Pool_role.get_master_address()) + ~pool_secret:!Xapi_globs.pool_secret ~force:None); + "wait management interface to come up", [ Startup.NoExnRaising ], wait_management_interface; + "considering sending a master transition alert", [ Startup.NoExnRaising; Startup.OnlyMaster ], Xapi_pool_transition.consider_sending_alert __context; - "Cancelling in-progress storage migrations", [], (fun () -> Storage_migrate.killall ~dbg:"xapi init"); - (* Start the external authentification plugin *) - "Calling extauth_hook_script_before_xapi_initialize", [ Startup.NoExnRaising ], + "Cancelling in-progress storage migrations", [], (fun () -> Storage_migrate.killall ~dbg:"xapi init"); + (* Start the external authentification plugin *) + "Calling extauth_hook_script_before_xapi_initialize", [ Startup.NoExnRaising ], (fun () -> call_extauth_hook_script_before_xapi_initialize ~__context); - "Calling on_xapi_initialize event hook in the external authentication plugin", [ Startup.NoExnRaising; Startup.OnThread ], + "Calling on_xapi_initialize event hook in the 
external authentication plugin", [ Startup.NoExnRaising; Startup.OnThread ], (fun () -> event_hook_auth_on_xapi_initialize_async ~__context); - ]; + ]; - debug "startup: startup sequence finished"); + debug "startup: startup sequence finished"); wait_to_die() with | Sys.Break -> cleanup_handler 0 | (Unix.Unix_error (e,s1,s2)) as exn -> - Backtrace.is_important exn; - (debug "xapi top-level caught Unix_error: %s, %s, %s" (Unix.error_message e) s1 s2; raise exn) + Backtrace.is_important exn; + (debug "xapi top-level caught Unix_error: %s, %s, %s" (Unix.error_message e) s1 s2; raise exn) | exn -> - Backtrace.is_important exn; - debug "xapi top-level caught exception: %s" (ExnHelper.string_of_exn exn); raise exn + Backtrace.is_important exn; + debug "xapi top-level caught exception: %s" (ExnHelper.string_of_exn exn); raise exn (* Most likely cause of eintr in normal operation is a sigterm/sigint. In this case our handler will tell the db thread to exit after next flush (where flushes are schduled every 2s). Delay @@ -1018,20 +1018,20 @@ let delay_on_eintr f = f () with Unix.Unix_error(Unix.EINTR,_,_) -> - debug "received EINTR. waiting to enable db thread to flush"; - Thread.delay 60.; - exit(0) + debug "received EINTR. 
waiting to enable db thread to flush"; + Thread.delay 60.; + exit(0) | e -> Backtrace.is_important e; raise e let watchdog f = - if !Xapi_globs.nowatchdog then begin - try - ignore(Unix.sigprocmask Unix.SIG_UNBLOCK [Sys.sigint]); - delay_on_eintr f; - exit 127 - with e -> - Debug.log_backtrace e (Backtrace.get e); - exit 2 - end + if !Xapi_globs.nowatchdog then begin + try + ignore(Unix.sigprocmask Unix.SIG_UNBLOCK [Sys.sigint]); + delay_on_eintr f; + exit 127 + with e -> + Debug.log_backtrace e (Backtrace.get e); + exit 2 + end diff --git a/ocaml/xapi/xapi_alert.ml b/ocaml/xapi/xapi_alert.ml index e7a27f326ff..a7010c42823 100644 --- a/ocaml/xapi/xapi_alert.ml +++ b/ocaml/xapi/xapi_alert.ml @@ -18,20 +18,20 @@ open Client module Alert = struct type t = { name: string; - priority: int64; - cls: API.cls; - obj_uuid: string; - body: string } - let process (x: t) = - Server_helpers.exec_with_new_task "Sending an HA alert" ~task_in_database:false + priority: int64; + cls: API.cls; + obj_uuid: string; + body: string } + let process (x: t) = + Server_helpers.exec_with_new_task "Sending an HA alert" ~task_in_database:false (fun __context -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> - try - let (_: 'a Ref.t) = Client.Message.create rpc session_id x.name x.priority x.cls x.obj_uuid x.body in () - with e -> - warn "Exception creating message: %s" (ExnHelper.string_of_exn e) - ) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + try + let (_: 'a Ref.t) = Client.Message.create rpc session_id x.name x.priority x.cls x.obj_uuid x.body in () + with e -> + warn "Exception creating message: %s" (ExnHelper.string_of_exn e) + ) ) end @@ -42,20 +42,20 @@ let alert_queue_push = (Thread_queue.make ~name:"API messages" ~max_q_length:100 let add ~msg:(name, priority) ~cls ~obj_uuid ~body = let sent = if Pool_role.is_master () then begin - Server_helpers.exec_with_new_task "Sending an alert" ~task_in_database:false - (fun __context -> - let (_: 'a 
Ref.t) = Xapi_message.create ~__context ~name ~priority ~cls ~obj_uuid ~body in true - ) + Server_helpers.exec_with_new_task "Sending an alert" ~task_in_database:false + (fun __context -> + let (_: 'a Ref.t) = Xapi_message.create ~__context ~name ~priority ~cls ~obj_uuid ~body in true + ) end else alert_queue_push name { Alert.name = name; priority = priority; cls = cls; obj_uuid = obj_uuid; body = body } in if not sent then warn "Failed to send alert %s %s" name obj_uuid - - + + (** Repeated calls to this function call 'on_edge_fn' on every value transition *) -let edge_trigger on_edge_fn = +let edge_trigger on_edge_fn = let old_value = ref None in fun x -> - begin + begin match !old_value with | None -> () | Some ov -> if ov <> x then on_edge_fn ov x @@ -66,7 +66,7 @@ let edge_trigger on_edge_fn = let int_trigger = edge_trigger (fun _ _ -> add ~name:"foo" ~priority:1L ~cls:`Pool ~obj_uuid:"" ~body:"") let string_trigger = edge_trigger (fun _ _ -> add ~name:"foo" ~priority:1L ~cls:`Pool ~obj_uuid:"" ~body:"") -let _ = +let _ = int_trigger 0; int_trigger 1; string_trigger ""; diff --git a/ocaml/xapi/xapi_alert.mli b/ocaml/xapi/xapi_alert.mli index 9ef04100a63..f7fb762c8e2 100644 --- a/ocaml/xapi/xapi_alert.mli +++ b/ocaml/xapi/xapi_alert.mli @@ -13,7 +13,7 @@ *) (** Creates an alert/message and guarantees not to block. If called on the master it will use the - internal Xapi_message.create function. If called on a slave it will enqueue the alert and + internal Xapi_message.create function. If called on a slave it will enqueue the alert and leave it for processing by a background thread. 
*) val add : msg:(string * int64) -> cls:API.cls -> obj_uuid:string -> body:string -> unit diff --git a/ocaml/xapi/xapi_auth.ml b/ocaml/xapi/xapi_auth.ml index 63d57534cdd..2d2607fcfdd 100644 --- a/ocaml/xapi/xapi_auth.ml +++ b/ocaml/xapi/xapi_auth.ml @@ -13,35 +13,35 @@ *) (** * @group Access Control - *) - +*) + open Auth_signature open Extauth let call_with_exception_handler fn = - try fn () with - | Extauth.Extauth_is_disabled -> - raise (Api_errors.Server_error(Api_errors.auth_is_disabled, [])) - | Extauth.Unknown_extauth_type msg -> - raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) - | Not_found - | Auth_signature.Subject_cannot_be_resolved -> - raise (Api_errors.Server_error(Api_errors.subject_cannot_be_resolved, [])) - | Auth_signature.Auth_service_error (errtag,msg) -> - raise (Api_errors.Server_error(Api_errors.auth_service_error, [msg])) - | e -> - raise (Api_errors.Server_error(Api_errors.auth_service_error, [ExnHelper.string_of_exn e])) + try fn () with + | Extauth.Extauth_is_disabled -> + raise (Api_errors.Server_error(Api_errors.auth_is_disabled, [])) + | Extauth.Unknown_extauth_type msg -> + raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) + | Not_found + | Auth_signature.Subject_cannot_be_resolved -> + raise (Api_errors.Server_error(Api_errors.subject_cannot_be_resolved, [])) + | Auth_signature.Auth_service_error (errtag,msg) -> + raise (Api_errors.Server_error(Api_errors.auth_service_error, [msg])) + | e -> + raise (Api_errors.Server_error(Api_errors.auth_service_error, [ExnHelper.string_of_exn e])) (* PRECONDITION: All of these additional calls require a valid session to be presented.*) (* ==> the session validity is already checked in every server.ml call by using Session_check.check *) let get_subject_identifier ~__context ~subject_name = - call_with_exception_handler (fun () -> ((Ext_auth.d()).get_subject_identifier subject_name)) + call_with_exception_handler (fun () -> 
((Ext_auth.d()).get_subject_identifier subject_name)) -let get_group_membership ~__context ~subject_identifier = - call_with_exception_handler (fun () -> ((Ext_auth.d()).query_group_membership subject_identifier)) +let get_group_membership ~__context ~subject_identifier = + call_with_exception_handler (fun () -> ((Ext_auth.d()).query_group_membership subject_identifier)) -let get_subject_information_from_identifier ~__context ~subject_identifier = - call_with_exception_handler (fun () -> ((Ext_auth.d()).query_subject_information subject_identifier)) +let get_subject_information_from_identifier ~__context ~subject_identifier = + call_with_exception_handler (fun () -> ((Ext_auth.d()).query_subject_information subject_identifier)) diff --git a/ocaml/xapi/xapi_blob.ml b/ocaml/xapi/xapi_blob.ml index 3805105ca76..bf4392b077a 100644 --- a/ocaml/xapi/xapi_blob.ml +++ b/ocaml/xapi/xapi_blob.ml @@ -27,9 +27,9 @@ let destroy ~__context ~self = (* This needs to be special-cased for all objects that contain blobs *) let vms = Db.VM.get_all_records ~__context in List.iter (fun (vm,vmr) -> - let blobs = vmr.API.vM_blobs in - List.iter (fun (r,b) -> if b=self then Db.VM.remove_from_blobs ~__context ~self:vm ~key:r) blobs) vms; - + let blobs = vmr.API.vM_blobs in + List.iter (fun (r,b) -> if b=self then Db.VM.remove_from_blobs ~__context ~self:vm ~key:r) blobs) vms; + let uuid = Db.Blob.get_uuid ~__context ~self in let path = Xapi_globs.xapi_blob_location ^ "/" ^ uuid in Stdext.Unixext.unlink_safe path; @@ -38,55 +38,55 @@ let destroy ~__context ~self = (* Send blobs to a remote host on a different pool. uuid_map is a map of remote blob uuids to local blob refs. 
*) let send_blobs ~__context ~remote_address ~session_id uuid_map = - let put_blob = function (new_ref, old_uuid) -> - try - let query = [ "session_id", Ref.string_of session_id - ; "ref", Ref.string_of new_ref ] in - let subtask_of = Context.string_of_task __context in - let path = Xapi_globs.xapi_blob_location ^ "/" ^ old_uuid in - let size = (Unix.LargeFile.stat path).Unix.LargeFile.st_size in - let request = Xapi_http.http_request ~query ~subtask_of ~length:size - Http.Put Constants.blob_uri in - - let open Xmlrpc_client in - let transport = SSL(SSL.make (), remote_address, - !Xapi_globs.https_port) in - with_transport transport - (with_http request (fun (response, put_fd) -> - let blob_fd = Unix.openfile path [Unix.O_RDONLY] 0o600 in - ignore (Stdext.Pervasiveext.finally - (fun () -> Stdext.Unixext.copy_file blob_fd put_fd) - (fun () -> Unix.close blob_fd)) )) - with e -> - debug "Ignoring exception in send_blobs: %s" (Printexc.to_string e); - () - in - List.iter put_blob uuid_map + let put_blob = function (new_ref, old_uuid) -> + try + let query = [ "session_id", Ref.string_of session_id + ; "ref", Ref.string_of new_ref ] in + let subtask_of = Context.string_of_task __context in + let path = Xapi_globs.xapi_blob_location ^ "/" ^ old_uuid in + let size = (Unix.LargeFile.stat path).Unix.LargeFile.st_size in + let request = Xapi_http.http_request ~query ~subtask_of ~length:size + Http.Put Constants.blob_uri in + + let open Xmlrpc_client in + let transport = SSL(SSL.make (), remote_address, + !Xapi_globs.https_port) in + with_transport transport + (with_http request (fun (response, put_fd) -> + let blob_fd = Unix.openfile path [Unix.O_RDONLY] 0o600 in + ignore (Stdext.Pervasiveext.finally + (fun () -> Stdext.Unixext.copy_file blob_fd put_fd) + (fun () -> Unix.close blob_fd)) )) + with e -> + debug "Ignoring exception in send_blobs: %s" (Printexc.to_string e); + () + in + List.iter put_blob uuid_map (* Send a VMs blobs to a remote host on another pool, and destroy 
the leftover blobs on this host. To be called from Xapi_vm_migrate.migrate. *) let migrate_push ~__context ~rpc ~remote_address ~session_id ~old_vm ~new_vm = - let vm_blobs = Db.VM.get_blobs ~__context ~self:old_vm in - (* Create new blob objects on remote host, and return a map - from new blob uuids to old blob refs *) - let uuid_map = List.map - (fun (name,self) -> - let mime_type = Db.Blob.get_mime_type ~__context ~self - and public = Db.Blob.get_public ~__context ~self - and old_uuid = Db.Blob.get_uuid ~__context ~self in - let new_ref = Client.Client.VM.create_new_blob - ~rpc ~session_id ~vm:new_vm ~name ~mime_type ~public in - let name = Db.Blob.get_name_label ~__context ~self in - Client.Client.Blob.set_name_label ~rpc ~session_id ~self:new_ref ~value:name; - (new_ref, old_uuid) ) - vm_blobs - in - send_blobs ~__context ~remote_address ~session_id uuid_map ; - (* Now destroy old blobs *) - List.iter (fun (_,self) -> - destroy ~__context ~self:(Db.Blob.get_by_uuid ~__context ~uuid:self)) - uuid_map + let vm_blobs = Db.VM.get_blobs ~__context ~self:old_vm in + (* Create new blob objects on remote host, and return a map + from new blob uuids to old blob refs *) + let uuid_map = List.map + (fun (name,self) -> + let mime_type = Db.Blob.get_mime_type ~__context ~self + and public = Db.Blob.get_public ~__context ~self + and old_uuid = Db.Blob.get_uuid ~__context ~self in + let new_ref = Client.Client.VM.create_new_blob + ~rpc ~session_id ~vm:new_vm ~name ~mime_type ~public in + let name = Db.Blob.get_name_label ~__context ~self in + Client.Client.Blob.set_name_label ~rpc ~session_id ~self:new_ref ~value:name; + (new_ref, old_uuid) ) + vm_blobs + in + send_blobs ~__context ~remote_address ~session_id uuid_map ; + (* Now destroy old blobs *) + List.iter (fun (_,self) -> + destroy ~__context ~self:(Db.Blob.get_by_uuid ~__context ~uuid:self)) + uuid_map exception Unknown_blob exception No_storage @@ -99,75 +99,75 @@ let handler (req: Http.Request.t) s _ = 
Http_svr.headers s headers; error "HTTP request for binary blob lacked 'ref' or 'uuid' parameter" end else - try - let self,public = - Server_helpers.exec_with_new_task "with_context" (fun __context -> - let self = try - Ref.of_string (List.assoc "ref" query) - with _ -> - try Db.Blob.get_by_uuid ~__context ~uuid:(List.assoc "uuid" query) with _ -> raise Unknown_blob - in - debug "blob handler: self=%s" (Ref.string_of self); - let public = - try - Db.Blob.get_public ~__context ~self - with e -> - debug "In exception handler: %s" (Printexc.to_string e); - false - in - debug "public=%b" public; - self,public) - in - let inner_fn __context = - let blob_uuid = - try Db.Blob.get_uuid ~__context ~self - with _ -> raise Unknown_blob - in - let blob_path = Xapi_globs.xapi_blob_location in - (try let (_: Unix.stats) = Unix.stat blob_path in () with _ -> raise No_storage); - let path = Xapi_globs.xapi_blob_location ^ "/" ^ blob_uuid in - - match req.Http.Request.m with - | Http.Get -> - begin - try - (* The following might raise an exception, in which case, 404 *) - let ifd = Unix.openfile path [Unix.O_RDONLY] 0o600 in - let size = (Unix.LargeFile.stat path).Unix.LargeFile.st_size in - Http_svr.headers s ((Http.http_200_ok_with_content - size ~version:"1.1" ~keep_alive:false ()) - @ [Http.Hdr.content_type ^": "^(Db.Blob.get_mime_type ~__context ~self)]); - ignore(Stdext.Pervasiveext.finally - (fun () -> Stdext.Unixext.copy_file ifd s) - (fun () -> Unix.close ifd)) - with _ -> - Http_svr.headers s (Http.http_404_missing ()) - end - | Http.Put -> - let ofd = Unix.openfile path [Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_SYNC; Unix.O_CREAT] 0o600 in - let limit = match req.Http.Request.content_length with Some x -> x | None -> failwith "Need content length" in - let size = - Stdext.Pervasiveext.finally - (fun () -> - Http_svr.headers s (Http.http_200_ok () @ ["Access-Control-Allow-Origin: *"]); - Stdext.Unixext.copy_file ~limit s ofd) - (fun () -> - Unix.close ofd) - in - 
Db.Blob.set_size ~__context ~self ~value:size; - Db.Blob.set_last_updated ~__context ~self ~value:(Stdext.Date.of_float (Unix.gettimeofday ())) - | _ -> failwith "Unsupported method for BLOB" - - in - - if public && req.Http.Request.m = Http.Get - then Server_helpers.exec_with_new_task "get_blob" inner_fn - else Xapi_http.with_context ~dummy:true "Blob handler" req s inner_fn - with - | Unknown_blob -> - Http_svr.response_missing s "Unknown reference\n" - | No_storage -> - Http_svr.response_missing s "Local storage missing\n" + try + let self,public = + Server_helpers.exec_with_new_task "with_context" (fun __context -> + let self = try + Ref.of_string (List.assoc "ref" query) + with _ -> + try Db.Blob.get_by_uuid ~__context ~uuid:(List.assoc "uuid" query) with _ -> raise Unknown_blob + in + debug "blob handler: self=%s" (Ref.string_of self); + let public = + try + Db.Blob.get_public ~__context ~self + with e -> + debug "In exception handler: %s" (Printexc.to_string e); + false + in + debug "public=%b" public; + self,public) + in + let inner_fn __context = + let blob_uuid = + try Db.Blob.get_uuid ~__context ~self + with _ -> raise Unknown_blob + in + let blob_path = Xapi_globs.xapi_blob_location in + (try let (_: Unix.stats) = Unix.stat blob_path in () with _ -> raise No_storage); + let path = Xapi_globs.xapi_blob_location ^ "/" ^ blob_uuid in + + match req.Http.Request.m with + | Http.Get -> + begin + try + (* The following might raise an exception, in which case, 404 *) + let ifd = Unix.openfile path [Unix.O_RDONLY] 0o600 in + let size = (Unix.LargeFile.stat path).Unix.LargeFile.st_size in + Http_svr.headers s ((Http.http_200_ok_with_content + size ~version:"1.1" ~keep_alive:false ()) + @ [Http.Hdr.content_type ^": "^(Db.Blob.get_mime_type ~__context ~self)]); + ignore(Stdext.Pervasiveext.finally + (fun () -> Stdext.Unixext.copy_file ifd s) + (fun () -> Unix.close ifd)) + with _ -> + Http_svr.headers s (Http.http_404_missing ()) + end + | Http.Put -> + let ofd = 
Unix.openfile path [Unix.O_WRONLY; Unix.O_TRUNC; Unix.O_SYNC; Unix.O_CREAT] 0o600 in + let limit = match req.Http.Request.content_length with Some x -> x | None -> failwith "Need content length" in + let size = + Stdext.Pervasiveext.finally + (fun () -> + Http_svr.headers s (Http.http_200_ok () @ ["Access-Control-Allow-Origin: *"]); + Stdext.Unixext.copy_file ~limit s ofd) + (fun () -> + Unix.close ofd) + in + Db.Blob.set_size ~__context ~self ~value:size; + Db.Blob.set_last_updated ~__context ~self ~value:(Stdext.Date.of_float (Unix.gettimeofday ())) + | _ -> failwith "Unsupported method for BLOB" + + in + + if public && req.Http.Request.m = Http.Get + then Server_helpers.exec_with_new_task "get_blob" inner_fn + else Xapi_http.with_context ~dummy:true "Blob handler" req s inner_fn + with + | Unknown_blob -> + Http_svr.response_missing s "Unknown reference\n" + | No_storage -> + Http_svr.response_missing s "Local storage missing\n" diff --git a/ocaml/xapi/xapi_bond.ml b/ocaml/xapi/xapi_bond.ml index 6eb15ea6d08..caf8b657947 100644 --- a/ocaml/xapi/xapi_bond.ml +++ b/ocaml/xapi/xapi_bond.ml @@ -23,488 +23,488 @@ open Db_filter_types (* Returns the name of a new bond device, which is the string "bond" followed * by the smallest integer > 0 that does not yet appear in a bond name on this host. 
*) let choose_bond_device_name ~__context ~host = - (* list all the bond PIFs on this host *) - let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Not (Eq (Field "bond_master_of", Literal "()")) - )) in - let devices = List.map (fun self -> Db.PIF.get_device ~__context ~self) pifs in - let rec choose n = - let name = Printf.sprintf "bond%d" n in - if List.mem name devices - then choose (n + 1) - else name in - choose 0 + (* list all the bond PIFs on this host *) + let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Not (Eq (Field "bond_master_of", Literal "()")) + )) in + let devices = List.map (fun self -> Db.PIF.get_device ~__context ~self) pifs in + let rec choose n = + let name = Printf.sprintf "bond%d" n in + if List.mem name devices + then choose (n + 1) + else name in + choose 0 let move_configuration ~__context from_pif to_pif = - debug "Moving IP configuration from PIF %s to PIF %s" (Db.PIF.get_uuid ~__context ~self:from_pif) - (Db.PIF.get_uuid ~__context ~self:to_pif); - let mode = Db.PIF.get_ip_configuration_mode ~__context ~self:from_pif in - let ip = Db.PIF.get_IP ~__context ~self:from_pif in - let netmask = Db.PIF.get_netmask ~__context ~self:from_pif in - let gateway = Db.PIF.get_gateway ~__context ~self:from_pif in - let dns = Db.PIF.get_DNS ~__context ~self:from_pif in - Db.PIF.set_ip_configuration_mode ~__context ~self:to_pif ~value:mode; - Db.PIF.set_IP ~__context ~self:to_pif ~value:ip; - Db.PIF.set_netmask ~__context ~self:to_pif ~value:netmask; - Db.PIF.set_gateway ~__context ~self:to_pif ~value:gateway; - Db.PIF.set_DNS ~__context ~self:to_pif ~value:dns; - Db.PIF.set_ip_configuration_mode ~__context ~self:from_pif ~value:`None; - Db.PIF.set_IP ~__context ~self:from_pif ~value:""; - Db.PIF.set_netmask ~__context ~self:from_pif ~value:""; - Db.PIF.set_gateway ~__context ~self:from_pif ~value:""; - Db.PIF.set_DNS ~__context 
~self:from_pif ~value:"" + debug "Moving IP configuration from PIF %s to PIF %s" (Db.PIF.get_uuid ~__context ~self:from_pif) + (Db.PIF.get_uuid ~__context ~self:to_pif); + let mode = Db.PIF.get_ip_configuration_mode ~__context ~self:from_pif in + let ip = Db.PIF.get_IP ~__context ~self:from_pif in + let netmask = Db.PIF.get_netmask ~__context ~self:from_pif in + let gateway = Db.PIF.get_gateway ~__context ~self:from_pif in + let dns = Db.PIF.get_DNS ~__context ~self:from_pif in + Db.PIF.set_ip_configuration_mode ~__context ~self:to_pif ~value:mode; + Db.PIF.set_IP ~__context ~self:to_pif ~value:ip; + Db.PIF.set_netmask ~__context ~self:to_pif ~value:netmask; + Db.PIF.set_gateway ~__context ~self:to_pif ~value:gateway; + Db.PIF.set_DNS ~__context ~self:to_pif ~value:dns; + Db.PIF.set_ip_configuration_mode ~__context ~self:from_pif ~value:`None; + Db.PIF.set_IP ~__context ~self:from_pif ~value:""; + Db.PIF.set_netmask ~__context ~self:from_pif ~value:""; + Db.PIF.set_gateway ~__context ~self:from_pif ~value:""; + Db.PIF.set_DNS ~__context ~self:from_pif ~value:"" (* Determine local VIFs: candidates for moving to the bond. * Local VIFs are those VIFs on the given networks that belong to VMs that * are either running on the current host, or can only start on the current host or nowhere. *) let get_local_vifs ~__context host networks = - (* Construct (VM -> VIFs) map for all VIFs on the given networks *) - let vms_with_vifs = Hashtbl.create 10 in - let all_vifs = List.concat (List.map (fun net -> Db.Network.get_VIFs ~__context ~self:net) networks) in - let add_vif vif = - let vm = Db.VIF.get_VM ~__context ~self:vif in - Hashtbl.add vms_with_vifs vm vif - in - List.iter add_vif all_vifs; - - (* This function is potentially expensive, so do not call it more often than necessary. *) - let is_local vm = - (* Only move the VIFs of a VM if this VM is resident, or can _only_ start on _this_ host or nowhere. *) - (* Do the latter check only if needed, as it is expensive. 
*) - let resident_on = Db.VM.get_resident_on ~__context ~self:vm in - if resident_on = host then - true - else if resident_on <> Ref.null then - false - else begin - let hosts = Xapi_vm.get_possible_hosts ~__context ~vm in - (List.mem host hosts && List.length hosts = 1) || (List.length hosts = 0) - end - in - - (* Make a list of the VIFs for local VMs *) - let vms = Hashtblext.fold_keys vms_with_vifs in - let local_vifs = List.concat (List.map (fun vm -> - if is_local vm then Hashtbl.find_all vms_with_vifs vm else [] - ) vms) in - debug "Found these local VIFs: %s" (String.concat ", " (List.map (fun v -> Db.VIF.get_uuid ~__context ~self:v) local_vifs)); - local_vifs + (* Construct (VM -> VIFs) map for all VIFs on the given networks *) + let vms_with_vifs = Hashtbl.create 10 in + let all_vifs = List.concat (List.map (fun net -> Db.Network.get_VIFs ~__context ~self:net) networks) in + let add_vif vif = + let vm = Db.VIF.get_VM ~__context ~self:vif in + Hashtbl.add vms_with_vifs vm vif + in + List.iter add_vif all_vifs; + + (* This function is potentially expensive, so do not call it more often than necessary. *) + let is_local vm = + (* Only move the VIFs of a VM if this VM is resident, or can _only_ start on _this_ host or nowhere. *) + (* Do the latter check only if needed, as it is expensive. 
*) + let resident_on = Db.VM.get_resident_on ~__context ~self:vm in + if resident_on = host then + true + else if resident_on <> Ref.null then + false + else begin + let hosts = Xapi_vm.get_possible_hosts ~__context ~vm in + (List.mem host hosts && List.length hosts = 1) || (List.length hosts = 0) + end + in + + (* Make a list of the VIFs for local VMs *) + let vms = Hashtblext.fold_keys vms_with_vifs in + let local_vifs = List.concat (List.map (fun vm -> + if is_local vm then Hashtbl.find_all vms_with_vifs vm else [] + ) vms) in + debug "Found these local VIFs: %s" (String.concat ", " (List.map (fun v -> Db.VIF.get_uuid ~__context ~self:v) local_vifs)); + local_vifs let move_vlan ~__context host new_slave old_vlan = - let old_master = Db.VLAN.get_untagged_PIF ~__context ~self:old_vlan in - let tag = Db.VLAN.get_tag ~__context ~self:old_vlan in - let network = Db.PIF.get_network ~__context ~self:old_master in - let plugged = Db.PIF.get_currently_attached ~__context ~self:old_master in - - if plugged then begin - debug "Unplugging old VLAN"; - Nm.bring_pif_down ~__context old_master - end; - - (* Only create new objects if the tag does not yet exist *) - let new_vlan, new_master = - let existing_vlans = Db.PIF.get_VLAN_slave_of ~__context ~self:new_slave in - let same_tag = List.filter (fun v -> Db.VLAN.get_tag ~__context ~self:v = tag) existing_vlans in - match same_tag with - | new_vlan :: _ -> - (* VLAN with this tag already on bond *) - debug "VLAN already present"; - let new_master = Db.VLAN.get_untagged_PIF ~__context ~self:new_vlan in - let new_network = Db.PIF.get_network ~__context ~self:new_master in - (* Move VIFs to other VLAN's network *) - let vifs = get_local_vifs ~__context host [network] in - ignore (List.map (Xapi_vif.move_internal ~__context ~network:new_network) vifs); - new_vlan, new_master - | [] -> - (* VLAN with this tag not yet on bond *) - debug "Creating new VLAN %d on bond" (Int64.to_int tag); - (* Keep the device name *) - let device = 
Db.PIF.get_device ~__context ~self:new_slave in - (* Create new VLAN master PIF and VLAN objects *) - Xapi_vlan.create_internal ~__context ~host ~tagged_PIF:new_slave ~tag ~network ~device - in - - (* Destroy old VLAN and VLAN-master objects *) - debug "Destroying old VLAN %d" (Int64.to_int tag); - Db.VLAN.destroy ~__context ~self:old_vlan; - Db.PIF.destroy ~__context ~self:old_master; - - (* Plug again if plugged before the move *) - if plugged then begin - debug "Plugging new VLAN"; - Nm.bring_pif_up ~__context new_master; - - (* Call Xapi_vif.move_internal on VIFs of running VMs to make sure they end up on the right vSwitch *) - let vifs = Db.Network.get_VIFs ~__context ~self:network in - let vifs = List.filter (fun vif -> - Db.VM.get_resident_on ~__context ~self:(Db.VIF.get_VM ~__context ~self:vif) = host) - vifs in - ignore (List.map (Xapi_vif.move_internal ~__context ~network:network) vifs); - end + let old_master = Db.VLAN.get_untagged_PIF ~__context ~self:old_vlan in + let tag = Db.VLAN.get_tag ~__context ~self:old_vlan in + let network = Db.PIF.get_network ~__context ~self:old_master in + let plugged = Db.PIF.get_currently_attached ~__context ~self:old_master in + + if plugged then begin + debug "Unplugging old VLAN"; + Nm.bring_pif_down ~__context old_master + end; + + (* Only create new objects if the tag does not yet exist *) + let new_vlan, new_master = + let existing_vlans = Db.PIF.get_VLAN_slave_of ~__context ~self:new_slave in + let same_tag = List.filter (fun v -> Db.VLAN.get_tag ~__context ~self:v = tag) existing_vlans in + match same_tag with + | new_vlan :: _ -> + (* VLAN with this tag already on bond *) + debug "VLAN already present"; + let new_master = Db.VLAN.get_untagged_PIF ~__context ~self:new_vlan in + let new_network = Db.PIF.get_network ~__context ~self:new_master in + (* Move VIFs to other VLAN's network *) + let vifs = get_local_vifs ~__context host [network] in + ignore (List.map (Xapi_vif.move_internal ~__context 
~network:new_network) vifs); + new_vlan, new_master + | [] -> + (* VLAN with this tag not yet on bond *) + debug "Creating new VLAN %d on bond" (Int64.to_int tag); + (* Keep the device name *) + let device = Db.PIF.get_device ~__context ~self:new_slave in + (* Create new VLAN master PIF and VLAN objects *) + Xapi_vlan.create_internal ~__context ~host ~tagged_PIF:new_slave ~tag ~network ~device + in + + (* Destroy old VLAN and VLAN-master objects *) + debug "Destroying old VLAN %d" (Int64.to_int tag); + Db.VLAN.destroy ~__context ~self:old_vlan; + Db.PIF.destroy ~__context ~self:old_master; + + (* Plug again if plugged before the move *) + if plugged then begin + debug "Plugging new VLAN"; + Nm.bring_pif_up ~__context new_master; + + (* Call Xapi_vif.move_internal on VIFs of running VMs to make sure they end up on the right vSwitch *) + let vifs = Db.Network.get_VIFs ~__context ~self:network in + let vifs = List.filter (fun vif -> + Db.VM.get_resident_on ~__context ~self:(Db.VIF.get_VM ~__context ~self:vif) = host) + vifs in + ignore (List.map (Xapi_vif.move_internal ~__context ~network:network) vifs); + end let move_tunnel ~__context host new_transport_PIF old_tunnel = - let old_access_PIF = Db.Tunnel.get_access_PIF ~__context ~self:old_tunnel in - let network = Db.PIF.get_network ~__context ~self:old_access_PIF in - let plugged = Db.PIF.get_currently_attached ~__context ~self:old_access_PIF in - - (* Create new tunnel object and access PIF *) - let new_tunnel, new_access_PIF = - Xapi_tunnel.create_internal ~__context ~transport_PIF:new_transport_PIF ~network ~host in - debug "Created new tunnel %s on bond" (Ref.string_of new_tunnel); - - (* Destroy old VLAN and VLAN-master objects *) - debug "Destroying old tunnel %s" (Ref.string_of old_tunnel); - Db.Tunnel.destroy ~__context ~self:old_tunnel; - Db.PIF.destroy ~__context ~self:old_access_PIF; - - (* Plug again if plugged before the move *) - if plugged then begin - debug "Plugging moved tunnel"; - Nm.bring_pif_up 
~__context new_access_PIF; - - (* Call Xapi_vif.move_internal to make sure vifs end up on the right vSwitch *) - let vifs = Db.Network.get_VIFs ~__context ~self:network in - let vifs = List.filter (fun vif -> - Db.VM.get_resident_on ~__context ~self:(Db.VIF.get_VM ~__context ~self:vif) = host) - vifs in - ignore (List.map (Xapi_vif.move_internal ~__context ~network:network) vifs); - end + let old_access_PIF = Db.Tunnel.get_access_PIF ~__context ~self:old_tunnel in + let network = Db.PIF.get_network ~__context ~self:old_access_PIF in + let plugged = Db.PIF.get_currently_attached ~__context ~self:old_access_PIF in + + (* Create new tunnel object and access PIF *) + let new_tunnel, new_access_PIF = + Xapi_tunnel.create_internal ~__context ~transport_PIF:new_transport_PIF ~network ~host in + debug "Created new tunnel %s on bond" (Ref.string_of new_tunnel); + + (* Destroy old VLAN and VLAN-master objects *) + debug "Destroying old tunnel %s" (Ref.string_of old_tunnel); + Db.Tunnel.destroy ~__context ~self:old_tunnel; + Db.PIF.destroy ~__context ~self:old_access_PIF; + + (* Plug again if plugged before the move *) + if plugged then begin + debug "Plugging moved tunnel"; + Nm.bring_pif_up ~__context new_access_PIF; + + (* Call Xapi_vif.move_internal to make sure vifs end up on the right vSwitch *) + let vifs = Db.Network.get_VIFs ~__context ~self:network in + let vifs = List.filter (fun vif -> + Db.VM.get_resident_on ~__context ~self:(Db.VIF.get_VM ~__context ~self:vif) = host) + vifs in + ignore (List.map (Xapi_vif.move_internal ~__context ~network:network) vifs); + end let move_management ~__context from_pif to_pif = - Nm.bring_pif_up ~__context ~management_interface:true to_pif; - let network = Db.PIF.get_network ~__context ~self:to_pif in - let bridge = Db.Network.get_bridge ~__context ~self:network in - let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self:to_pif in - Xapi_host.change_management_interface ~__context bridge primary_address_type; 
- Xapi_pif.update_management_flags ~__context ~host:(Helpers.get_localhost ~__context) + Nm.bring_pif_up ~__context ~management_interface:true to_pif; + let network = Db.PIF.get_network ~__context ~self:to_pif in + let bridge = Db.Network.get_bridge ~__context ~self:network in + let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self:to_pif in + Xapi_host.change_management_interface ~__context bridge primary_address_type; + Xapi_pif.update_management_flags ~__context ~host:(Helpers.get_localhost ~__context) let fix_bond ~__context ~bond = - let bond_rec = Db.Bond.get_record ~__context ~self:bond in - let members = bond_rec.API.bond_slaves in - let master = bond_rec.API.bond_master in - let network = Db.PIF.get_network ~__context ~self:master in - let host = Db.PIF.get_host ~__context ~self:master in - - let member_networks = List.map (fun pif -> Db.PIF.get_network ~__context ~self:pif) members in - - let local_vifs = get_local_vifs ~__context host member_networks in - let local_vlans = List.concat (List.map (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) members) in - let local_tunnels = List.concat (List.map (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) members) in - - (* Move VLANs from members to master *) - debug "Checking VLANs to move from slaves to master"; - List.iter (move_vlan ~__context host master) local_vlans; - - (* Move tunnels from members to master *) - debug "Checking tunnels to move from slaves to master"; - List.iter (move_tunnel ~__context host master) local_tunnels; - - (* Move VIFs from members to master *) - debug "Checking VIFs to move from slaves to master"; - List.iter (Xapi_vif.move_internal ~__context ~network) local_vifs; - - begin match List.filter (fun p -> Db.PIF.get_management ~__context ~self:p) members with - | management_pif :: _ -> - (* The bond contains the management interface: move management to the master. - * This interface will be plugged automatically. 
*) - debug "Moving management from slave to master"; - move_management ~__context management_pif master; - (* Set the primary slave to the former management PIF. *) - Db.Bond.set_primary_slave ~__context ~self:bond ~value:management_pif; - | [] -> - (* Set the primary slave, if not set (just pick the first slave) *) - if Db.Bond.get_primary_slave ~__context ~self:bond = Ref.null then - Db.Bond.set_primary_slave ~__context ~self:bond ~value:(List.hd members); - end + let bond_rec = Db.Bond.get_record ~__context ~self:bond in + let members = bond_rec.API.bond_slaves in + let master = bond_rec.API.bond_master in + let network = Db.PIF.get_network ~__context ~self:master in + let host = Db.PIF.get_host ~__context ~self:master in + + let member_networks = List.map (fun pif -> Db.PIF.get_network ~__context ~self:pif) members in + + let local_vifs = get_local_vifs ~__context host member_networks in + let local_vlans = List.concat (List.map (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) members) in + let local_tunnels = List.concat (List.map (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) members) in + + (* Move VLANs from members to master *) + debug "Checking VLANs to move from slaves to master"; + List.iter (move_vlan ~__context host master) local_vlans; + + (* Move tunnels from members to master *) + debug "Checking tunnels to move from slaves to master"; + List.iter (move_tunnel ~__context host master) local_tunnels; + + (* Move VIFs from members to master *) + debug "Checking VIFs to move from slaves to master"; + List.iter (Xapi_vif.move_internal ~__context ~network) local_vifs; + + begin match List.filter (fun p -> Db.PIF.get_management ~__context ~self:p) members with + | management_pif :: _ -> + (* The bond contains the management interface: move management to the master. + * This interface will be plugged automatically. 
*) + debug "Moving management from slave to master"; + move_management ~__context management_pif master; + (* Set the primary slave to the former management PIF. *) + Db.Bond.set_primary_slave ~__context ~self:bond ~value:management_pif; + | [] -> + (* Set the primary slave, if not set (just pick the first slave) *) + if Db.Bond.get_primary_slave ~__context ~self:bond = Ref.null then + Db.Bond.set_primary_slave ~__context ~self:bond ~value:(List.hd members); + end (* Protect a bunch of local operations with a mutex *) let local_m = Mutex.create () let with_local_lock f = Mutex.execute local_m f let requirements_of_mode = function - | `lacp -> [ - Map_check.({ - key = "hashing_algorithm"; - default_value = Some "tcpudp_ports"; - is_valid_value = (fun str -> List.mem str ["src_mac"; "tcpudp_ports"]); - }); - Map_check.({ - key = "lacp-time"; - default_value = Some "slow"; - is_valid_value = (fun str -> List.mem str ["fast"; "slow"]); - }); - Map_check.({ - key = "lacp-aggregation-key"; - default_value = None; - is_valid_value = (fun i -> try ignore (int_of_string i); true with _ -> false); - }); - Map_check.({ - key = "lacp-fallback-ab"; - default_value = Some "true"; - is_valid_value = (fun str -> List.mem str ["true"; "false"]); - }); - ] - | _ -> [] + | `lacp -> [ + Map_check.({ + key = "hashing_algorithm"; + default_value = Some "tcpudp_ports"; + is_valid_value = (fun str -> List.mem str ["src_mac"; "tcpudp_ports"]); + }); + Map_check.({ + key = "lacp-time"; + default_value = Some "slow"; + is_valid_value = (fun str -> List.mem str ["fast"; "slow"]); + }); + Map_check.({ + key = "lacp-aggregation-key"; + default_value = None; + is_valid_value = (fun i -> try ignore (int_of_string i); true with _ -> false); + }); + Map_check.({ + key = "lacp-fallback-ab"; + default_value = Some "true"; + is_valid_value = (fun str -> List.mem str ["true"; "false"]); + }); + ] + | _ -> [] let create ~__context ~network ~members ~mAC ~mode ~properties = - let host = Db.PIF.get_host 
~__context ~self:(List.hd members) in - Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; - - (* Validate MAC parameter; note an empty string is OK here, since that means 'inherit MAC from - * primary slave PIF' (see below) *) - if mAC <> "" && (not (Helpers.is_valid_MAC mAC)) then - raise (Api_errors.Server_error (Api_errors.mac_invalid, [mAC])); - - let requirements = requirements_of_mode mode in - (* Check that each of the supplied properties is valid. *) - List.iter - (fun property -> Map_check.validate_kvpair "properties" requirements property) - properties; - (* Add default properties if necessary. *) - let properties = Map_check.add_defaults requirements properties in - - (* Prevent someone supplying the same PIF multiple times and bypassing the - * number of bond members check *) - let members = List.setify members in - let master = Ref.make () in - let bond = Ref.make () in - - with_local_lock (fun () -> - (* Collect information *) - let member_networks = List.map (fun pif -> Db.PIF.get_network ~__context ~self:pif) members in - - let local_vifs = get_local_vifs ~__context host member_networks in - let local_vlans = List.concat (List.map (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) members) in - let local_tunnels = List.concat (List.map (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) members) in - - let management_pif = - match List.filter (fun p -> Db.PIF.get_management ~__context ~self:p) members with - | management_pif :: _ -> Some management_pif - | [] -> None - in - - let pifs_with_ip_conf = - List.filter (fun self -> - Db.PIF.get_ip_configuration_mode ~__context ~self <> `None - ) members in - - (* The primary slave is the management PIF, or the first member with - * IP configuration, or otherwise simply the first member in the list. 
*) - let primary_slave = - match management_pif, pifs_with_ip_conf, members with - | Some management_pif, _, _ -> management_pif - | None, pif_with_ip::_, _ -> pif_with_ip - | None, [], pif::_ -> pif - | None, [], [] -> - raise (Api_errors.Server_error(Api_errors.pif_bond_needs_more_members, [])) - in - let mAC = - if mAC <> "" then - mAC - else - Db.PIF.get_MAC ~__context ~self:primary_slave - in - let disallow_unplug = - List.fold_left (fun a m -> Db.PIF.get_disallow_unplug ~__context ~self:m || a) false members - in - - (* Validate constraints: *) - (* 1. Members must not be in a bond already *) - (* 2. Members must not have a VLAN tag set *) - (* 3. Members must not be tunnel access PIFs *) - (* 4. Referenced PIFs must be on the same host *) - (* 5. Members must not be the management interface if HA is enabled *) - (* 6. Members must be PIFs that are managed by xapi *) - (* 7. Members must have the same PIF properties *) - (* 8. Only the primary PIF should have a non-None IP configuration *) - List.iter (fun self -> - let bond = Db.PIF.get_bond_slave_of ~__context ~self in - let bonded = try ignore(Db.Bond.get_uuid ~__context ~self:bond); true with _ -> false in - if bonded - then raise (Api_errors.Server_error (Api_errors.pif_already_bonded, [ Ref.string_of self ])); - if Db.PIF.get_VLAN ~__context ~self <> -1L - then raise (Api_errors.Server_error (Api_errors.pif_vlan_exists, [ Db.PIF.get_device_name ~__context ~self] )); - if Db.PIF.get_tunnel_access_PIF_of ~__context ~self <> [] - then raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of self])); - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.PIF.get_management ~__context ~self - then raise (Api_errors.Server_error(Api_errors.ha_cannot_change_bond_status_of_mgmt_iface, [])); - if Db.PIF.get_managed ~__context ~self <> true - then raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of self])); - ) members; - let 
hosts = List.map (fun self -> Db.PIF.get_host ~__context ~self) members in - if List.length (List.setify hosts) <> 1 - then raise (Api_errors.Server_error (Api_errors.pif_cannot_bond_cross_host, [])); - let pif_properties = - if members = [] then - [] - else - let ps = List.map (fun self -> Db.PIF.get_properties ~__context ~self) members in - let p = List.hd ps in - let equal = List.fold_left (fun result p' -> result && (p = p')) true (List.tl ps) in - if not equal then - raise (Api_errors.Server_error (Api_errors.incompatible_pif_properties, [])) - else - p - in - if List.length pifs_with_ip_conf > 1 - then raise Api_errors.(Server_error (pif_bond_more_than_one_ip, [])); - - (* Create master PIF and Bond objects *) - let device = choose_bond_device_name ~__context ~host in - let device_name = device in - let metrics = Xapi_pif.make_pif_metrics ~__context in - Db.PIF.create ~__context ~ref:master ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~device ~device_name ~network ~host ~mAC ~mTU:(-1L) ~vLAN:(-1L) ~metrics - ~physical:false ~currently_attached:false - ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null - ~vLAN_master_of:Ref.null ~management:false ~other_config:[] ~disallow_unplug:false - ~ipv6_configuration_mode:`None ~iPv6:[""] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true - ~properties:pif_properties ~capabilities:[]; - Db.Bond.create ~__context ~ref:bond ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~master:master ~other_config:[] - ~primary_slave ~mode ~properties ~links_up:0L; - - (* Set the PIF.bond_slave_of fields of the members. - * The value of the Bond.slaves field is dynamically computed on request. 
*) - List.iter (fun slave -> Db.PIF.set_bond_slave_of ~__context ~self:slave ~value:bond) members; - - (* Copy the IP configuration of the primary member to the master *) - move_configuration ~__context primary_slave master; - - begin match management_pif with - | Some management_pif -> - (* The bond contains the management interface: move management to the master. - * This interface will be plugged automatically. *) - debug "Moving management from slave to master"; - move_management ~__context management_pif master - | None -> - debug "Plugging the bond"; - Nm.bring_pif_up ~__context master - end; - TaskHelper.set_progress ~__context 0.2; - - (* Move VLANs from members to master *) - debug "Check VLANs to move from slaves to master"; - List.iter (move_vlan ~__context host master) local_vlans; - TaskHelper.set_progress ~__context 0.4; - - (* Move tunnels from members to master *) - debug "Check tunnels to move from slaves to master"; - List.iter (move_tunnel ~__context host master) local_tunnels; - TaskHelper.set_progress ~__context 0.6; - - (* Move VIFs from members to master *) - debug "Check VIFs to move from slaves to master"; - List.iter (Xapi_vif.move_internal ~__context ~network) local_vifs; - TaskHelper.set_progress ~__context 0.8; - - (* Set disallow_unplug on the master, if one of the slaves had disallow_unplug = true (see above), - * and reset disallow_unplug of members. 
*) - if disallow_unplug then begin - debug "Setting disallow_unplug on master, and clearing slaves"; - Db.PIF.set_disallow_unplug ~__context ~self:master ~value:true; - List.iter (fun pif -> - Db.PIF.set_disallow_unplug ~__context ~self:pif ~value:false) - members - end; - TaskHelper.set_progress ~__context 1.0; - ); - (* return a ref to the new Bond object *) - bond + let host = Db.PIF.get_host ~__context ~self:(List.hd members) in + Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; + + (* Validate MAC parameter; note an empty string is OK here, since that means 'inherit MAC from + * primary slave PIF' (see below) *) + if mAC <> "" && (not (Helpers.is_valid_MAC mAC)) then + raise (Api_errors.Server_error (Api_errors.mac_invalid, [mAC])); + + let requirements = requirements_of_mode mode in + (* Check that each of the supplied properties is valid. *) + List.iter + (fun property -> Map_check.validate_kvpair "properties" requirements property) + properties; + (* Add default properties if necessary. 
*) + let properties = Map_check.add_defaults requirements properties in + + (* Prevent someone supplying the same PIF multiple times and bypassing the + * number of bond members check *) + let members = List.setify members in + let master = Ref.make () in + let bond = Ref.make () in + + with_local_lock (fun () -> + (* Collect information *) + let member_networks = List.map (fun pif -> Db.PIF.get_network ~__context ~self:pif) members in + + let local_vifs = get_local_vifs ~__context host member_networks in + let local_vlans = List.concat (List.map (fun pif -> Db.PIF.get_VLAN_slave_of ~__context ~self:pif) members) in + let local_tunnels = List.concat (List.map (fun pif -> Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:pif) members) in + + let management_pif = + match List.filter (fun p -> Db.PIF.get_management ~__context ~self:p) members with + | management_pif :: _ -> Some management_pif + | [] -> None + in + + let pifs_with_ip_conf = + List.filter (fun self -> + Db.PIF.get_ip_configuration_mode ~__context ~self <> `None + ) members in + + (* The primary slave is the management PIF, or the first member with + * IP configuration, or otherwise simply the first member in the list. *) + let primary_slave = + match management_pif, pifs_with_ip_conf, members with + | Some management_pif, _, _ -> management_pif + | None, pif_with_ip::_, _ -> pif_with_ip + | None, [], pif::_ -> pif + | None, [], [] -> + raise (Api_errors.Server_error(Api_errors.pif_bond_needs_more_members, [])) + in + let mAC = + if mAC <> "" then + mAC + else + Db.PIF.get_MAC ~__context ~self:primary_slave + in + let disallow_unplug = + List.fold_left (fun a m -> Db.PIF.get_disallow_unplug ~__context ~self:m || a) false members + in + + (* Validate constraints: *) + (* 1. Members must not be in a bond already *) + (* 2. Members must not have a VLAN tag set *) + (* 3. Members must not be tunnel access PIFs *) + (* 4. Referenced PIFs must be on the same host *) + (* 5. 
Members must not be the management interface if HA is enabled *) + (* 6. Members must be PIFs that are managed by xapi *) + (* 7. Members must have the same PIF properties *) + (* 8. Only the primary PIF should have a non-None IP configuration *) + List.iter (fun self -> + let bond = Db.PIF.get_bond_slave_of ~__context ~self in + let bonded = try ignore(Db.Bond.get_uuid ~__context ~self:bond); true with _ -> false in + if bonded + then raise (Api_errors.Server_error (Api_errors.pif_already_bonded, [ Ref.string_of self ])); + if Db.PIF.get_VLAN ~__context ~self <> -1L + then raise (Api_errors.Server_error (Api_errors.pif_vlan_exists, [ Db.PIF.get_device_name ~__context ~self] )); + if Db.PIF.get_tunnel_access_PIF_of ~__context ~self <> [] + then raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of self])); + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.PIF.get_management ~__context ~self + then raise (Api_errors.Server_error(Api_errors.ha_cannot_change_bond_status_of_mgmt_iface, [])); + if Db.PIF.get_managed ~__context ~self <> true + then raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of self])); + ) members; + let hosts = List.map (fun self -> Db.PIF.get_host ~__context ~self) members in + if List.length (List.setify hosts) <> 1 + then raise (Api_errors.Server_error (Api_errors.pif_cannot_bond_cross_host, [])); + let pif_properties = + if members = [] then + [] + else + let ps = List.map (fun self -> Db.PIF.get_properties ~__context ~self) members in + let p = List.hd ps in + let equal = List.fold_left (fun result p' -> result && (p = p')) true (List.tl ps) in + if not equal then + raise (Api_errors.Server_error (Api_errors.incompatible_pif_properties, [])) + else + p + in + if List.length pifs_with_ip_conf > 1 + then raise Api_errors.(Server_error (pif_bond_more_than_one_ip, [])); + + (* Create master PIF and Bond objects *) + let device = 
choose_bond_device_name ~__context ~host in + let device_name = device in + let metrics = Xapi_pif.make_pif_metrics ~__context in + Db.PIF.create ~__context ~ref:master ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~device ~device_name ~network ~host ~mAC ~mTU:(-1L) ~vLAN:(-1L) ~metrics + ~physical:false ~currently_attached:false + ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null + ~vLAN_master_of:Ref.null ~management:false ~other_config:[] ~disallow_unplug:false + ~ipv6_configuration_mode:`None ~iPv6:[""] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true + ~properties:pif_properties ~capabilities:[]; + Db.Bond.create ~__context ~ref:bond ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~master:master ~other_config:[] + ~primary_slave ~mode ~properties ~links_up:0L; + + (* Set the PIF.bond_slave_of fields of the members. + * The value of the Bond.slaves field is dynamically computed on request. *) + List.iter (fun slave -> Db.PIF.set_bond_slave_of ~__context ~self:slave ~value:bond) members; + + (* Copy the IP configuration of the primary member to the master *) + move_configuration ~__context primary_slave master; + + begin match management_pif with + | Some management_pif -> + (* The bond contains the management interface: move management to the master. + * This interface will be plugged automatically. 
*) + debug "Moving management from slave to master"; + move_management ~__context management_pif master + | None -> + debug "Plugging the bond"; + Nm.bring_pif_up ~__context master + end; + TaskHelper.set_progress ~__context 0.2; + + (* Move VLANs from members to master *) + debug "Check VLANs to move from slaves to master"; + List.iter (move_vlan ~__context host master) local_vlans; + TaskHelper.set_progress ~__context 0.4; + + (* Move tunnels from members to master *) + debug "Check tunnels to move from slaves to master"; + List.iter (move_tunnel ~__context host master) local_tunnels; + TaskHelper.set_progress ~__context 0.6; + + (* Move VIFs from members to master *) + debug "Check VIFs to move from slaves to master"; + List.iter (Xapi_vif.move_internal ~__context ~network) local_vifs; + TaskHelper.set_progress ~__context 0.8; + + (* Set disallow_unplug on the master, if one of the slaves had disallow_unplug = true (see above), + * and reset disallow_unplug of members. *) + if disallow_unplug then begin + debug "Setting disallow_unplug on master, and clearing slaves"; + Db.PIF.set_disallow_unplug ~__context ~self:master ~value:true; + List.iter (fun pif -> + Db.PIF.set_disallow_unplug ~__context ~self:pif ~value:false) + members + end; + TaskHelper.set_progress ~__context 1.0; + ); + (* return a ref to the new Bond object *) + bond let destroy ~__context ~self = - with_local_lock (fun () -> - let master = Db.Bond.get_master ~__context ~self in - let members = Db.Bond.get_slaves ~__context ~self in - let plugged = Db.PIF.get_currently_attached ~__context ~self:master in - let master_network = Db.PIF.get_network ~__context ~self:master in - let host = Db.PIF.get_host ~__context ~self:master in - let primary_slave = Db.Bond.get_primary_slave ~__context ~self in - let primary_slave_network = Db.PIF.get_network ~__context ~self:primary_slave in - - let local_vifs = get_local_vifs ~__context host [master_network] in - let local_vlans = Db.PIF.get_VLAN_slave_of 
~__context ~self:master in - let local_tunnels = Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:master in - - (* CA-86573: forbid the deletion of a bond involving the mgmt interface if HA is on *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.PIF.get_management ~__context ~self:master - then raise (Api_errors.Server_error(Api_errors.ha_cannot_change_bond_status_of_mgmt_iface, [])); - - (* Copy IP configuration from master to primary member *) - move_configuration ~__context master primary_slave; - - if Db.PIF.get_management ~__context ~self:master = true then begin - (* The master is the management interface: move management to first slave *) - debug "Moving management from master to slaves"; - move_management ~__context master primary_slave; - List.iter (fun pif -> if pif <> primary_slave then Nm.bring_pif_up ~__context pif) members - end else begin - (* Plug the members if the master was plugged *) - if plugged then - List.iter (Nm.bring_pif_up ~__context) members - end; - TaskHelper.set_progress ~__context 0.2; - - (* Move VIFs from master to slaves *) - debug "Check VIFs to move from master to slaves"; - List.iter (Xapi_vif.move_internal ~__context ~network:primary_slave_network) local_vifs; - TaskHelper.set_progress ~__context 0.4; - - (* Move VLANs down *) - debug "Check VLANs to move from master to slaves"; - List.iter (move_vlan ~__context host primary_slave) local_vlans; - TaskHelper.set_progress ~__context 0.6; - - (* Move tunnels down *) - debug "Check tunnels to move from master to slaves"; - List.iter (move_tunnel ~__context host primary_slave) local_tunnels; - TaskHelper.set_progress ~__context 0.8; - - if Db.PIF.get_disallow_unplug ~__context ~self:master = true then begin - debug "Setting disallow_unplug on primary slave"; - Db.PIF.set_disallow_unplug ~__context ~self:primary_slave ~value:true - end; - - (* Destroy the Bond and master PIF *) - Db.Bond.destroy ~__context ~self; - 
Db.PIF.destroy ~__context ~self:master; - - (* Clear the PIF.bond_slave_of fields of the members. *) - List.iter (fun slave -> Db.PIF.set_bond_slave_of ~__context ~self:slave ~value:(Ref.null)) members; - TaskHelper.set_progress ~__context 1.0 - ) + with_local_lock (fun () -> + let master = Db.Bond.get_master ~__context ~self in + let members = Db.Bond.get_slaves ~__context ~self in + let plugged = Db.PIF.get_currently_attached ~__context ~self:master in + let master_network = Db.PIF.get_network ~__context ~self:master in + let host = Db.PIF.get_host ~__context ~self:master in + let primary_slave = Db.Bond.get_primary_slave ~__context ~self in + let primary_slave_network = Db.PIF.get_network ~__context ~self:primary_slave in + + let local_vifs = get_local_vifs ~__context host [master_network] in + let local_vlans = Db.PIF.get_VLAN_slave_of ~__context ~self:master in + let local_tunnels = Db.PIF.get_tunnel_transport_PIF_of ~__context ~self:master in + + (* CA-86573: forbid the deletion of a bond involving the mgmt interface if HA is on *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.PIF.get_management ~__context ~self:master + then raise (Api_errors.Server_error(Api_errors.ha_cannot_change_bond_status_of_mgmt_iface, [])); + + (* Copy IP configuration from master to primary member *) + move_configuration ~__context master primary_slave; + + if Db.PIF.get_management ~__context ~self:master = true then begin + (* The master is the management interface: move management to first slave *) + debug "Moving management from master to slaves"; + move_management ~__context master primary_slave; + List.iter (fun pif -> if pif <> primary_slave then Nm.bring_pif_up ~__context pif) members + end else begin + (* Plug the members if the master was plugged *) + if plugged then + List.iter (Nm.bring_pif_up ~__context) members + end; + TaskHelper.set_progress ~__context 0.2; + + (* Move VIFs from master to slaves *) + debug "Check 
VIFs to move from master to slaves"; + List.iter (Xapi_vif.move_internal ~__context ~network:primary_slave_network) local_vifs; + TaskHelper.set_progress ~__context 0.4; + + (* Move VLANs down *) + debug "Check VLANs to move from master to slaves"; + List.iter (move_vlan ~__context host primary_slave) local_vlans; + TaskHelper.set_progress ~__context 0.6; + + (* Move tunnels down *) + debug "Check tunnels to move from master to slaves"; + List.iter (move_tunnel ~__context host primary_slave) local_tunnels; + TaskHelper.set_progress ~__context 0.8; + + if Db.PIF.get_disallow_unplug ~__context ~self:master = true then begin + debug "Setting disallow_unplug on primary slave"; + Db.PIF.set_disallow_unplug ~__context ~self:primary_slave ~value:true + end; + + (* Destroy the Bond and master PIF *) + Db.Bond.destroy ~__context ~self; + Db.PIF.destroy ~__context ~self:master; + + (* Clear the PIF.bond_slave_of fields of the members. *) + List.iter (fun slave -> Db.PIF.set_bond_slave_of ~__context ~self:slave ~value:(Ref.null)) members; + TaskHelper.set_progress ~__context 1.0 + ) let set_mode ~__context ~self ~value = - Db.Bond.set_mode ~__context ~self ~value; - let master = Db.Bond.get_master ~__context ~self in + Db.Bond.set_mode ~__context ~self ~value; + let master = Db.Bond.get_master ~__context ~self in - (* Set up sensible properties for this bond mode. *) - let requirements = requirements_of_mode value in - let properties = Db.Bond.get_properties ~__context ~self - |> List.filter (fun property -> try ignore(Map_check.validate_kvpair "properties" requirements property); true with _-> false) - |> Map_check.add_defaults requirements - in - Db.Bond.set_properties ~__context ~self ~value:properties; + (* Set up sensible properties for this bond mode. 
*) + let requirements = requirements_of_mode value in + let properties = Db.Bond.get_properties ~__context ~self + |> List.filter (fun property -> try ignore(Map_check.validate_kvpair "properties" requirements property); true with _-> false) + |> Map_check.add_defaults requirements + in + Db.Bond.set_properties ~__context ~self ~value:properties; - Nm.bring_pif_up ~__context master + Nm.bring_pif_up ~__context master let set_property ~__context ~self ~name ~value = - let mode = Db.Bond.get_mode ~__context ~self in - let requirements = requirements_of_mode mode in - Map_check.validate_kvpair "properties" requirements (name, value); - - (* Remove the existing property with this name, then add the new value. *) - let properties = List.filter - (fun (property_name, _) -> property_name <> name) - (Db.Bond.get_properties ~__context ~self) - in - let properties = (name, value)::properties in - Db.Bond.set_properties ~__context ~self ~value:properties; - - let master = Db.Bond.get_master ~__context ~self in - Nm.bring_pif_up ~__context master + let mode = Db.Bond.get_mode ~__context ~self in + let requirements = requirements_of_mode mode in + Map_check.validate_kvpair "properties" requirements (name, value); + + (* Remove the existing property with this name, then add the new value. 
*) + let properties = List.filter + (fun (property_name, _) -> property_name <> name) + (Db.Bond.get_properties ~__context ~self) + in + let properties = (name, value)::properties in + Db.Bond.set_properties ~__context ~self ~value:properties; + + let master = Db.Bond.get_master ~__context ~self in + Nm.bring_pif_up ~__context master (* Functions to export for testing only *) diff --git a/ocaml/xapi/xapi_bond.mli b/ocaml/xapi/xapi_bond.mli index 630a7d3cc87..736094d00ac 100644 --- a/ocaml/xapi/xapi_bond.mli +++ b/ocaml/xapi/xapi_bond.mli @@ -13,22 +13,22 @@ *) (** Module that defines API functions for Bond objects * @group Networking - *) - +*) + (** -A host can have multiple network interfaces. These can be used to physically separate networks. However, multiple interfaces can also be {i bonded} together to form a single high-throughput interface. -{ul -{- The PIFs that are bonded together are called {i slaves}.} -{- The datamodel has a {i Bond} object that joins the slaves.} -{- The bond is used through a {i master} PIF, which is also associated with the Bond object. The master PIF is a special PIF that does not directly represent a physical interface. The master PIF is associated with a Network, and used as a regular PIF.} -{- Bond slaves should never be used directly: they are not allowed to be plugged. Hence, they are not connected to bridges and therefore not accessible.} -} + A host can have multiple network interfaces. These can be used to physically separate networks. However, multiple interfaces can also be {i bonded} together to form a single high-throughput interface. + {ul + {- The PIFs that are bonded together are called {i slaves}.} + {- The datamodel has a {i Bond} object that joins the slaves.} + {- The bond is used through a {i master} PIF, which is also associated with the Bond object. The master PIF is a special PIF that does not directly represent a physical interface. 
The master PIF is associated with a Network, and used as a regular PIF.} + {- Bond slaves should never be used directly: they are not allowed to be plugged. Hence, they are not connected to bridges and therefore not accessible.} + } *) -(** Create a PIF to represent the bond master and a Bond record to represent the bond. +(** Create a PIF to represent the bond master and a Bond record to represent the bond. * Return a reference to the bond record. The given network must not have any local * PIFs on it yet. - *) +*) val create : __context:Context.t -> network:[ `network ] Ref.t -> diff --git a/ocaml/xapi/xapi_cli.ml b/ocaml/xapi/xapi_cli.ml index bca74d826c6..5254039fd25 100644 --- a/ocaml/xapi/xapi_cli.ml +++ b/ocaml/xapi/xapi_cli.ml @@ -13,7 +13,7 @@ *) (** * @group Command-Line Interface (CLI) - *) +*) open Stdext open Pervasiveext @@ -40,46 +40,46 @@ let write s string = ignore(Unix.write s towrite 0 (String.length towrite)) let forward args s session = - (* Reject forwarding cli commands if the request came in from a tcp socket *) - if not (Context.is_unix_socket s) then raise (Api_errors.Server_error (Api_errors.host_is_slave,[Pool_role.get_master_address ()])); - let open Xmlrpc_client in - let transport = SSL(SSL.make (), Pool_role.get_master_address (), !Xapi_globs.https_port) in - let body = - let args = Opt.default [] (Opt.map (fun s -> [ Printf.sprintf "session_id=%s" (Ref.string_of s) ]) session) @ args in - String.concat "\r\n" args in - let user_agent = Printf.sprintf "xapi/%s" Xapi_globs.api_version_string in - let request = Http.Request.make ~version:"1.0" ~user_agent ~body - Http.Post "/cli" in - with_transport transport - (fun ms -> - Unixext.really_write_string ms (Http.Request.to_wire_string request); - (* NB: CLI protocol handler doesn't send an HTTP response *) - let (_ : int * int) = unmarshal_protocol ms in - marshal_protocol ms; - Unixext.proxy (Unix.dup s) (Unix.dup ms) - ) + (* Reject forwarding cli commands if the request came in from a 
tcp socket *) + if not (Context.is_unix_socket s) then raise (Api_errors.Server_error (Api_errors.host_is_slave,[Pool_role.get_master_address ()])); + let open Xmlrpc_client in + let transport = SSL(SSL.make (), Pool_role.get_master_address (), !Xapi_globs.https_port) in + let body = + let args = Opt.default [] (Opt.map (fun s -> [ Printf.sprintf "session_id=%s" (Ref.string_of s) ]) session) @ args in + String.concat "\r\n" args in + let user_agent = Printf.sprintf "xapi/%s" Xapi_globs.api_version_string in + let request = Http.Request.make ~version:"1.0" ~user_agent ~body + Http.Post "/cli" in + with_transport transport + (fun ms -> + Unixext.really_write_string ms (Http.Request.to_wire_string request); + (* NB: CLI protocol handler doesn't send an HTTP response *) + let (_ : int * int) = unmarshal_protocol ms in + marshal_protocol ms; + Unixext.proxy (Unix.dup s) (Unix.dup ms) + ) (* Check that keys are all present in cmd *) let check_required_keys cmd keylist = let (_: (string * string) list) = get_params cmd in - List.map (get_reqd_param cmd) keylist + List.map (get_reqd_param cmd) keylist -let with_session ~local rpc u p session f = - let session, logout = - match local, session with - | false, None -> - Client.Client.Session.login_with_password ~rpc ~uname:u ~pwd:p ~version:Xapi_globs.api_version_string ~originator:"cli", true - | true, None -> - Client.Client.Session.slave_local_login_with_password ~rpc ~uname:u ~pwd:p, true - | _, Some session -> session, false in +let with_session ~local rpc u p session f = + let session, logout = + match local, session with + | false, None -> + Client.Client.Session.login_with_password ~rpc ~uname:u ~pwd:p ~version:Xapi_globs.api_version_string ~originator:"cli", true + | true, None -> + Client.Client.Session.slave_local_login_with_password ~rpc ~uname:u ~pwd:p, true + | _, Some session -> session, false in let do_logout () = if logout then begin if local then Client.Client.Session.local_logout ~rpc ~session_id:session 
else Client.Client.Session.logout ~rpc ~session_id:session end in - finally + finally (fun () -> f session) (fun () -> do_logout ()) @@ -92,7 +92,7 @@ let do_rpcs req s username password minimal cmd session args = error "Rethrowing Not_found as Unknown_command %s" cmdname; Backtrace.reraise e (Unknown_command cmdname) in (* Forward if we're not the master, and if the cspec doesn't contain the key 'neverforward' *) - let do_forward = + let do_forward = (not (Pool_role.is_master ())) && (not (List.mem Neverforward cspec.flags)) in let _ = check_required_keys cmd cspec.reqd in @@ -105,30 +105,30 @@ let do_rpcs req s username password minimal cmd session args = then with_session ~local:false rpc username password session (fun sess -> forward args s (Some sess)) else begin - let (printer,flush) = Cli_printer.make_printer s minimal in - let flush_and_marshall() = flush (); marshal s (Command(Exit 0)) in - begin - match cspec.implementation with - | No_fd f -> - with_session ~local:false rpc username password session - (fun session->f printer rpc session (get_params cmd); flush_and_marshall()) - | No_fd_local_session f -> - with_session ~local:true rpc username password session - (fun session-> - f printer rpc session (get_params cmd); flush_and_marshall()) - | With_fd f -> - with_session ~local:false rpc username password session - (fun session->f s printer rpc session (get_params cmd); flush_and_marshall()) - | With_fd_local_session f -> - with_session ~local:true rpc username password session - (fun session-> - f s printer rpc session (get_params cmd); flush_and_marshall()) - end + let (printer,flush) = Cli_printer.make_printer s minimal in + let flush_and_marshall() = flush (); marshal s (Command(Exit 0)) in + begin + match cspec.implementation with + | No_fd f -> + with_session ~local:false rpc username password session + (fun session->f printer rpc session (get_params cmd); flush_and_marshall()) + | No_fd_local_session f -> + with_session ~local:true rpc username 
password session + (fun session-> + f printer rpc session (get_params cmd); flush_and_marshall()) + | With_fd f -> + with_session ~local:false rpc username password session + (fun session->f s printer rpc session (get_params cmd); flush_and_marshall()) + | With_fd_local_session f -> + with_session ~local:true rpc username password session + (fun session-> + f s printer rpc session (get_params cmd); flush_and_marshall()) + end end with - Unix.Unix_error (a,b,c) as e -> - warn "Uncaught exception: Unix_error '%s' '%s' '%s'" (Unix.error_message a) b c; - raise e + Unix.Unix_error (a,b,c) as e -> + warn "Uncaught exception: Unix_error '%s' '%s' '%s'" (Unix.error_message a) b c; + raise e let do_help cmd minimal s = let (printer,flush)=Cli_printer.make_printer s minimal in @@ -139,54 +139,54 @@ let do_help cmd minimal s = let uninteresting_cmd_postfixes = [ "help"; "-get"; "-list" ] let exec_command req cmd s session args = - let params = get_params cmd in - let minimal = - if (List.mem_assoc "minimal" params) - then bool_of_string (List.assoc "minimal" params) - else false in - let u = try List.assoc "username" params with _ -> "" in - let p = try List.assoc "password" params with _ -> "" in - (* Create a list of commands and their associated arguments which might be sensitive. 
*) - let commands_and_params_to_hide = - let st = String.startswith in - let eq = (=) in - [ - eq "user-password-change", ["old"; "new"]; - st "secret", ["value"]; - eq "pool-enable-external-auth", ["config:pass"]; - eq "pool-disable-external-auth", ["config:pass"]; - eq "host-call-plugin", ["args:url"]; - ] in - let rpc = Helpers.get_rpc () req s in - Cli_frontend.populate_cmdtable rpc Ref.null; - (* Log the actual CLI command to help diagnose failures like CA-25516 *) - let cmd_name = get_cmdname cmd in - if cmd_name = "help" then do_help cmd minimal s - else - let uninteresting = - List.exists - (fun k -> String.endswith k cmd_name) uninteresting_cmd_postfixes in - let do_log = if uninteresting then debug else info in - let params_to_hide = List.fold_left - (fun accu (cmd_test, params) -> if cmd_test cmd_name then accu @ params else accu) - [] - commands_and_params_to_hide - in - let must_censor param_name = - (* name contains (case-insensitive) "password" or is in list *) - Re.execp (Re.compile(Re.no_case(Re.str "password"))) param_name - || List.mem param_name params_to_hide - in - do_log "xe %s %s" cmd_name (String.concat " " (List.map (fun (k, v) -> let v' = if must_censor k then "(omitted)" else v in k ^ "=" ^ v') params)); - do_rpcs req s u p minimal cmd session args + let params = get_params cmd in + let minimal = + if (List.mem_assoc "minimal" params) + then bool_of_string (List.assoc "minimal" params) + else false in + let u = try List.assoc "username" params with _ -> "" in + let p = try List.assoc "password" params with _ -> "" in + (* Create a list of commands and their associated arguments which might be sensitive. 
*) + let commands_and_params_to_hide = + let st = String.startswith in + let eq = (=) in + [ + eq "user-password-change", ["old"; "new"]; + st "secret", ["value"]; + eq "pool-enable-external-auth", ["config:pass"]; + eq "pool-disable-external-auth", ["config:pass"]; + eq "host-call-plugin", ["args:url"]; + ] in + let rpc = Helpers.get_rpc () req s in + Cli_frontend.populate_cmdtable rpc Ref.null; + (* Log the actual CLI command to help diagnose failures like CA-25516 *) + let cmd_name = get_cmdname cmd in + if cmd_name = "help" then do_help cmd minimal s + else + let uninteresting = + List.exists + (fun k -> String.endswith k cmd_name) uninteresting_cmd_postfixes in + let do_log = if uninteresting then debug else info in + let params_to_hide = List.fold_left + (fun accu (cmd_test, params) -> if cmd_test cmd_name then accu @ params else accu) + [] + commands_and_params_to_hide + in + let must_censor param_name = + (* name contains (case-insensitive) "password" or is in list *) + Re.execp (Re.compile(Re.no_case(Re.str "password"))) param_name + || List.mem param_name params_to_hide + in + do_log "xe %s %s" cmd_name (String.concat " " (List.map (fun (k, v) -> let v' = if must_censor k then "(omitted)" else v in k ^ "=" ^ v') params)); + do_rpcs req s u p minimal cmd session args let get_line str i = try let next_endl = String.index_from str i '\n' in (Some (next_endl+1),String.sub str i (next_endl - i)) with - Not_found -> (None,String.sub str i (String.length str - i)) - | _ -> (None,"") + Not_found -> (None,String.sub str i (String.length str - i)) + | _ -> (None,"") let param_error s t sock = marshal sock (Command (PrintStderr ((if s <> "" then s ^ ": " ^ t else t)^"\n"))); @@ -197,72 +197,72 @@ let other_error msg sock = let multiple_error errs sock = List.iter (fun (erruuid, errmsg) -> - let msg = Printf.sprintf "operation failed on %s: %s" erruuid errmsg in - marshal sock (Command (Print msg))) errs + let msg = Printf.sprintf "operation failed on %s: %s" erruuid 
errmsg in + marshal sock (Command (Print msg))) errs (* This never raises exceptions: *) let parse_session_and_args str = - let rec get_args n cur = - let (next,arg) = get_line str n in - let arg = zap_cr arg in - match next with - | Some i -> get_args i (arg::cur) - | None -> (arg::cur) in - let args = List.rev (get_args 0 []) in - try - let line = List.hd args in - if String.startswith "session_id=" line - then (Some (Ref.of_string (String.sub line 11 (String.length line - 11))), List.tl args) - else (None,args) - with _ -> (None,args) + let rec get_args n cur = + let (next,arg) = get_line str n in + let arg = zap_cr arg in + match next with + | Some i -> get_args i (arg::cur) + | None -> (arg::cur) in + let args = List.rev (get_args 0 []) in + try + let line = List.hd args in + if String.startswith "session_id=" line + then (Some (Ref.of_string (String.sub line 11 (String.length line - 11))), List.tl args) + else (None,args) + with _ -> (None,args) let exception_handler s e = error "Converting exception %s into a CLI response" (ExnHelper.string_of_exn e); match e with - | Cli_operations.ExitWithError n -> - marshal s (Command (Exit n)) - | Unknown_command str -> - param_error "Unknown command" str s - | Cli_frontend.ParseError str -> - param_error "Syntax error" str s - | Cli_frontend.ParamNotFound str -> - param_error "Required parameter not found" str s - | Cli_operations.Multiple_failure errs -> - multiple_error errs s - | Cli_util.Cli_failure str -> - other_error ("Error: "^str) s - | Api_errors.Server_error(code, params) -> - if code=Api_errors.session_authentication_failed - then - let uname = List.hd params in - if uname="" (* default when not specified *) - then param_error "" "Username/password must be specified" s - else param_error "" "Authentication failed" s - else - Cli_util.server_error code params s - | Records.CLI_failed_to_find_param name -> - Cli_util.server_error "Missing parameter" [name] s - | Record_util.Record_failure str -> - other_error 
("Error: "^str) s - | Xml.Error exn -> - Cli_util.server_error Api_errors.internal_error [ "XML error: " ^ (Xml.error exn) ] s - | Failure str -> - Cli_util.server_error Api_errors.internal_error [ "Failure: " ^ str ] s - | Unix.Unix_error (a,b,c) -> - Cli_util.server_error Api_errors.internal_error [ "Unix_error: " ^ (Unix.error_message a) ] s - | exc -> - Cli_util.server_error Api_errors.internal_error [ ExnHelper.string_of_exn exc ] s + | Cli_operations.ExitWithError n -> + marshal s (Command (Exit n)) + | Unknown_command str -> + param_error "Unknown command" str s + | Cli_frontend.ParseError str -> + param_error "Syntax error" str s + | Cli_frontend.ParamNotFound str -> + param_error "Required parameter not found" str s + | Cli_operations.Multiple_failure errs -> + multiple_error errs s + | Cli_util.Cli_failure str -> + other_error ("Error: "^str) s + | Api_errors.Server_error(code, params) -> + if code=Api_errors.session_authentication_failed + then + let uname = List.hd params in + if uname="" (* default when not specified *) + then param_error "" "Username/password must be specified" s + else param_error "" "Authentication failed" s + else + Cli_util.server_error code params s + | Records.CLI_failed_to_find_param name -> + Cli_util.server_error "Missing parameter" [name] s + | Record_util.Record_failure str -> + other_error ("Error: "^str) s + | Xml.Error exn -> + Cli_util.server_error Api_errors.internal_error [ "XML error: " ^ (Xml.error exn) ] s + | Failure str -> + Cli_util.server_error Api_errors.internal_error [ "Failure: " ^ str ] s + | Unix.Unix_error (a,b,c) -> + Cli_util.server_error Api_errors.internal_error [ "Unix_error: " ^ (Unix.error_message a) ] s + | exc -> + Cli_util.server_error Api_errors.internal_error [ ExnHelper.string_of_exn exc ] s let handler (req:Http.Request.t) (bio: Buf_io.t) _ = - let str = Http_svr.read_body ~limit:Xapi_globs.http_limit_max_cli_size req bio in + let str = Http_svr.read_body 
~limit:Xapi_globs.http_limit_max_cli_size req bio in let s = Buf_io.fd_of bio in (* Tell the client the server version *) marshal_protocol s; (* Read the client's protocol version *) let major', minor' = unmarshal_protocol s in if major' <> major then begin - debug "Rejecting request from client"; - failwith "Version mismatch" + debug "Rejecting request from client"; + failwith "Version mismatch" end; let session, args = parse_session_and_args str in try @@ -271,15 +271,15 @@ let handler (req:Http.Request.t) (bio: Buf_io.t) _ = match Backtrace.with_backtraces (fun () -> exec_command req cmd s session args) with | `Ok _ -> () | `Error (e, bt) -> - exception_handler s e; - (* Command execution errors can use --trace *) - if Cli_operations.get_bool_param cmd.params "trace" then begin - marshal s (Command (PrintStderr (Printf.sprintf "Raised %s\n" (Printexc.to_string e)))); - marshal s (Command (PrintStderr "Backtrace:\n")); - marshal s (Command (PrintStderr (Backtrace.(to_string_hum bt)))); - end; - Debug.log_backtrace e bt; - marshal s (Command (Exit 1)); + exception_handler s e; + (* Command execution errors can use --trace *) + if Cli_operations.get_bool_param cmd.params "trace" then begin + marshal s (Command (PrintStderr (Printf.sprintf "Raised %s\n" (Printexc.to_string e)))); + marshal s (Command (PrintStderr "Backtrace:\n")); + marshal s (Command (PrintStderr (Backtrace.(to_string_hum bt)))); + end; + Debug.log_backtrace e bt; + marshal s (Command (Exit 1)); with e -> exception_handler s e; marshal s (Command (Exit 1)); diff --git a/ocaml/xapi/xapi_config.ml b/ocaml/xapi/xapi_config.ml index 9cdd617ec8c..677b4bd7157 100644 --- a/ocaml/xapi/xapi_config.ml +++ b/ocaml/xapi/xapi_config.ml @@ -21,15 +21,15 @@ module D=Debug.Make(struct let name="xapi" end) open D let log_if_not_empty format_string value = - if value <> "" then debug format_string value + if value <> "" then debug format_string value let dump_config () = - debug "Server configuration:"; - 
log_if_not_empty "product_version: %s" (Version.product_version ()); - log_if_not_empty "product_brand: %s" (Version.product_brand ()); - debug "platform_version: %s" (Version.platform_version ()); - debug "platform_name: %s" (Version.platform_name ()); - debug "build_number: %s" (Version.build_number ()); - debug "git changeset: %s" Version.git_id; - debug "version: %d.%d" version_major version_minor; - (* debug "License filename: %s" !License_file.filename *) + debug "Server configuration:"; + log_if_not_empty "product_version: %s" (Version.product_version ()); + log_if_not_empty "product_brand: %s" (Version.product_brand ()); + debug "platform_version: %s" (Version.platform_version ()); + debug "platform_name: %s" (Version.platform_name ()); + debug "build_number: %s" (Version.build_number ()); + debug "git changeset: %s" Version.git_id; + debug "version: %d.%d" version_major version_minor; + (* debug "License filename: %s" !License_file.filename *) diff --git a/ocaml/xapi/xapi_crashdump.ml b/ocaml/xapi/xapi_crashdump.ml index 3d99609f772..d8183877109 100644 --- a/ocaml/xapi/xapi_crashdump.ml +++ b/ocaml/xapi/xapi_crashdump.ml @@ -18,15 +18,15 @@ let nothrow f () = try f() with _ -> () let create ~__context ~vM ~vDI = let cdumpref = Ref.make() in let uuid = Uuid.to_string (Uuid.make_uuid()) in - Db.Crashdump.create ~__context ~ref:cdumpref ~uuid ~vM ~vDI ~other_config:[]; - cdumpref + Db.Crashdump.create ~__context ~ref:cdumpref ~uuid ~vM ~vDI ~other_config:[]; + cdumpref let destroy ~__context ~self = Stdext.Pervasiveext.finally (nothrow (fun ()-> - let vdi = Db.Crashdump.get_VDI ~__context ~self in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.VDI.destroy rpc session_id vdi))) + let vdi = Db.Crashdump.get_VDI ~__context ~self in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Client.VDI.destroy rpc session_id vdi))) (fun ()-> Db.Crashdump.destroy ~__context ~self) diff --git 
a/ocaml/xapi/xapi_db_upgrade.ml b/ocaml/xapi/xapi_db_upgrade.ml index 5a5e58a3c09..e8b0d3969f9 100644 --- a/ocaml/xapi/xapi_db_upgrade.ml +++ b/ocaml/xapi/xapi_db_upgrade.ml @@ -32,16 +32,16 @@ type upgrade_rule = { } (** Apply all the rules needed for the previous_version *) -let apply_upgrade_rules ~__context rules previous_version = +let apply_upgrade_rules ~__context rules previous_version = debug "Looking for database upgrade rules:"; let required_rules = List.filter (fun r -> r.version previous_version) rules in List.iter (fun r -> - debug "Applying database upgrade rule: %s" r.description; - try - r.fn ~__context - with exn -> - error "Database upgrade rule '%s' failed: %s" r.description (Printexc.to_string exn) + debug "Applying database upgrade rule: %s" r.description; + try + r.fn ~__context + with exn -> + error "Database upgrade rule '%s' failed: %s" r.description (Printexc.to_string exn) ) required_rules let george = Datamodel.george_release_schema_major_vsn, Datamodel.george_release_schema_minor_vsn @@ -57,486 +57,486 @@ let dundee = Datamodel.dundee_release_schema_major_vsn, Datamodel.dundee_release let vsn_with_meaningful_has_vendor_device = Datamodel.meaningful_vm_has_vendor_device_schema_major_vsn, Datamodel.meaningful_vm_has_vendor_device_schema_minor_vsn let upgrade_alert_priority = { - description = "Upgrade alert priority"; - version = (fun _ -> true); - fn = fun ~__context -> - let alert_refs = Xapi_message.get_all () in - List.iter - (fun r -> - try - let m = Xapi_message.get_record ~__context ~self:r in - if List.mem_assoc m.API.message_name !Api_messages.msgList then - let prio = List.assoc m.API.message_name !Api_messages.msgList in - if prio <> m.API.message_priority then begin - Xapi_message.destroy ~__context ~self:r; - let gen = Xapi_message.write ~__context ~_ref:r - ~message:{m with API.message_priority=prio} in - match gen with - | Some _ -> - debug "Update message %s with new priority %Ld" (Ref.string_of r) prio - | None -> () - 
end - with e -> - warn "Update message %s failed due to exception %s" (Ref.string_of r) (Printexc.to_string e)) - alert_refs + description = "Upgrade alert priority"; + version = (fun _ -> true); + fn = fun ~__context -> + let alert_refs = Xapi_message.get_all () in + List.iter + (fun r -> + try + let m = Xapi_message.get_record ~__context ~self:r in + if List.mem_assoc m.API.message_name !Api_messages.msgList then + let prio = List.assoc m.API.message_name !Api_messages.msgList in + if prio <> m.API.message_priority then begin + Xapi_message.destroy ~__context ~self:r; + let gen = Xapi_message.write ~__context ~_ref:r + ~message:{m with API.message_priority=prio} in + match gen with + | Some _ -> + debug "Update message %s with new priority %Ld" (Ref.string_of r) prio + | None -> () + end + with e -> + warn "Update message %s failed due to exception %s" (Ref.string_of r) (Printexc.to_string e)) + alert_refs } let update_mail_min_priority = { - description = "Update pool's other-config:mail-min-priority"; - version = (fun x -> x <= tampa); - fn = fun ~__context -> - List.iter (fun self -> - let oc = Db.Pool.get_other_config ~__context ~self in - let key = "mail-min-priority" in - if List.mem_assoc key oc then - try - let prio = int_of_string (List.assoc key oc) in - let prio' = - if prio > 10 then 0 - else if prio = 10 then 1 - else if prio > 5 then 2 - else if prio = 5 then 3 - else if prio > 1 then 4 - else 5 in - Db.Pool.remove_from_other_config ~__context ~self ~key; - Db.Pool.add_to_other_config ~__context ~self ~key ~value:(string_of_int prio'); - debug "Upgrade pool's other-config:mail-min-priority: %d -> %d" prio prio' - with e -> - warn "Failed to update other-config:mail-min-priority of the pool: %s, remove to reset" - (Printexc.to_string e); - Db.Pool.remove_from_other_config ~__context ~self ~key) - (Db.Pool.get_all ~__context) + description = "Update pool's other-config:mail-min-priority"; + version = (fun x -> x <= tampa); + fn = fun ~__context -> + 
List.iter (fun self -> + let oc = Db.Pool.get_other_config ~__context ~self in + let key = "mail-min-priority" in + if List.mem_assoc key oc then + try + let prio = int_of_string (List.assoc key oc) in + let prio' = + if prio > 10 then 0 + else if prio = 10 then 1 + else if prio > 5 then 2 + else if prio = 5 then 3 + else if prio > 1 then 4 + else 5 in + Db.Pool.remove_from_other_config ~__context ~self ~key; + Db.Pool.add_to_other_config ~__context ~self ~key ~value:(string_of_int prio'); + debug "Upgrade pool's other-config:mail-min-priority: %d -> %d" prio prio' + with e -> + warn "Failed to update other-config:mail-min-priority of the pool: %s, remove to reset" + (Printexc.to_string e); + Db.Pool.remove_from_other_config ~__context ~self ~key) + (Db.Pool.get_all ~__context) } let upgrade_vm_memory_overheads = { - description = "Upgrade VM.memory_overhead fields"; - version = (fun _ -> true); - fn = fun ~__context -> - List.iter - (fun vm -> Xapi_vm_helpers.update_memory_overhead ~__context ~vm) - (Db.VM.get_all ~__context) + description = "Upgrade VM.memory_overhead fields"; + version = (fun _ -> true); + fn = fun ~__context -> + List.iter + (fun vm -> Xapi_vm_helpers.update_memory_overhead ~__context ~vm) + (Db.VM.get_all ~__context) } let upgrade_wlb_configuration = { - description = "Upgrade WLB to use secrets"; - version = (fun _ -> true); - fn = fun ~__context -> - (* there can be only one pool *) - let pool = Helpers.get_pool ~__context in - (* get a Secret reference that makes sense, if there is no password ("") - then use null, otherwise convert if clear-text and else keep what's - there *) - let wlb_passwd_ref = - let old_wlb_pwd = Ref.string_of - (Db.Pool.get_wlb_password ~__context ~self:pool) in - if old_wlb_pwd = "" - then Ref.null - else if String.startswith "OpaqueRef:" old_wlb_pwd - then Db.Pool.get_wlb_password ~__context ~self:pool - else Xapi_secret.create ~__context ~value:old_wlb_pwd ~other_config:[] - in - Db.Pool.set_wlb_password 
~__context ~self:pool ~value:wlb_passwd_ref + description = "Upgrade WLB to use secrets"; + version = (fun _ -> true); + fn = fun ~__context -> + (* there can be only one pool *) + let pool = Helpers.get_pool ~__context in + (* get a Secret reference that makes sense, if there is no password ("") + then use null, otherwise convert if clear-text and else keep what's + there *) + let wlb_passwd_ref = + let old_wlb_pwd = Ref.string_of + (Db.Pool.get_wlb_password ~__context ~self:pool) in + if old_wlb_pwd = "" + then Ref.null + else if String.startswith "OpaqueRef:" old_wlb_pwd + then Db.Pool.get_wlb_password ~__context ~self:pool + else Xapi_secret.create ~__context ~value:old_wlb_pwd ~other_config:[] + in + Db.Pool.set_wlb_password ~__context ~self:pool ~value:wlb_passwd_ref } (** On upgrade to the first ballooning-enabled XenServer, we reset memory -properties to safe defaults to avoid triggering something bad. -{ul - {- For guest domains, we replace the current set of possibly-invalid memory - constraints {i s} with a new set of valid and unballooned constraints {i t} - such that: - {ol - {- t.dynamic_max := s.static_max} - {- t.target := s.static_max} - {- t.dynamic_min := s.static_max} - {- t.static_min := minimum (s.static_min, s.static_max)}}} - {- For control domains, we respect the administrator's choice of target: - {ol - {- t.dynamic_max := s.target} - {- t.dynamic_min := s.target}}} -} + properties to safe defaults to avoid triggering something bad. 
+ {ul + {- For guest domains, we replace the current set of possibly-invalid memory + constraints {i s} with a new set of valid and unballooned constraints {i t} + such that: + {ol + {- t.dynamic_max := s.static_max} + {- t.target := s.static_max} + {- t.dynamic_min := s.static_max} + {- t.static_min := minimum (s.static_min, s.static_max)}}} + {- For control domains, we respect the administrator's choice of target: + {ol + {- t.dynamic_max := s.target} + {- t.dynamic_min := s.target}}} + } *) -let upgrade_vm_memory_for_dmc = { - description = "Upgrading VM memory fields for DMC"; - version = (fun x -> x <= george); - fn = - fun ~__context -> - debug "Upgrading VM.memory_dynamic_{min,max} in guest and control domains."; - let module VMC = Vm_memory_constraints.Vm_memory_constraints in - - let update_vm (vm_ref, vm_rec) = - if vm_rec.API.vM_is_control_domain then begin - let target = vm_rec.API.vM_memory_target in - debug "VM %s (%s) dynamic_{min,max} <- %Ld" - vm_rec.API.vM_uuid - vm_rec.API.vM_name_label - target; - Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:target; - Db.VM.set_memory_dynamic_max ~__context ~self:vm_ref ~value:target; - end else begin - (* Note this will also transform templates *) - let safe_constraints = VMC.reset_to_safe_defaults ~constraints: - { VMC.static_min = vm_rec.API.vM_memory_static_min - ; dynamic_min = vm_rec.API.vM_memory_dynamic_min - ; target = vm_rec.API.vM_memory_target - ; dynamic_max = vm_rec.API.vM_memory_dynamic_max - ; static_max = vm_rec.API.vM_memory_static_max - } in - debug "VM %s (%s) dynamic_{min,max},target <- %Ld" - vm_rec.API.vM_uuid vm_rec.API.vM_name_label - safe_constraints.VMC.static_max; - Db.VM.set_memory_static_min ~__context ~self:vm_ref ~value:safe_constraints.VMC.static_min; - Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:safe_constraints.VMC.dynamic_min; - Db.VM.set_memory_target ~__context ~self:vm_ref ~value:safe_constraints.VMC.target; - Db.VM.set_memory_dynamic_max 
~__context ~self:vm_ref ~value:safe_constraints.VMC.dynamic_max; - - Db.VM.set_memory_static_max ~__context ~self:vm_ref ~value:safe_constraints.VMC.static_max; - end in - List.iter update_vm (Db.VM.get_all_records ~__context) +let upgrade_vm_memory_for_dmc = { + description = "Upgrading VM memory fields for DMC"; + version = (fun x -> x <= george); + fn = + fun ~__context -> + debug "Upgrading VM.memory_dynamic_{min,max} in guest and control domains."; + let module VMC = Vm_memory_constraints.Vm_memory_constraints in + + let update_vm (vm_ref, vm_rec) = + if vm_rec.API.vM_is_control_domain then begin + let target = vm_rec.API.vM_memory_target in + debug "VM %s (%s) dynamic_{min,max} <- %Ld" + vm_rec.API.vM_uuid + vm_rec.API.vM_name_label + target; + Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:target; + Db.VM.set_memory_dynamic_max ~__context ~self:vm_ref ~value:target; + end else begin + (* Note this will also transform templates *) + let safe_constraints = VMC.reset_to_safe_defaults ~constraints: + { VMC.static_min = vm_rec.API.vM_memory_static_min + ; dynamic_min = vm_rec.API.vM_memory_dynamic_min + ; target = vm_rec.API.vM_memory_target + ; dynamic_max = vm_rec.API.vM_memory_dynamic_max + ; static_max = vm_rec.API.vM_memory_static_max + } in + debug "VM %s (%s) dynamic_{min,max},target <- %Ld" + vm_rec.API.vM_uuid vm_rec.API.vM_name_label + safe_constraints.VMC.static_max; + Db.VM.set_memory_static_min ~__context ~self:vm_ref ~value:safe_constraints.VMC.static_min; + Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:safe_constraints.VMC.dynamic_min; + Db.VM.set_memory_target ~__context ~self:vm_ref ~value:safe_constraints.VMC.target; + Db.VM.set_memory_dynamic_max ~__context ~self:vm_ref ~value:safe_constraints.VMC.dynamic_max; + + Db.VM.set_memory_static_max ~__context ~self:vm_ref ~value:safe_constraints.VMC.static_max; + end in + List.iter update_vm (Db.VM.get_all_records ~__context) } -(* GEORGE OEM -> BODIE/MNR *) +(* GEORGE OEM 
-> BODIE/MNR *) let upgrade_bios_strings = { - description = "Upgrading VM BIOS strings"; - version = (fun x -> x <= george); - fn = fun ~__context -> - let oem_manufacturer = - try - let ic = open_in "/var/tmp/.previousInventory" in - let rec find_oem_manufacturer () = - let line = input_line ic in - match Xapi_inventory.parse_inventory_entry line with - | Some (k, v) when k = "OEM_MANUFACTURER" -> Some v - | Some _ -> find_oem_manufacturer () - | None -> None - in - Pervasiveext.finally (find_oem_manufacturer) (fun () -> close_in ic) - with _ -> None - in - let update_vms bios_strings = - List.iter - (fun self -> Db.VM.set_bios_strings ~__context ~self ~value:bios_strings) - (Db.VM.get_all ~__context) in - match oem_manufacturer with - | Some oem -> - info "Upgrade from OEM edition (%s)." oem; - if String.has_substr oem "HP" then begin - debug "Using old HP BIOS strings"; - update_vms Xapi_globs.old_hp_bios_strings - end else if String.has_substr oem "Dell" then begin - debug "Using old Dell BIOS strings"; - update_vms Xapi_globs.old_dell_bios_strings - end - | None -> - info "Upgrade from retail edition."; - debug "Using generic BIOS strings"; - update_vms Xapi_globs.generic_bios_strings + description = "Upgrading VM BIOS strings"; + version = (fun x -> x <= george); + fn = fun ~__context -> + let oem_manufacturer = + try + let ic = open_in "/var/tmp/.previousInventory" in + let rec find_oem_manufacturer () = + let line = input_line ic in + match Xapi_inventory.parse_inventory_entry line with + | Some (k, v) when k = "OEM_MANUFACTURER" -> Some v + | Some _ -> find_oem_manufacturer () + | None -> None + in + Pervasiveext.finally (find_oem_manufacturer) (fun () -> close_in ic) + with _ -> None + in + let update_vms bios_strings = + List.iter + (fun self -> Db.VM.set_bios_strings ~__context ~self ~value:bios_strings) + (Db.VM.get_all ~__context) in + match oem_manufacturer with + | Some oem -> + info "Upgrade from OEM edition (%s)." 
oem; + if String.has_substr oem "HP" then begin + debug "Using old HP BIOS strings"; + update_vms Xapi_globs.old_hp_bios_strings + end else if String.has_substr oem "Dell" then begin + debug "Using old Dell BIOS strings"; + update_vms Xapi_globs.old_dell_bios_strings + end + | None -> + info "Upgrade from retail edition."; + debug "Using generic BIOS strings"; + update_vms Xapi_globs.generic_bios_strings } let update_snapshots = { - description = "Updating snapshot parent references"; - version = (fun x -> x <= george); - fn = fun ~__context -> - let all_vms = Db.VM.get_all ~__context in - let update_snapshots self = - let snapshots = List.filter (fun snap -> Db.VM.get_snapshot_of ~__context ~self:snap = self) all_vms in - let compare s1 s2 = - let t1 = Db.VM.get_snapshot_time ~__context ~self:s1 in - let t2 = Db.VM.get_snapshot_time ~__context ~self:s2 in - compare t1 t2 in - let ordered_snapshots = List.sort compare snapshots in - debug "Snapshots(%s) = {%s}" (Ref.string_of self) (String.concat ", " (List.map Ref.string_of ordered_snapshots)); - let rec aux snaps = match snaps with - | [] | [_] -> () - | s1 :: s2 :: t -> - Db.VM.set_parent ~__context ~self:s2 ~value:s1; - aux (s2 :: t) in - aux (ordered_snapshots @ [ self]) in - List.iter update_snapshots all_vms + description = "Updating snapshot parent references"; + version = (fun x -> x <= george); + fn = fun ~__context -> + let all_vms = Db.VM.get_all ~__context in + let update_snapshots self = + let snapshots = List.filter (fun snap -> Db.VM.get_snapshot_of ~__context ~self:snap = self) all_vms in + let compare s1 s2 = + let t1 = Db.VM.get_snapshot_time ~__context ~self:s1 in + let t2 = Db.VM.get_snapshot_time ~__context ~self:s2 in + compare t1 t2 in + let ordered_snapshots = List.sort compare snapshots in + debug "Snapshots(%s) = {%s}" (Ref.string_of self) (String.concat ", " (List.map Ref.string_of ordered_snapshots)); + let rec aux snaps = match snaps with + | [] | [_] -> () + | s1 :: s2 :: t -> + 
Db.VM.set_parent ~__context ~self:s2 ~value:s1; + aux (s2 :: t) in + aux (ordered_snapshots @ [ self]) in + List.iter update_snapshots all_vms } (* Upgrade the old guest installer network *) let upgrade_guest_installer_network = { - description = "Upgrading the existing guest installer network"; - version = (fun _ -> true); - fn = fun ~__context -> - List.iter - (fun self -> - let oc = Db.Network.get_other_config ~__context ~self in - let is_true key = List.mem_assoc key oc && (try bool_of_string (List.assoc key oc) with _ -> false) in - if is_true Xapi_globs.is_guest_installer_network && not(is_true Xapi_globs.is_host_internal_management_network) then begin - debug "Upgrading guest installer network uuid: %s" (Db.Network.get_uuid ~__context ~self); - Db.Network.set_name_label ~__context ~self ~value:Create_networks.internal_management_network_name; - Db.Network.set_name_description ~__context ~self ~value:Create_networks.internal_management_network_desc; - Db.Network.set_other_config ~__context ~self ~value:Create_networks.internal_management_network_oc; - Db.Network.set_bridge ~__context ~self ~value:Create_networks.internal_management_bridge; - end - ) (Db.Network.get_all ~__context) + description = "Upgrading the existing guest installer network"; + version = (fun _ -> true); + fn = fun ~__context -> + List.iter + (fun self -> + let oc = Db.Network.get_other_config ~__context ~self in + let is_true key = List.mem_assoc key oc && (try bool_of_string (List.assoc key oc) with _ -> false) in + if is_true Xapi_globs.is_guest_installer_network && not(is_true Xapi_globs.is_host_internal_management_network) then begin + debug "Upgrading guest installer network uuid: %s" (Db.Network.get_uuid ~__context ~self); + Db.Network.set_name_label ~__context ~self ~value:Create_networks.internal_management_network_name; + Db.Network.set_name_description ~__context ~self ~value:Create_networks.internal_management_network_desc; + Db.Network.set_other_config ~__context ~self 
~value:Create_networks.internal_management_network_oc; + Db.Network.set_bridge ~__context ~self ~value:Create_networks.internal_management_bridge; + end + ) (Db.Network.get_all ~__context) } (* COWLEY -> BOSTON *) let upgrade_vdi_types = { - description = "Upgrading VDIs with type 'metadata' to type 'redo_log'"; - version = (fun x -> x <= cowley); - fn = fun ~__context -> - let all_vdis = Db.VDI.get_all ~__context in - let update_vdi vdi = - let vdi_type = Db.VDI.get_type ~__context ~self:vdi in - if vdi_type = `metadata then - Db.VDI.set_type ~__context ~self:vdi ~value:`redo_log - in - List.iter update_vdi all_vdis + description = "Upgrading VDIs with type 'metadata' to type 'redo_log'"; + version = (fun x -> x <= cowley); + fn = fun ~__context -> + let all_vdis = Db.VDI.get_all ~__context in + let update_vdi vdi = + let vdi_type = Db.VDI.get_type ~__context ~self:vdi in + if vdi_type = `metadata then + Db.VDI.set_type ~__context ~self:vdi ~value:`redo_log + in + List.iter update_vdi all_vdis } let upgrade_ha_restart_priority = { - description = "Upgrading ha_restart_priority"; - version = (fun x -> x <= cowley); - fn = fun ~__context -> - let all_vms = Db.VM.get_all ~__context in - let update_vm vm = - let priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in - let (new_priority, new_order) = match priority with - | "0" -> ("restart", 0L) - | "1" -> ("restart", 1L) - | "2" -> ("restart", 2L) - | "3" -> ("restart", 3L) - | "best-effort" -> ("best-effort", 0L) - | _ -> ("", 0L) - in - Db.VM.set_ha_restart_priority ~__context ~self:vm ~value:new_priority; - Db.VM.set_order ~__context ~self:vm ~value:new_order - in - List.iter update_vm all_vms + description = "Upgrading ha_restart_priority"; + version = (fun x -> x <= cowley); + fn = fun ~__context -> + let all_vms = Db.VM.get_all ~__context in + let update_vm vm = + let priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in + let (new_priority, new_order) = match priority with + | "0" -> 
("restart", 0L) + | "1" -> ("restart", 1L) + | "2" -> ("restart", 2L) + | "3" -> ("restart", 3L) + | "best-effort" -> ("best-effort", 0L) + | _ -> ("", 0L) + in + Db.VM.set_ha_restart_priority ~__context ~self:vm ~value:new_priority; + Db.VM.set_order ~__context ~self:vm ~value:new_order + in + List.iter update_vm all_vms } (* To deal with the removal of the "Auto-start on server boot" feature in Boston, *) (* all VMs with the other_config flag "auto_poweron" set to true will have *) (* ha_restart_priority set to "best-effort". *) let upgrade_auto_poweron = { - description = "Upgrading all VMs with auto_poweron=true"; - version = (fun x -> x <= cowley); - fn = fun ~__context -> - let all_vms = Db.VM.get_all ~__context in - let update_vm vm = - let other_config = Db.VM.get_other_config ~__context ~self:vm in - let auto_poweron = - if List.mem_assoc "auto_poweron" other_config then - List.assoc "auto_poweron" other_config = "true" - else - false - in - let restart_priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in - if auto_poweron && restart_priority = "" then - Db.VM.set_ha_restart_priority ~__context ~self:vm ~value:Constants.ha_restart_best_effort - in - List.iter update_vm all_vms + description = "Upgrading all VMs with auto_poweron=true"; + version = (fun x -> x <= cowley); + fn = fun ~__context -> + let all_vms = Db.VM.get_all ~__context in + let update_vm vm = + let other_config = Db.VM.get_other_config ~__context ~self:vm in + let auto_poweron = + if List.mem_assoc "auto_poweron" other_config then + List.assoc "auto_poweron" other_config = "true" + else + false + in + let restart_priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in + if auto_poweron && restart_priority = "" then + Db.VM.set_ha_restart_priority ~__context ~self:vm ~value:Constants.ha_restart_best_effort + in + List.iter update_vm all_vms } let upgrade_pif_metrics = { - description = "Upgrading PIF_metrics"; - version = (fun x -> x <= boston); - fn = fun ~__context -> - 
let pifs = Db.PIF.get_all ~__context in - let phy_and_bond_pifs = List.filter (fun self -> - Db.PIF.get_physical ~__context ~self || - Db.PIF.get_bond_master_of ~__context ~self <> [] - ) pifs in - List.iter (fun pif -> - let rc = Db.PIF.get_record ~__context ~self:pif in - let vlan_pifs = List.map (fun self -> - Db.VLAN.get_untagged_PIF ~__context ~self) rc.API.pIF_VLAN_slave_of in - let tunnel_pifs = List.map (fun self -> - Db.Tunnel.get_access_PIF ~__context ~self) rc.API.pIF_tunnel_transport_PIF_of in - List.iter (fun self -> - let metrics = Db.PIF.get_metrics ~__context ~self in - if metrics <> rc.API.pIF_metrics then begin - Db.PIF.set_metrics ~__context ~self ~value:rc.API.pIF_metrics; - Db.PIF_metrics.destroy ~__context ~self:metrics - end - ) (vlan_pifs @ tunnel_pifs) - ) phy_and_bond_pifs + description = "Upgrading PIF_metrics"; + version = (fun x -> x <= boston); + fn = fun ~__context -> + let pifs = Db.PIF.get_all ~__context in + let phy_and_bond_pifs = List.filter (fun self -> + Db.PIF.get_physical ~__context ~self || + Db.PIF.get_bond_master_of ~__context ~self <> [] + ) pifs in + List.iter (fun pif -> + let rc = Db.PIF.get_record ~__context ~self:pif in + let vlan_pifs = List.map (fun self -> + Db.VLAN.get_untagged_PIF ~__context ~self) rc.API.pIF_VLAN_slave_of in + let tunnel_pifs = List.map (fun self -> + Db.Tunnel.get_access_PIF ~__context ~self) rc.API.pIF_tunnel_transport_PIF_of in + List.iter (fun self -> + let metrics = Db.PIF.get_metrics ~__context ~self in + if metrics <> rc.API.pIF_metrics then begin + Db.PIF.set_metrics ~__context ~self ~value:rc.API.pIF_metrics; + Db.PIF_metrics.destroy ~__context ~self:metrics + end + ) (vlan_pifs @ tunnel_pifs) + ) phy_and_bond_pifs } let remove_vmpp = { - description = "Removing VMPP metadata (feature was removed)"; - version = (fun x -> x <= tampa); - fn = fun ~__context -> - let vmpps = Db.VMPP.get_all ~__context in - List.iter (fun self -> Db.VMPP.destroy ~__context ~self) vmpps; - let open 
Db_filter_types in - let vms = Db.VM.get_refs_where ~__context ~expr:( - Not (Eq (Field "protection_policy", Literal (Ref.string_of Ref.null))) - ) in - List.iter (fun self -> Db.VM.set_protection_policy ~__context ~self ~value:Ref.null) vms + description = "Removing VMPP metadata (feature was removed)"; + version = (fun x -> x <= tampa); + fn = fun ~__context -> + let vmpps = Db.VMPP.get_all ~__context in + List.iter (fun self -> Db.VMPP.destroy ~__context ~self) vmpps; + let open Db_filter_types in + let vms = Db.VM.get_refs_where ~__context ~expr:( + Not (Eq (Field "protection_policy", Literal (Ref.string_of Ref.null))) + ) in + List.iter (fun self -> Db.VM.set_protection_policy ~__context ~self ~value:Ref.null) vms } let add_default_pif_properties = { - description = "Adding default PIF properties"; - version = (fun x -> x < creedence); - fn = fun ~__context -> - List.iter - (fun self -> Xapi_pif.set_default_properties ~__context ~self) - (Db.PIF.get_all ~__context) + description = "Adding default PIF properties"; + version = (fun x -> x < creedence); + fn = fun ~__context -> + List.iter + (fun self -> Xapi_pif.set_default_properties ~__context ~self) + (Db.PIF.get_all ~__context) } let default_has_vendor_device_false = { - description = "Defaulting has_vendor_device false"; - version = (fun x -> x < vsn_with_meaningful_has_vendor_device); - fn = fun ~__context -> - List.iter - (fun self -> Db.VM.set_has_vendor_device ~__context ~self ~value:false) - (Db.VM.get_all ~__context) + description = "Defaulting has_vendor_device false"; + version = (fun x -> x < vsn_with_meaningful_has_vendor_device); + fn = fun ~__context -> + List.iter + (fun self -> Db.VM.set_has_vendor_device ~__context ~self ~value:false) + (Db.VM.get_all ~__context) } let default_pv_drivers_detected_false = { - description = "Defaulting PV_drivers_detected false"; - version = (fun x -> x < dundee); - fn = fun ~__context -> - List.iter - (fun self -> - let gm = Db.VM.get_guest_metrics ~__context 
~self in - Db.VM_guest_metrics.set_PV_drivers_detected ~__context ~self:gm ~value:false) - (Db.VM.get_all ~__context) + description = "Defaulting PV_drivers_detected false"; + version = (fun x -> x < dundee); + fn = fun ~__context -> + List.iter + (fun self -> + let gm = Db.VM.get_guest_metrics ~__context ~self in + Db.VM_guest_metrics.set_PV_drivers_detected ~__context ~self:gm ~value:false) + (Db.VM.get_all ~__context) } let populate_pgpu_vgpu_types = { - description = "Populating lists of VGPU types on existing PGPUs"; - version = (fun x -> x <= clearwater); - fn = fun ~__context -> - let pgpus = Db.PGPU.get_all ~__context in - let system_display_device = Xapi_pci.get_system_display_device () in - List.iter - (fun pgpu -> - let pci = Db.PGPU.get_PCI ~__context ~self:pgpu in - let pci_addr = Some (Db.PCI.get_pci_id ~__context ~self:pci) in - let is_system_display_device = (pci_addr = system_display_device) in - let is_host_display_enabled = true in - let is_pci_hidden = false in - let supported_vgpu_types = - Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden - in - Db.PGPU.set_supported_VGPU_types ~__context - ~self:pgpu ~value:supported_vgpu_types; - Db.PGPU.set_enabled_VGPU_types ~__context - ~self:pgpu ~value:supported_vgpu_types) - pgpus; + description = "Populating lists of VGPU types on existing PGPUs"; + version = (fun x -> x <= clearwater); + fn = fun ~__context -> + let pgpus = Db.PGPU.get_all ~__context in + let system_display_device = Xapi_pci.get_system_display_device () in + List.iter + (fun pgpu -> + let pci = Db.PGPU.get_PCI ~__context ~self:pgpu in + let pci_addr = Some (Db.PCI.get_pci_id ~__context ~self:pci) in + let is_system_display_device = (pci_addr = system_display_device) in + let is_host_display_enabled = true in + let is_pci_hidden = false in + let supported_vgpu_types = + Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci + 
~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden + in + Db.PGPU.set_supported_VGPU_types ~__context + ~self:pgpu ~value:supported_vgpu_types; + Db.PGPU.set_enabled_VGPU_types ~__context + ~self:pgpu ~value:supported_vgpu_types) + pgpus; } let set_vgpu_types = { - description = "Setting the types of existing VGPUs"; - version = (fun x -> x <= clearwater); - fn = fun ~__context -> - let vgpus = Db.VGPU.get_all ~__context in - let passthrough_vgpu_type = - Xapi_vgpu_type.find_or_create ~__context Xapi_vgpu_type.passthrough_gpu - in - List.iter - (fun vgpu -> - Db.VGPU.set_type ~__context ~self:vgpu ~value:passthrough_vgpu_type) - vgpus; + description = "Setting the types of existing VGPUs"; + version = (fun x -> x <= clearwater); + fn = fun ~__context -> + let vgpus = Db.VGPU.get_all ~__context in + let passthrough_vgpu_type = + Xapi_vgpu_type.find_or_create ~__context Xapi_vgpu_type.passthrough_gpu + in + List.iter + (fun vgpu -> + Db.VGPU.set_type ~__context ~self:vgpu ~value:passthrough_vgpu_type) + vgpus; } let remove_restricted_pbd_keys = { - description = "Removing restricted legacy PBD.device_config keys"; - version = (fun x -> x < creedence); - fn = fun ~__context -> - List.iter (fun self -> - let dc = Db.PBD.get_device_config ~__context ~self in - let dc' = List.filter (fun (k, _) -> k <> "SRmaster") dc in - Db.PBD.set_device_config ~__context ~self ~value:dc' - ) (Db.PBD.get_all ~__context) + description = "Removing restricted legacy PBD.device_config keys"; + version = (fun x -> x < creedence); + fn = fun ~__context -> + List.iter (fun self -> + let dc = Db.PBD.get_device_config ~__context ~self in + let dc' = List.filter (fun (k, _) -> k <> "SRmaster") dc in + Db.PBD.set_device_config ~__context ~self ~value:dc' + ) (Db.PBD.get_all ~__context) } let upgrade_recommendations_for_gpu_passthru = { - description = "Upgrading recommendations to allow GPU passthrough on HVM Linux guests"; - version = (fun x -> x < cream); - fn = fun 
~__context -> - List.iter (fun self -> - let recommendations = Db.VM.get_recommendations ~__context ~self in - let updated = ref false in - let ob = Buffer.create 600 in - let i = Xmlm.make_input (`String (0, recommendations)) in - let o = Xmlm.make_output (`Buffer ob) in - let rec pull i o depth = - match Xmlm.input i with - | `El_start ((_, name), attrs) as el -> - (* Assumption: a recommendation pre-Cream that has allow-gpu-passthrough = 0 implies HVM Linux. - * We are upgrading these to allow-gpu-passthrough = 1, but allow-vgpu = 0. *) - let attrs = List.map (fun ((_, n), m) -> n, m) attrs in - let field = if List.mem_assoc "field" attrs then Some (List.assoc "field" attrs) else None in - let value = if List.mem_assoc "value" attrs then Some (List.assoc "value" attrs) else None in - if name = "restriction" && field = Some "allow-gpu-passthrough" && value = Some "0" then begin - Xmlm.output o (`El_start (("", name), [("", "field"), "allow-gpu-passthrough"; ("", "value"), "1"])); - Xmlm.output o (`El_start (("", name), [("", "field"), "allow-vgpu"; ("", "value"), "0"])); - updated := true - end else - Xmlm.output o el; - pull i o (depth + 1) - | el -> - Xmlm.output o el; - if el = `El_end then - if depth = 1 then () else pull i o (depth - 1) - else - pull i o depth - in - try - pull i o 0; - if !updated then - Db.VM.set_recommendations ~__context ~self ~value:(Buffer.contents ob) - with _ -> - (* Ignore any errors while parsing the recommendations XML. The upgrade is "best effort". 
*) - () - ) (Db.VM.get_all ~__context) + description = "Upgrading recommendations to allow GPU passthrough on HVM Linux guests"; + version = (fun x -> x < cream); + fn = fun ~__context -> + List.iter (fun self -> + let recommendations = Db.VM.get_recommendations ~__context ~self in + let updated = ref false in + let ob = Buffer.create 600 in + let i = Xmlm.make_input (`String (0, recommendations)) in + let o = Xmlm.make_output (`Buffer ob) in + let rec pull i o depth = + match Xmlm.input i with + | `El_start ((_, name), attrs) as el -> + (* Assumption: a recommendation pre-Cream that has allow-gpu-passthrough = 0 implies HVM Linux. + * We are upgrading these to allow-gpu-passthrough = 1, but allow-vgpu = 0. *) + let attrs = List.map (fun ((_, n), m) -> n, m) attrs in + let field = if List.mem_assoc "field" attrs then Some (List.assoc "field" attrs) else None in + let value = if List.mem_assoc "value" attrs then Some (List.assoc "value" attrs) else None in + if name = "restriction" && field = Some "allow-gpu-passthrough" && value = Some "0" then begin + Xmlm.output o (`El_start (("", name), [("", "field"), "allow-gpu-passthrough"; ("", "value"), "1"])); + Xmlm.output o (`El_start (("", name), [("", "field"), "allow-vgpu"; ("", "value"), "0"])); + updated := true + end else + Xmlm.output o el; + pull i o (depth + 1) + | el -> + Xmlm.output o el; + if el = `El_end then + if depth = 1 then () else pull i o (depth - 1) + else + pull i o depth + in + try + pull i o 0; + if !updated then + Db.VM.set_recommendations ~__context ~self ~value:(Buffer.contents ob) + with _ -> + (* Ignore any errors while parsing the recommendations XML. The upgrade is "best effort". 
*) + () + ) (Db.VM.get_all ~__context) } let rules = [ - upgrade_alert_priority; - update_mail_min_priority; - upgrade_vm_memory_overheads; - upgrade_wlb_configuration; - upgrade_vm_memory_for_dmc; - upgrade_bios_strings; - update_snapshots; - upgrade_guest_installer_network; - upgrade_vdi_types; - upgrade_ha_restart_priority; - upgrade_auto_poweron; - upgrade_pif_metrics; - remove_vmpp; - populate_pgpu_vgpu_types; - set_vgpu_types; - add_default_pif_properties; - default_has_vendor_device_false; - default_pv_drivers_detected_false; - remove_restricted_pbd_keys; - upgrade_recommendations_for_gpu_passthru; + upgrade_alert_priority; + update_mail_min_priority; + upgrade_vm_memory_overheads; + upgrade_wlb_configuration; + upgrade_vm_memory_for_dmc; + upgrade_bios_strings; + update_snapshots; + upgrade_guest_installer_network; + upgrade_vdi_types; + upgrade_ha_restart_priority; + upgrade_auto_poweron; + upgrade_pif_metrics; + remove_vmpp; + populate_pgpu_vgpu_types; + set_vgpu_types; + add_default_pif_properties; + default_has_vendor_device_false; + default_pv_drivers_detected_false; + remove_restricted_pbd_keys; + upgrade_recommendations_for_gpu_passthru; ] (* Maybe upgrade most recent db *) let maybe_upgrade ~__context = - let db_ref = Context.database_of __context in - let db = Db_ref.get_database db_ref in - let (previous_major_vsn, previous_minor_vsn) as previous_vsn = Db_cache_types.Manifest.schema (Db_cache_types.Database.manifest db) in - let (latest_major_vsn, latest_minor_vsn) as latest_vsn = Datamodel.schema_major_vsn, Datamodel.schema_minor_vsn in - let previous_string = Printf.sprintf "(%d, %d)" previous_major_vsn previous_minor_vsn in - let latest_string = Printf.sprintf "(%d, %d)" latest_major_vsn latest_minor_vsn in - debug "Database schema version is %s; binary schema version is %s" previous_string latest_string; - if previous_vsn > latest_vsn then begin - warn "Database schema version %s is more recent than binary %s: downgrade is unsupported." 
previous_string previous_string; - end else begin - if previous_vsn < latest_vsn then begin - apply_upgrade_rules ~__context rules previous_vsn; - debug "Upgrade rules applied, bumping schema version to %d.%d" latest_major_vsn latest_minor_vsn; - Db_ref.update_database db_ref - ((Db_cache_types.Database.update_manifest ++ Db_cache_types.Manifest.update_schema) - (fun _ -> Some (latest_major_vsn, latest_minor_vsn))) - end else begin - debug "Database schemas match, no upgrade required"; - end - end + let db_ref = Context.database_of __context in + let db = Db_ref.get_database db_ref in + let (previous_major_vsn, previous_minor_vsn) as previous_vsn = Db_cache_types.Manifest.schema (Db_cache_types.Database.manifest db) in + let (latest_major_vsn, latest_minor_vsn) as latest_vsn = Datamodel.schema_major_vsn, Datamodel.schema_minor_vsn in + let previous_string = Printf.sprintf "(%d, %d)" previous_major_vsn previous_minor_vsn in + let latest_string = Printf.sprintf "(%d, %d)" latest_major_vsn latest_minor_vsn in + debug "Database schema version is %s; binary schema version is %s" previous_string latest_string; + if previous_vsn > latest_vsn then begin + warn "Database schema version %s is more recent than binary %s: downgrade is unsupported." previous_string previous_string; + end else begin + if previous_vsn < latest_vsn then begin + apply_upgrade_rules ~__context rules previous_vsn; + debug "Upgrade rules applied, bumping schema version to %d.%d" latest_major_vsn latest_minor_vsn; + Db_ref.update_database db_ref + ((Db_cache_types.Database.update_manifest ++ Db_cache_types.Manifest.update_schema) + (fun _ -> Some (latest_major_vsn, latest_minor_vsn))) + end else begin + debug "Database schemas match, no upgrade required"; + end + end (* This function is called during the xapi startup (xapi.ml:server_init). 
By the time it's called we've lost information about whether we need @@ -545,9 +545,9 @@ let maybe_upgrade ~__context = REMEMBER TO UPDATE IT AS WE MOVE TO NEW RELEASES. *) let hi_level_db_upgrade_rules ~__context () = - try - maybe_upgrade ~__context; - with e -> - error - "Could not perform high-level database upgrade: '%s'" - (Printexc.to_string e) + try + maybe_upgrade ~__context; + with e -> + error + "Could not perform high-level database upgrade: '%s'" + (Printexc.to_string e) diff --git a/ocaml/xapi/xapi_dr.ml b/ocaml/xapi/xapi_dr.ml index 19335b228db..43cceca3e50 100644 --- a/ocaml/xapi/xapi_dr.ml +++ b/ocaml/xapi/xapi_dr.ml @@ -30,133 +30,133 @@ let db_vdi_cache_mutex = Mutex.create () (* This doesn't grab the mutex, so should only be called from add_vdis_to_cache or remove_vdis_from_cache. *) let update_metadata_latest ~__context = - (* Clear out invalid entries in the cache. *) - let cached_vdis = Hashtbl.fold - (fun vdi _ vdi_list -> vdi::vdi_list) - db_vdi_cache - [] - in - List.iter - (fun vdi -> if not (Db.is_valid_ref __context vdi) then Hashtbl.remove db_vdi_cache vdi) - cached_vdis; - debug "Updating metadata_latest on all foreign pool metadata VDIs"; - let module PoolMap = Map.Make(struct type t = API.ref_pool let compare = compare end) in - (* First, create a map of type Pool -> (VDI, generation count) list *) - let vdis_grouped_by_pool = Hashtbl.fold - (fun vdi (generation, _) map -> - (* Add this VDI to the map. *) - let pool = Db.VDI.get_metadata_of_pool ~__context ~self:vdi in - let new_list = try - let current_list = PoolMap.find pool map in - (vdi, generation) :: current_list - with Not_found -> - [vdi, generation] - in - PoolMap.add pool new_list map) - db_vdi_cache - PoolMap.empty - in - (* For each pool who has metadata VDIs in the database, find the VDIs with the highest database generation count. *) - (* These VDIs contain the newest metadata we have for the pool. 
*) - PoolMap.iter - (fun pool vdi_list -> - debug "Updating metadata_latest on all VDIs with metadata_of_pool %s" (Ref.string_of pool); - debug "Pool %s has %d metadata VDIs" (Ref.string_of pool) (List.length vdi_list); - (* Find the maximum database generation for VDIs containing metadata of this particular foreign pool. *) - let maximum_generation = List.fold_right - (fun (_, generation) acc -> - if generation > acc then generation - else acc) - vdi_list 0L - in - debug "Largest known database generation for pool %s is %Ld." (Ref.string_of pool) maximum_generation; - (* Set VDI.metadata_latest according to whether the VDI has the highest known generation count. *) - List.iter - (fun (vdi, generation) -> - let metadata_latest = (generation = maximum_generation) in - debug "Database in VDI %s has generation %Ld - setting metadata_latest to %b." - (Db.VDI.get_uuid ~__context ~self:vdi) - generation metadata_latest; - Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:metadata_latest) - vdi_list) - vdis_grouped_by_pool + (* Clear out invalid entries in the cache. *) + let cached_vdis = Hashtbl.fold + (fun vdi _ vdi_list -> vdi::vdi_list) + db_vdi_cache + [] + in + List.iter + (fun vdi -> if not (Db.is_valid_ref __context vdi) then Hashtbl.remove db_vdi_cache vdi) + cached_vdis; + debug "Updating metadata_latest on all foreign pool metadata VDIs"; + let module PoolMap = Map.Make(struct type t = API.ref_pool let compare = compare end) in + (* First, create a map of type Pool -> (VDI, generation count) list *) + let vdis_grouped_by_pool = Hashtbl.fold + (fun vdi (generation, _) map -> + (* Add this VDI to the map. 
*) + let pool = Db.VDI.get_metadata_of_pool ~__context ~self:vdi in + let new_list = try + let current_list = PoolMap.find pool map in + (vdi, generation) :: current_list + with Not_found -> + [vdi, generation] + in + PoolMap.add pool new_list map) + db_vdi_cache + PoolMap.empty + in + (* For each pool who has metadata VDIs in the database, find the VDIs with the highest database generation count. *) + (* These VDIs contain the newest metadata we have for the pool. *) + PoolMap.iter + (fun pool vdi_list -> + debug "Updating metadata_latest on all VDIs with metadata_of_pool %s" (Ref.string_of pool); + debug "Pool %s has %d metadata VDIs" (Ref.string_of pool) (List.length vdi_list); + (* Find the maximum database generation for VDIs containing metadata of this particular foreign pool. *) + let maximum_generation = List.fold_right + (fun (_, generation) acc -> + if generation > acc then generation + else acc) + vdi_list 0L + in + debug "Largest known database generation for pool %s is %Ld." (Ref.string_of pool) maximum_generation; + (* Set VDI.metadata_latest according to whether the VDI has the highest known generation count. *) + List.iter + (fun (vdi, generation) -> + let metadata_latest = (generation = maximum_generation) in + debug "Database in VDI %s has generation %Ld - setting metadata_latest to %b." + (Db.VDI.get_uuid ~__context ~self:vdi) + generation metadata_latest; + Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:metadata_latest) + vdi_list) + vdis_grouped_by_pool let read_database_generation ~db_ref = - let db = Db_ref.get_database db_ref in - let manifest = Database.manifest db in - Manifest.generation manifest + let db = Db_ref.get_database db_ref in + let manifest = Database.manifest db in + Manifest.generation manifest (* For each VDI, try to open the contained database. *) (* If this is successful, add its generation count to the cache. *) (* Finally, update metadata_latest on all metadata VDIs. 
*) let add_vdis_to_cache ~__context ~vdis = - Mutex.execute db_vdi_cache_mutex - (fun () -> - List.iter - (fun vdi -> - let vdi_uuid = (Db.VDI.get_uuid ~__context ~self:vdi) in - try - let db_ref = Xapi_vdi_helpers.database_ref_of_vdi ~__context ~vdi in - let generation = read_database_generation ~db_ref in - let __foreign_database_context = Context.make ~database:db_ref "Querying foreign database." in - let pool = Helpers.get_pool ~__context:__foreign_database_context in - let pool_uuid = Db.Pool.get_uuid ~__context:__foreign_database_context ~self:pool in - debug "Adding VDI %s to metadata VDI cache." vdi_uuid; - Hashtbl.replace db_vdi_cache vdi (generation, pool_uuid) - with e -> - (* If we can't open the database then it doesn't really matter that the VDI is not added to the cache. *) - debug "Could not open database from VDI %s - caught %s" - (Db.VDI.get_uuid ~__context ~self:vdi) - (Printexc.to_string e)) - vdis; - update_metadata_latest ~__context) + Mutex.execute db_vdi_cache_mutex + (fun () -> + List.iter + (fun vdi -> + let vdi_uuid = (Db.VDI.get_uuid ~__context ~self:vdi) in + try + let db_ref = Xapi_vdi_helpers.database_ref_of_vdi ~__context ~vdi in + let generation = read_database_generation ~db_ref in + let __foreign_database_context = Context.make ~database:db_ref "Querying foreign database." in + let pool = Helpers.get_pool ~__context:__foreign_database_context in + let pool_uuid = Db.Pool.get_uuid ~__context:__foreign_database_context ~self:pool in + debug "Adding VDI %s to metadata VDI cache." vdi_uuid; + Hashtbl.replace db_vdi_cache vdi (generation, pool_uuid) + with e -> + (* If we can't open the database then it doesn't really matter that the VDI is not added to the cache. *) + debug "Could not open database from VDI %s - caught %s" + (Db.VDI.get_uuid ~__context ~self:vdi) + (Printexc.to_string e)) + vdis; + update_metadata_latest ~__context) (* Remove all the supplied VDIs from the cache, then update metadata_latest on the remaining VDIs. 
*) let remove_vdis_from_cache ~__context ~vdis = - Mutex.execute db_vdi_cache_mutex - (fun () -> - List.iter - (fun vdi -> - debug "Removing VDI %s from metadata VDI cache." (Db.VDI.get_uuid ~__context ~self:vdi); - Hashtbl.remove db_vdi_cache vdi) - vdis; - update_metadata_latest ~__context) + Mutex.execute db_vdi_cache_mutex + (fun () -> + List.iter + (fun vdi -> + debug "Removing VDI %s from metadata VDI cache." (Db.VDI.get_uuid ~__context ~self:vdi); + Hashtbl.remove db_vdi_cache vdi) + vdis; + update_metadata_latest ~__context) let read_vdi_cache_record ~vdi = - Mutex.execute db_vdi_cache_mutex - (fun () -> - if Hashtbl.mem db_vdi_cache vdi then - Some (Hashtbl.find db_vdi_cache vdi) - else - None) + Mutex.execute db_vdi_cache_mutex + (fun () -> + if Hashtbl.mem db_vdi_cache vdi then + Some (Hashtbl.find db_vdi_cache vdi) + else + None) let handle_metadata_vdis ~__context ~sr = - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - debug "Shared SR %s is being plugged to master - handling metadata VDIs." 
sr_uuid; - let metadata_vdis = List.filter - (fun vdi -> Db.VDI.get_type ~__context ~self:vdi = `metadata) - (Db.SR.get_VDIs ~__context ~self:sr) - in - let pool = Helpers.get_pool ~__context in - let (vdis_of_this_pool, vdis_of_foreign_pool) = List.partition - (fun vdi -> Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool) - metadata_vdis - in - debug "Adding foreign pool metadata VDIs to cache: [%s]" - (String.concat ";" (List.map (fun vdi -> Db.VDI.get_uuid ~__context ~self:vdi) vdis_of_foreign_pool)); - add_vdis_to_cache ~__context ~vdis:vdis_of_foreign_pool; - debug "Found metadata VDIs created by this pool: [%s]" - (String.concat ";" (List.map (fun vdi -> Db.VDI.get_uuid ~__context ~self:vdi) vdis_of_this_pool)); - if vdis_of_this_pool <> [] then begin - let target_vdi = List.hd vdis_of_this_pool in - let vdi_uuid = Db.VDI.get_uuid ~__context ~self:target_vdi in - try - Xapi_vdi_helpers.enable_database_replication ~__context ~get_vdi_callback:(fun () -> target_vdi); - debug "Re-enabled database replication to VDI %s" vdi_uuid - with e -> - debug "Could not re-enable database replication to VDI %s - caught %s" - vdi_uuid (Printexc.to_string e) - end + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + debug "Shared SR %s is being plugged to master - handling metadata VDIs." 
sr_uuid; + let metadata_vdis = List.filter + (fun vdi -> Db.VDI.get_type ~__context ~self:vdi = `metadata) + (Db.SR.get_VDIs ~__context ~self:sr) + in + let pool = Helpers.get_pool ~__context in + let (vdis_of_this_pool, vdis_of_foreign_pool) = List.partition + (fun vdi -> Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool) + metadata_vdis + in + debug "Adding foreign pool metadata VDIs to cache: [%s]" + (String.concat ";" (List.map (fun vdi -> Db.VDI.get_uuid ~__context ~self:vdi) vdis_of_foreign_pool)); + add_vdis_to_cache ~__context ~vdis:vdis_of_foreign_pool; + debug "Found metadata VDIs created by this pool: [%s]" + (String.concat ";" (List.map (fun vdi -> Db.VDI.get_uuid ~__context ~self:vdi) vdis_of_this_pool)); + if vdis_of_this_pool <> [] then begin + let target_vdi = List.hd vdis_of_this_pool in + let vdi_uuid = Db.VDI.get_uuid ~__context ~self:target_vdi in + try + Xapi_vdi_helpers.enable_database_replication ~__context ~get_vdi_callback:(fun () -> target_vdi); + debug "Re-enabled database replication to VDI %s" vdi_uuid + with e -> + debug "Could not re-enable database replication to VDI %s - caught %s" + vdi_uuid (Printexc.to_string e) + end (* ------------ Providing signalling that an SR is ready for DR ------------- *) @@ -165,95 +165,95 @@ let processing_srs_m = Mutex.create () let processing_srs_c = Condition.create () let signal_sr_is_processing ~__context ~sr = - debug "Recording that processing of SR %s has started." (Db.SR.get_uuid ~__context ~self:sr); - Mutex.execute processing_srs_m - (fun () -> - let srs = !processing_srs in - if not(List.mem sr srs) then - processing_srs := sr::srs) + debug "Recording that processing of SR %s has started." (Db.SR.get_uuid ~__context ~self:sr); + Mutex.execute processing_srs_m + (fun () -> + let srs = !processing_srs in + if not(List.mem sr srs) then + processing_srs := sr::srs) let signal_sr_is_ready ~__context ~sr = - debug "Recording that processing of SR %s has finished." 
(Db.SR.get_uuid ~__context ~self:sr); - Mutex.execute processing_srs_m - (fun () -> - let srs = !processing_srs in - if List.mem sr srs then begin - processing_srs := (List.filter (fun x -> x <> sr) srs); - Condition.broadcast processing_srs_c - end) + debug "Recording that processing of SR %s has finished." (Db.SR.get_uuid ~__context ~self:sr); + Mutex.execute processing_srs_m + (fun () -> + let srs = !processing_srs in + if List.mem sr srs then begin + processing_srs := (List.filter (fun x -> x <> sr) srs); + Condition.broadcast processing_srs_c + end) let wait_until_sr_is_ready ~__context ~sr = - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - Mutex.execute processing_srs_m - (fun () -> - debug "Waiting for SR %s to be processed." sr_uuid; - while List.mem sr !processing_srs do - Condition.wait processing_srs_c processing_srs_m - done; - debug "Finished waiting for SR %s to be processed." sr_uuid) + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + Mutex.execute processing_srs_m + (fun () -> + debug "Waiting for SR %s to be processed." sr_uuid; + while List.mem sr !processing_srs do + Condition.wait processing_srs_c processing_srs_m + done; + debug "Finished waiting for SR %s to be processed." 
sr_uuid) (* --------------------------------- VM recovery ---------------------------- *) (* This function uses the VM export functionality to *) (* create the objects required to reimport a list of VMs *) let create_import_objects ~__context ~vms = - let table = Export.create_table () in - List.iter (Export.update_table ~__context ~include_snapshots:true ~preserve_power_state:true ~include_vhd_parents:false ~table) vms; - Export.make_all ~with_snapshot_metadata:true ~preserve_power_state:true table __context + let table = Export.create_table () in + List.iter (Export.update_table ~__context ~include_snapshots:true ~preserve_power_state:true ~include_vhd_parents:false ~table) vms; + Export.make_all ~with_snapshot_metadata:true ~preserve_power_state:true table __context let clear_sr_introduced_by ~__context ~vm = - let srs = Xapi_vm_helpers.list_required_SRs ~__context ~self:vm in - List.iter - (fun sr -> Db.SR.set_introduced_by ~__context ~self:sr ~value:Ref.null) - srs + let srs = Xapi_vm_helpers.list_required_SRs ~__context ~self:vm in + List.iter + (fun sr -> Db.SR.set_introduced_by ~__context ~self:sr ~value:Ref.null) + srs let assert_session_allows_dr ~session_id ~action = - Server_helpers.exec_with_new_task ~session_id "Checking pool license and session permissions allow DR" - (fun __context -> - Pool_features.assert_enabled ~__context ~f:Features.DR; - (* Any session can call VM(_appliance).recover since it is marked as readonly *) - (* so it can be used by the sessions returned by VDI.open_database. *) - (* We need to manually check that a session could legitimately have called VDI.open_database. 
*) - let permission = Rbac_static.permission_VDI_open_database in - if not(Rbac.has_permission ~__context ~permission) then - raise (Api_errors.Server_error(Api_errors.rbac_permission_denied, - [action; "The supplied session does not have the required permissions for VM recovery."]))) + Server_helpers.exec_with_new_task ~session_id "Checking pool license and session permissions allow DR" + (fun __context -> + Pool_features.assert_enabled ~__context ~f:Features.DR; + (* Any session can call VM(_appliance).recover since it is marked as readonly *) + (* so it can be used by the sessions returned by VDI.open_database. *) + (* We need to manually check that a session could legitimately have called VDI.open_database. *) + let permission = Rbac_static.permission_VDI_open_database in + if not(Rbac.has_permission ~__context ~permission) then + raise (Api_errors.Server_error(Api_errors.rbac_permission_denied, + [action; "The supplied session does not have the required permissions for VM recovery."]))) let recover_vms ~__context ~vms ~session_to ~force = - let metadata_options = { - Import.dry_run = false; - Import.live = false; - vdi_map = []; (* we expect the VDI metadata to be present *) - } in - let config = { - Import.import_type = Import.Metadata_import metadata_options; - Import.full_restore = true; - Import.force = force; - } in - let objects = create_import_objects ~__context ~vms in - Server_helpers.exec_with_new_task ~session_id:session_to "Importing VMs" - (fun __context_to -> - let rpc = Helpers.make_rpc ~__context:__context_to in - let state = Import.handle_all __context_to - config rpc session_to objects - in - let vmrefs = List.setify - (List.map - (fun (cls, id, r) -> Ref.of_string r) - state.Import.created_vms) - in - try - Import.complete_import ~__context:__context_to vmrefs; - (* Remove the introduced_by field from any SRs required for VMs. 
*) - List.iter - (fun vm -> clear_sr_introduced_by ~__context:__context_to ~vm) - vmrefs; - vmrefs - with e -> - if force then - debug "%s" "VM recovery failed - not cleaning up as action was forced." - else begin - debug "%s" "VM recovery failed - cleaning up."; - Importexport.cleanup state.Import.cleanup - end; - raise e) + let metadata_options = { + Import.dry_run = false; + Import.live = false; + vdi_map = []; (* we expect the VDI metadata to be present *) + } in + let config = { + Import.import_type = Import.Metadata_import metadata_options; + Import.full_restore = true; + Import.force = force; + } in + let objects = create_import_objects ~__context ~vms in + Server_helpers.exec_with_new_task ~session_id:session_to "Importing VMs" + (fun __context_to -> + let rpc = Helpers.make_rpc ~__context:__context_to in + let state = Import.handle_all __context_to + config rpc session_to objects + in + let vmrefs = List.setify + (List.map + (fun (cls, id, r) -> Ref.of_string r) + state.Import.created_vms) + in + try + Import.complete_import ~__context:__context_to vmrefs; + (* Remove the introduced_by field from any SRs required for VMs. *) + List.iter + (fun vm -> clear_sr_introduced_by ~__context:__context_to ~vm) + vmrefs; + vmrefs + with e -> + if force then + debug "%s" "VM recovery failed - not cleaning up as action was forced." 
+ else begin + debug "%s" "VM recovery failed - cleaning up."; + Importexport.cleanup state.Import.cleanup + end; + raise e) diff --git a/ocaml/xapi/xapi_dr_task.ml b/ocaml/xapi/xapi_dr_task.ml index d19d4e8bc30..2621d4eba44 100644 --- a/ocaml/xapi/xapi_dr_task.ml +++ b/ocaml/xapi/xapi_dr_task.ml @@ -20,134 +20,134 @@ module D = Debug.Make(struct let name="xapi" end) open D let make_task ~__context = - let uuid = Uuid.make_uuid () in - let ref = Ref.make () in - Db.DR_task.create ~__context ~ref ~uuid:(Uuid.to_string uuid); - ref + let uuid = Uuid.make_uuid () in + let ref = Ref.make () in + Db.DR_task.create ~__context ~ref ~uuid:(Uuid.to_string uuid); + ref (* A type to represent an SR record parsed from an sr_probe result. *) type sr_probe_sr = { - uuid: string; - name_label: string; - name_description: string; - metadata_detected: bool; + uuid: string; + name_label: string; + name_description: string; + metadata_detected: bool; } (* Attempt to parse a key/value pair from XML. *) -let parse_kv = function - | Xml.Element(key, _, [ Xml.PCData v ]) -> - key, String.strip String.isspace v (* remove whitespace at both ends *) - | Xml.Element(key, _, []) -> - key, "" - | _ -> - failwith "Malformed key/value pair" +let parse_kv = function + | Xml.Element(key, _, [ Xml.PCData v ]) -> + key, String.strip String.isspace v (* remove whitespace at both ends *) + | Xml.Element(key, _, []) -> + key, "" + | _ -> + failwith "Malformed key/value pair" (* Parse a list of SRs from an iscsi/hba SR probe response with sm-config:metadata=true *) -let parse_sr_probe xml = - match Xml.parse_string xml with - | Xml.Element("SRlist", _, children) -> - let parse_sr = function - | Xml.Element("SR", _, children) -> - let all = List.map parse_kv children in - { - uuid = List.assoc "UUID" all; - name_label = List.assoc "name_label" all; - name_description = List.assoc "name_description" all; - metadata_detected = (List.assoc "pool_metadata_detected" all = "true"); - } - | _ -> failwith 
"Malformed or missing " in - List.map parse_sr children - | _ -> failwith "Missing element" +let parse_sr_probe xml = + match Xml.parse_string xml with + | Xml.Element("SRlist", _, children) -> + let parse_sr = function + | Xml.Element("SR", _, children) -> + let all = List.map parse_kv children in + { + uuid = List.assoc "UUID" all; + name_label = List.assoc "name_label" all; + name_description = List.assoc "name_description" all; + metadata_detected = (List.assoc "pool_metadata_detected" all = "true"); + } + | _ -> failwith "Malformed or missing " in + List.map parse_sr children + | _ -> failwith "Missing element" (* Make a best-effort attempt to create an SR and associate it with the DR_task. *) (* If anything goes wrong, unplug all PBDs which were created, and forget the SR. *) let try_create_sr_from_record ~__context ~_type ~device_config ~dr_task ~sr_record = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - (* Create the SR record. *) - debug "Introducing SR %s" sr_record.uuid; - let sr = Client.SR.introduce ~rpc ~session_id - ~uuid:sr_record.uuid ~name_label:sr_record.name_label - ~name_description:sr_record.name_description - ~_type ~content_type:"" ~shared:true - ~sm_config:[] - in - try - (* Create and plug PBDs. *) - Xapi_pool_helpers.call_fn_on_master_then_slaves ~__context - (fun ~rpc ~session_id ~host -> - debug "Attaching SR %s to host %s" sr_record.uuid (Db.Host.get_name_label ~__context ~self:host); - let pbd = Client.PBD.create ~rpc ~session_id ~host ~sR:sr ~device_config ~other_config:[] in - Client.PBD.plug ~rpc ~session_id ~self:pbd); - (* Wait until the asynchronous scan is complete and metadata_latest has been updated for all metadata VDIs. *) - Xapi_dr.wait_until_sr_is_ready ~__context ~sr; - Db.SR.set_introduced_by ~__context ~self:sr ~value:dr_task - with e -> - Backtrace.is_important e; - (* Clean up if anything goes wrong. 
*) - warn "Could not successfully attach SR %s - caught %s" sr_record.uuid (Printexc.to_string e); - let pbds = Xapi_sr.get_pbds ~__context ~self:sr ~attached:true ~master_pos:`Last in - List.iter (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) pbds; - Client.SR.forget ~rpc ~session_id ~sr; - raise e) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + (* Create the SR record. *) + debug "Introducing SR %s" sr_record.uuid; + let sr = Client.SR.introduce ~rpc ~session_id + ~uuid:sr_record.uuid ~name_label:sr_record.name_label + ~name_description:sr_record.name_description + ~_type ~content_type:"" ~shared:true + ~sm_config:[] + in + try + (* Create and plug PBDs. *) + Xapi_pool_helpers.call_fn_on_master_then_slaves ~__context + (fun ~rpc ~session_id ~host -> + debug "Attaching SR %s to host %s" sr_record.uuid (Db.Host.get_name_label ~__context ~self:host); + let pbd = Client.PBD.create ~rpc ~session_id ~host ~sR:sr ~device_config ~other_config:[] in + Client.PBD.plug ~rpc ~session_id ~self:pbd); + (* Wait until the asynchronous scan is complete and metadata_latest has been updated for all metadata VDIs. *) + Xapi_dr.wait_until_sr_is_ready ~__context ~sr; + Db.SR.set_introduced_by ~__context ~self:sr ~value:dr_task + with e -> + Backtrace.is_important e; + (* Clean up if anything goes wrong. *) + warn "Could not successfully attach SR %s - caught %s" sr_record.uuid (Printexc.to_string e); + let pbds = Xapi_sr.get_pbds ~__context ~self:sr ~attached:true ~master_pos:`Last in + List.iter (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) pbds; + Client.SR.forget ~rpc ~session_id ~sr; + raise e) let create ~__context ~_type ~device_config ~whitelist = - (* Check if licence allows disaster recovery. *) - Pool_features.assert_enabled ~__context ~f:Features.DR; - (* Check that the SR type supports metadata. 
*) - if not (List.mem_assoc Smint.Sr_metadata (Sm.features_of_driver _type)) then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, - [Printf.sprintf "Disaster recovery not supported on SRs of type %s" _type])); - (* Probe the specified device for SRs. *) - let master = Helpers.get_master ~__context in - let probe_result = Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.SR.probe ~rpc ~session_id - ~host:master ~device_config - ~_type ~sm_config:["metadata", "true"]) - in - (* Parse the probe result. *) - let sr_records = - try - parse_sr_probe probe_result - with Failure msg -> - raise (Api_errors.Server_error(Api_errors.internal_error, - [Printf.sprintf "SR probe response was malformed: %s" msg])) - in - (* If the SR record has a UUID, make sure it's in the whitelist. *) - let sr_records = List.filter - (fun sr_record -> - List.mem sr_record.uuid whitelist) - sr_records - in - (* SR probe went ok, so create the DR task. *) - let dr_task = make_task ~__context in - (* Create the SR records and attach each SR to each host. *) - List.iter - (fun sr_record -> - try - ignore (Db.SR.get_by_uuid ~__context ~uuid:sr_record.uuid); - (* If an SR with this UUID has already been introduced, don't mess with it. *) - (* It may have been manually introduced, or introduced by another DR_task. *) - debug "SR %s has already been introduced, so not adding it to this disaster recovery task." sr_record.uuid; - with Db_exn.Read_missing_uuid(_, _, _) -> - try_create_sr_from_record ~__context ~_type ~device_config ~dr_task ~sr_record) - sr_records; - dr_task + (* Check if licence allows disaster recovery. *) + Pool_features.assert_enabled ~__context ~f:Features.DR; + (* Check that the SR type supports metadata. 
*) + if not (List.mem_assoc Smint.Sr_metadata (Sm.features_of_driver _type)) then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, + [Printf.sprintf "Disaster recovery not supported on SRs of type %s" _type])); + (* Probe the specified device for SRs. *) + let master = Helpers.get_master ~__context in + let probe_result = Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.SR.probe ~rpc ~session_id + ~host:master ~device_config + ~_type ~sm_config:["metadata", "true"]) + in + (* Parse the probe result. *) + let sr_records = + try + parse_sr_probe probe_result + with Failure msg -> + raise (Api_errors.Server_error(Api_errors.internal_error, + [Printf.sprintf "SR probe response was malformed: %s" msg])) + in + (* If the SR record has a UUID, make sure it's in the whitelist. *) + let sr_records = List.filter + (fun sr_record -> + List.mem sr_record.uuid whitelist) + sr_records + in + (* SR probe went ok, so create the DR task. *) + let dr_task = make_task ~__context in + (* Create the SR records and attach each SR to each host. *) + List.iter + (fun sr_record -> + try + ignore (Db.SR.get_by_uuid ~__context ~uuid:sr_record.uuid); + (* If an SR with this UUID has already been introduced, don't mess with it. *) + (* It may have been manually introduced, or introduced by another DR_task. *) + debug "SR %s has already been introduced, so not adding it to this disaster recovery task." 
sr_record.uuid; + with Db_exn.Read_missing_uuid(_, _, _) -> + try_create_sr_from_record ~__context ~_type ~device_config ~dr_task ~sr_record) + sr_records; + dr_task let destroy ~__context ~self = - let open Db_filter_types in - let introduced_SRs = Db.DR_task.get_introduced_SRs ~__context ~self in - List.iter (fun sr -> - let pbds = Xapi_sr.get_pbds ~__context ~self:sr ~attached:true ~master_pos:`Last in - List.iter (fun pbd -> - debug "Unplugging PBD %s" (Db.PBD.get_uuid ~__context ~self:pbd); - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) - ) pbds; - (* Forget the SR. *) - debug "Forgetting SR %s (%s)" (Db.SR.get_uuid ~__context ~self:sr) (Db.SR.get_name_label ~__context ~self:sr); - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.SR.forget ~rpc ~session_id ~sr) - ) introduced_SRs; - Db.DR_task.destroy ~__context ~self + let open Db_filter_types in + let introduced_SRs = Db.DR_task.get_introduced_SRs ~__context ~self in + List.iter (fun sr -> + let pbds = Xapi_sr.get_pbds ~__context ~self:sr ~attached:true ~master_pos:`Last in + List.iter (fun pbd -> + debug "Unplugging PBD %s" (Db.PBD.get_uuid ~__context ~self:pbd); + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) + ) pbds; + (* Forget the SR. *) + debug "Forgetting SR %s (%s)" (Db.SR.get_uuid ~__context ~self:sr) (Db.SR.get_name_label ~__context ~self:sr); + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.SR.forget ~rpc ~session_id ~sr) + ) introduced_SRs; + Db.DR_task.destroy ~__context ~self diff --git a/ocaml/xapi/xapi_dr_task.mli b/ocaml/xapi/xapi_dr_task.mli index e64ea99be4f..52cb148a932 100644 --- a/ocaml/xapi/xapi_dr_task.mli +++ b/ocaml/xapi/xapi_dr_task.mli @@ -20,10 +20,10 @@ If anything goes wrong, unplug all PBDs which were created, forget the SRs, and re-raise the error. 
*) val create : __context:Context.t -> - _type:string -> - device_config:(string * string) list -> - whitelist:string list -> - API.ref_DR_task + _type:string -> + device_config:(string * string) list -> + whitelist:string list -> + API.ref_DR_task (** * Unplug all PBDs for each SR associated with the DR_task. * Forget each SR associated with the DR_task. diff --git a/ocaml/xapi/xapi_event.ml b/ocaml/xapi/xapi_event.ml index cc45600c830..98e5a3a2133 100644 --- a/ocaml/xapi/xapi_event.ml +++ b/ocaml/xapi/xapi_event.ml @@ -23,589 +23,589 @@ module D=Debug.Make(struct let name="xapi_event" end) open D module Message = struct - type t = - | Create of (API.ref_message * API.message_t) - | Del of API.ref_message + type t = + | Create of (API.ref_message * API.message_t) + | Del of API.ref_message - let get_since_for_events : (__context:Context.t -> int64 -> (int64 * t list)) ref = ref ( fun ~__context _ -> ignore __context; (0L, [])) + let get_since_for_events : (__context:Context.t -> int64 -> (int64 * t list)) ref = ref ( fun ~__context _ -> ignore __context; (0L, [])) end module Token = struct - type t = int64 * int64 (* last id, message id *) + type t = int64 * int64 (* last id, message id *) - exception Failed_to_parse of string + exception Failed_to_parse of string - let of_string token = - match String.split ',' token with - | [from;from_t] -> - (Int64.of_string from, Int64.of_string from_t) - | [""] -> (0L, 0L) - | _ -> - raise (Failed_to_parse token) + let of_string token = + match String.split ',' token with + | [from;from_t] -> + (Int64.of_string from, Int64.of_string from_t) + | [""] -> (0L, 0L) + | _ -> + raise (Failed_to_parse token) - let to_string (last,last_t) = - (* We prefix with zeroes so tokens which differ only in the generation - can be compared lexicographically as strings. 
*) - Printf.sprintf "%020Ld,%020Ld" last last_t + let to_string (last,last_t) = + (* We prefix with zeroes so tokens which differ only in the generation + can be compared lexicographically as strings. *) + Printf.sprintf "%020Ld,%020Ld" last last_t end module Subscription = struct - type t = - | Class of string - | Object of string * string - | All - - let of_string x = if x = "*" then All else match String.split ~limit:2 '/' x with - | [ cls ] -> Class (String.lowercase cls) - | [ cls; id ] -> Object(String.lowercase cls, id) - | _ -> - raise (Api_errors.Server_error(Api_errors.event_subscription_parse_failure, [ x ])) - let to_string subs = - let to_string x = - match x with - | Class y -> Printf.sprintf "class(%s)" y - | Object (cls,id) -> Printf.sprintf "object(%s,%s)" cls id - | All -> "all" - in - Printf.sprintf "[%s]" (String.concat "," (List.map to_string subs)) - - let any = List.fold_left (fun acc x -> acc || x) false - - (** [table_matches subs tbl]: true if at least one subscription from [subs] would select some events from [tbl] *) - let table_matches subs tbl = - let tbl = String.lowercase tbl in - let matches = function - | All -> true - | Class x -> x = tbl - | Object (x, _) -> x = tbl in - any (List.map matches subs) - - (** [event_matches subs ev]: true if at least one subscription from [subs] selects for specified class and object *) - let object_matches subs ty _ref = - let tbl = String.lowercase ty in - let matches = function - | All -> true - | Class x -> x = tbl - | Object (x, y) -> x = tbl && (y = _ref) in - any (List.map matches subs) - - (** [event_matches subs ev]: true if at least one subscription from [subs] selects for event [ev] *) - let event_matches subs ev = object_matches subs ev.ty ev.reference + type t = + | Class of string + | Object of string * string + | All + + let of_string x = if x = "*" then All else match String.split ~limit:2 '/' x with + | [ cls ] -> Class (String.lowercase cls) + | [ cls; id ] -> 
Object(String.lowercase cls, id) + | _ -> + raise (Api_errors.Server_error(Api_errors.event_subscription_parse_failure, [ x ])) + let to_string subs = + let to_string x = + match x with + | Class y -> Printf.sprintf "class(%s)" y + | Object (cls,id) -> Printf.sprintf "object(%s,%s)" cls id + | All -> "all" + in + Printf.sprintf "[%s]" (String.concat "," (List.map to_string subs)) + + let any = List.fold_left (fun acc x -> acc || x) false + + (** [table_matches subs tbl]: true if at least one subscription from [subs] would select some events from [tbl] *) + let table_matches subs tbl = + let tbl = String.lowercase tbl in + let matches = function + | All -> true + | Class x -> x = tbl + | Object (x, _) -> x = tbl in + any (List.map matches subs) + + (** [event_matches subs ev]: true if at least one subscription from [subs] selects for specified class and object *) + let object_matches subs ty _ref = + let tbl = String.lowercase ty in + let matches = function + | All -> true + | Class x -> x = tbl + | Object (x, y) -> x = tbl && (y = _ref) in + any (List.map matches subs) + + (** [event_matches subs ev]: true if at least one subscription from [subs] selects for event [ev] *) + let event_matches subs ev = object_matches subs ev.ty ev.reference end module Next = struct - (* Infrastructure for the deprecated Event.next *) - - (** Limit the event queue to this many events: *) - let max_queue_size = 10000000 - let old_max_queue_length = 500 - - (** Ordered list of events, newest first *) - let queue = ref [] - - (** Monotonically increasing event ID. 
One higher than the highest event ID in the queue *) - let id = ref 0L - - (** When we GC events we track how many we've deleted so we can send an error to the client *) - let highest_forgotten_id = ref (-1L) - - type subscription = { - mutable last_id: int64; (* last event ID to sent to this client *) - mutable subs: Subscription.t list; (* list of all the subscriptions *) - m: Mutex.t; (* protects access to the mutable fields in this record *) - session: API.ref_session; (* session which owns this subscription *) - mutable session_invalid: bool; (* set to true if the associated session has been deleted *) - mutable timeout: float; (* Timeout *) - } - - (* For Event.next, the single subscription associated with a session *) - let subscriptions : (API.ref_session, subscription) Hashtbl.t = Hashtbl.create 10 - - let m = Mutex.create () - let c = Condition.create () - - let event_size ev = - let rpc = rpc_of_event ev in - let string = Jsonrpc.to_string rpc in - String.length string - - (* Add an event to the queue if it matches any active subscriptions *) - let add ev = - Mutex.execute m - (fun () -> - let matches = Hashtbl.fold - (fun _ s acc -> - if Subscription.event_matches s.subs ev - then true - else acc - ) subscriptions false in - if matches then begin - let size = event_size ev in - queue := (size,ev) :: !queue; - (* debug "Adding event %Ld: %s" (!id) (string_of_event ev); *) - id := Int64.add !id Int64.one; - Condition.broadcast c; - end else begin - (* debug "Dropping event %s" (string_of_event ev) *) - end; - - (* GC the events in the queue *) - let total_size = List.fold_left (fun acc (sz,_) -> acc + sz) 0 !queue in - - let too_many = total_size > max_queue_size in - let to_keep, to_drop = if not too_many then !queue, [] - else - (* Reverse-sort by ID and preserve only enough events such that the total - size does not exceed 'max_queue_size' *) - let sorted = (List.sort (fun (_,a) (_,b) -> compare (Int64.of_string b.id) (Int64.of_string a.id)) !queue) in 
- let total_size_after, rev_to_keep, rev_to_drop = List.fold_left - (fun (tot_size,keep,drop) (size,elt) -> - if tot_size + size < max_queue_size - then (tot_size + size, (size,elt)::keep, drop) - else (tot_size + size, keep, (size,elt)::drop)) (0,[],[]) sorted in - let to_keep = List.rev rev_to_keep in - let to_drop = List.rev rev_to_drop in - if List.length to_keep < old_max_queue_length then - warn "Event queue length degraded. Number of events kept: %d (less than old_max_queue_length=%d)" (List.length to_keep) old_max_queue_length; - to_keep, to_drop - in - - queue := to_keep; - (* Remember the highest ID of the list of events to drop *) - if to_drop <> [] then - highest_forgotten_id := Int64.of_string (snd (List.hd to_drop)).id; - (* debug "After event queue GC: keeping %d; dropping %d (highest dropped id = %Ld)" - (List.length to_keep) (List.length to_drop) !highest_forgotten_id *) - ) - - let assert_subscribed session = - Mutex.execute m - (fun () -> - if not(Hashtbl.mem subscriptions session) - then raise (Api_errors.Server_error(Api_errors.session_not_registered, [ Context.trackid_of_session (Some session) ]))) - - (* Fetch the single subscription_record associated with a session or create - one if one doesn't exist already *) - let get_subscription session = - Mutex.execute m - (fun () -> - if Hashtbl.mem subscriptions session then begin - Hashtbl.find subscriptions session - end else begin - let subscription = { last_id = !id; subs = []; m = Mutex.create(); session = session; session_invalid = false; timeout=0.0; } in - Hashtbl.replace subscriptions session subscription; - subscription - end) - - let on_session_deleted session_id = - Mutex.execute m - (fun () -> - - let mark_invalid sub = - (* Mark the subscription as invalid and wake everyone up *) - Mutex.execute sub.m (fun () -> sub.session_invalid <- true); - Condition.broadcast c in - - if Hashtbl.mem subscriptions session_id then begin - let sub = Hashtbl.find subscriptions session_id in - 
mark_invalid sub; - Hashtbl.remove subscriptions session_id; - end; - ) - - let session_is_invalid sub = Mutex.execute sub.m (fun () -> sub.session_invalid) - - (* Blocks the caller until the current ID has changed OR the session has been - invalidated. *) - let wait subscription from_id = - let result = ref 0L in - Mutex.execute m - (fun () -> - (* NB we occasionally grab the specific session lock while holding the general lock *) - while !id = from_id && not (session_is_invalid subscription) do Condition.wait c m done; - result := !id); - if session_is_invalid subscription - then raise (Api_errors.Server_error(Api_errors.session_invalid, [ Ref.string_of subscription.session ])) - else !result - - (* Thrown if the user requests events which we don't have because we've thrown - then away. This should only happen if more than max_stored_events are produced - between successive calls to Event.next (). The client should refresh all its state - manually before calling Event.next () again. - *) - let events_lost () = raise (Api_errors.Server_error (Api_errors.events_lost, [])) - - (* Return events from the queue between a start and an end ID. Throws - an API error if some events have been lost, signalling the client to - re-register. *) - let events_read id_start id_end = - let check_ev ev = id_start <= Int64.of_string ev.id && Int64.of_string ev.id < id_end in - - let some_events_lost = ref false in - let selected_events = - Mutex.execute m - (fun () -> - some_events_lost := !highest_forgotten_id >= id_start; - List.find_all (fun (_,ev) -> check_ev ev) !queue - ) in - (* Note we may actually retrieve fewer events than we expect because the - queue may have been coalesced. 
*) - if !some_events_lost (* is true *) then events_lost (); - - (* NB queue is kept in reverse order *) - List.map snd (List.rev selected_events) + (* Infrastructure for the deprecated Event.next *) + + (** Limit the event queue to this many events: *) + let max_queue_size = 10000000 + let old_max_queue_length = 500 + + (** Ordered list of events, newest first *) + let queue = ref [] + + (** Monotonically increasing event ID. One higher than the highest event ID in the queue *) + let id = ref 0L + + (** When we GC events we track how many we've deleted so we can send an error to the client *) + let highest_forgotten_id = ref (-1L) + + type subscription = { + mutable last_id: int64; (* last event ID to sent to this client *) + mutable subs: Subscription.t list; (* list of all the subscriptions *) + m: Mutex.t; (* protects access to the mutable fields in this record *) + session: API.ref_session; (* session which owns this subscription *) + mutable session_invalid: bool; (* set to true if the associated session has been deleted *) + mutable timeout: float; (* Timeout *) + } + + (* For Event.next, the single subscription associated with a session *) + let subscriptions : (API.ref_session, subscription) Hashtbl.t = Hashtbl.create 10 + + let m = Mutex.create () + let c = Condition.create () + + let event_size ev = + let rpc = rpc_of_event ev in + let string = Jsonrpc.to_string rpc in + String.length string + + (* Add an event to the queue if it matches any active subscriptions *) + let add ev = + Mutex.execute m + (fun () -> + let matches = Hashtbl.fold + (fun _ s acc -> + if Subscription.event_matches s.subs ev + then true + else acc + ) subscriptions false in + if matches then begin + let size = event_size ev in + queue := (size,ev) :: !queue; + (* debug "Adding event %Ld: %s" (!id) (string_of_event ev); *) + id := Int64.add !id Int64.one; + Condition.broadcast c; + end else begin + (* debug "Dropping event %s" (string_of_event ev) *) + end; + + (* GC the events in 
the queue *) + let total_size = List.fold_left (fun acc (sz,_) -> acc + sz) 0 !queue in + + let too_many = total_size > max_queue_size in + let to_keep, to_drop = if not too_many then !queue, [] + else + (* Reverse-sort by ID and preserve only enough events such that the total + size does not exceed 'max_queue_size' *) + let sorted = (List.sort (fun (_,a) (_,b) -> compare (Int64.of_string b.id) (Int64.of_string a.id)) !queue) in + let total_size_after, rev_to_keep, rev_to_drop = List.fold_left + (fun (tot_size,keep,drop) (size,elt) -> + if tot_size + size < max_queue_size + then (tot_size + size, (size,elt)::keep, drop) + else (tot_size + size, keep, (size,elt)::drop)) (0,[],[]) sorted in + let to_keep = List.rev rev_to_keep in + let to_drop = List.rev rev_to_drop in + if List.length to_keep < old_max_queue_length then + warn "Event queue length degraded. Number of events kept: %d (less than old_max_queue_length=%d)" (List.length to_keep) old_max_queue_length; + to_keep, to_drop + in + + queue := to_keep; + (* Remember the highest ID of the list of events to drop *) + if to_drop <> [] then + highest_forgotten_id := Int64.of_string (snd (List.hd to_drop)).id; + (* debug "After event queue GC: keeping %d; dropping %d (highest dropped id = %Ld)" + (List.length to_keep) (List.length to_drop) !highest_forgotten_id *) + ) + + let assert_subscribed session = + Mutex.execute m + (fun () -> + if not(Hashtbl.mem subscriptions session) + then raise (Api_errors.Server_error(Api_errors.session_not_registered, [ Context.trackid_of_session (Some session) ]))) + + (* Fetch the single subscription_record associated with a session or create + one if one doesn't exist already *) + let get_subscription session = + Mutex.execute m + (fun () -> + if Hashtbl.mem subscriptions session then begin + Hashtbl.find subscriptions session + end else begin + let subscription = { last_id = !id; subs = []; m = Mutex.create(); session = session; session_invalid = false; timeout=0.0; } in + 
Hashtbl.replace subscriptions session subscription; + subscription + end) + + let on_session_deleted session_id = + Mutex.execute m + (fun () -> + + let mark_invalid sub = + (* Mark the subscription as invalid and wake everyone up *) + Mutex.execute sub.m (fun () -> sub.session_invalid <- true); + Condition.broadcast c in + + if Hashtbl.mem subscriptions session_id then begin + let sub = Hashtbl.find subscriptions session_id in + mark_invalid sub; + Hashtbl.remove subscriptions session_id; + end; + ) + + let session_is_invalid sub = Mutex.execute sub.m (fun () -> sub.session_invalid) + + (* Blocks the caller until the current ID has changed OR the session has been + invalidated. *) + let wait subscription from_id = + let result = ref 0L in + Mutex.execute m + (fun () -> + (* NB we occasionally grab the specific session lock while holding the general lock *) + while !id = from_id && not (session_is_invalid subscription) do Condition.wait c m done; + result := !id); + if session_is_invalid subscription + then raise (Api_errors.Server_error(Api_errors.session_invalid, [ Ref.string_of subscription.session ])) + else !result + + (* Thrown if the user requests events which we don't have because we've thrown + then away. This should only happen if more than max_stored_events are produced + between successive calls to Event.next (). The client should refresh all its state + manually before calling Event.next () again. + *) + let events_lost () = raise (Api_errors.Server_error (Api_errors.events_lost, [])) + + (* Return events from the queue between a start and an end ID. Throws + an API error if some events have been lost, signalling the client to + re-register. 
*) + let events_read id_start id_end = + let check_ev ev = id_start <= Int64.of_string ev.id && Int64.of_string ev.id < id_end in + + let some_events_lost = ref false in + let selected_events = + Mutex.execute m + (fun () -> + some_events_lost := !highest_forgotten_id >= id_start; + List.find_all (fun (_,ev) -> check_ev ev) !queue + ) in + (* Note we may actually retrieve fewer events than we expect because the + queue may have been coalesced. *) + if !some_events_lost (* is true *) then events_lost (); + + (* NB queue is kept in reverse order *) + List.map snd (List.rev selected_events) end module From = struct - let m = Mutex.create () - let c = Condition.create () - - let next_index = - let id = ref 0L in - fun () -> - Mutex.execute m (fun () -> - let result = !id in - id := Int64.succ !id; - result - ) - - (* A (blocking) call which should be unblocked on logout *) - type call = { - index: int64; (* Unique id for this call *) - mutable cur_id: int64; (* Most current generation count relevant to the client *) - subs: Subscription.t list; (* list of all the subscriptions *) - session: API.ref_session; (* the session associated with this call *) - mutable session_invalid: bool; (* set to true if the associated session has been deleted *) - m: Mutex.t; (* protects access to the mutable fields in this record *) - } - - (* The set of (blocking) calls associated with a session *) - let calls : (API.ref_session, call list) Hashtbl.t = Hashtbl.create 10 - - let get_current_event_number () = - (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest (Db_ref.get_database (Db_backend.make ())))) - - (* Add an event to the queue if it matches any active subscriptions *) - let add ev = - Mutex.execute m - (fun () -> - let matches_per_thread = Hashtbl.fold - (fun _ s acc -> - List.fold_left (fun acc s -> - if Subscription.event_matches s.subs ev - then (s.cur_id <- get_current_event_number (); true) - else acc) acc s - ) calls false in - if matches_per_thread 
then Condition.broadcast c; - ) - - (* Call a function with a registered call which will be woken up if - the session is destroyed in the background. *) - let with_call session subs f = - let index = next_index () in - let fresh = { index; cur_id = 0L; subs; m = Mutex.create(); session; session_invalid = false; } in - Mutex.execute m - (fun () -> - let existing = - if Hashtbl.mem calls session - then Hashtbl.find calls session - else [] in - Hashtbl.replace calls session (fresh :: existing) - ); - finally - (fun () -> f fresh) - (fun () -> Mutex.execute m (fun () -> - if Hashtbl.mem calls session then begin - let existing = Hashtbl.find calls session in - let remaining = List.filter (fun x -> not(x.index = fresh.index)) existing in - if remaining = [] - then Hashtbl.remove calls session - else Hashtbl.replace calls session remaining - end - )) - - (* Is called by the session timeout code *) - let on_session_deleted session_id = - Mutex.execute m - (fun () -> - let mark_invalid sub = - (* Mark the subscription as invalid and wake everyone up *) - Mutex.execute sub.m (fun () -> sub.session_invalid <- true); - Condition.broadcast c in - - if Hashtbl.mem calls session_id then begin - List.iter mark_invalid (Hashtbl.find calls session_id); - Hashtbl.remove calls session_id; - end; - ) - - let session_is_invalid call = Mutex.execute call.m (fun () -> call.session_invalid) - - let wait2 call from_id deadline = - let timeoutname = Printf.sprintf "event_from_timeout_%Ld" call.index in - Mutex.execute m - (fun () -> - while from_id = call.cur_id && not (session_is_invalid call) && Unix.gettimeofday () < deadline do - Xapi_periodic_scheduler.add_to_queue timeoutname Xapi_periodic_scheduler.OneShot (deadline -. Unix.gettimeofday () +. 
0.5) (fun () -> Condition.broadcast c); - Condition.wait c m; - Xapi_periodic_scheduler.remove_from_queue timeoutname - done; - ); - if session_is_invalid call then begin - info "%s raising SESSION_INVALID *because* subscription is invalid" (Context.trackid_of_session (Some call.session)); - raise (Api_errors.Server_error(Api_errors.session_invalid, [ Ref.string_of call.session ])) - end + let m = Mutex.create () + let c = Condition.create () + + let next_index = + let id = ref 0L in + fun () -> + Mutex.execute m (fun () -> + let result = !id in + id := Int64.succ !id; + result + ) + + (* A (blocking) call which should be unblocked on logout *) + type call = { + index: int64; (* Unique id for this call *) + mutable cur_id: int64; (* Most current generation count relevant to the client *) + subs: Subscription.t list; (* list of all the subscriptions *) + session: API.ref_session; (* the session associated with this call *) + mutable session_invalid: bool; (* set to true if the associated session has been deleted *) + m: Mutex.t; (* protects access to the mutable fields in this record *) + } + + (* The set of (blocking) calls associated with a session *) + let calls : (API.ref_session, call list) Hashtbl.t = Hashtbl.create 10 + + let get_current_event_number () = + (Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest (Db_ref.get_database (Db_backend.make ())))) + + (* Add an event to the queue if it matches any active subscriptions *) + let add ev = + Mutex.execute m + (fun () -> + let matches_per_thread = Hashtbl.fold + (fun _ s acc -> + List.fold_left (fun acc s -> + if Subscription.event_matches s.subs ev + then (s.cur_id <- get_current_event_number (); true) + else acc) acc s + ) calls false in + if matches_per_thread then Condition.broadcast c; + ) + + (* Call a function with a registered call which will be woken up if + the session is destroyed in the background. 
*) + let with_call session subs f = + let index = next_index () in + let fresh = { index; cur_id = 0L; subs; m = Mutex.create(); session; session_invalid = false; } in + Mutex.execute m + (fun () -> + let existing = + if Hashtbl.mem calls session + then Hashtbl.find calls session + else [] in + Hashtbl.replace calls session (fresh :: existing) + ); + finally + (fun () -> f fresh) + (fun () -> Mutex.execute m (fun () -> + if Hashtbl.mem calls session then begin + let existing = Hashtbl.find calls session in + let remaining = List.filter (fun x -> not(x.index = fresh.index)) existing in + if remaining = [] + then Hashtbl.remove calls session + else Hashtbl.replace calls session remaining + end + )) + + (* Is called by the session timeout code *) + let on_session_deleted session_id = + Mutex.execute m + (fun () -> + let mark_invalid sub = + (* Mark the subscription as invalid and wake everyone up *) + Mutex.execute sub.m (fun () -> sub.session_invalid <- true); + Condition.broadcast c in + + if Hashtbl.mem calls session_id then begin + List.iter mark_invalid (Hashtbl.find calls session_id); + Hashtbl.remove calls session_id; + end; + ) + + let session_is_invalid call = Mutex.execute call.m (fun () -> call.session_invalid) + + let wait2 call from_id deadline = + let timeoutname = Printf.sprintf "event_from_timeout_%Ld" call.index in + Mutex.execute m + (fun () -> + while from_id = call.cur_id && not (session_is_invalid call) && Unix.gettimeofday () < deadline do + Xapi_periodic_scheduler.add_to_queue timeoutname Xapi_periodic_scheduler.OneShot (deadline -. Unix.gettimeofday () +. 
0.5) (fun () -> Condition.broadcast c); + Condition.wait c m; + Xapi_periodic_scheduler.remove_from_queue timeoutname + done; + ); + if session_is_invalid call then begin + info "%s raising SESSION_INVALID *because* subscription is invalid" (Context.trackid_of_session (Some call.session)); + raise (Api_errors.Server_error(Api_errors.session_invalid, [ Ref.string_of call.session ])) + end end (** Register an interest in events generated on objects of class *) let register ~__context ~classes = - let session = Context.get_session_id __context in - let open Next in - let subs = List.map Subscription.of_string classes in - let sub = Next.get_subscription session in - Mutex.execute sub.m (fun () -> sub.subs <- subs @ sub.subs) + let session = Context.get_session_id __context in + let open Next in + let subs = List.map Subscription.of_string classes in + let sub = Next.get_subscription session in + Mutex.execute sub.m (fun () -> sub.subs <- subs @ sub.subs) (** Unregister interest in events generated on objects of class *) -let unregister ~__context ~classes = - let session = Context.get_session_id __context in - let open Next in - let subs = List.map Subscription.of_string classes in - let sub = Next.get_subscription session in - Mutex.execute sub.m - (fun () -> sub.subs <- List.filter (fun x -> not(List.mem x subs)) sub.subs) +let unregister ~__context ~classes = + let session = Context.get_session_id __context in + let open Next in + let subs = List.map Subscription.of_string classes in + let sub = Next.get_subscription session in + Mutex.execute sub.m + (fun () -> sub.subs <- List.filter (fun x -> not(List.mem x subs)) sub.subs) (** Blocking call which returns the next set of events relevant to this session. *) let rec next ~__context = - let session = Context.get_session_id __context in - let open Next in - assert_subscribed session; - - let subscription = get_subscription session in - - (* Return a exclusive range that is guaranteed to be specific to this - thread. 
Concurrent calls will grab wholly disjoint ranges. Note the range might be - empty. *) - let grab_range () = - (* Briefly hold both the general and the specific mutex *) - Mutex.execute m - (fun () -> Mutex.execute subscription.m - (fun () -> - let last_id = subscription.last_id in - (* Bump our last_id counter: these events don't have to be looked at again *) - subscription.last_id <- !id ; - last_id, !id)) in - (* Like grab_range () only guarantees to return a non-empty range by blocking if necessary *) - let rec grab_nonempty_range () = - let last_id, end_id = grab_range () in - if last_id = end_id then begin - let (_: int64) = wait subscription end_id in - grab_nonempty_range () - end else last_id, end_id in - - let last_id, end_id = grab_nonempty_range () in - (* debug "next examining events in range %Ld <= x < %Ld" last_id end_id; *) - (* Are any of the new events interesting? *) - let events = events_read last_id end_id in - let subs = Mutex.execute subscription.m (fun () -> subscription.subs) in - let relevant = List.filter (fun ev -> Subscription.event_matches subs ev) events in - (* debug "number of relevant events = %d" (List.length relevant); *) - if relevant = [] then next ~__context - else rpc_of_events relevant - -let from_inner __context session subs from from_t deadline = - let open From in - - (* The database tables involved in our subscription *) - let tables = - let all = - let objs = List.filter (fun x->x.Datamodel_types.gen_events) (Dm_api.objects_of_api Datamodel.all_api) in - let objs = List.map (fun x->x.Datamodel_types.name) objs in - objs in - List.filter (fun table -> Subscription.table_matches subs table) all in - - let last_generation = ref from in - let last_msg_gen = ref from_t in - - let grab_range t = - let tableset = Db_cache_types.Database.tableset (Db_ref.get_database t) in - let (msg_gen,messages) = - if Subscription.table_matches subs "message" then (!Message.get_since_for_events) ~__context !last_msg_gen else (0L, []) in - 
(msg_gen, messages, tableset, List.fold_left - (fun acc table -> - (* Fold over the live objects *) - let acc = Db_cache_types.Table.fold_over_recent !last_generation - (fun objref { Db_cache_types.Stat.created; modified; deleted } _ (creates,mods,deletes,last) -> - if Subscription.object_matches subs (String.lowercase table) objref then begin - let last = max last (max modified deleted) in (* mtime guaranteed to always be larger than ctime *) - ((if created > !last_generation then (table, objref, created)::creates else creates), - (if modified > !last_generation then (table, objref, modified)::mods else mods), - deletes, last) - end else begin - (creates,mods,deletes,last) - end - ) (Db_cache_types.TableSet.find table tableset) acc in - (* Fold over the deleted objects *) - Db_cache_types.Table.fold_over_deleted !last_generation - (fun objref { Db_cache_types.Stat.created; modified; deleted } (creates,mods,deletes,last) -> - if Subscription.object_matches subs (String.lowercase table) objref then begin - let last = max last (max modified deleted) in (* mtime guaranteed to always be larger than ctime *) - if created > !last_generation then - (creates,mods,deletes,last) (* It was created and destroyed since the last update *) - else - (creates,mods,(table, objref, deleted)::deletes,last) (* It might have been modified, but we can't tell now *) - end else begin - (creates,mods,deletes,last) - end - ) (Db_cache_types.TableSet.find table tableset) acc - ) ([],[],[],!last_generation) tables) in - - (* Each event.from should have an independent subscription record *) - let msg_gen, messages, tableset, (creates, mods, deletes, last) = - with_call session subs - (fun sub -> - let rec grab_nonempty_range () = - let (msg_gen, messages, tableset, (creates,mods,deletes,last)) as result = Db_lock.with_lock (fun () -> grab_range (Db_backend.make ())) in - if creates = [] && mods = [] && deletes = [] && messages = [] && Unix.gettimeofday () < deadline then begin - last_generation 
:= last; (* Cur_id was bumped, but nothing relevent fell out of the db. Therefore the *) - sub.cur_id <- last; (* last id the client got is equivalent to the current one *) - last_msg_gen := msg_gen; - wait2 sub last deadline; - Thread.delay 0.05; - grab_nonempty_range () - end else - result in - grab_nonempty_range () - ) in - - last_generation := last; - - let event_of op ?snapshot (table, objref, time) = { - id=Int64.to_string time; ts="0.0"; ty=String.lowercase table; op=op; reference=objref; snapshot=snapshot - } in - let events = List.fold_left (fun acc x -> - let ev = event_of `del x in - if Subscription.event_matches subs ev then ev::acc else acc - ) [] deletes in - let events = List.fold_left (fun acc (table, objref, mtime) -> - let serialiser = Eventgen.find_get_record table in - try - let xml = serialiser ~__context ~self:objref () in - let ev = event_of `_mod ?snapshot:xml (table, objref, mtime) in - if Subscription.event_matches subs ev then ev::acc else acc - with _ -> acc - ) events mods in - let events = List.fold_left (fun acc (table, objref, ctime) -> - let serialiser = Eventgen.find_get_record table in - try - let xml = serialiser ~__context ~self:objref () in - let ev = event_of `add ?snapshot:xml (table, objref, ctime) in - if Subscription.event_matches subs ev then ev::acc else acc - with _ -> acc - ) events creates in - let events = List.fold_left (fun acc mev -> - let event = match mev with - | Message.Create (_ref,message) -> event_of `add ?snapshot:(Some (API.rpc_of_message_t message)) ("message", Ref.string_of _ref, 0L) - | Message.Del _ref -> event_of `del ("message",Ref.string_of _ref, 0L) in - event::acc) events messages in - - let valid_ref_counts = - Db_cache_types.TableSet.fold - (fun tablename _ table acc -> - (String.lowercase tablename, - (Db_cache_types.Table.fold - (fun r _ _ acc -> Int32.add 1l acc) table 0l))::acc) - tableset [] in - - { - events; valid_ref_counts; - token = Token.to_string (last,msg_gen); - } + let session = 
Context.get_session_id __context in + let open Next in + assert_subscribed session; + + let subscription = get_subscription session in + + (* Return an exclusive range that is guaranteed to be specific to this + thread. Concurrent calls will grab wholly disjoint ranges. Note the range might be + empty. *) + let grab_range () = + (* Briefly hold both the general and the specific mutex *) + Mutex.execute m + (fun () -> Mutex.execute subscription.m + (fun () -> + let last_id = subscription.last_id in + (* Bump our last_id counter: these events don't have to be looked at again *) + subscription.last_id <- !id ; + last_id, !id)) in + (* Like grab_range () only guarantees to return a non-empty range by blocking if necessary *) + let rec grab_nonempty_range () = + let last_id, end_id = grab_range () in + if last_id = end_id then begin + let (_: int64) = wait subscription end_id in + grab_nonempty_range () + end else last_id, end_id in + + let last_id, end_id = grab_nonempty_range () in + (* debug "next examining events in range %Ld <= x < %Ld" last_id end_id; *) + (* Are any of the new events interesting? 
*) + let events = events_read last_id end_id in + let subs = Mutex.execute subscription.m (fun () -> subscription.subs) in + let relevant = List.filter (fun ev -> Subscription.event_matches subs ev) events in + (* debug "number of relevant events = %d" (List.length relevant); *) + if relevant = [] then next ~__context + else rpc_of_events relevant + +let from_inner __context session subs from from_t deadline = + let open From in + + (* The database tables involved in our subscription *) + let tables = + let all = + let objs = List.filter (fun x->x.Datamodel_types.gen_events) (Dm_api.objects_of_api Datamodel.all_api) in + let objs = List.map (fun x->x.Datamodel_types.name) objs in + objs in + List.filter (fun table -> Subscription.table_matches subs table) all in + + let last_generation = ref from in + let last_msg_gen = ref from_t in + + let grab_range t = + let tableset = Db_cache_types.Database.tableset (Db_ref.get_database t) in + let (msg_gen,messages) = + if Subscription.table_matches subs "message" then (!Message.get_since_for_events) ~__context !last_msg_gen else (0L, []) in + (msg_gen, messages, tableset, List.fold_left + (fun acc table -> + (* Fold over the live objects *) + let acc = Db_cache_types.Table.fold_over_recent !last_generation + (fun objref { Db_cache_types.Stat.created; modified; deleted } _ (creates,mods,deletes,last) -> + if Subscription.object_matches subs (String.lowercase table) objref then begin + let last = max last (max modified deleted) in (* mtime guaranteed to always be larger than ctime *) + ((if created > !last_generation then (table, objref, created)::creates else creates), + (if modified > !last_generation then (table, objref, modified)::mods else mods), + deletes, last) + end else begin + (creates,mods,deletes,last) + end + ) (Db_cache_types.TableSet.find table tableset) acc in + (* Fold over the deleted objects *) + Db_cache_types.Table.fold_over_deleted !last_generation + (fun objref { Db_cache_types.Stat.created; modified; 
deleted } (creates,mods,deletes,last) -> + if Subscription.object_matches subs (String.lowercase table) objref then begin + let last = max last (max modified deleted) in (* mtime guaranteed to always be larger than ctime *) + if created > !last_generation then + (creates,mods,deletes,last) (* It was created and destroyed since the last update *) + else + (creates,mods,(table, objref, deleted)::deletes,last) (* It might have been modified, but we can't tell now *) + end else begin + (creates,mods,deletes,last) + end + ) (Db_cache_types.TableSet.find table tableset) acc + ) ([],[],[],!last_generation) tables) in + + (* Each event.from should have an independent subscription record *) + let msg_gen, messages, tableset, (creates, mods, deletes, last) = + with_call session subs + (fun sub -> + let rec grab_nonempty_range () = + let (msg_gen, messages, tableset, (creates,mods,deletes,last)) as result = Db_lock.with_lock (fun () -> grab_range (Db_backend.make ())) in + if creates = [] && mods = [] && deletes = [] && messages = [] && Unix.gettimeofday () < deadline then begin + last_generation := last; (* Cur_id was bumped, but nothing relevant fell out of the db. 
Therefore the *) + sub.cur_id <- last; (* last id the client got is equivalent to the current one *) + last_msg_gen := msg_gen; + wait2 sub last deadline; + Thread.delay 0.05; + grab_nonempty_range () + end else + result in + grab_nonempty_range () + ) in + + last_generation := last; + + let event_of op ?snapshot (table, objref, time) = { + id=Int64.to_string time; ts="0.0"; ty=String.lowercase table; op=op; reference=objref; snapshot=snapshot + } in + let events = List.fold_left (fun acc x -> + let ev = event_of `del x in + if Subscription.event_matches subs ev then ev::acc else acc + ) [] deletes in + let events = List.fold_left (fun acc (table, objref, mtime) -> + let serialiser = Eventgen.find_get_record table in + try + let xml = serialiser ~__context ~self:objref () in + let ev = event_of `_mod ?snapshot:xml (table, objref, mtime) in + if Subscription.event_matches subs ev then ev::acc else acc + with _ -> acc + ) events mods in + let events = List.fold_left (fun acc (table, objref, ctime) -> + let serialiser = Eventgen.find_get_record table in + try + let xml = serialiser ~__context ~self:objref () in + let ev = event_of `add ?snapshot:xml (table, objref, ctime) in + if Subscription.event_matches subs ev then ev::acc else acc + with _ -> acc + ) events creates in + let events = List.fold_left (fun acc mev -> + let event = match mev with + | Message.Create (_ref,message) -> event_of `add ?snapshot:(Some (API.rpc_of_message_t message)) ("message", Ref.string_of _ref, 0L) + | Message.Del _ref -> event_of `del ("message",Ref.string_of _ref, 0L) in + event::acc) events messages in + + let valid_ref_counts = + Db_cache_types.TableSet.fold + (fun tablename _ table acc -> + (String.lowercase tablename, + (Db_cache_types.Table.fold + (fun r _ _ acc -> Int32.add 1l acc) table 0l))::acc) + tableset [] in + + { + events; valid_ref_counts; + token = Token.to_string (last,msg_gen); + } let from ~__context ~classes ~token ~timeout = - let session = Context.get_session_id 
__context in - let from, from_t = - try - Token.of_string token - with e -> - warn "Failed to parse event.from token: %s (%s)" token (Printexc.to_string e); - raise (Api_errors.Server_error(Api_errors.event_from_token_parse_failure, [ token ])) in - - let subs = List.map Subscription.of_string classes in - - let deadline = Unix.gettimeofday () +. timeout in - - (* We need to iterate because it's possible for an empty event set - to be generated if we peek in-between a Modify and a Delete; we'll - miss the Delete event and fail to generate the Modify because the - snapshot can't be taken. *) - let rec loop () = - let event_from = from_inner __context session subs from from_t deadline in - if event_from.events = [] && (Unix.gettimeofday () < deadline) then begin - debug "suppressing empty event.from"; - loop () - end else begin - rpc_of_event_from event_from - end in - loop () + let session = Context.get_session_id __context in + let from, from_t = + try + Token.of_string token + with e -> + warn "Failed to parse event.from token: %s (%s)" token (Printexc.to_string e); + raise (Api_errors.Server_error(Api_errors.event_from_token_parse_failure, [ token ])) in + + let subs = List.map Subscription.of_string classes in + + let deadline = Unix.gettimeofday () +. timeout in + + (* We need to iterate because it's possible for an empty event set + to be generated if we peek in-between a Modify and a Delete; we'll + miss the Delete event and fail to generate the Modify because the + snapshot can't be taken. 
*) + let rec loop () = + let event_from = from_inner __context session subs from from_t deadline in + if event_from.events = [] && (Unix.gettimeofday () < deadline) then begin + debug "suppressing empty event.from"; + loop () + end else begin + rpc_of_event_from event_from + end in + loop () let get_current_id ~__context = Mutex.execute Next.m (fun () -> !Next.id) let inject ~__context ~_class ~_ref = - let open Db_cache_types in - let generation : int64 = Db_lock.with_lock - (fun () -> - let db_ref = Db_backend.make () in - let g = Manifest.generation (Database.manifest (Db_ref.get_database db_ref)) in - Db_cache_impl.touch_row db_ref _class _ref; (* consumes this generation *) - g - ) in - let token = Int64.sub generation 1L, 0L in - Token.to_string token + let open Db_cache_types in + let generation : int64 = Db_lock.with_lock + (fun () -> + let db_ref = Db_backend.make () in + let g = Manifest.generation (Database.manifest (Db_ref.get_database db_ref)) in + Db_cache_impl.touch_row db_ref _class _ref; (* consumes this generation *) + g + ) in + let token = Int64.sub generation 1L, 0L in + Token.to_string token (* Internal interface ****************************************************) let event_add ?snapshot ty op reference = - let objs = List.filter (fun x->x.Datamodel_types.gen_events) (Dm_api.objects_of_api Datamodel.all_api) in - let objs = List.map (fun x->x.Datamodel_types.name) objs in - if List.mem ty objs then begin - let ts = string_of_float (Unix.time ()) in - let op = op_of_string op in + let objs = List.filter (fun x->x.Datamodel_types.gen_events) (Dm_api.objects_of_api Datamodel.all_api) in + let objs = List.map (fun x->x.Datamodel_types.name) objs in + if List.mem ty objs then begin + let ts = string_of_float (Unix.time ()) in + let op = op_of_string op in - let ev = { id = Int64.to_string !Next.id; ts; ty = String.lowercase ty; op; reference; snapshot } in - From.add ev; - Next.add ev - end + let ev = { id = Int64.to_string !Next.id; ts; ty = 
String.lowercase ty; op; reference; snapshot } in + From.add ev; + Next.add ev + end let register_hooks () = Db_action_helper.events_register event_add (* Called whenever a session is being destroyed i.e. by Session.logout and db_gc *) -let on_session_deleted session_id = - (* Unregister this session if is associated with in imported DB. *) - (* FIXME: this doesn't logically belong in the event code *) - Db_backend.unregister_session session_id; +let on_session_deleted session_id = + (* Unregister this session if it is associated with an imported DB. *) + (* FIXME: this doesn't logically belong in the event code *) + Db_backend.unregister_session session_id; - Next.on_session_deleted session_id; - From.on_session_deleted session_id + Next.on_session_deleted session_id; + From.on_session_deleted session_id (* Inject an unnecessary update as a heartbeat. This will: 1. hopefully prevent some firewalls from silently closing the connection 2. allow the server to detect when a client has failed *) let heartbeat ~__context = - try - Db_lock.with_lock - (fun () -> - (* We must hold the database lock since we are sending an update for a real object - and we don't want to accidentally transmit an older snapshot. 
*) + let pool = try Some (Helpers.get_pool ~__context) with _ -> None in + match pool with + | Some pool -> + let pool_r = Db.Pool.get_record ~__context ~self:pool in + let pool_xml = API.rpc_of_pool_t pool_r in + event_add ~snapshot:pool_xml "pool" "mod" (Ref.string_of pool) + | None -> () (* no pool object created during initial boot *) + ) + with e -> + error "Caught exception sending event heartbeat: %s" (ExnHelper.string_of_exn e) diff --git a/ocaml/xapi/xapi_event.mli b/ocaml/xapi/xapi_event.mli index c5385614656..550e7e3c5c5 100644 --- a/ocaml/xapi/xapi_event.mli +++ b/ocaml/xapi/xapi_event.mli @@ -45,10 +45,10 @@ val on_session_deleted: API.ref_session -> unit val heartbeat: __context:Context.t -> unit module Message : sig - type t = - | Create of (API.ref_message * API.message_t) - | Del of API.ref_message + type t = + | Create of (API.ref_message * API.message_t) + | Del of API.ref_message - val get_since_for_events : (__context:Context.t -> int64 -> (int64 * t list)) ref + val get_since_for_events : (__context:Context.t -> int64 -> (int64 * t list)) ref end diff --git a/ocaml/xapi/xapi_extensions.ml b/ocaml/xapi/xapi_extensions.ml index de2b90ff5d4..0fdd630c5fc 100644 --- a/ocaml/xapi/xapi_extensions.ml +++ b/ocaml/xapi/xapi_extensions.ml @@ -19,37 +19,37 @@ open D (* Only scripts in the Xapi_globs.xapi_extensions_root can be called *) let find_extension name = - let all = try Array.to_list (Sys.readdir !Xapi_globs.xapi_extensions_root) with _ -> [] in - (* Sys.readdir output doesn't include "." or ".." *) - if List.mem name all - then Filename.concat !Xapi_globs.xapi_extensions_root name - else raise (Api_errors.Server_error(Api_errors.message_method_unknown, [name])) + let all = try Array.to_list (Sys.readdir !Xapi_globs.xapi_extensions_root) with _ -> [] in + (* Sys.readdir output doesn't include "." or ".." 
*) + if List.mem name all + then Filename.concat !Xapi_globs.xapi_extensions_root name + else raise (Api_errors.Server_error(Api_errors.message_method_unknown, [name])) (* Execute the extension with XMLRPC-over-cmdline/stdout convention. *) let call_extension rpc = - try - let path = find_extension rpc.Rpc.name in + try + let path = find_extension rpc.Rpc.name in - let output, _ = - try - Forkhelpers.execute_command_get_output_send_stdin path [ "--xmlrpc" ] (Xmlrpc.string_of_call rpc) - with - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> - raise (Api_errors.Server_error (Api_errors.internal_error, [path; "task stopped"; output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> - raise (Api_errors.Server_error (Api_errors.internal_error, [path; Printf.sprintf "signal: %s" (Stdext.Unixext.string_of_signal i); output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> - raise (Api_errors.Server_error (Api_errors.internal_error, [path; "non-zero exit"; output; log ])) in - begin - try - Xmlrpc.response_of_string output - with e -> - raise (Api_errors.Server_error(Api_errors.internal_error, [ path; "failed to parse extension output"; output; Printexc.to_string e ])) - end; - with - | Api_errors.Server_error(code, params) -> - API.response_of_failure code params - | e -> - error "Unexpected exception calling extension %s: %s" rpc.Rpc.name (Printexc.to_string e); - Debug.log_backtrace e (Backtrace.get e); - API.response_of_failure Api_errors.internal_error [ rpc.Rpc.name; Printexc.to_string e ] + let output, _ = + try + Forkhelpers.execute_command_get_output_send_stdin path [ "--xmlrpc" ] (Xmlrpc.string_of_call rpc) + with + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> + raise (Api_errors.Server_error (Api_errors.internal_error, [path; "task stopped"; output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> + raise (Api_errors.Server_error 
(Api_errors.internal_error, [path; Printf.sprintf "signal: %s" (Stdext.Unixext.string_of_signal i); output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> + raise (Api_errors.Server_error (Api_errors.internal_error, [path; "non-zero exit"; output; log ])) in + begin + try + Xmlrpc.response_of_string output + with e -> + raise (Api_errors.Server_error(Api_errors.internal_error, [ path; "failed to parse extension output"; output; Printexc.to_string e ])) + end; + with + | Api_errors.Server_error(code, params) -> + API.response_of_failure code params + | e -> + error "Unexpected exception calling extension %s: %s" rpc.Rpc.name (Printexc.to_string e); + Debug.log_backtrace e (Backtrace.get e); + API.response_of_failure Api_errors.internal_error [ rpc.Rpc.name; Printexc.to_string e ] diff --git a/ocaml/xapi/xapi_fist.ml b/ocaml/xapi/xapi_fist.ml index f32f2b8f226..1646f4933d6 100644 --- a/ocaml/xapi/xapi_fist.ml +++ b/ocaml/xapi/xapi_fist.ml @@ -13,7 +13,7 @@ *) (** Module for reading FIST points * @group Testing - *) +*) open Stdext @@ -25,9 +25,9 @@ open D let fistpoint name = try Unix.access ("/tmp/fist_" ^ name) [ Unix.F_OK ]; true with _ -> false let fistpoint_read name = - try - Some (Unixext.string_of_file ("/tmp/fist_" ^ name)) - with _ -> None + try + Some (Unixext.string_of_file ("/tmp/fist_" ^ name)) + with _ -> None let delete name = Unixext.unlink_safe ("/tmp/fist_" ^ name) @@ -78,7 +78,7 @@ let simulate_blocking_planner () = fistpoint "simulate_blocking_planner" (** Used to simulate an initial VBD.unplug failure *) let simulate_vbd_unplug_failure () = fistpoint "simulate_vbd_unplug_failure" -(** {2 RRD fist points} +(** {2 RRD fist points} * NB: these are evaluated once at run time and not again - no dynamic changing here :-) *) (** Reduce blob sync period to 5 minutes *) @@ -94,7 +94,7 @@ let disable_sync_lifecycle_path () = fistpoint "disable_sync_lifecycle_path" let disable_event_lifecycle_path () = fistpoint 
"disable_event_lifecycle_path" (** If set to "reboot" "halt" "suspend" "crash" this will forcibly shutdown the domain during reboot/shutdown *) -let simulate_internal_shutdown () = +let simulate_internal_shutdown () = let fist = "simulate_internal_shutdown" in let x = fistpoint_read fist in delete fist; diff --git a/ocaml/xapi/xapi_fuse.ml b/ocaml/xapi/xapi_fuse.ml index 2686b10f557..5b7f15a1afb 100644 --- a/ocaml/xapi/xapi_fuse.ml +++ b/ocaml/xapi/xapi_fuse.ml @@ -19,73 +19,73 @@ open D module Rrdd = Rrd_client.Client -let time f = +let time f = let start = Unix.gettimeofday () in (try f () with e -> warn "Caught exception while performing timed function: %s" (Printexc.to_string e)); Unix.gettimeofday () -. start (* give xapi time to reply to API messages by means of a 10 second fuse! *) let light_fuse_and_run ?(fuse_length = !Xapi_globs.fuse_time) () = - debug "light_fuse_and_run: calling Rrdd.backup_rrds to save current RRDs locally"; - let delay_so_far = - time (fun _ -> log_and_ignore_exn Xapi_stats.stop) +. - time (fun _ -> log_and_ignore_exn Rrdd.backup_rrds) - in + debug "light_fuse_and_run: calling Rrdd.backup_rrds to save current RRDs locally"; + let delay_so_far = + time (fun _ -> log_and_ignore_exn Xapi_stats.stop) +. + time (fun _ -> log_and_ignore_exn Rrdd.backup_rrds) + in let new_fuse_length = max 5. (fuse_length -. delay_so_far) in debug "light_fuse_and_run: current RRDs have been saved"; ignore (Thread.create - (fun ()-> - Thread.delay new_fuse_length; - debug "light_fuse_and_run: calling flush and exit"; - (* CA-16368: If the database hasn't been initialised *at all* we can exit immediately. - This happens if someone calls flush_and_exit before the db conf has been parsed, the connections - initialised and the database "mode" set. 
- *) - try - let dbconn = Db_connections.preferred_write_db () in - Db_cache_impl.flush_and_exit dbconn Xapi_globs.restart_return_code - with e -> - warn "Caught an exception flushing database (perhaps it hasn't been initialised yet): %s; restarting immediately" (ExnHelper.string_of_exn e); - exit Xapi_globs.restart_return_code - ) () ) + (fun ()-> + Thread.delay new_fuse_length; + debug "light_fuse_and_run: calling flush and exit"; + (* CA-16368: If the database hasn't been initialised *at all* we can exit immediately. + This happens if someone calls flush_and_exit before the db conf has been parsed, the connections + initialised and the database "mode" set. + *) + try + let dbconn = Db_connections.preferred_write_db () in + Db_cache_impl.flush_and_exit dbconn Xapi_globs.restart_return_code + with e -> + warn "Caught an exception flushing database (perhaps it hasn't been initialised yet): %s; restarting immediately" (ExnHelper.string_of_exn e); + exit Xapi_globs.restart_return_code + ) () ) let light_fuse_and_reboot_after_eject() = ignore (Thread.create - (fun ()-> - Thread.delay !Xapi_globs.fuse_time; - (* this activates firstboot script and reboots the host *) - ignore (Forkhelpers.execute_command_get_output "/sbin/xs-firstboot" [ "reset-and-reboot" ]); - () - ) ()) + (fun ()-> + Thread.delay !Xapi_globs.fuse_time; + (* this activates firstboot script and reboots the host *) + ignore (Forkhelpers.execute_command_get_output "/sbin/xs-firstboot" [ "reset-and-reboot" ]); + () + ) ()) let light_fuse_and_reboot ?(fuse_length = !Xapi_globs.fuse_time) () = ignore (Thread.create - (fun ()-> - Thread.delay fuse_length; - ignore(Sys.command "shutdown -r now") - ) ()) + (fun ()-> + Thread.delay fuse_length; + ignore(Sys.command "shutdown -r now") + ) ()) let light_fuse_and_dont_restart ?(fuse_length = !Xapi_globs.fuse_time) () = ignore (Thread.create - (fun () -> - debug "light_fuse_and_dont_restart: calling Rrdd.backup_rrds to save current RRDs locally"; - 
log_and_ignore_exn Xapi_stats.stop; - log_and_ignore_exn Rrdd.backup_rrds; - Thread.delay fuse_length; - Db_cache_impl.flush_and_exit (Db_connections.preferred_write_db ()) 0) ()); + (fun () -> + debug "light_fuse_and_dont_restart: calling Rrdd.backup_rrds to save current RRDs locally"; + log_and_ignore_exn Xapi_stats.stop; + log_and_ignore_exn Rrdd.backup_rrds; + Thread.delay fuse_length; + Db_cache_impl.flush_and_exit (Db_connections.preferred_write_db ()) 0) ()); (* This is a best-effort attempt to use the database. We must not block the flush_and_exit above, hence the use of a background thread. *) Helpers.log_exn_continue "setting Host.enabled to false" (fun () -> Server_helpers.exec_with_new_task "Setting Host.enabled to false" - (fun __context -> - debug "About to set Host.enabled to false"; - let localhost = Helpers.get_localhost ~__context in - Db.Host.set_enabled ~__context ~self:localhost ~value:false; - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Db_gc.send_one_heartbeat ~__context rpc ~shutting_down:true session_id - ) - ) + (fun __context -> + debug "About to set Host.enabled to false"; + let localhost = Helpers.get_localhost ~__context in + Db.Host.set_enabled ~__context ~self:localhost ~value:false; + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Db_gc.send_one_heartbeat ~__context rpc ~shutting_down:true session_id + ) + ) ) () diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index 4fc9dbc006c..05a13ed1fa7 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -131,30 +131,30 @@ let _dbv = "dbv" * older than itself. 
*) let default_platform_version = "0.0.0" -(* Used to differentiate between +(* Used to differentiate between Rio beta2 (0) [no inline checksums, end-of-tar checksum table], Rio GA (1) [inline checksums, end-of-tar checksum table] and Miami GA (2) [inline checksums, no end-of-tar checksum table] *) let export_vsn = 2 let software_version () = - (* In the case of XCP, all product_* fields will be blank. *) - List.filter (fun (_, value) -> value <> "") - [_product_version, Version.product_version (); - _product_version_text, Version.product_version_text (); - _product_version_text_short, Version.product_version_text_short (); - _platform_name, Version.platform_name (); - _platform_version, Version.platform_version (); - _product_brand, Version.product_brand (); - _build_number, Version.build_number (); - _git_id, Version.git_id; - _hostname, Version.hostname; - _date, Version.date] + (* In the case of XCP, all product_* fields will be blank. *) + List.filter (fun (_, value) -> value <> "") + [_product_version, Version.product_version (); + _product_version_text, Version.product_version_text (); + _product_version_text_short, Version.product_version_text_short (); + _platform_name, Version.platform_name (); + _platform_version, Version.platform_version (); + _product_brand, Version.product_brand (); + _build_number, Version.build_number (); + _git_id, Version.git_id; + _hostname, Version.hostname; + _date, Version.date] let pygrub_path = "/usr/bin/pygrub" let eliloader_path = "/usr/bin/eliloader" let supported_bootloaders = [ "pygrub", pygrub_path; - "eliloader", eliloader_path ] + "eliloader", eliloader_path ] (* Deprecated: *) let is_guest_installer_network = "is_guest_installer_network" @@ -433,40 +433,40 @@ let xapi_extensions_root = ref "/etc/xapi.d/extensions" (** be read by a Miami host, and remove any items that are not found on the lists. 
*) let host_operations_miami = [ - `evacuate; - `provision; + `evacuate; + `provision; ] let vm_operations_miami = [ - `assert_operation_valid; - `changing_memory_live; - `changing_shadow_memory_live; - `changing_VCPUs_live; - `clean_reboot; - `clean_shutdown; - `clone; - `copy; - `csvm; - `destroy; - `export; - `get_boot_record; - `hard_reboot; - `hard_shutdown; - `import; - `make_into_template; - `pause; - `pool_migrate; - `power_state_reset; - `provision; - `resume; - `resume_on; - `send_sysrq; - `send_trigger; - `start; - `start_on; - `suspend; - `unpause; - `update_allowed_operations; + `assert_operation_valid; + `changing_memory_live; + `changing_shadow_memory_live; + `changing_VCPUs_live; + `clean_reboot; + `clean_shutdown; + `clone; + `copy; + `csvm; + `destroy; + `export; + `get_boot_record; + `hard_reboot; + `hard_shutdown; + `import; + `make_into_template; + `pause; + `pool_migrate; + `power_state_reset; + `provision; + `resume; + `resume_on; + `send_sysrq; + `send_trigger; + `start; + `start_on; + `suspend; + `unpause; + `update_allowed_operations; ] (* Viridian key name (goes in platform flags) *) @@ -584,43 +584,43 @@ let xenclient_enabled = false (** Type 11 strings that are always included *) let standard_type11_strings = - ["oem-1", "Xen"; - "oem-2", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"] + ["oem-1", "Xen"; + "oem-2", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"] -(** Generic BIOS strings *) +(** Generic BIOS strings *) let generic_bios_strings = - ["bios-vendor", "Xen"; - "bios-version", ""; - "system-manufacturer", "Xen"; - "system-product-name", "HVM domU"; - "system-version", ""; - "system-serial-number", ""; - "hp-rombios", ""] @ standard_type11_strings + ["bios-vendor", "Xen"; + "bios-version", ""; + "system-manufacturer", "Xen"; + "system-product-name", "HVM domU"; + "system-version", ""; + "system-serial-number", ""; + "hp-rombios", ""] @ standard_type11_strings (** BIOS strings of the old (XS 5.5) Dell 
Edition *) let old_dell_bios_strings = - ["bios-vendor", "Dell Inc."; - "bios-version", "1.9.9"; - "system-manufacturer", "Dell Inc."; - "system-product-name", "PowerEdge"; - "system-version", ""; - "system-serial-number", "3.3.1"; - "oem-1", "Dell System"; - "oem-2", "5[0000]"; - "oem-3", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"; - "hp-rombios", ""] + ["bios-vendor", "Dell Inc."; + "bios-version", "1.9.9"; + "system-manufacturer", "Dell Inc."; + "system-product-name", "PowerEdge"; + "system-version", ""; + "system-serial-number", "3.3.1"; + "oem-1", "Dell System"; + "oem-2", "5[0000]"; + "oem-3", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"; + "hp-rombios", ""] (** BIOS strings of the old (XS 5.5) HP Edition *) let old_hp_bios_strings = - ["bios-vendor", "Xen"; - "bios-version", "3.3.1"; - "system-manufacturer", "HP"; - "system-product-name", "ProLiant Virtual Platform"; - "system-version", "3.3.1"; - "system-serial-number", "e30aecc3-e587-5a95-9537-7c306759bced"; - "oem-1", "Xen"; - "oem-2", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"; - "hp-rombios", "COMPAQ"] + ["bios-vendor", "Xen"; + "bios-version", "3.3.1"; + "system-manufacturer", "HP"; + "system-product-name", "ProLiant Virtual Platform"; + "system-version", "3.3.1"; + "system-serial-number", "e30aecc3-e587-5a95-9537-7c306759bced"; + "oem-1", "Xen"; + "oem-2", "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d"; + "hp-rombios", "COMPAQ"] (** {2 CPUID feature masking} *) @@ -706,7 +706,7 @@ let fuse_time = ref 10. let db_restore_fuse_time = ref 30. (* If a session has a last_active older than this we delete it *) -let inactive_session_timeout = ref 86400. (* 24 hrs in seconds *) +let inactive_session_timeout = ref 86400. (* 24 hrs in seconds *) let pending_task_timeout = ref 86400. 
(* 24 hrs in seconds *) @@ -855,67 +855,67 @@ let sr_health_check_task_label = "SR Recovering" type xapi_globs_spec_ty = | Float of float ref | Int of int ref let xapi_globs_spec = - [ "master_connection_reset_timeout", Float master_connection_reset_timeout; - "master_connection_retry_timeout", Float master_connection_retry_timeout; - "master_connection_default_timeout", Float master_connection_default_timeout; - "qemu_dm_ready_timeout", Float qemu_dm_ready_timeout; - "hotplug_timeout", Float hotplug_timeout; - "pif_reconfigure_ip_timeout", Float pif_reconfigure_ip_timeout; - "pool_db_sync_interval", Float pool_db_sync_interval; - "pool_data_sync_interval", Float pool_data_sync_interval; - "domain_shutdown_ack_timeout", Float domain_shutdown_ack_timeout; - "domain_shutdown_total_timeout", Float domain_shutdown_total_timeout; - "emergency_reboot_delay_base", Float emergency_reboot_delay_base; - "emergency_reboot_delay_extra", Float emergency_reboot_delay_extra; - "ha_xapi_healthcheck_interval", Int ha_xapi_healthcheck_interval; - "ha_xapi_healthcheck_timeout", Int ha_xapi_healthcheck_timeout; - "ha_xapi_restart_attempts", Int ha_xapi_restart_attempts; - "ha_xapi_restart_timeout", Int ha_xapi_restart_timeout; - "logrotate_check_interval", Float logrotate_check_interval; - "rrd_backup_interval", Float rrd_backup_interval; - "session_revalidation_interval", Float session_revalidation_interval; - "update_all_subjects_interval", Float update_all_subjects_interval; - "wait_memory_target_timeout", Float wait_memory_target_timeout; - "snapshot_with_quiesce_timeout", Float snapshot_with_quiesce_timeout; - "host_heartbeat_interval", Float host_heartbeat_interval; - "host_assumed_dead_interval", Float host_assumed_dead_interval; - "fuse_time", Float fuse_time; - "db_restore_fuse_time", Float db_restore_fuse_time; - "inactive_session_timeout", Float inactive_session_timeout; - "pending_task_timeout", Float pending_task_timeout; - "completed_task_timeout", Float 
completed_task_timeout; - "minimum_time_between_bounces", Float minimum_time_between_bounces; - "minimum_time_between_reboot_with_no_added_delay", Float minimum_time_between_reboot_with_no_added_delay; - "ha_monitor_interval", Float ha_monitor_interval; - "ha_monitor_plan_interval", Float ha_monitor_plan_interval; - "ha_monitor_startup_timeout", Float ha_monitor_startup_timeout; - "ha_default_timeout_base", Float ha_default_timeout_base; - "guest_liveness_timeout", Float guest_liveness_timeout; - "permanent_master_failure_retry_interval", Float permanent_master_failure_retry_interval; - "redo_log_max_block_time_empty", Float redo_log_max_block_time_empty; - "redo_log_max_block_time_read", Float redo_log_max_block_time_read; - "redo_log_max_block_time_writedelta", Float redo_log_max_block_time_writedelta; - "redo_log_max_block_time_writedb", Float redo_log_max_block_time_writedb; - "redo_log_max_startup_time", Float redo_log_max_startup_time; - "redo_log_connect_delay", Float redo_log_connect_delay; - "default-vbd3-polling-duration", Int default_vbd3_polling_duration; - "default-vbd3-polling-idle-threshold", Int default_vbd3_polling_idle_threshold; - "vm_call_plugin_interval", Float vm_call_plugin_interval; - ] - -let options_of_xapi_globs_spec = - List.map (fun (name,ty) -> - name, (match ty with Float x -> Arg.Set_float x | Int x -> Arg.Set_int x), - (fun () -> match ty with Float x -> string_of_float !x | Int x -> string_of_int !x), - (Printf.sprintf "Set the value of '%s'" name)) xapi_globs_spec + [ "master_connection_reset_timeout", Float master_connection_reset_timeout; + "master_connection_retry_timeout", Float master_connection_retry_timeout; + "master_connection_default_timeout", Float master_connection_default_timeout; + "qemu_dm_ready_timeout", Float qemu_dm_ready_timeout; + "hotplug_timeout", Float hotplug_timeout; + "pif_reconfigure_ip_timeout", Float pif_reconfigure_ip_timeout; + "pool_db_sync_interval", Float pool_db_sync_interval; + 
"pool_data_sync_interval", Float pool_data_sync_interval; + "domain_shutdown_ack_timeout", Float domain_shutdown_ack_timeout; + "domain_shutdown_total_timeout", Float domain_shutdown_total_timeout; + "emergency_reboot_delay_base", Float emergency_reboot_delay_base; + "emergency_reboot_delay_extra", Float emergency_reboot_delay_extra; + "ha_xapi_healthcheck_interval", Int ha_xapi_healthcheck_interval; + "ha_xapi_healthcheck_timeout", Int ha_xapi_healthcheck_timeout; + "ha_xapi_restart_attempts", Int ha_xapi_restart_attempts; + "ha_xapi_restart_timeout", Int ha_xapi_restart_timeout; + "logrotate_check_interval", Float logrotate_check_interval; + "rrd_backup_interval", Float rrd_backup_interval; + "session_revalidation_interval", Float session_revalidation_interval; + "update_all_subjects_interval", Float update_all_subjects_interval; + "wait_memory_target_timeout", Float wait_memory_target_timeout; + "snapshot_with_quiesce_timeout", Float snapshot_with_quiesce_timeout; + "host_heartbeat_interval", Float host_heartbeat_interval; + "host_assumed_dead_interval", Float host_assumed_dead_interval; + "fuse_time", Float fuse_time; + "db_restore_fuse_time", Float db_restore_fuse_time; + "inactive_session_timeout", Float inactive_session_timeout; + "pending_task_timeout", Float pending_task_timeout; + "completed_task_timeout", Float completed_task_timeout; + "minimum_time_between_bounces", Float minimum_time_between_bounces; + "minimum_time_between_reboot_with_no_added_delay", Float minimum_time_between_reboot_with_no_added_delay; + "ha_monitor_interval", Float ha_monitor_interval; + "ha_monitor_plan_interval", Float ha_monitor_plan_interval; + "ha_monitor_startup_timeout", Float ha_monitor_startup_timeout; + "ha_default_timeout_base", Float ha_default_timeout_base; + "guest_liveness_timeout", Float guest_liveness_timeout; + "permanent_master_failure_retry_interval", Float permanent_master_failure_retry_interval; + "redo_log_max_block_time_empty", Float 
redo_log_max_block_time_empty; + "redo_log_max_block_time_read", Float redo_log_max_block_time_read; + "redo_log_max_block_time_writedelta", Float redo_log_max_block_time_writedelta; + "redo_log_max_block_time_writedb", Float redo_log_max_block_time_writedb; + "redo_log_max_startup_time", Float redo_log_max_startup_time; + "redo_log_connect_delay", Float redo_log_connect_delay; + "default-vbd3-polling-duration", Int default_vbd3_polling_duration; + "default-vbd3-polling-idle-threshold", Int default_vbd3_polling_idle_threshold; + "vm_call_plugin_interval", Float vm_call_plugin_interval; + ] + +let options_of_xapi_globs_spec = + List.map (fun (name,ty) -> + name, (match ty with Float x -> Arg.Set_float x | Int x -> Arg.Set_int x), + (fun () -> match ty with Float x -> string_of_float !x | Int x -> string_of_int !x), + (Printf.sprintf "Set the value of '%s'" name)) xapi_globs_spec let xapissl_path = ref "xapissl" let xenopsd_queues = ref ([ - "org.xen.xapi.xenops.classic"; - "org.xen.xapi.xenops.simulator"; - "org.xen.xapi.xenops.xenlight"; -]) + "org.xen.xapi.xenops.classic"; + "org.xen.xapi.xenops.simulator"; + "org.xen.xapi.xenops.xenlight"; + ]) let default_xenopsd = ref "org.xen.xapi.xenops.xenlight" @@ -958,28 +958,28 @@ let other_options = [ (fun x -> if x = "*" then `All else `Sm x) (fun x -> match x with `All -> "*" | `Sm x -> x) sm_plugins; "hotfix-fingerprint", Arg.Set_string trusted_patch_key, - (fun () -> !trusted_patch_key), "Fingerprint of the key used for signed hotfixes"; + (fun () -> !trusted_patch_key), "Fingerprint of the key used for signed hotfixes"; - "logconfig", Arg.Set_string log_config_file, - (fun () -> !log_config_file), "Log config file to use"; + "logconfig", Arg.Set_string log_config_file, + (fun () -> !log_config_file), "Log config file to use"; - "writereadyfile", Arg.Set_string ready_file, - (fun () -> !ready_file), "touch specified file when xapi is ready to accept requests"; + "writereadyfile", Arg.Set_string ready_file, + (fun () 
-> !ready_file), "touch specified file when xapi is ready to accept requests"; - "writeinitcomplete", Arg.Set_string init_complete, - (fun () -> !init_complete), "touch specified file when xapi init process is complete"; + "writeinitcomplete", Arg.Set_string init_complete, + (fun () -> !init_complete), "touch specified file when xapi init process is complete"; - "nowatchdog", Arg.Set nowatchdog, - (fun () -> string_of_bool !nowatchdog), "turn watchdog off, avoiding initial fork"; + "nowatchdog", Arg.Set nowatchdog, + (fun () -> string_of_bool !nowatchdog), "turn watchdog off, avoiding initial fork"; "log-getter", Arg.Set log_getter, - (fun () -> string_of_bool !log_getter), "Enable/Disable logging for getters"; + (fun () -> string_of_bool !log_getter), "Enable/Disable logging for getters"; - "onsystemboot", Arg.Set on_system_boot, - (fun () -> string_of_bool !on_system_boot), "indicates that this server start is the first since the host rebooted"; + "onsystemboot", Arg.Set on_system_boot, + (fun () -> string_of_bool !on_system_boot), "indicates that this server start is the first since the host rebooted"; "relax-xsm-sr-check", Arg.Set relax_xsm_sr_check, - (fun () -> string_of_bool !relax_xsm_sr_check), "allow storage migration when SRs have been mirrored out-of-band (and have matching SR uuids)"; + (fun () -> string_of_bool !relax_xsm_sr_check), "allow storage migration when SRs have been mirrored out-of-band (and have matching SR uuids)"; gen_list_option "disable-logging-for" "space-separated list of modules to suppress logging from" @@ -990,46 +990,46 @@ let other_options = [ (fun s -> s) (fun s -> s) disable_dbsync_for; "xenopsd-queues", Arg.String (fun x -> xenopsd_queues := String.split ',' x), - (fun () -> String.concat "," !xenopsd_queues), "list of xenopsd instances to manage"; + (fun () -> String.concat "," !xenopsd_queues), "list of xenopsd instances to manage"; "xenopsd-default", Arg.Set_string default_xenopsd, - (fun () -> !default_xenopsd), "default 
xenopsd to use"; + (fun () -> !default_xenopsd), "default xenopsd to use"; gen_list_option "igd-passthru-vendor-whitelist" "list of PCI vendor IDs for integrated graphics passthrough (space-separated)" (fun s -> - D.debug "Whitelisting PCI vendor %s for passthrough" s; - Scanf.sscanf s "%4Lx" (fun _ -> s)) (* Scanf verifies format *) + D.debug "Whitelisting PCI vendor %s for passthrough" s; + Scanf.sscanf s "%4Lx" (fun _ -> s)) (* Scanf verifies format *) (fun s -> s) igd_passthru_vendor_whitelist; "gvt-g-whitelist", Arg.Set_string gvt_g_whitelist, - (fun () -> !gvt_g_whitelist), "path to the GVT-g whitelist file"; + (fun () -> !gvt_g_whitelist), "path to the GVT-g whitelist file"; "pass-through-pif-carrier", Arg.Set pass_through_pif_carrier, (fun () -> string_of_bool !pass_through_pif_carrier), "reflect physical interface carrier information to VMs by default"; "cluster-stack-default", Arg.Set_string cluster_stack_default, - (fun () -> !cluster_stack_default), "Default cluster stack (HA)"; + (fun () -> !cluster_stack_default), "Default cluster stack (HA)"; "ciphersuites-good-outbound", Arg.String (fun s -> ciphersuites_good_outbound := if String_plain.trim s <> "" then Some s else None), - (fun () -> match !ciphersuites_good_outbound with None -> "" | Some s -> s), - "Preferred set of ciphersuites for outgoing TLS connections. (This list must match, or at least contain one of, the GOOD_CIPHERS in the 'xapissl' script for starting the listening stunnel.)"; + (fun () -> match !ciphersuites_good_outbound with None -> "" | Some s -> s), + "Preferred set of ciphersuites for outgoing TLS connections. 
(This list must match, or at least contain one of, the GOOD_CIPHERS in the 'xapissl' script for starting the listening stunnel.)"; "ciphersuites-legacy-outbound", Arg.Set_string ciphersuites_legacy_outbound, - (fun () -> !ciphersuites_legacy_outbound), "For backwards compatibility: to be used in addition to ciphersuites-good-outbound for outgoing TLS connections"; + (fun () -> !ciphersuites_legacy_outbound), "For backwards compatibility: to be used in addition to ciphersuites-good-outbound for outgoing TLS connections"; "gpumon_stop_timeout", Arg.Set_float gpumon_stop_timeout, - (fun () -> string_of_float !gpumon_stop_timeout), "Time to wait after attempting to stop gpumon when launching a vGPU-enabled VM."; + (fun () -> string_of_float !gpumon_stop_timeout), "Time to wait after attempting to stop gpumon when launching a vGPU-enabled VM."; "reboot_required_hfxs", Arg.Set_string reboot_required_hfxs, - (fun () -> !reboot_required_hfxs), "File to query hotfix uuids which require reboot"; + (fun () -> !reboot_required_hfxs), "File to query hotfix uuids which require reboot"; "xen_livepatch_list", Arg.Set_string xen_livepatch_list, - (fun () -> !xen_livepatch_list), "Command to query current xen livepatch list"; + (fun () -> !xen_livepatch_list), "Command to query current xen livepatch list"; "kpatch_list", Arg.Set_string kpatch_list, - (fun () -> !kpatch_list), "Command to query current kernel patch list"; -] + (fun () -> !kpatch_list), "Command to query current kernel patch list"; +] let all_options = options_of_xapi_globs_spec @ other_options @@ -1040,99 +1040,99 @@ let has_vendor_device = 2L (* This set is used as an indicator to show the virtual hardware platform versions the current host offers to its guests *) let host_virtual_hardware_platform_versions = [ - (* Zero is the implicit version offered by hosts older than this - versioning concept, and the version implicitly required by old - guests that do not specify a version. 
*) - 0L; - - (* Version one is the version in which this versioning concept was - introduced. This Virtual Hardware Platform might not differ - significantly from the immediately preceding version zero, but - it seems prudent to introduce a way to differentiate it from - the whole history of older host versions. *) - 1L; - - (* Version two which is "has_vendor_device" will be the first virtual - hardware platform version to offer the option of an emulated PCI - device used to trigger a guest to install or upgrade its PV tools - (originally introduced to exploit the Windows Update system). *) - has_vendor_device; + (* Zero is the implicit version offered by hosts older than this + versioning concept, and the version implicitly required by old + guests that do not specify a version. *) + 0L; + + (* Version one is the version in which this versioning concept was + introduced. This Virtual Hardware Platform might not differ + significantly from the immediately preceding version zero, but + it seems prudent to introduce a way to differentiate it from + the whole history of older host versions. *) + 1L; + + (* Version two which is "has_vendor_device" will be the first virtual + hardware platform version to offer the option of an emulated PCI + device used to trigger a guest to install or upgrade its PV tools + (originally introduced to exploit the Windows Update system). 
*) + has_vendor_device; ] module Resources = struct - let essential_executables = [ - "xapissl", xapissl_path, "Script for starting the listening stunnel"; - "busybox", busybox, "Swiss army knife executable - used as DHCP server"; - "pbis-force-domain-leave-script", pbis_force_domain_leave_script, "Executed when PBIS domain-leave fails"; - "redo-log-block-device-io", redo_log_block_device_io, "Used by the redo log for block device I/O"; - "sparse_dd", sparse_dd, "Path to sparse_dd"; - "vhd-tool", vhd_tool, "Path to vhd-tool"; - "fence", fence, "Path to fence binary, used for HA host fencing"; - "host-bugreport-upload", host_bugreport_upload, "Path to host-bugreport-upload"; - "set-hostname", set_hostname, "Path to set-hostname"; - "xe-syslog-reconfigure", xe_syslog_reconfigure, "Path to xe-syslog-reconfigure"; - "logs-download", logs_download, "Used by /get_host_logs_download HTTP handler"; - "update-mh-info", update_mh_info_script, "Executed when changing the management interface"; - "upload-wrapper", upload_wrapper, "Used by Host_crashdump.upload"; - "host-backup", host_backup, "Path to host-backup"; - "host-restore", host_restore, "Path to host-restore"; - "xe", xe_path, "Path to xe CLI binary"; - "xe-toolstack-restart", xe_toolstack_restart, "Path to xe-toolstack-restart script"; - "xsh", xsh, "Path to xsh binary"; - "static-vdis", static_vdis, "Path to static-vdis script"; - "xen-cmdline-script", xen_cmdline_script, "Path to xen-cmdline script"; - ] - let nonessential_executables = [ - "startup-script-hook", startup_script_hook, "Executed during startup"; - "rolling-upgrade-script-hook", rolling_upgrade_script_hook, "Executed when a rolling upgrade is detected starting or stopping"; - "xapi-message-script", xapi_message_script, "Executed when messages are generated if email feature is disabled"; - "non-managed-pifs", non_managed_pifs, "Executed during PIF.scan to find out which NICs should not be managed by xapi"; - "update-issue", update_issue_script, 
"Running update-service when configuring the management interface"; - "killall", kill_process_script, "Executed to kill process"; - ] - let essential_files = [ - "pool_config_file", pool_config_file, "Pool configuration file"; - "db-config-file", db_conf_path, "Database configuration file"; - "udhcpd-skel", udhcpd_skel, "Skeleton config for udhcp"; - ] - let nonessential_files = [ - "pool_secret_path", pool_secret_path, "Pool configuration file"; - "udhcpd-conf", udhcpd_conf, "Optional configuration file for udchp"; - "remote-db-conf-file", remote_db_conf_fragment_path, "Where to store information about remote databases"; - "logconfig", log_config_file, "Configure the logging policy"; - "cpu-info-file", cpu_info_file, "Where to cache boot-time CPU info"; - "server-cert-path", server_cert_path, "Path to server ssl certificate"; - ] - let essential_dirs = [ - "sm-dir", sm_dir, "Directory containing SM plugins"; - "tools-sr-dir", tools_sr_dir, "Directory containing tools ISO"; - "web-dir", web_dir, "Directory to export fileserver"; - "cluster-stack-root", cluster_stack_root, "Directory containing collections of HA tools and scripts"; - "xen-cmdline", xen_cmdline_path, "Path to xen-cmdline binary"; - "gpg-homedir", gpg_homedir, "Passed as --homedir to gpg commands"; - "post-install-scripts-dir", post_install_scripts_dir, "Directory containing trusted guest provisioning scripts"; - ] - let nonessential_dirs = [ - "master-scripts-dir", master_scripts_dir, "Scripts to execute when transitioning pool role"; - "packs-dir", packs_dir, "Directory containing supplemental pack data"; - "xapi-hooks-root", xapi_hooks_root, "Root directory for xapi hooks"; - "xapi-plugins-root", xapi_plugins_root, "Optional directory containing XenAPI plugins"; - "xapi-extensions-root", xapi_extensions_root, "Optional directory containing XenAPI extensions"; - "static-vdis-root", static_vdis_dir, "Optional directory for configuring static VDIs"; - ] - - let xcp_resources = - let make_resource 
perms essential (name, path, description) = - { Xcp_service.essential; name; description; path; perms } - in - let open Unix in - List.fold_left List.rev_append [] [ - List.map (make_resource [X_OK] true) essential_executables; - List.map (make_resource [X_OK] false) nonessential_executables; - List.map (make_resource [R_OK; W_OK] true) essential_files; - List.map (make_resource [R_OK; W_OK] false) nonessential_files; - List.map (make_resource [R_OK; W_OK] true) essential_dirs; - List.map (make_resource [R_OK; W_OK] false) nonessential_dirs; - ] + let essential_executables = [ + "xapissl", xapissl_path, "Script for starting the listening stunnel"; + "busybox", busybox, "Swiss army knife executable - used as DHCP server"; + "pbis-force-domain-leave-script", pbis_force_domain_leave_script, "Executed when PBIS domain-leave fails"; + "redo-log-block-device-io", redo_log_block_device_io, "Used by the redo log for block device I/O"; + "sparse_dd", sparse_dd, "Path to sparse_dd"; + "vhd-tool", vhd_tool, "Path to vhd-tool"; + "fence", fence, "Path to fence binary, used for HA host fencing"; + "host-bugreport-upload", host_bugreport_upload, "Path to host-bugreport-upload"; + "set-hostname", set_hostname, "Path to set-hostname"; + "xe-syslog-reconfigure", xe_syslog_reconfigure, "Path to xe-syslog-reconfigure"; + "logs-download", logs_download, "Used by /get_host_logs_download HTTP handler"; + "update-mh-info", update_mh_info_script, "Executed when changing the management interface"; + "upload-wrapper", upload_wrapper, "Used by Host_crashdump.upload"; + "host-backup", host_backup, "Path to host-backup"; + "host-restore", host_restore, "Path to host-restore"; + "xe", xe_path, "Path to xe CLI binary"; + "xe-toolstack-restart", xe_toolstack_restart, "Path to xe-toolstack-restart script"; + "xsh", xsh, "Path to xsh binary"; + "static-vdis", static_vdis, "Path to static-vdis script"; + "xen-cmdline-script", xen_cmdline_script, "Path to xen-cmdline script"; + ] + let 
nonessential_executables = [ + "startup-script-hook", startup_script_hook, "Executed during startup"; + "rolling-upgrade-script-hook", rolling_upgrade_script_hook, "Executed when a rolling upgrade is detected starting or stopping"; + "xapi-message-script", xapi_message_script, "Executed when messages are generated if email feature is disabled"; + "non-managed-pifs", non_managed_pifs, "Executed during PIF.scan to find out which NICs should not be managed by xapi"; + "update-issue", update_issue_script, "Running update-service when configuring the management interface"; + "killall", kill_process_script, "Executed to kill process"; + ] + let essential_files = [ + "pool_config_file", pool_config_file, "Pool configuration file"; + "db-config-file", db_conf_path, "Database configuration file"; + "udhcpd-skel", udhcpd_skel, "Skeleton config for udhcp"; + ] + let nonessential_files = [ + "pool_secret_path", pool_secret_path, "Pool configuration file"; + "udhcpd-conf", udhcpd_conf, "Optional configuration file for udchp"; + "remote-db-conf-file", remote_db_conf_fragment_path, "Where to store information about remote databases"; + "logconfig", log_config_file, "Configure the logging policy"; + "cpu-info-file", cpu_info_file, "Where to cache boot-time CPU info"; + "server-cert-path", server_cert_path, "Path to server ssl certificate"; + ] + let essential_dirs = [ + "sm-dir", sm_dir, "Directory containing SM plugins"; + "tools-sr-dir", tools_sr_dir, "Directory containing tools ISO"; + "web-dir", web_dir, "Directory to export fileserver"; + "cluster-stack-root", cluster_stack_root, "Directory containing collections of HA tools and scripts"; + "xen-cmdline", xen_cmdline_path, "Path to xen-cmdline binary"; + "gpg-homedir", gpg_homedir, "Passed as --homedir to gpg commands"; + "post-install-scripts-dir", post_install_scripts_dir, "Directory containing trusted guest provisioning scripts"; + ] + let nonessential_dirs = [ + "master-scripts-dir", master_scripts_dir, "Scripts to 
execute when transitioning pool role"; + "packs-dir", packs_dir, "Directory containing supplemental pack data"; + "xapi-hooks-root", xapi_hooks_root, "Root directory for xapi hooks"; + "xapi-plugins-root", xapi_plugins_root, "Optional directory containing XenAPI plugins"; + "xapi-extensions-root", xapi_extensions_root, "Optional directory containing XenAPI extensions"; + "static-vdis-root", static_vdis_dir, "Optional directory for configuring static VDIs"; + ] + + let xcp_resources = + let make_resource perms essential (name, path, description) = + { Xcp_service.essential; name; description; path; perms } + in + let open Unix in + List.fold_left List.rev_append [] [ + List.map (make_resource [X_OK] true) essential_executables; + List.map (make_resource [X_OK] false) nonessential_executables; + List.map (make_resource [R_OK; W_OK] true) essential_files; + List.map (make_resource [R_OK; W_OK] false) nonessential_files; + List.map (make_resource [R_OK; W_OK] true) essential_dirs; + List.map (make_resource [R_OK; W_OK] false) nonessential_dirs; + ] end diff --git a/ocaml/xapi/xapi_gpu_group.ml b/ocaml/xapi/xapi_gpu_group.ml index 0ba3a4d6df7..ffc63a7b80a 100644 --- a/ocaml/xapi/xapi_gpu_group.ml +++ b/ocaml/xapi/xapi_gpu_group.ml @@ -15,123 +15,123 @@ module D=Debug.Make(struct let name="xapi" end) open D open Stdext let create ~__context ~name_label ~name_description ~other_config = - let group = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.GPU_group.create ~__context ~ref:group ~uuid ~name_label ~name_description - ~gPU_types:[] ~other_config ~allocation_algorithm:`depth_first; - group + let group = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.GPU_group.create ~__context ~ref:group ~uuid ~name_label ~name_description + ~gPU_types:[] ~other_config ~allocation_algorithm:`depth_first; + group let destroy ~__context ~self = - let vgpus = Db.GPU_group.get_VGPUs ~__context ~self in - let connected = List.filter (fun self 
-> - Db.VGPU.get_currently_attached ~__context ~self - ) vgpus in - if connected <> [] then - raise (Api_errors.Server_error (Api_errors.gpu_group_contains_vgpu, List.map Ref.string_of connected)); + let vgpus = Db.GPU_group.get_VGPUs ~__context ~self in + let connected = List.filter (fun self -> + Db.VGPU.get_currently_attached ~__context ~self + ) vgpus in + if connected <> [] then + raise (Api_errors.Server_error (Api_errors.gpu_group_contains_vgpu, List.map Ref.string_of connected)); - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in - if pgpus <> [] then - raise (Api_errors.Server_error (Api_errors.gpu_group_contains_pgpu, List.map Ref.string_of pgpus)); + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in + if pgpus <> [] then + raise (Api_errors.Server_error (Api_errors.gpu_group_contains_pgpu, List.map Ref.string_of pgpus)); - (* Destroy all vGPUs *) - List.iter (fun vgpu -> - Helpers.log_exn_continue (Printf.sprintf "destroying VGPU: %s" (Ref.string_of vgpu)) - (fun vgpu -> Db.VGPU.destroy ~__context ~self:vgpu) vgpu) vgpus; + (* Destroy all vGPUs *) + List.iter (fun vgpu -> + Helpers.log_exn_continue (Printf.sprintf "destroying VGPU: %s" (Ref.string_of vgpu)) + (fun vgpu -> Db.VGPU.destroy ~__context ~self:vgpu) vgpu) vgpus; - Db.GPU_group.destroy ~__context ~self + Db.GPU_group.destroy ~__context ~self let find_or_create ~__context pgpu = - let pci = Db.PGPU.get_PCI ~__context ~self:pgpu in - let pci_rec = Db.PCI.get_record_internal ~__context ~self:pci in - let gpu_type = Xapi_pci.string_of_pci ~__context ~self:pci in - try - List.find (fun rf-> - let rc = Db.GPU_group.get_record_internal ~__context ~self:rf in - rc.Db_actions.gPU_group_GPU_types = [gpu_type] - ) - (Db.GPU_group.get_all ~__context) - with Not_found -> - let name_label = "Group of " ^ pci_rec.Db_actions.pCI_vendor_name ^ " " ^ pci_rec.Db_actions.pCI_device_name ^ " GPUs" in - let group = create ~__context ~name_label ~name_description:"" ~other_config:[] in - group + let pci = 
Db.PGPU.get_PCI ~__context ~self:pgpu in + let pci_rec = Db.PCI.get_record_internal ~__context ~self:pci in + let gpu_type = Xapi_pci.string_of_pci ~__context ~self:pci in + try + List.find (fun rf-> + let rc = Db.GPU_group.get_record_internal ~__context ~self:rf in + rc.Db_actions.gPU_group_GPU_types = [gpu_type] + ) + (Db.GPU_group.get_all ~__context) + with Not_found -> + let name_label = "Group of " ^ pci_rec.Db_actions.pCI_vendor_name ^ " " ^ pci_rec.Db_actions.pCI_device_name ^ " GPUs" in + let group = create ~__context ~name_label ~name_description:"" ~other_config:[] in + group module VGPU_type_set = Set.Make(struct type t = API.ref_VGPU_type let compare = compare end) let union_type_lists ~type_lists = - (* Fold each item of each list into a set, - * then return the elements of the set as a list. *) - let union_set = List.fold_left - (fun acc type_list -> - List.fold_left - (fun acc vgpu_type -> VGPU_type_set.add vgpu_type acc) - acc type_list) - VGPU_type_set.empty type_lists - in - VGPU_type_set.elements union_set + (* Fold each item of each list into a set, + * then return the elements of the set as a list. 
*) + let union_set = List.fold_left + (fun acc type_list -> + List.fold_left + (fun acc vgpu_type -> VGPU_type_set.add vgpu_type acc) + acc type_list) + VGPU_type_set.empty type_lists + in + VGPU_type_set.elements union_set let update_enabled_VGPU_types ~__context ~self = - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in - let enabled_VGPU_types = union_type_lists - (List.map - (fun pgpu -> Db.PGPU.get_enabled_VGPU_types ~__context ~self:pgpu) - pgpus) - in - Db.GPU_group.set_enabled_VGPU_types ~__context ~self ~value:enabled_VGPU_types + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in + let enabled_VGPU_types = union_type_lists + (List.map + (fun pgpu -> Db.PGPU.get_enabled_VGPU_types ~__context ~self:pgpu) + pgpus) + in + Db.GPU_group.set_enabled_VGPU_types ~__context ~self ~value:enabled_VGPU_types let update_supported_VGPU_types ~__context ~self = - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in - let supported_VGPU_types = union_type_lists - (List.map - (fun pgpu -> Db.PGPU.get_supported_VGPU_types ~__context ~self:pgpu) - pgpus) - in - Db.GPU_group.set_supported_VGPU_types ~__context ~self ~value:supported_VGPU_types + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in + let supported_VGPU_types = union_type_lists + (List.map + (fun pgpu -> Db.PGPU.get_supported_VGPU_types ~__context ~self:pgpu) + pgpus) + in + Db.GPU_group.set_supported_VGPU_types ~__context ~self ~value:supported_VGPU_types let get_remaining_capacity_internal ~__context ~self ~vgpu_type = - (* If there is capacity in the group for this VGPU type, we return the - * capacity. If there is not then we will have an exception for each PGPU, - * explaining why it cannot run a VGPU of this type. We need to look through - * this list of exceptions and pick one to raise as the "overall" error. *) - let choose_exception = function - | [] -> - (* Should only ever get here if there are no PGPUs in the GPU group. 
*) - Api_errors.Server_error - (Api_errors.gpu_group_contains_no_pgpus, [Ref.string_of self]) - | exceptions -> - let error_code_scores = [ - Api_errors.pgpu_insufficient_capacity_for_vgpu, 10; - Api_errors.vgpu_type_not_compatible_with_running_type, 8; - Api_errors.vgpu_type_not_enabled, 6; - Api_errors.vgpu_type_not_supported, 4; - ] in - let score_exception = function - | Api_errors.Server_error (code, _) -> - if List.mem_assoc code error_code_scores - then List.assoc code error_code_scores - else 0 - | _ -> 0 - in - List.hd - (Helpers.sort_by_schwarzian ~descending:true score_exception exceptions) - in - (* For each PGPU in the group, if it is unable to run the specified VGPU type, - * save the exception returned. Otherwise just add its capacity - * to the total. *) - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in - let capacity, exceptions = List.fold_left - (fun (capacity, exceptions) pgpu -> - match - Xapi_pgpu_helpers.get_remaining_capacity_internal - ~__context ~self:pgpu ~vgpu_type - with - | Either.Left e -> (capacity, e :: exceptions) - | Either.Right n -> (Int64.add n capacity, exceptions)) - (0L, []) pgpus - in - if capacity > 0L - then Either.Right capacity - else Either.Left (choose_exception exceptions) + (* If there is capacity in the group for this VGPU type, we return the + * capacity. If there is not then we will have an exception for each PGPU, + * explaining why it cannot run a VGPU of this type. We need to look through + * this list of exceptions and pick one to raise as the "overall" error. *) + let choose_exception = function + | [] -> + (* Should only ever get here if there are no PGPUs in the GPU group. 
*) + Api_errors.Server_error + (Api_errors.gpu_group_contains_no_pgpus, [Ref.string_of self]) + | exceptions -> + let error_code_scores = [ + Api_errors.pgpu_insufficient_capacity_for_vgpu, 10; + Api_errors.vgpu_type_not_compatible_with_running_type, 8; + Api_errors.vgpu_type_not_enabled, 6; + Api_errors.vgpu_type_not_supported, 4; + ] in + let score_exception = function + | Api_errors.Server_error (code, _) -> + if List.mem_assoc code error_code_scores + then List.assoc code error_code_scores + else 0 + | _ -> 0 + in + List.hd + (Helpers.sort_by_schwarzian ~descending:true score_exception exceptions) + in + (* For each PGPU in the group, if it is unable to run the specified VGPU type, + * save the exception returned. Otherwise just add its capacity + * to the total. *) + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self in + let capacity, exceptions = List.fold_left + (fun (capacity, exceptions) pgpu -> + match + Xapi_pgpu_helpers.get_remaining_capacity_internal + ~__context ~self:pgpu ~vgpu_type + with + | Either.Left e -> (capacity, e :: exceptions) + | Either.Right n -> (Int64.add n capacity, exceptions)) + (0L, []) pgpus + in + if capacity > 0L + then Either.Right capacity + else Either.Left (choose_exception exceptions) let get_remaining_capacity ~__context ~self ~vgpu_type = - match get_remaining_capacity_internal ~__context ~self ~vgpu_type with - | Either.Left e -> 0L - | Either.Right capacity -> capacity + match get_remaining_capacity_internal ~__context ~self ~vgpu_type with + | Either.Left e -> 0L + | Either.Right capacity -> capacity diff --git a/ocaml/xapi/xapi_gpu_group.mli b/ocaml/xapi/xapi_gpu_group.mli index 5ffe4186f79..6dcc51b2973 100644 --- a/ocaml/xapi/xapi_gpu_group.mli +++ b/ocaml/xapi/xapi_gpu_group.mli @@ -14,7 +14,7 @@ (** Module that defines API functions for GPU_group objects * @group Graphics - *) +*) (** Create a GPU group. 
*) val create : @@ -31,19 +31,19 @@ val find_or_create : __context:Context.t -> [ `PGPU ] Ref.t -> [ `GPU_group ] Ref.t val update_enabled_VGPU_types : - __context:Context.t -> self:[ `GPU_group ] Ref.t -> unit + __context:Context.t -> self:[ `GPU_group ] Ref.t -> unit val update_supported_VGPU_types : - __context:Context.t -> self:[ `GPU_group ] Ref.t -> unit + __context:Context.t -> self:[ `GPU_group ] Ref.t -> unit val get_remaining_capacity_internal : - __context:Context.t -> - self: [ `GPU_group ] Ref.t -> - vgpu_type:[ `VGPU_type ] Ref.t -> - (exn, int64) Stdext.Either.t + __context:Context.t -> + self: [ `GPU_group ] Ref.t -> + vgpu_type:[ `VGPU_type ] Ref.t -> + (exn, int64) Stdext.Either.t val get_remaining_capacity : - __context:Context.t -> - self: [ `GPU_group ] Ref.t -> - vgpu_type:[ `VGPU_type ] Ref.t -> - int64 + __context:Context.t -> + self: [ `GPU_group ] Ref.t -> + vgpu_type:[ `VGPU_type ] Ref.t -> + int64 diff --git a/ocaml/xapi/xapi_gpumon.ml b/ocaml/xapi/xapi_gpumon.ml index a8e8b88bef6..953c8944c8d 100644 --- a/ocaml/xapi/xapi_gpumon.ml +++ b/ocaml/xapi/xapi_gpumon.ml @@ -19,21 +19,21 @@ let systemctl = "/usr/bin/systemctl" let gpumon = "xcp-rrdd-gpumon" module Gpumon = Daemon_manager.Make(struct - let check = Daemon_manager.Function (fun () -> - try - ignore - (Forkhelpers.execute_command_get_output systemctl - ["is-active"; "-q"; gpumon]); - true - with _ -> false) + let check = Daemon_manager.Function (fun () -> + try + ignore + (Forkhelpers.execute_command_get_output systemctl + ["is-active"; "-q"; gpumon]); + true + with _ -> false) - let start () = - debug "Starting %s" gpumon; - ignore (Forkhelpers.execute_command_get_output systemctl ["start"; gpumon]) + let start () = + debug "Starting %s" gpumon; + ignore (Forkhelpers.execute_command_get_output systemctl ["start"; gpumon]) - let stop () = - debug "Stopping %s" gpumon; - ignore (Forkhelpers.execute_command_get_output systemctl ["stop"; gpumon]) -end) + let stop () = + debug "Stopping 
%s" gpumon; + ignore (Forkhelpers.execute_command_get_output systemctl ["stop"; gpumon]) + end) let with_gpumon_stopped = Gpumon.with_daemon_stopped diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index e5a466f2de9..32dc31b20d0 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -27,7 +27,7 @@ module IntSet = Set.Make(struct type t=int let compare=compare end) (* A fixed set of standard keys are placed in the PV_drivers_version map. NB each key is annotated with whether it appears in windows and/or linux *) -let pv_drivers_version = +let pv_drivers_version = [ "drivers/xenevtchn", "xenevtchn"; (* windows *) "drivers/xenvbd", "xenvbd"; (* windows *) "drivers/xennet", "xennet"; (* windows *) @@ -39,7 +39,7 @@ let pv_drivers_version = ] (* A fixed set of standard keys placed in the os_version map. *) -let os_version = +let os_version = [ "data/os_name", "name"; (* linux + windows *) "data/os_uname", "uname"; (* linux *) "data/os_distro", "distro"; (* linux + windows *) @@ -49,7 +49,7 @@ let os_version = "attr/os/spminor", "spminor"; (* windows *) ] -let memory = +let memory = [ "data/meminfo_free", "free"; "data/meminfo_total", "total" ] @@ -68,41 +68,41 @@ let extend base str = Printf.sprintf "%s/%s" base str * attr/eth0/ipv6/1/addr -> 0/ipv6/1 * *) let networks path (list: string -> string list) = - (* Find all ipv6 addresses under a path. *) - let find_ipv6 path prefix = List.map - (fun str -> (extend (extend path str) "addr", extend prefix str)) - (list path) - in - (* Find the ipv4 address under a path, and the ipv6 addresses if they exist. *) - let find_all_ips path prefix = - let ipv4 = (extend path "ip", extend prefix "ip") in - if List.mem "ipv6" (list path) then - ipv4 :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6")) - else - [ipv4] - in - (* Find all "ethn" or "xenbrn" under a path. 
*) - let find_eths path = List.fold_left - (fun acc eth -> - if String.startswith "eth" eth then - let n = String.sub eth 3 (String.length eth - 3) in - (extend path eth, n) :: acc - else if String.startswith "xenbr" eth then - let n = String.sub eth 5 (String.length eth - 5) in - (extend path eth, n) :: acc - else - acc) - [] (list path) - in - path - |> find_eths - |> List.map (fun (path, prefix) -> find_all_ips path prefix) - |> List.concat + (* Find all ipv6 addresses under a path. *) + let find_ipv6 path prefix = List.map + (fun str -> (extend (extend path str) "addr", extend prefix str)) + (list path) + in + (* Find the ipv4 address under a path, and the ipv6 addresses if they exist. *) + let find_all_ips path prefix = + let ipv4 = (extend path "ip", extend prefix "ip") in + if List.mem "ipv6" (list path) then + ipv4 :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6")) + else + [ipv4] + in + (* Find all "ethn" or "xenbrn" under a path. *) + let find_eths path = List.fold_left + (fun acc eth -> + if String.startswith "eth" eth then + let n = String.sub eth 3 (String.length eth - 3) in + (extend path eth, n) :: acc + else if String.startswith "xenbr" eth then + let n = String.sub eth 5 (String.length eth - 5) in + (extend path eth, n) :: acc + else + acc) + [] (list path) + in + path + |> find_eths + |> List.map (fun (path, prefix) -> find_all_ips path prefix) + |> List.concat (* One key is placed in the other map per control/* key in xenstore. This catches keys like "feature-shutdown" "feature-hibernate" "feature-reboot" "feature-sysrq" *) -let other all_control = +let other all_control = List.map (fun x -> "control/" ^ x, x) all_control (* There are two memory keys: data/meminfo_free and data/meminfo_total. 
These are *) @@ -113,15 +113,15 @@ let other all_control = type m = (string * string) list type guest_metrics_t = { - pv_drivers_version: m; - os_version: m; - networks: m; - other: m; - memory: m; - device_id: m; - last_updated: float; - can_use_hotplug_vbd: API.tristate_type; - can_use_hotplug_vif: API.tristate_type; + pv_drivers_version: m; + os_version: m; + networks: m; + other: m; + memory: m; + device_id: m; + last_updated: float; + can_use_hotplug_vbd: API.tristate_type; + can_use_hotplug_vif: API.tristate_type; } let cache : (int, guest_metrics_t) Hashtbl.t = Hashtbl.create 20 let memory_targets : (int, int64) Hashtbl.t = Hashtbl.create 20 @@ -129,24 +129,24 @@ let dead_domains : IntSet.t ref = ref IntSet.empty let mutex = Mutex.create () (** Reset all the guest metrics for a particular VM. 'lookup' reads a key from xenstore - and 'list' reads a directory from xenstore. Both are relative to the guest's + and 'list' reads a directory from xenstore. Both are relative to the guest's domainpath. 
*) let all (lookup: string -> string option) (list: string -> string list) ~__context ~domid ~uuid = let all_control = list "control" in let to_map kvpairs = List.concat (List.map (fun (xskey, mapkey) -> match lookup xskey with - | Some xsval -> [ mapkey, xsval ] - | None -> []) kvpairs) in + | Some xsval -> [ mapkey, xsval ] + | None -> []) kvpairs) in let get_tristate xskey = - match lookup xskey with - | Some "0" -> `no - | Some "1" -> `yes - | _ -> `unspecified in + match lookup xskey with + | Some "0" -> `no + | Some "1" -> `yes + | _ -> `unspecified in let ts = match lookup "data/ts" with - | Some value -> ["data-ts",value] - | None -> [] in - + | Some value -> ["data-ts",value] + | None -> [] in + let pv_drivers_version = to_map pv_drivers_version and os_version = to_map os_version and device_id = to_map device_id @@ -157,8 +157,8 @@ let all (lookup: string -> string option) (list: string -> string list) ~__conte let can_use_hotplug_vbd = get_tristate "feature/hotplug/vbd" in let can_use_hotplug_vif = get_tristate "feature/hotplug/vif" in - (* let num = Mutex.execute mutex (fun () -> Hashtbl.fold (fun _ _ c -> 1 + c) cache 0) in - debug "Number of entries in hashtbl: %d" num; *) + (* let num = Mutex.execute mutex (fun () -> Hashtbl.fold (fun _ _ c -> 1 + c) cache 0) in + debug "Number of entries in hashtbl: %d" num; *) (* to avoid breakage whilst 'micro' is added to linux and windows agents, default this field to -1 if it's not present in xenstore *) @@ -169,136 +169,136 @@ let all (lookup: string -> string option) (list: string -> string list) ~__conte let self = Db.VM.get_by_uuid ~__context ~uuid in let ( - guest_metrics_cached + guest_metrics_cached ) = Mutex.execute mutex (fun () -> try - Hashtbl.find cache domid - with _ -> - (* Make sure our cached idea of whether the domain is live or not is correct *) - let vm_guest_metrics = Db.VM.get_guest_metrics ~__context ~self in - let live = true - && Db.is_valid_ref __context vm_guest_metrics - && 
Db.VM_guest_metrics.get_live ~__context ~self:vm_guest_metrics in - if live then - dead_domains := IntSet.remove domid !dead_domains - else - dead_domains := IntSet.add domid !dead_domains; - { - pv_drivers_version = []; - os_version = []; - networks = []; - other = []; - memory = []; - device_id = []; - last_updated = 0.0; - can_use_hotplug_vbd = `unspecified; - can_use_hotplug_vif = `unspecified; - } - ) in + Hashtbl.find cache domid + with _ -> + (* Make sure our cached idea of whether the domain is live or not is correct *) + let vm_guest_metrics = Db.VM.get_guest_metrics ~__context ~self in + let live = true + && Db.is_valid_ref __context vm_guest_metrics + && Db.VM_guest_metrics.get_live ~__context ~self:vm_guest_metrics in + if live then + dead_domains := IntSet.remove domid !dead_domains + else + dead_domains := IntSet.add domid !dead_domains; + { + pv_drivers_version = []; + os_version = []; + networks = []; + other = []; + memory = []; + device_id = []; + last_updated = 0.0; + can_use_hotplug_vbd = `unspecified; + can_use_hotplug_vif = `unspecified; + } + ) in (* Consider the data valid IF the data/updated key exists *) let data_updated = lookup "data/updated" <> None in if data_updated then begin - (* Only if the data is valid, cache it (CA-20353) *) - Mutex.execute mutex (fun () -> Hashtbl.replace cache domid {pv_drivers_version; os_version; networks; other; memory; device_id; last_updated; can_use_hotplug_vbd; can_use_hotplug_vif;}); + (* Only if the data is valid, cache it (CA-20353) *) + Mutex.execute mutex (fun () -> Hashtbl.replace cache domid {pv_drivers_version; os_version; networks; other; memory; device_id; last_updated; can_use_hotplug_vbd; can_use_hotplug_vif;}); + + (* We update only if any actual data has changed *) + if ( guest_metrics_cached.pv_drivers_version <> pv_drivers_version + || + guest_metrics_cached.os_version <> os_version + || + guest_metrics_cached.networks <> networks + || + guest_metrics_cached.other <> other + || + 
guest_metrics_cached.device_id <> device_id) + || + guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd + || + guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif + (* Nb. we're ignoring the memory updates as far as the VM_guest_metrics API object is concerned. We are putting them into an RRD instead *) + (* || + guest_metrics_cached.memory <> memory)*) + then + begin + let gm = + let existing = Db.VM.get_guest_metrics ~__context ~self in + if (try ignore(Db.VM_guest_metrics.get_uuid ~__context ~self:existing); true with _ -> false) + then existing + else + (* if it doesn't exist, make a fresh one *) + let new_ref = Ref.make () and new_uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.VM_guest_metrics.create ~__context ~ref:new_ref ~uuid:new_uuid + ~os_version:os_version ~pV_drivers_version:pv_drivers_version ~pV_drivers_up_to_date:false ~memory:[] ~disks:[] ~networks:networks ~other:other + ~pV_drivers_detected:false ~last_updated:(Date.of_float last_updated) ~other_config:[] ~live:true ~can_use_hotplug_vbd:`unspecified ~can_use_hotplug_vif:`unspecified; + Db.VM.set_guest_metrics ~__context ~self ~value:new_ref; + (* We've just set the thing to live, let's make sure it's not in the dead list *) + let sl xs = String.concat "; " (List.map (fun (k, v) -> k ^ ": " ^ v) xs) in + info "Received initial update from guest agent in VM %s; os_version = [ %s]; pv_drivers_version = [ %s ]; networks = [ %s ]" (Ref.string_of self) (sl os_version) (sl pv_drivers_version) (sl networks); + Mutex.execute mutex (fun () -> dead_domains := IntSet.remove domid !dead_domains); + new_ref in + + (* We unconditionally reset the database values but observe that the database + checks whether a value has actually changed before doing anything *) + if(guest_metrics_cached.pv_drivers_version <> pv_drivers_version) then + Db.VM_guest_metrics.set_PV_drivers_version ~__context ~self:gm ~value:pv_drivers_version; + if(guest_metrics_cached.os_version <> os_version) then + 
Db.VM_guest_metrics.set_os_version ~__context ~self:gm ~value:os_version; + if(guest_metrics_cached.networks <> networks) then + Db.VM_guest_metrics.set_networks ~__context ~self:gm ~value:networks; + if(guest_metrics_cached.other <> other) then begin + Db.VM_guest_metrics.set_other ~__context ~self:gm ~value:other; + Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.VM.update_allowed_operations rpc session_id self); + end; + if(guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd) then begin + Db.VM_guest_metrics.set_can_use_hotplug_vbd ~__context ~self:gm ~value:can_use_hotplug_vbd; + end; + if(guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif) then begin + Db.VM_guest_metrics.set_can_use_hotplug_vif ~__context ~self:gm ~value:can_use_hotplug_vif; + end; + (* if(guest_metrics_cached.memory <> memory) then + Db.VM_guest_metrics.set_memory ~__context ~self:gm ~value:memory; *) - (* We update only if any actual data has changed *) - if ( guest_metrics_cached.pv_drivers_version <> pv_drivers_version - || - guest_metrics_cached.os_version <> os_version - || - guest_metrics_cached.networks <> networks - || - guest_metrics_cached.other <> other - || - guest_metrics_cached.device_id <> device_id) - || - guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd - || - guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif -(* Nb. we're ignoring the memory updates as far as the VM_guest_metrics API object is concerned. 
We are putting them into an RRD instead *) -(* || - guest_metrics_cached.memory <> memory)*) - then - begin - let gm = - let existing = Db.VM.get_guest_metrics ~__context ~self in - if (try ignore(Db.VM_guest_metrics.get_uuid ~__context ~self:existing); true with _ -> false) - then existing - else - (* if it doesn't exist, make a fresh one *) - let new_ref = Ref.make () and new_uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.VM_guest_metrics.create ~__context ~ref:new_ref ~uuid:new_uuid - ~os_version:os_version ~pV_drivers_version:pv_drivers_version ~pV_drivers_up_to_date:false ~memory:[] ~disks:[] ~networks:networks ~other:other - ~pV_drivers_detected:false ~last_updated:(Date.of_float last_updated) ~other_config:[] ~live:true ~can_use_hotplug_vbd:`unspecified ~can_use_hotplug_vif:`unspecified; - Db.VM.set_guest_metrics ~__context ~self ~value:new_ref; - (* We've just set the thing to live, let's make sure it's not in the dead list *) - let sl xs = String.concat "; " (List.map (fun (k, v) -> k ^ ": " ^ v) xs) in - info "Received initial update from guest agent in VM %s; os_version = [ %s]; pv_drivers_version = [ %s ]; networks = [ %s ]" (Ref.string_of self) (sl os_version) (sl pv_drivers_version) (sl networks); - Mutex.execute mutex (fun () -> dead_domains := IntSet.remove domid !dead_domains); - new_ref in + Db.VM_guest_metrics.set_last_updated ~__context ~self:gm ~value:(Date.of_float last_updated); - (* We unconditionally reset the database values but observe that the database - checks whether a value has actually changed before doing anything *) - if(guest_metrics_cached.pv_drivers_version <> pv_drivers_version) then - Db.VM_guest_metrics.set_PV_drivers_version ~__context ~self:gm ~value:pv_drivers_version; - if(guest_metrics_cached.os_version <> os_version) then - Db.VM_guest_metrics.set_os_version ~__context ~self:gm ~value:os_version; - if(guest_metrics_cached.networks <> networks) then - Db.VM_guest_metrics.set_networks ~__context ~self:gm 
~value:networks; - if(guest_metrics_cached.other <> other) then begin - Db.VM_guest_metrics.set_other ~__context ~self:gm ~value:other; - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.VM.update_allowed_operations rpc session_id self); - end; - if(guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd) then begin - Db.VM_guest_metrics.set_can_use_hotplug_vbd ~__context ~self:gm ~value:can_use_hotplug_vbd; - end; - if(guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif) then begin - Db.VM_guest_metrics.set_can_use_hotplug_vif ~__context ~self:gm ~value:can_use_hotplug_vif; - end; -(* if(guest_metrics_cached.memory <> memory) then - Db.VM_guest_metrics.set_memory ~__context ~self:gm ~value:memory; *) - - Db.VM_guest_metrics.set_last_updated ~__context ~self:gm ~value:(Date.of_float last_updated); - - if(guest_metrics_cached.device_id <> device_id) then begin - if(List.mem_assoc Xapi_globs.device_id_key_name device_id) then begin - let value = List.assoc Xapi_globs.device_id_key_name device_id in - let platform = Db.VM.get_platform ~__context ~self in - info "Updating VM %s platform:%s <- %s" (Ref.string_of self) Xapi_globs.device_id_key_name value; - if List.mem_assoc Xapi_globs.device_id_key_name platform then - (try - Db.VM.remove_from_platform ~__context ~self ~key:Xapi_globs.device_id_key_name - with _ -> ()); - try - Db.VM.add_to_platform ~__context ~self ~key:Xapi_globs.device_id_key_name ~value:value; - with _ -> () - end - end; + if(guest_metrics_cached.device_id <> device_id) then begin + if(List.mem_assoc Xapi_globs.device_id_key_name device_id) then begin + let value = List.assoc Xapi_globs.device_id_key_name device_id in + let platform = Db.VM.get_platform ~__context ~self in + info "Updating VM %s platform:%s <- %s" (Ref.string_of self) Xapi_globs.device_id_key_name value; + if List.mem_assoc Xapi_globs.device_id_key_name platform then + (try + Db.VM.remove_from_platform ~__context ~self 
~key:Xapi_globs.device_id_key_name + with _ -> ()); + try + Db.VM.add_to_platform ~__context ~self ~key:Xapi_globs.device_id_key_name ~value:value; + with _ -> () + end + end; - (* Update the 'up to date' flag afterwards *) - let gmr = Db.VM_guest_metrics.get_record_internal ~__context ~self:gm in + (* Update the 'up to date' flag afterwards *) + let gmr = Db.VM_guest_metrics.get_record_internal ~__context ~self:gm in - (* CA-18034: If viridian flag isn't in there and we have Orlando-or-newer Windows PV drivers then shove it in the metadata for next boot... *) - if Xapi_pv_driver_version.is_windows_and_orlando_or_newer gmr then begin - let platform = Db.VM.get_platform ~__context ~self in - if not(List.mem_assoc Xapi_globs.viridian_key_name platform) then begin - info "Setting VM %s platform:%s <- %s" (Ref.string_of self) Xapi_globs.viridian_key_name Xapi_globs.default_viridian_key_value; - try - Db.VM.add_to_platform ~__context ~self ~key:Xapi_globs.viridian_key_name ~value:Xapi_globs.default_viridian_key_value; - with _ -> () - end - end; + (* CA-18034: If viridian flag isn't in there and we have Orlando-or-newer Windows PV drivers then shove it in the metadata for next boot... *) + if Xapi_pv_driver_version.is_windows_and_orlando_or_newer gmr then begin + let platform = Db.VM.get_platform ~__context ~self in + if not(List.mem_assoc Xapi_globs.viridian_key_name platform) then begin + info "Setting VM %s platform:%s <- %s" (Ref.string_of self) Xapi_globs.viridian_key_name Xapi_globs.default_viridian_key_value; + try + Db.VM.add_to_platform ~__context ~self ~key:Xapi_globs.viridian_key_name ~value:Xapi_globs.default_viridian_key_value; + with _ -> () + end + end; - (* We base some of our allowed-operations decisions on these advertised features and the presence/absence of PV drivers. 
*) - if guest_metrics_cached.pv_drivers_version <> pv_drivers_version - || guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd - || guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif - then begin - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.VM.update_allowed_operations rpc session_id self); - end; - end (* else debug "Ignored spurious guest agent update" *) + (* We base some of our allowed-operations decisions on these advertised features and the presence/absence of PV drivers. *) + if guest_metrics_cached.pv_drivers_version <> pv_drivers_version + || guest_metrics_cached.can_use_hotplug_vbd <> can_use_hotplug_vbd + || guest_metrics_cached.can_use_hotplug_vif <> can_use_hotplug_vif + then begin + Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.VM.update_allowed_operations rpc session_id self); + end; + end (* else debug "Ignored spurious guest agent update" *) end (* XXX This function was previously called in the monitoring loop of the @@ -306,9 +306,9 @@ let all (lookup: string -> string option) (list: string -> string list) ~__conte * is currently no caller. It probably needs to be called from whereever * the fields in this file are used. 
*) let sync_cache valid_domids = - Mutex.execute mutex - (fun _ -> - let stored_domids = Hashtbl.fold (fun k v acc -> k::acc) cache [] in - List.iter (fun domid -> if not (List.mem domid valid_domids) then dead_domains := IntSet.remove domid !dead_domains) stored_domids; - Hashtblext.remove_other_keys cache valid_domids; - ) + Mutex.execute mutex + (fun _ -> + let stored_domids = Hashtbl.fold (fun k v acc -> k::acc) cache [] in + List.iter (fun domid -> if not (List.mem domid valid_domids) then dead_domains := IntSet.remove domid !dead_domains) stored_domids; + Hashtblext.remove_other_keys cache valid_domids; + ) diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 99f8f0acff0..38d74598896 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -12,11 +12,11 @@ * GNU Lesser General Public License for more details. *) (** Functions for implementing 'High Availability' (HA). File is divided into 3 sections: - + scripts and functions which form part of the HA subsystem interface - + internal API calls used for arming and disarming individual hosts - + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide - * @group High Availability (HA) - *) + + scripts and functions which form part of the HA subsystem interface + + internal API calls used for arming and disarming individual hosts + + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide + * @group High Availability (HA) +*) module D = Debug.Make(struct let name="xapi_ha" end) open D @@ -42,594 +42,594 @@ let ha_redo_log = Redo_log.create ~name:"HA redo log" ~state_change_callback:Non (** Returns the current live set info *) let query_liveset() = - let txt = call_script ~log_successful_output:false ha_query_liveset [] in - Xha_interface.LiveSetInformation.of_xml_string txt + let txt = call_script ~log_successful_output:false ha_query_liveset [] in + Xha_interface.LiveSetInformation.of_xml_string txt (** Returns true if this 
node has statefile access *) let i_have_statefile_access () = - try - let liveset = query_liveset () in - let hosts = liveset.Xha_interface.LiveSetInformation.hosts in - let me = Hashtbl.find hosts liveset.Xha_interface.LiveSetInformation.local_host_id in - me.Xha_interface.LiveSetInformation.Host.state_file_access && not (me.Xha_interface.LiveSetInformation.Host.state_file_corrupted) - with e -> - info "Caught exception querying liveset; assuming we have no statefile access: %s" (ExnHelper.string_of_exn e); - false + try + let liveset = query_liveset () in + let hosts = liveset.Xha_interface.LiveSetInformation.hosts in + let me = Hashtbl.find hosts liveset.Xha_interface.LiveSetInformation.local_host_id in + me.Xha_interface.LiveSetInformation.Host.state_file_access && not (me.Xha_interface.LiveSetInformation.Host.state_file_corrupted) + with e -> + info "Caught exception querying liveset; assuming we have no statefile access: %s" (ExnHelper.string_of_exn e); + false (** Returns true if this node is allowed to be the master *) let propose_master () = - try - let result = call_script ha_propose_master [] in - String.rtrim result = "TRUE" - with Xha_error e -> - error "ha_propose_master threw unexpected exception: %s" (Xha_errno.to_string e); - false + try + let result = call_script ha_propose_master [] in + String.rtrim result = "TRUE" + with Xha_error e -> + error "ha_propose_master threw unexpected exception: %s" (Xha_errno.to_string e); + false (** Returns true if local failover decisions have not been disabled on this node *) let local_failover_decisions_are_ok () = - try not(bool_of_string (Localdb.get Constants.ha_disable_failover_decisions)) with _ -> true + try not(bool_of_string (Localdb.get Constants.ha_disable_failover_decisions)) with _ -> true (** Since the liveset info doesn't include the host IP address, we persist these ourselves *) let write_uuid_to_ip_mapping ~__context = - let table = List.map (fun (_, host) -> host.API.host_uuid, 
host.API.host_address) (Db.Host.get_all_records ~__context) in - let v = String_marshall_helper.map (fun x -> x) (fun x -> x) table in - Localdb.put Constants.ha_peers v + let table = List.map (fun (_, host) -> host.API.host_uuid, host.API.host_address) (Db.Host.get_all_records ~__context) in + let v = String_marshall_helper.map (fun x -> x) (fun x -> x) table in + Localdb.put Constants.ha_peers v (** Since the liveset info doesn't include the host IP address, we persist these ourselves *) let get_uuid_to_ip_mapping () = - let v = Localdb.get Constants.ha_peers in - String_unmarshall_helper.map (fun x -> x) (fun x -> x) v + let v = Localdb.get Constants.ha_peers in + String_unmarshall_helper.map (fun x -> x) (fun x -> x) v (** Without using the Pool's database, returns the IP address of a particular host - named by UUID. *) + named by UUID. *) let address_of_host_uuid uuid = - let table = get_uuid_to_ip_mapping () in - if not(List.mem_assoc uuid table) then begin - error "Failed to find the IP address of host UUID %s" uuid; - raise Not_found - end else List.assoc uuid table + let table = get_uuid_to_ip_mapping () in + if not(List.mem_assoc uuid table) then begin + error "Failed to find the IP address of host UUID %s" uuid; + raise Not_found + end else List.assoc uuid table (** Without using the Pool's database, returns the UUID of a particular host named by - heartbeat IP address. This is only necesary because the liveset info doesn't include - the host IP address *) + heartbeat IP address. 
This is only necesary because the liveset info doesn't include + the host IP address *) let uuid_of_host_address address = - let table = List.map (fun (k, v) -> v, k) (get_uuid_to_ip_mapping ()) in - if not(List.mem_assoc address table) then begin - error "Failed to find the UUID address of host with address %s" address; - raise Not_found - end else List.assoc address table + let table = List.map (fun (k, v) -> v, k) (get_uuid_to_ip_mapping ()) in + if not(List.mem_assoc address table) then begin + error "Failed to find the UUID address of host with address %s" address; + raise Not_found + end else List.assoc address table (** Called in two circumstances: - 1. When I started up I thought I was the master but my proposal was rejected by the - heartbeat component. - 2. I was happily running as someone's slave but they left the liveset. - *) + 1. When I started up I thought I was the master but my proposal was rejected by the + heartbeat component. + 2. I was happily running as someone's slave but they left the liveset. +*) let on_master_failure () = - (* The plan is: keep asking if I should be the master. If I'm rejected then query the - live set and see if someone else has been marked as master, if so become a slave of them. *) - - let become_master () = - info "This node will become the master"; - Xapi_pool_transition.become_master (); - info "Waiting for server restart"; - while true do Thread.delay 3600. 
done in - let become_slave_of uuid = - let address = address_of_host_uuid uuid in - info "This node will become the slave of host %s (%s)" uuid address; - Xapi_pool_transition.become_another_masters_slave address; - (* XXX CA-16388: prevent blocking *) - Thread.delay 15.; - error "Failed to flush and exit properly; forcibly exiting"; - exit Xapi_globs.restart_return_code in - - let finished = ref false in - while not !finished do - (* When HA is disabled without the statefile we set a flag to indicate that this node - cannot transition to master automatically on boot. This is to prevent failures during - the 'disarm fencing' step which cause some nodes to not fence themselves when they should. *) - if local_failover_decisions_are_ok () && propose_master () then begin - info "ha_propose_master succeeded"; - become_master (); - finished := true - end else begin - if local_failover_decisions_are_ok () - then info "ha_propose_master failed: looking for another master" - else info "ha_can_not_be_master_on_next_boot set: I cannot be master; looking for another master"; - - let liveset = query_liveset () in - match Hashtbl.fold - (fun uuid host acc -> - if host.Xha_interface.LiveSetInformation.Host.master - && host.Xha_interface.LiveSetInformation.Host.liveness (* CP-25481: a dead host may still have the master lock *) - then uuid :: acc else acc) liveset.Xha_interface.LiveSetInformation.hosts [] with - | [] -> - info "no other master exists yet; waiting 5 seconds and retrying"; - Thread.delay 5. - | uuid :: [] -> - become_slave_of (Uuid.string_of_uuid uuid) - | xs -> - (* should never happen *) - error "multiple masters reported: [ %s ]; failing" - (String.concat "; " (List.map Uuid.string_of_uuid xs)); - failwith "multiple masters" - end - done + (* The plan is: keep asking if I should be the master. If I'm rejected then query the + live set and see if someone else has been marked as master, if so become a slave of them. 
*) + + let become_master () = + info "This node will become the master"; + Xapi_pool_transition.become_master (); + info "Waiting for server restart"; + while true do Thread.delay 3600. done in + let become_slave_of uuid = + let address = address_of_host_uuid uuid in + info "This node will become the slave of host %s (%s)" uuid address; + Xapi_pool_transition.become_another_masters_slave address; + (* XXX CA-16388: prevent blocking *) + Thread.delay 15.; + error "Failed to flush and exit properly; forcibly exiting"; + exit Xapi_globs.restart_return_code in + + let finished = ref false in + while not !finished do + (* When HA is disabled without the statefile we set a flag to indicate that this node + cannot transition to master automatically on boot. This is to prevent failures during + the 'disarm fencing' step which cause some nodes to not fence themselves when they should. *) + if local_failover_decisions_are_ok () && propose_master () then begin + info "ha_propose_master succeeded"; + become_master (); + finished := true + end else begin + if local_failover_decisions_are_ok () + then info "ha_propose_master failed: looking for another master" + else info "ha_can_not_be_master_on_next_boot set: I cannot be master; looking for another master"; + + let liveset = query_liveset () in + match Hashtbl.fold + (fun uuid host acc -> + if host.Xha_interface.LiveSetInformation.Host.master + && host.Xha_interface.LiveSetInformation.Host.liveness (* CP-25481: a dead host may still have the master lock *) + then uuid :: acc else acc) liveset.Xha_interface.LiveSetInformation.hosts [] with + | [] -> + info "no other master exists yet; waiting 5 seconds and retrying"; + Thread.delay 5. 
+ | uuid :: [] -> + become_slave_of (Uuid.string_of_uuid uuid) + | xs -> + (* should never happen *) + error "multiple masters reported: [ %s ]; failing" + (String.concat "; " (List.map Uuid.string_of_uuid xs)); + failwith "multiple masters" + end + done module Timeouts = struct - type t = { - heart_beat_interval: int; - state_file_interval: int; - heart_beat_timeout: int; - state_file_timeout: int; - heart_beat_watchdog_timeout: int; - state_file_watchdog_timeout: int; - boot_join_timeout: int; - enable_join_timeout: int; - xapi_healthcheck_timeout: int; - xapi_healthcheck_interval: int; - xapi_restart_timeout: int; - xapi_restart_attempts: int; - } - let derive (t: int) = - (* xHA interface section 4.1.4.1.1 Formula of key timeouts *) - (* t >= 10 *) - if t < 10 then failwith "constraint violation: timeout >= 10"; - (* All other values are derived from this single parameter *) - let heart_beat_interval = (t + 10) / 10 in - let state_file_interval = heart_beat_interval in - let heart_beat_timeout = t in - let state_file_timeout = t in - let heart_beat_watchdog_timeout = t in - let state_file_watchdog_timeout = t + 15 in - let boot_join_timeout = t + 60 in - let enable_join_timeout = boot_join_timeout in - - { heart_beat_interval = heart_beat_interval; - state_file_interval = state_file_interval; - heart_beat_timeout = heart_beat_timeout; - state_file_timeout = state_file_timeout; - heart_beat_watchdog_timeout = heart_beat_watchdog_timeout; - state_file_watchdog_timeout = state_file_watchdog_timeout; - boot_join_timeout = boot_join_timeout; - enable_join_timeout = enable_join_timeout; - - xapi_healthcheck_interval = !Xapi_globs.ha_xapi_healthcheck_interval; - xapi_healthcheck_timeout = !Xapi_globs.ha_xapi_healthcheck_timeout; - xapi_restart_attempts = !Xapi_globs.ha_xapi_restart_attempts; - xapi_restart_timeout = !Xapi_globs.ha_xapi_restart_timeout; (* 180s is max start delay and 60s max shutdown delay in the initscript *) - } - - (** Returns the base timeout value 
from which the rest are derived *) - let get_base_t ~__context = - let pool = Helpers.get_pool ~__context in - let other_config = Db.Pool.get_other_config ~__context ~self:pool in - let configuration = Db.Pool.get_ha_configuration ~__context ~self:pool in - (* xHA built-in default is 30s. We've bumped ours to 40s to make TC7710/CA-17639 happier *) - (* We then bumped it again to 60s to work around multipath breakage in CA-27666 *) - (* We then allowed it to be bumped persistently by users to work around multipath breakage in CA-28306 *) - let t = - if List.mem_assoc Xapi_globs.xha_timeout configuration - then int_of_string (List.assoc Xapi_globs.xha_timeout configuration) - else - if List.mem_assoc Xapi_globs.default_ha_timeout other_config - then int_of_string (List.assoc Xapi_globs.default_ha_timeout other_config) - else int_of_float !Xapi_globs.ha_default_timeout_base in - t + type t = { + heart_beat_interval: int; + state_file_interval: int; + heart_beat_timeout: int; + state_file_timeout: int; + heart_beat_watchdog_timeout: int; + state_file_watchdog_timeout: int; + boot_join_timeout: int; + enable_join_timeout: int; + xapi_healthcheck_timeout: int; + xapi_healthcheck_interval: int; + xapi_restart_timeout: int; + xapi_restart_attempts: int; + } + let derive (t: int) = + (* xHA interface section 4.1.4.1.1 Formula of key timeouts *) + (* t >= 10 *) + if t < 10 then failwith "constraint violation: timeout >= 10"; + (* All other values are derived from this single parameter *) + let heart_beat_interval = (t + 10) / 10 in + let state_file_interval = heart_beat_interval in + let heart_beat_timeout = t in + let state_file_timeout = t in + let heart_beat_watchdog_timeout = t in + let state_file_watchdog_timeout = t + 15 in + let boot_join_timeout = t + 60 in + let enable_join_timeout = boot_join_timeout in + + { heart_beat_interval = heart_beat_interval; + state_file_interval = state_file_interval; + heart_beat_timeout = heart_beat_timeout; + state_file_timeout = 
state_file_timeout; + heart_beat_watchdog_timeout = heart_beat_watchdog_timeout; + state_file_watchdog_timeout = state_file_watchdog_timeout; + boot_join_timeout = boot_join_timeout; + enable_join_timeout = enable_join_timeout; + + xapi_healthcheck_interval = !Xapi_globs.ha_xapi_healthcheck_interval; + xapi_healthcheck_timeout = !Xapi_globs.ha_xapi_healthcheck_timeout; + xapi_restart_attempts = !Xapi_globs.ha_xapi_restart_attempts; + xapi_restart_timeout = !Xapi_globs.ha_xapi_restart_timeout; (* 180s is max start delay and 60s max shutdown delay in the initscript *) + } + + (** Returns the base timeout value from which the rest are derived *) + let get_base_t ~__context = + let pool = Helpers.get_pool ~__context in + let other_config = Db.Pool.get_other_config ~__context ~self:pool in + let configuration = Db.Pool.get_ha_configuration ~__context ~self:pool in + (* xHA built-in default is 30s. We've bumped ours to 40s to make TC7710/CA-17639 happier *) + (* We then bumped it again to 60s to work around multipath breakage in CA-27666 *) + (* We then allowed it to be bumped persistently by users to work around multipath breakage in CA-28306 *) + let t = + if List.mem_assoc Xapi_globs.xha_timeout configuration + then int_of_string (List.assoc Xapi_globs.xha_timeout configuration) + else + if List.mem_assoc Xapi_globs.default_ha_timeout other_config + then int_of_string (List.assoc Xapi_globs.default_ha_timeout other_config) + else int_of_float !Xapi_globs.ha_default_timeout_base in + t end module Monitor = struct - (** Control the background HA monitoring thread *) - - let request_shutdown = ref false - let prevent_failover_actions_until = ref 0. 
(* protected by the request_shutdown_m too *) - let block_delay_calls = ref false (* set to true when Pool.ha_prevent_restarts_for calls must wait *) - let block_delay_calls_c = Condition.create () (* used to wake up all Pool.ha_prevent_restarts_for threads *) - let m = Mutex.create () - - - (* We use this for interruptible sleeping *) - let delay = Delay.make () - - let thread = ref None - let thread_m = Mutex.create () - - let database_state_valid = ref false - let database_state_valid_c = Condition.create () - - (* Used to explicitly signal that we should replan *) - let plan_out_of_date = ref true - - exception Already_started - exception Not_started - - (** Background thread which monitors the membership set and takes action if HA is - armed and something goes wrong *) - let ha_monitor () : unit = Debug.with_thread_named "ha_monitor" (fun () -> - debug "initialising HA background thread"; - (* NB we may be running this code on a slave in emergency mode *) - - Server_helpers.exec_with_new_task "HA monitor" (fun __context -> - - let statefiles = Xha_statefile.list_existing_statefiles () in - - debug "HA background thread starting"; - - (* Grab the base timeout value so we can cook the reported latencies *) - let base_t = int_of_string (Localdb.get Constants.ha_base_t) in - let timeouts = Timeouts.derive base_t in - - (* Set up our per-host alert triggers *) - - let localhost_uuid = Helpers.get_localhost_uuid () in - let boolean_warning msg body_option = - let trigger = - Xapi_alert.edge_trigger - (fun old newvalue -> - if newvalue then begin - warn "%s" (fst msg); - begin - match body_option with - None -> () - | (Some body) -> - Xapi_alert.add ~msg ~cls:`Host ~obj_uuid:localhost_uuid ~body - end - end) in - (* make sure we spot the case where the warning triggers immediately *) - trigger false; - trigger in - (* Per-host warnings which are logged but do not generate alerts: *) - let warning_statefile_lost = boolean_warning Api_messages.ha_statefile_lost None in 
- let warning_heartbeat_approaching_timeout = boolean_warning Api_messages.ha_heartbeat_approaching_timeout None in - let warning_statefile_approaching_timeout = boolean_warning Api_messages.ha_statefile_approaching_timeout None in - let warning_xapi_healthcheck_approaching_timeout = boolean_warning Api_messages.ha_xapi_healthcheck_approaching_timeout None in - (* Per-host warnings which are logged *and* generate alerts: *) - let warning_network_bonding_error = - boolean_warning Api_messages.ha_network_bonding_error - (Some (Printf.sprintf "The network bond used for transmitting HA heartbeat messages on host '%s' has failed" localhost_uuid)) in - - (* Pool-wide warning which is logged by the master *and* generates an alert. Since this call is only ever made on the master we're - ok to make database calls to compute the message body without worrying about the db call blocking: *) - let warning_all_live_nodes_lost_statefile = - boolean_warning Api_messages.ha_statefile_lost - (Some (Printf.sprintf "All live servers have lost access to the HA statefile")) in - - let last_liveset_uuids = ref [] in - let last_plan_time = ref 0. 
in - - (* Called on all hosts to query the liveset and update statistics + messages *) - (* WARNING: must not touch the database or perform blocking I/O *) - let query_liveset_on_all_hosts () = - let liveset = - try - (* XXX: if we detect the liveset has been poisoned then we're in the middle - of a with-statefile disable and should stop our monitor thread *) - query_liveset () - with - | Xha_error Xha_errno.Mtc_exit_daemon_is_not_present as e -> - info "Monitor thread caught MTC_EXIT_DAEMON_IS_NOT_PRESENT; deactivating HA failover thread"; - Mutex.execute m (fun () -> request_shutdown := true); - raise e - | e -> - info "Caught exception querying liveset: %s; deactivating HA failover thread" - (ExnHelper.string_of_exn e); - Mutex.execute m (fun () -> request_shutdown := true); - raise e in - debug "Liveset: %s" (Xha_interface.LiveSetInformation.to_summary_string liveset); - (* All hosts: Feed the current latency values into the per-host RRDs (if available) *) - Opt.iter - (fun local -> - (* Assume all values are ms *) - let statefile = float_of_int (local.Xha_interface.LiveSetInformation.RawStatus.statefile_latency) /. 1000. in - let heartbeat_latency = float_of_int local.Xha_interface.LiveSetInformation.RawStatus.heartbeat_latency /. 1000. -. (float_of_int timeouts.Timeouts.heart_beat_interval) in - let xapi_latency = float_of_int (local.Xha_interface.LiveSetInformation.RawStatus.xapi_healthcheck_latency) /. 1000. 
in - let statefile_latencies = List.map (fun vdi -> let open Rrd.Statefile_latency in {id = vdi.Static_vdis.uuid; latency = Some statefile}) statefiles in - log_and_ignore_exn (fun () -> Rrdd.HA.enable_and_update ~statefile_latencies ~heartbeat_latency ~xapi_latency) - ) liveset.Xha_interface.LiveSetInformation.raw_status_on_local_host; - - (* All hosts: create alerts from per-host warnings (if available) *) - debug "Processing warnings"; - Opt.iter - (fun warning -> - warning_statefile_lost warning.Xha_interface.LiveSetInformation.Warning.statefile_lost; - warning_heartbeat_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.heartbeat_approaching_timeout; - warning_statefile_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.statefile_approaching_timeout; - warning_xapi_healthcheck_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.xapi_healthcheck_approaching_timeout; - warning_network_bonding_error warning.Xha_interface.LiveSetInformation.Warning.network_bonding_error; - ) liveset.Xha_interface.LiveSetInformation.warning_on_local_host; - debug "Done with warnings"; - liveset in - - (* Slaves monitor the master *) - (* WARNING: must not touch the database or perform blocking I/O *) - let process_liveset_on_slave liveset = - let address = Pool_role.get_master_address () in - let master_uuid = uuid_of_host_address address in - let master_info = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts (Uuid.uuid_of_string master_uuid) in - if true - && master_info.Xha_interface.LiveSetInformation.Host.liveness - && master_info.Xha_interface.LiveSetInformation.Host.master - then debug "The node we think is the master is still alive and marked as master; this is OK" - else begin - warn "We think node %s (%s) is the master but the liveset disagrees" master_uuid address; - on_master_failure () - end in - - (* Return the host UUIDs of all nodes in the liveset *) - let uuids_of_liveset liveset = - Hashtbl.fold - (fun 
uuid host acc -> - if host.Xha_interface.LiveSetInformation.Host.liveness then uuid :: acc else acc) - liveset.Xha_interface.LiveSetInformation.hosts [] in - - (* Master performs VM restart and keeps track of the recovery plan *) - (* WARNING: database is ok but must not perform blocking I/O *) - let process_liveset_on_master liveset = - let pool = Helpers.get_pool ~__context in - let to_tolerate = Int64.to_int (Db.Pool.get_ha_host_failures_to_tolerate ~__context ~self:pool) in - (* let planned_for = Int64.to_int (Db.Pool.get_ha_plan_exists_for ~__context ~self:pool) in *) - - (* First consider whether VM failover actions need to happen. - Convert the liveset into a list of Host references used by the VM failover code *) - let liveset_uuids = List.sort compare (uuids_of_liveset liveset) in - if !last_liveset_uuids <> liveset_uuids then begin - warn "Liveset looks different; assuming we need to rerun the planner"; - plan_out_of_date := true; - last_liveset_uuids := liveset_uuids - end; - - let liveset_refs = List.map (fun uuid -> Db.Host.get_by_uuid ~__context ~uuid:(Uuid.string_of_uuid uuid)) liveset_uuids in - if local_failover_decisions_are_ok () then begin - try - Xapi_ha_vm_failover.restart_auto_run_vms ~__context liveset_refs to_tolerate - with e -> - error "Caught unexpected exception when executing restart plan: %s" (ExnHelper.string_of_exn e) - end; - - (* At this point the hosts not in the liveset have been declared dead *) - - (* Next update the Host_metrics.live value to spot hosts coming back *) - let all_hosts = Db.Host.get_all ~__context in - let livemap = List.map (fun host -> host, List.mem host liveset_refs) all_hosts in - List.iter (fun (host, live) -> - Helpers.log_exn_continue - (Printf.sprintf "updating Host_metrics.live to %b for %s" live (Ref.string_of host)) - (fun () -> - let metrics = Db.Host.get_metrics ~__context ~self:host in - let current = Db.Host_metrics.get_live ~__context ~self:metrics in - let shutting_down = - Mutex.execute 
Xapi_globs.hosts_which_are_shutting_down_m - (fun () -> List.mem host !Xapi_globs.hosts_which_are_shutting_down) in - if current <> live then begin - (* This can only be a false -> true transient as the 'restart_auto_run_vms' function - has already dealt with the true -> false case. *) - (* => live must be true. No need to consider calling current script hooks *) - if shutting_down - then info "Not marking host %s as live because it is shutting down" (Ref.string_of host) - else begin - Db.Host_metrics.set_live ~__context ~self:metrics ~value:live; - Xapi_host_helpers.update_allowed_operations ~__context ~self:host - end - end - ) () - ) livemap; - - (* Next update the Host.ha_statefiles and Host.ha_network_peers fields. For the network - peers we use whichever view is more recent: network or statefile *) - let statefiles = Db.Pool.get_ha_statefiles ~__context ~self:pool in - let host_host_table = List.map - (fun host -> - host, Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts - (Uuid.uuid_of_string (Db.Host.get_uuid ~__context ~self:host))) all_hosts in - List.iter (fun (host, xha_host) -> - (* NB there is only one statefile currently *) - let current = Db.Host.get_ha_statefiles ~__context ~self:host <> [] in - let newval = xha_host.Xha_interface.LiveSetInformation.Host.state_file_access in - if current <> newval - then Db.Host.set_ha_statefiles ~__context ~self:host - ~value:(if newval then statefiles else []); - ) host_host_table; - (* If all live hosts have lost statefile then we are running thanks to Survival Rule 2: this should be flagged to the user, - who should fix their storage. Note that if some hosts can see the storage but others cannot, there is no point generating - an alert because those who cannot are about to fence. 
*) - let relying_on_rule_2 xha_host = - true - && xha_host.Xha_interface.LiveSetInformation.Host.liveness (* it is still alive *) - && not xha_host.Xha_interface.LiveSetInformation.Host.state_file_access (* and yet has no statefile access *) - in - let all_live_nodes_lost_statefile = List.fold_left (&&) true (List.map (fun (_, xha_host) -> relying_on_rule_2 xha_host) host_host_table) in - warning_all_live_nodes_lost_statefile all_live_nodes_lost_statefile; - - (* Now the Host.ha_network_peers *) - let subset a b = List.fold_left (fun acc x -> acc && (List.mem x b)) true a in - let set_equals a b = - let a' = List.setify a and b' = List.setify b in - subset a' b' && (subset b' a') in - - (* NB raw_status_on_local_host not available if in 'Starting' state *) - begin match liveset.Xha_interface.LiveSetInformation.raw_status_on_local_host with - | None -> - debug "No raw_status_on_local_host to process" - | Some local -> - let host_raw_table = List.map - (fun host -> - host, Hashtbl.find local.Xha_interface.LiveSetInformation.RawStatus.host_raw_data - (Uuid.uuid_of_string (Db.Host.get_uuid ~__context ~self:host))) all_hosts in - List.iter (fun (host, raw) -> - (* Use the list of network peers given by the host recent update: statefile or network *) - let peers = - if raw.Xha_interface.LiveSetInformation.HostRawData.time_since_last_update_on_statefile < - raw.Xha_interface.LiveSetInformation.HostRawData.time_since_last_heartbeat - then raw.Xha_interface.LiveSetInformation.HostRawData.heartbeat_active_list_on_statefile - else raw.Xha_interface.LiveSetInformation.HostRawData.heartbeat_active_list_on_heartbeat in - let peer_strings = List.map Uuid.string_of_uuid peers in - debug "Network peers = [%s]" (String.concat ";" peer_strings); - let existing_strings = Db.Host.get_ha_network_peers ~__context ~self:host in - if not(set_equals peer_strings existing_strings) - then Db.Host.set_ha_network_peers ~__context ~self:host ~value:peer_strings) - host_raw_table; - - end; - - 
let now = Unix.gettimeofday () in - let plan_too_old = now -. !last_plan_time > !Xapi_globs.ha_monitor_plan_interval in - if plan_too_old || !plan_out_of_date then begin - let changed = Xapi_ha_vm_failover.update_pool_status ~__context ~live_set:liveset_refs () in - - (* Extremely bad: something managed to break our careful plan *) - if changed && not !plan_out_of_date then error "Overcommit protection failed to prevent a change which invalidated our failover plan"; - - last_plan_time := now; - plan_out_of_date := false; - end in - - (* Wait for all hosts in the liveset to become enabled so we can start VMs on them. We wait for up to ha_monitor_startup_timeout. *) - let wait_for_slaves_on_master () = - (* CA-17849: need to make sure that, in the xapi startup case, all hosts have been set to dead and disabled initially *) - info "Master HA startup waiting for explicit signal that the database state is valid"; - Mutex.execute thread_m - (fun () -> - while not(!database_state_valid) do - Condition.wait database_state_valid_c thread_m - done); - - info "Master HA startup waiting for up to %.2f for slaves in the liveset to report in and enable themselves" !Xapi_globs.ha_monitor_startup_timeout; - let start = Unix.gettimeofday () in - let finished = ref false in - while Mutex.execute m (fun () -> not(!request_shutdown)) && not(!finished) do - try - ignore(Delay.wait delay !Xapi_globs.ha_monitor_interval); - if Mutex.execute m (fun () -> not(!request_shutdown)) then begin - let liveset = query_liveset_on_all_hosts () in - let uuids = List.map Uuid.string_of_uuid (uuids_of_liveset liveset) in - let enabled = List.map (fun uuid -> Db.Host.get_enabled ~__context ~self:(Db.Host.get_by_uuid ~__context ~uuid)) uuids in - let enabled, disabled = List.partition (fun (_, x) -> x) (List.combine uuids enabled) in - debug "Enabled hosts = [ %s ]; disabled hosts = [ %s ]" (String.concat "; " (List.map fst enabled)) (String.concat "; " (List.map fst disabled)); - - if disabled = [] 
then begin - info "Master HA startup: All live hosts are now enabled: [ %s ]" (String.concat "; " (List.map fst enabled)); - finished := true; - end; - - if Unix.gettimeofday () -. start > !Xapi_globs.ha_monitor_startup_timeout && disabled <> [] then begin - info "Master HA startup: Timed out waiting for all live slaves to enable themselves (have some hosts failed to attach storage?) Live but disabled hosts: [ %s ]" - (String.concat "; " (List.map fst disabled)); - finished := true - end; - end; - with e -> - debug "Exception in HA monitor thread while waiting for slaves: %s" (ExnHelper.string_of_exn e); - Thread.delay !Xapi_globs.ha_monitor_interval - done in - - (* If we're the master we must wait for our live slaves to turn up before we consider restarting VMs etc *) - if Pool_role.is_master () then wait_for_slaves_on_master (); - - (* Monitoring phase: we must assume the worst and not touch the database here *) - while Mutex.execute m (fun () -> not(!request_shutdown)) do - try - ignore(Delay.wait delay !Xapi_globs.ha_monitor_interval); - - if Mutex.execute m (fun () -> not(!request_shutdown)) then begin - let liveset = query_liveset_on_all_hosts () in - if Pool_role.is_slave () then process_liveset_on_slave liveset; - if Pool_role.is_master () then begin - (* CA-23998: allow MTC to block master failover actions (ie VM restart) for a certain period of time - while their Level 2 VMs are being restarted. *) - finally - (fun () -> - let until = Mutex.execute m - (fun () -> - (* Callers of 'delay' will have to wait until we have finished processing this loop iteration *) - block_delay_calls := true; - !prevent_failover_actions_until) in - - (* FIST *) - while Xapi_fist.simulate_blocking_planner () do Thread.delay 1. done; - - let now = Unix.gettimeofday () in - if now < until - then debug "Blocking VM restart thread for at least another %.0f seconds" (until -. 
now) - else process_liveset_on_master liveset) - (fun () -> - (* Safe to unblock callers of 'delay' now *) - Mutex.execute m - (fun () -> - (* Callers of 'delay' can now safely request a delay knowing that the liveset won't be processed - until after the delay period. *) - block_delay_calls := false; - Condition.broadcast block_delay_calls_c) ) - end - end - with e -> - debug "Exception in HA monitor thread: %s" (ExnHelper.string_of_exn e); - Thread.delay !Xapi_globs.ha_monitor_interval - done; - - debug "Re-enabling old Host_metrics.live heartbeat"; - Mutex.execute Db_gc.use_host_heartbeat_for_liveness_m - (fun () -> Db_gc.use_host_heartbeat_for_liveness := true); - - debug "Stopping reading per-host HA stats"; - log_and_ignore_exn Rrdd.HA.disable; - - debug "HA background thread told to stop") - ) () - - let prevent_restarts_for seconds = - (* Wait until the thread stops processing and is about to sleep / is already sleeping *) - Mutex.execute m - (fun () -> - while !block_delay_calls = true do Condition.wait block_delay_calls_c m done; - debug "Blocking VM restart actions for another %Ld seconds" seconds; - prevent_failover_actions_until := Unix.gettimeofday () +. 
(Int64.to_float seconds); - (* If we get a value of 0 then we immediately trigger a VM restart *) - if seconds < 1L then Delay.signal delay - ) - - let start () = - debug "Monitor.start()"; - debug "Disabling old heartbeat; live flag will be driven directly from xHA liveset"; - (* NB in the xapi startup case this will prevent the db_gc.single_pass from setting any live flags *) - Mutex.execute Db_gc.use_host_heartbeat_for_liveness_m - (fun () -> Db_gc.use_host_heartbeat_for_liveness := false); - - Mutex.execute thread_m - (fun () -> - match !thread with - | Some _ -> raise Already_started - | None -> - (* This will cause the started thread to block until signal_database_state_valid is called *) - request_shutdown := false; - thread := Some (Thread.create ha_monitor ())) - - let signal_database_state_valid () = - Mutex.execute thread_m - (fun () -> - debug "Signalling HA monitor thread that it is ok to look at the database now"; - database_state_valid := true; - Condition.signal database_state_valid_c) - - let stop () = - debug "Monitor.stop()"; - Mutex.execute thread_m - (fun () -> - match !thread with - | None -> - warn "Failed to stop HA monitor thread because it wasn't running. Perhaps it was stopped more than once?" - | Some t -> - Mutex.execute m (fun () -> request_shutdown := true; Delay.signal delay); - Thread.join t; - thread := None) + (** Control the background HA monitoring thread *) + + let request_shutdown = ref false + let prevent_failover_actions_until = ref 0. 
(* protected by the request_shutdown_m too *) + let block_delay_calls = ref false (* set to true when Pool.ha_prevent_restarts_for calls must wait *) + let block_delay_calls_c = Condition.create () (* used to wake up all Pool.ha_prevent_restarts_for threads *) + let m = Mutex.create () + + + (* We use this for interruptible sleeping *) + let delay = Delay.make () + + let thread = ref None + let thread_m = Mutex.create () + + let database_state_valid = ref false + let database_state_valid_c = Condition.create () + + (* Used to explicitly signal that we should replan *) + let plan_out_of_date = ref true + + exception Already_started + exception Not_started + + (** Background thread which monitors the membership set and takes action if HA is + armed and something goes wrong *) + let ha_monitor () : unit = Debug.with_thread_named "ha_monitor" (fun () -> + debug "initialising HA background thread"; + (* NB we may be running this code on a slave in emergency mode *) + + Server_helpers.exec_with_new_task "HA monitor" (fun __context -> + + let statefiles = Xha_statefile.list_existing_statefiles () in + + debug "HA background thread starting"; + + (* Grab the base timeout value so we can cook the reported latencies *) + let base_t = int_of_string (Localdb.get Constants.ha_base_t) in + let timeouts = Timeouts.derive base_t in + + (* Set up our per-host alert triggers *) + + let localhost_uuid = Helpers.get_localhost_uuid () in + let boolean_warning msg body_option = + let trigger = + Xapi_alert.edge_trigger + (fun old newvalue -> + if newvalue then begin + warn "%s" (fst msg); + begin + match body_option with + None -> () + | (Some body) -> + Xapi_alert.add ~msg ~cls:`Host ~obj_uuid:localhost_uuid ~body + end + end) in + (* make sure we spot the case where the warning triggers immediately *) + trigger false; + trigger in + (* Per-host warnings which are logged but do not generate alerts: *) + let warning_statefile_lost = boolean_warning Api_messages.ha_statefile_lost None in 
+ let warning_heartbeat_approaching_timeout = boolean_warning Api_messages.ha_heartbeat_approaching_timeout None in + let warning_statefile_approaching_timeout = boolean_warning Api_messages.ha_statefile_approaching_timeout None in + let warning_xapi_healthcheck_approaching_timeout = boolean_warning Api_messages.ha_xapi_healthcheck_approaching_timeout None in + (* Per-host warnings which are logged *and* generate alerts: *) + let warning_network_bonding_error = + boolean_warning Api_messages.ha_network_bonding_error + (Some (Printf.sprintf "The network bond used for transmitting HA heartbeat messages on host '%s' has failed" localhost_uuid)) in + + (* Pool-wide warning which is logged by the master *and* generates an alert. Since this call is only ever made on the master we're + ok to make database calls to compute the message body without worrying about the db call blocking: *) + let warning_all_live_nodes_lost_statefile = + boolean_warning Api_messages.ha_statefile_lost + (Some (Printf.sprintf "All live servers have lost access to the HA statefile")) in + + let last_liveset_uuids = ref [] in + let last_plan_time = ref 0. 
in + + (* Called on all hosts to query the liveset and update statistics + messages *) + (* WARNING: must not touch the database or perform blocking I/O *) + let query_liveset_on_all_hosts () = + let liveset = + try + (* XXX: if we detect the liveset has been poisoned then we're in the middle + of a with-statefile disable and should stop our monitor thread *) + query_liveset () + with + | Xha_error Xha_errno.Mtc_exit_daemon_is_not_present as e -> + info "Monitor thread caught MTC_EXIT_DAEMON_IS_NOT_PRESENT; deactivating HA failover thread"; + Mutex.execute m (fun () -> request_shutdown := true); + raise e + | e -> + info "Caught exception querying liveset: %s; deactivating HA failover thread" + (ExnHelper.string_of_exn e); + Mutex.execute m (fun () -> request_shutdown := true); + raise e in + debug "Liveset: %s" (Xha_interface.LiveSetInformation.to_summary_string liveset); + (* All hosts: Feed the current latency values into the per-host RRDs (if available) *) + Opt.iter + (fun local -> + (* Assume all values are ms *) + let statefile = float_of_int (local.Xha_interface.LiveSetInformation.RawStatus.statefile_latency) /. 1000. in + let heartbeat_latency = float_of_int local.Xha_interface.LiveSetInformation.RawStatus.heartbeat_latency /. 1000. -. (float_of_int timeouts.Timeouts.heart_beat_interval) in + let xapi_latency = float_of_int (local.Xha_interface.LiveSetInformation.RawStatus.xapi_healthcheck_latency) /. 1000. 
in + let statefile_latencies = List.map (fun vdi -> let open Rrd.Statefile_latency in {id = vdi.Static_vdis.uuid; latency = Some statefile}) statefiles in + log_and_ignore_exn (fun () -> Rrdd.HA.enable_and_update ~statefile_latencies ~heartbeat_latency ~xapi_latency) + ) liveset.Xha_interface.LiveSetInformation.raw_status_on_local_host; + + (* All hosts: create alerts from per-host warnings (if available) *) + debug "Processing warnings"; + Opt.iter + (fun warning -> + warning_statefile_lost warning.Xha_interface.LiveSetInformation.Warning.statefile_lost; + warning_heartbeat_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.heartbeat_approaching_timeout; + warning_statefile_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.statefile_approaching_timeout; + warning_xapi_healthcheck_approaching_timeout warning.Xha_interface.LiveSetInformation.Warning.xapi_healthcheck_approaching_timeout; + warning_network_bonding_error warning.Xha_interface.LiveSetInformation.Warning.network_bonding_error; + ) liveset.Xha_interface.LiveSetInformation.warning_on_local_host; + debug "Done with warnings"; + liveset in + + (* Slaves monitor the master *) + (* WARNING: must not touch the database or perform blocking I/O *) + let process_liveset_on_slave liveset = + let address = Pool_role.get_master_address () in + let master_uuid = uuid_of_host_address address in + let master_info = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts (Uuid.uuid_of_string master_uuid) in + if true + && master_info.Xha_interface.LiveSetInformation.Host.liveness + && master_info.Xha_interface.LiveSetInformation.Host.master + then debug "The node we think is the master is still alive and marked as master; this is OK" + else begin + warn "We think node %s (%s) is the master but the liveset disagrees" master_uuid address; + on_master_failure () + end in + + (* Return the host UUIDs of all nodes in the liveset *) + let uuids_of_liveset liveset = + Hashtbl.fold + (fun 
uuid host acc -> + if host.Xha_interface.LiveSetInformation.Host.liveness then uuid :: acc else acc) + liveset.Xha_interface.LiveSetInformation.hosts [] in + + (* Master performs VM restart and keeps track of the recovery plan *) + (* WARNING: database is ok but must not perform blocking I/O *) + let process_liveset_on_master liveset = + let pool = Helpers.get_pool ~__context in + let to_tolerate = Int64.to_int (Db.Pool.get_ha_host_failures_to_tolerate ~__context ~self:pool) in + (* let planned_for = Int64.to_int (Db.Pool.get_ha_plan_exists_for ~__context ~self:pool) in *) + + (* First consider whether VM failover actions need to happen. + Convert the liveset into a list of Host references used by the VM failover code *) + let liveset_uuids = List.sort compare (uuids_of_liveset liveset) in + if !last_liveset_uuids <> liveset_uuids then begin + warn "Liveset looks different; assuming we need to rerun the planner"; + plan_out_of_date := true; + last_liveset_uuids := liveset_uuids + end; + + let liveset_refs = List.map (fun uuid -> Db.Host.get_by_uuid ~__context ~uuid:(Uuid.string_of_uuid uuid)) liveset_uuids in + if local_failover_decisions_are_ok () then begin + try + Xapi_ha_vm_failover.restart_auto_run_vms ~__context liveset_refs to_tolerate + with e -> + error "Caught unexpected exception when executing restart plan: %s" (ExnHelper.string_of_exn e) + end; + + (* At this point the hosts not in the liveset have been declared dead *) + + (* Next update the Host_metrics.live value to spot hosts coming back *) + let all_hosts = Db.Host.get_all ~__context in + let livemap = List.map (fun host -> host, List.mem host liveset_refs) all_hosts in + List.iter (fun (host, live) -> + Helpers.log_exn_continue + (Printf.sprintf "updating Host_metrics.live to %b for %s" live (Ref.string_of host)) + (fun () -> + let metrics = Db.Host.get_metrics ~__context ~self:host in + let current = Db.Host_metrics.get_live ~__context ~self:metrics in + let shutting_down = + Mutex.execute 
Xapi_globs.hosts_which_are_shutting_down_m + (fun () -> List.mem host !Xapi_globs.hosts_which_are_shutting_down) in + if current <> live then begin + (* This can only be a false -> true transient as the 'restart_auto_run_vms' function + has already dealt with the true -> false case. *) + (* => live must be true. No need to consider calling current script hooks *) + if shutting_down + then info "Not marking host %s as live because it is shutting down" (Ref.string_of host) + else begin + Db.Host_metrics.set_live ~__context ~self:metrics ~value:live; + Xapi_host_helpers.update_allowed_operations ~__context ~self:host + end + end + ) () + ) livemap; + + (* Next update the Host.ha_statefiles and Host.ha_network_peers fields. For the network + peers we use whichever view is more recent: network or statefile *) + let statefiles = Db.Pool.get_ha_statefiles ~__context ~self:pool in + let host_host_table = List.map + (fun host -> + host, Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts + (Uuid.uuid_of_string (Db.Host.get_uuid ~__context ~self:host))) all_hosts in + List.iter (fun (host, xha_host) -> + (* NB there is only one statefile currently *) + let current = Db.Host.get_ha_statefiles ~__context ~self:host <> [] in + let newval = xha_host.Xha_interface.LiveSetInformation.Host.state_file_access in + if current <> newval + then Db.Host.set_ha_statefiles ~__context ~self:host + ~value:(if newval then statefiles else []); + ) host_host_table; + (* If all live hosts have lost statefile then we are running thanks to Survival Rule 2: this should be flagged to the user, + who should fix their storage. Note that if some hosts can see the storage but others cannot, there is no point generating + an alert because those who cannot are about to fence. 
*) + let relying_on_rule_2 xha_host = + true + && xha_host.Xha_interface.LiveSetInformation.Host.liveness (* it is still alive *) + && not xha_host.Xha_interface.LiveSetInformation.Host.state_file_access (* and yet has no statefile access *) + in + let all_live_nodes_lost_statefile = List.fold_left (&&) true (List.map (fun (_, xha_host) -> relying_on_rule_2 xha_host) host_host_table) in + warning_all_live_nodes_lost_statefile all_live_nodes_lost_statefile; + + (* Now the Host.ha_network_peers *) + let subset a b = List.fold_left (fun acc x -> acc && (List.mem x b)) true a in + let set_equals a b = + let a' = List.setify a and b' = List.setify b in + subset a' b' && (subset b' a') in + + (* NB raw_status_on_local_host not available if in 'Starting' state *) + begin match liveset.Xha_interface.LiveSetInformation.raw_status_on_local_host with + | None -> + debug "No raw_status_on_local_host to process" + | Some local -> + let host_raw_table = List.map + (fun host -> + host, Hashtbl.find local.Xha_interface.LiveSetInformation.RawStatus.host_raw_data + (Uuid.uuid_of_string (Db.Host.get_uuid ~__context ~self:host))) all_hosts in + List.iter (fun (host, raw) -> + (* Use the list of network peers given by the host recent update: statefile or network *) + let peers = + if raw.Xha_interface.LiveSetInformation.HostRawData.time_since_last_update_on_statefile < + raw.Xha_interface.LiveSetInformation.HostRawData.time_since_last_heartbeat + then raw.Xha_interface.LiveSetInformation.HostRawData.heartbeat_active_list_on_statefile + else raw.Xha_interface.LiveSetInformation.HostRawData.heartbeat_active_list_on_heartbeat in + let peer_strings = List.map Uuid.string_of_uuid peers in + debug "Network peers = [%s]" (String.concat ";" peer_strings); + let existing_strings = Db.Host.get_ha_network_peers ~__context ~self:host in + if not(set_equals peer_strings existing_strings) + then Db.Host.set_ha_network_peers ~__context ~self:host ~value:peer_strings) + host_raw_table; + + end; + + 
let now = Unix.gettimeofday () in + let plan_too_old = now -. !last_plan_time > !Xapi_globs.ha_monitor_plan_interval in + if plan_too_old || !plan_out_of_date then begin + let changed = Xapi_ha_vm_failover.update_pool_status ~__context ~live_set:liveset_refs () in + + (* Extremely bad: something managed to break our careful plan *) + if changed && not !plan_out_of_date then error "Overcommit protection failed to prevent a change which invalidated our failover plan"; + + last_plan_time := now; + plan_out_of_date := false; + end in + + (* Wait for all hosts in the liveset to become enabled so we can start VMs on them. We wait for up to ha_monitor_startup_timeout. *) + let wait_for_slaves_on_master () = + (* CA-17849: need to make sure that, in the xapi startup case, all hosts have been set to dead and disabled initially *) + info "Master HA startup waiting for explicit signal that the database state is valid"; + Mutex.execute thread_m + (fun () -> + while not(!database_state_valid) do + Condition.wait database_state_valid_c thread_m + done); + + info "Master HA startup waiting for up to %.2f for slaves in the liveset to report in and enable themselves" !Xapi_globs.ha_monitor_startup_timeout; + let start = Unix.gettimeofday () in + let finished = ref false in + while Mutex.execute m (fun () -> not(!request_shutdown)) && not(!finished) do + try + ignore(Delay.wait delay !Xapi_globs.ha_monitor_interval); + if Mutex.execute m (fun () -> not(!request_shutdown)) then begin + let liveset = query_liveset_on_all_hosts () in + let uuids = List.map Uuid.string_of_uuid (uuids_of_liveset liveset) in + let enabled = List.map (fun uuid -> Db.Host.get_enabled ~__context ~self:(Db.Host.get_by_uuid ~__context ~uuid)) uuids in + let enabled, disabled = List.partition (fun (_, x) -> x) (List.combine uuids enabled) in + debug "Enabled hosts = [ %s ]; disabled hosts = [ %s ]" (String.concat "; " (List.map fst enabled)) (String.concat "; " (List.map fst disabled)); + + if disabled = [] 
then begin + info "Master HA startup: All live hosts are now enabled: [ %s ]" (String.concat "; " (List.map fst enabled)); + finished := true; + end; + + if Unix.gettimeofday () -. start > !Xapi_globs.ha_monitor_startup_timeout && disabled <> [] then begin + info "Master HA startup: Timed out waiting for all live slaves to enable themselves (have some hosts failed to attach storage?) Live but disabled hosts: [ %s ]" + (String.concat "; " (List.map fst disabled)); + finished := true + end; + end; + with e -> + debug "Exception in HA monitor thread while waiting for slaves: %s" (ExnHelper.string_of_exn e); + Thread.delay !Xapi_globs.ha_monitor_interval + done in + + (* If we're the master we must wait for our live slaves to turn up before we consider restarting VMs etc *) + if Pool_role.is_master () then wait_for_slaves_on_master (); + + (* Monitoring phase: we must assume the worst and not touch the database here *) + while Mutex.execute m (fun () -> not(!request_shutdown)) do + try + ignore(Delay.wait delay !Xapi_globs.ha_monitor_interval); + + if Mutex.execute m (fun () -> not(!request_shutdown)) then begin + let liveset = query_liveset_on_all_hosts () in + if Pool_role.is_slave () then process_liveset_on_slave liveset; + if Pool_role.is_master () then begin + (* CA-23998: allow MTC to block master failover actions (ie VM restart) for a certain period of time + while their Level 2 VMs are being restarted. *) + finally + (fun () -> + let until = Mutex.execute m + (fun () -> + (* Callers of 'delay' will have to wait until we have finished processing this loop iteration *) + block_delay_calls := true; + !prevent_failover_actions_until) in + + (* FIST *) + while Xapi_fist.simulate_blocking_planner () do Thread.delay 1. done; + + let now = Unix.gettimeofday () in + if now < until + then debug "Blocking VM restart thread for at least another %.0f seconds" (until -. 
now) + else process_liveset_on_master liveset) + (fun () -> + (* Safe to unblock callers of 'delay' now *) + Mutex.execute m + (fun () -> + (* Callers of 'delay' can now safely request a delay knowing that the liveset won't be processed + until after the delay period. *) + block_delay_calls := false; + Condition.broadcast block_delay_calls_c) ) + end + end + with e -> + debug "Exception in HA monitor thread: %s" (ExnHelper.string_of_exn e); + Thread.delay !Xapi_globs.ha_monitor_interval + done; + + debug "Re-enabling old Host_metrics.live heartbeat"; + Mutex.execute Db_gc.use_host_heartbeat_for_liveness_m + (fun () -> Db_gc.use_host_heartbeat_for_liveness := true); + + debug "Stopping reading per-host HA stats"; + log_and_ignore_exn Rrdd.HA.disable; + + debug "HA background thread told to stop") + ) () + + let prevent_restarts_for seconds = + (* Wait until the thread stops processing and is about to sleep / is already sleeping *) + Mutex.execute m + (fun () -> + while !block_delay_calls = true do Condition.wait block_delay_calls_c m done; + debug "Blocking VM restart actions for another %Ld seconds" seconds; + prevent_failover_actions_until := Unix.gettimeofday () +. 
(Int64.to_float seconds); + (* If we get a value of 0 then we immediately trigger a VM restart *) + if seconds < 1L then Delay.signal delay + ) + + let start () = + debug "Monitor.start()"; + debug "Disabling old heartbeat; live flag will be driven directly from xHA liveset"; + (* NB in the xapi startup case this will prevent the db_gc.single_pass from setting any live flags *) + Mutex.execute Db_gc.use_host_heartbeat_for_liveness_m + (fun () -> Db_gc.use_host_heartbeat_for_liveness := false); + + Mutex.execute thread_m + (fun () -> + match !thread with + | Some _ -> raise Already_started + | None -> + (* This will cause the started thread to block until signal_database_state_valid is called *) + request_shutdown := false; + thread := Some (Thread.create ha_monitor ())) + + let signal_database_state_valid () = + Mutex.execute thread_m + (fun () -> + debug "Signalling HA monitor thread that it is ok to look at the database now"; + database_state_valid := true; + Condition.signal database_state_valid_c) + + let stop () = + debug "Monitor.stop()"; + Mutex.execute thread_m + (fun () -> + match !thread with + | None -> + warn "Failed to stop HA monitor thread because it wasn't running. Perhaps it was stopped more than once?" + | Some t -> + Mutex.execute m (fun () -> request_shutdown := true; Delay.signal delay); + Thread.join t; + thread := None) end (** Called by MTC in Orlando Update 1 to temporarily block the VM restart thread. *) let ha_prevent_restarts_for __context seconds = - (* Even if HA is not enabled, this should still go ahead (rather than doing - * a successful no-op) in case HA is about to be enabled within the specified - * number of seconds. Raising an error here caused CA-189075. *) - Monitor.prevent_restarts_for seconds + (* Even if HA is not enabled, this should still go ahead (rather than doing + * a successful no-op) in case HA is about to be enabled within the specified + * number of seconds. Raising an error here caused CA-189075. 
*) + Monitor.prevent_restarts_for seconds (* ----------------------------- *) @@ -638,515 +638,515 @@ let ha_prevent_restarts_for __context seconds = (* This function is called when HA is enabled during run-time: flush the DB to * the redo-log and make future DB changes get written as deltas. *) let redo_log_ha_enabled_during_runtime __context = - debug "Enabling HA, so also enabling writing to redo-log"; - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_redo_log_enabled ~__context ~self:pool then begin - info "A redo log is already in use; switch to the dedicated HA VDI."; - Redo_log.switch ha_redo_log Xapi_globs.ha_metadata_vdi_reason - end else begin - info "Switching on HA redo log."; - Redo_log.enable ha_redo_log Xapi_globs.ha_metadata_vdi_reason - (* upon the first attempt to write a delta, it will realise that a DB flush - * is necessary as the I/O process will not be running *) - end + debug "Enabling HA, so also enabling writing to redo-log"; + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_redo_log_enabled ~__context ~self:pool then begin + info "A redo log is already in use; switch to the dedicated HA VDI."; + Redo_log.switch ha_redo_log Xapi_globs.ha_metadata_vdi_reason + end else begin + info "Switching on HA redo log."; + Redo_log.enable ha_redo_log Xapi_globs.ha_metadata_vdi_reason + (* upon the first attempt to write a delta, it will realise that a DB flush + * is necessary as the I/O process will not be running *) + end (* This function is called when HA is disabled during run-time: stop the * I/O process and make future DB changes not go to the redo-log. 
*) let redo_log_ha_disabled_during_runtime __context = - debug "Disabling HA, so also disabling writing to redo-log"; - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_redo_log_enabled ~__context ~self:pool then begin - (* switch to the other VDI *) - info "A general redo-log is in available, independent from HA; using it now"; - Redo_log.switch ha_redo_log Xapi_globs.gen_metadata_vdi_reason - end - else begin - Redo_log_usage.stop_using_redo_log ha_redo_log; - Redo_log.disable ha_redo_log - end + debug "Disabling HA, so also disabling writing to redo-log"; + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_redo_log_enabled ~__context ~self:pool then begin + (* switch to the other VDI *) + info "A general redo-log is in available, independent from HA; using it now"; + Redo_log.switch ha_redo_log Xapi_globs.gen_metadata_vdi_reason + end + else begin + Redo_log_usage.stop_using_redo_log ha_redo_log; + Redo_log.disable ha_redo_log + end (* This function is called when HA is found to be enabled at startup, before * the DB backend is initialised. Read the latest DB from the block-device, and * make future DB changes get written as deltas. 
*) let redo_log_ha_enabled_at_startup () = - (* If we are still the master, extract any HA metadata database so we can consider population from it *) - if Pool_role.is_master () then begin - debug "HA is enabled, so enabling writing to redo-log"; - Redo_log.enable ha_redo_log Xapi_globs.ha_metadata_vdi_reason; (* enable the use of the redo log *) - debug "This node is a master; attempting to extract a database from a metadata VDI"; - let db_ref = Db_backend.make () in - Redo_log_usage.read_from_redo_log ha_redo_log Xapi_globs.ha_metadata_db db_ref (* best effort only: does not raise any exceptions *) - end + (* If we are still the master, extract any HA metadata database so we can consider population from it *) + if Pool_role.is_master () then begin + debug "HA is enabled, so enabling writing to redo-log"; + Redo_log.enable ha_redo_log Xapi_globs.ha_metadata_vdi_reason; (* enable the use of the redo log *) + debug "This node is a master; attempting to extract a database from a metadata VDI"; + let db_ref = Db_backend.make () in + Redo_log_usage.read_from_redo_log ha_redo_log Xapi_globs.ha_metadata_db db_ref (* best effort only: does not raise any exceptions *) + end (* ----------------------------- *) (** Called when xapi restarts: server may be in emergency mode at this point. We need - to inspect the local configuration and if HA is supposed to be armed we need to - set everything up. - Note that - the master shouldn't be able to activate HA while we are offline since that would cause - us to come up with a broken configuration (the enable-HA stage has the critical task of - synchronising the HA configuration on all the hosts). So really we only want to notice - if the Pool has had HA disabled while we were offline. *) + to inspect the local configuration and if HA is supposed to be armed we need to + set everything up. 
+ Note that + the master shouldn't be able to activate HA while we are offline since that would cause + us to come up with a broken configuration (the enable-HA stage has the critical task of + synchronising the HA configuration on all the hosts). So really we only want to notice + if the Pool has had HA disabled while we were offline. *) let on_server_restart () = - let armed = bool_of_string (Localdb.get Constants.ha_armed) in - - if armed then begin - debug "HA is supposed to be armed"; - (* Make sure daemons are up *) - - let finished = ref false in - (* Do not proceed any further until the situation is resolved. - XXX we might need some kind of user-override *) - while not (!finished) do - - (* If someone has called Host.emergency_ha_disable in the background then we notice the change here *) - if not (try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false) then begin - warn "ha_start_daemon aborted because someone has called Host.emergency_ha_disable"; - failwith "Host.emergency_ha_disable"; (* failure causes HA startup to abort *) - end; - - try - if Xapi_fist.ha_cannot_access_statefile () then raise (Xha_error Xha_errno.Mtc_exit_can_not_access_statefile); - if Xapi_fist.ha_daemon_startup_failed () then failwith "simulating xha daemon startup failure"; - - (* CA-21406: Try again to reattach the statefile VDI *) - Static_vdis.reattempt_on_boot_attach (); - - let (_ : string) = call_script ha_start_daemon [] in - finished := true; - with - | Xha_error Xha_errno.Mtc_exit_daemon_is_present -> - warn "ha_start_daemon failed with MTC_EXIT_DAEMON_IS_PRESENT: continuing with startup"; - finished := true; - | Xha_error Xha_errno.Mtc_exit_invalid_pool_state as e -> - warn "ha_start_daemon failed with MTC_EXIT_INVALID_POOL_STATE: disabling HA on this host"; - Localdb.put Constants.ha_armed "false"; - raise e - | Xha_error Xha_errno.Mtc_exit_can_not_access_statefile as e -> - warn "ha_start_daemon failed with MTC_EXIT_CAN_NOT_ACCESS_STATEFILE: will contact 
existing master and check if HA is still enabled"; - - (* check the Pool.ha_enabled on the master... assuming we can find the master. If we can't we stay here forever. *) - let master_can_confirm_ha_is_disabled () = - try - let address = Pool_role.get_master_address () in - Helpers.call_emergency_mode_functions address - (fun rpc session_id -> - let pool = List.hd (Client.Pool.get_all rpc session_id) in - not (Client.Pool.get_ha_enabled rpc session_id pool) - ) - with _ -> - (* there's no-one for us to ask about whether HA is enabled or not *) - false in - if master_can_confirm_ha_is_disabled () then begin - info "Existing master confirmed that HA is disabled pool-wide: disabling HA on this host"; - Localdb.put Constants.ha_armed "false"; - raise e - end; - info "Assuming HA is still enabled on the Pool and that our storage system has failed: will retry in 10s"; - Xapi_globs.slave_emergency_mode := true; - Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_host_cannot_access_statefile, []); - Helpers.touch_file !Xapi_globs.ready_file; - Thread.delay 10.; - | Xha_error errno -> - error "ha_start_daemon failed with unexpected error %s: will retry in 10s" (Xha_errno.to_string errno); - Xapi_globs.slave_emergency_mode := true; - Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_heartbeat_daemon_startup_failed, []); - Helpers.touch_file !Xapi_globs.ready_file; - Thread.delay 10.; - | e -> - error "ha_start_daemon failed with unexpected exception: %s -- retrying in 10s" (ExnHelper.string_of_exn e); - Xapi_globs.slave_emergency_mode := true; - Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_heartbeat_daemon_startup_failed, []); - Helpers.touch_file !Xapi_globs.ready_file; - Thread.delay 10.; - done; - - if Pool_role.is_master () then begin - if not (local_failover_decisions_are_ok ()) then begin - warn "ha.disable_failover_decisions flag set: not proposing myself as master"; - on_master_failure () - end 
else begin - if propose_master () - then info "ha_propose_master succeeded; continuing" - else on_master_failure () - end - end; - - (* Start up the redo-log if appropriate. *) - redo_log_ha_enabled_at_startup (); - - debug "About to start the monitor"; - Monitor.start (); - (* We signal the monitor that the database state is valid (wrt liveness + disabledness of hosts) later *) - end + let armed = bool_of_string (Localdb.get Constants.ha_armed) in + + if armed then begin + debug "HA is supposed to be armed"; + (* Make sure daemons are up *) + + let finished = ref false in + (* Do not proceed any further until the situation is resolved. + XXX we might need some kind of user-override *) + while not (!finished) do + + (* If someone has called Host.emergency_ha_disable in the background then we notice the change here *) + if not (try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false) then begin + warn "ha_start_daemon aborted because someone has called Host.emergency_ha_disable"; + failwith "Host.emergency_ha_disable"; (* failure causes HA startup to abort *) + end; + + try + if Xapi_fist.ha_cannot_access_statefile () then raise (Xha_error Xha_errno.Mtc_exit_can_not_access_statefile); + if Xapi_fist.ha_daemon_startup_failed () then failwith "simulating xha daemon startup failure"; + + (* CA-21406: Try again to reattach the statefile VDI *) + Static_vdis.reattempt_on_boot_attach (); + + let (_ : string) = call_script ha_start_daemon [] in + finished := true; + with + | Xha_error Xha_errno.Mtc_exit_daemon_is_present -> + warn "ha_start_daemon failed with MTC_EXIT_DAEMON_IS_PRESENT: continuing with startup"; + finished := true; + | Xha_error Xha_errno.Mtc_exit_invalid_pool_state as e -> + warn "ha_start_daemon failed with MTC_EXIT_INVALID_POOL_STATE: disabling HA on this host"; + Localdb.put Constants.ha_armed "false"; + raise e + | Xha_error Xha_errno.Mtc_exit_can_not_access_statefile as e -> + warn "ha_start_daemon failed with 
MTC_EXIT_CAN_NOT_ACCESS_STATEFILE: will contact existing master and check if HA is still enabled"; + + (* check the Pool.ha_enabled on the master... assuming we can find the master. If we can't we stay here forever. *) + let master_can_confirm_ha_is_disabled () = + try + let address = Pool_role.get_master_address () in + Helpers.call_emergency_mode_functions address + (fun rpc session_id -> + let pool = List.hd (Client.Pool.get_all rpc session_id) in + not (Client.Pool.get_ha_enabled rpc session_id pool) + ) + with _ -> + (* there's no-one for us to ask about whether HA is enabled or not *) + false in + if master_can_confirm_ha_is_disabled () then begin + info "Existing master confirmed that HA is disabled pool-wide: disabling HA on this host"; + Localdb.put Constants.ha_armed "false"; + raise e + end; + info "Assuming HA is still enabled on the Pool and that our storage system has failed: will retry in 10s"; + Xapi_globs.slave_emergency_mode := true; + Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_host_cannot_access_statefile, []); + Helpers.touch_file !Xapi_globs.ready_file; + Thread.delay 10.; + | Xha_error errno -> + error "ha_start_daemon failed with unexpected error %s: will retry in 10s" (Xha_errno.to_string errno); + Xapi_globs.slave_emergency_mode := true; + Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_heartbeat_daemon_startup_failed, []); + Helpers.touch_file !Xapi_globs.ready_file; + Thread.delay 10.; + | e -> + error "ha_start_daemon failed with unexpected exception: %s -- retrying in 10s" (ExnHelper.string_of_exn e); + Xapi_globs.slave_emergency_mode := true; + Xapi_globs.emergency_mode_error := Api_errors.Server_error(Api_errors.ha_heartbeat_daemon_startup_failed, []); + Helpers.touch_file !Xapi_globs.ready_file; + Thread.delay 10.; + done; + + if Pool_role.is_master () then begin + if not (local_failover_decisions_are_ok ()) then begin + warn "ha.disable_failover_decisions flag set: not proposing 
myself as master"; + on_master_failure () + end else begin + if propose_master () + then info "ha_propose_master succeeded; continuing" + else on_master_failure () + end + end; + + (* Start up the redo-log if appropriate. *) + redo_log_ha_enabled_at_startup (); + + debug "About to start the monitor"; + Monitor.start (); + (* We signal the monitor that the database state is valid (wrt liveness + disabledness of hosts) later *) + end (** Called in the master xapi startup when the database is ready. We set all hosts (including this one) to - disabled then signal the monitor thread to look. It can then wait for slaves to turn up - before trying to restart VMs. *) + disabled then signal the monitor thread to look. It can then wait for slaves to turn up + before trying to restart VMs. *) let on_database_engine_ready () = - info "Setting all hosts to dead and disabled. Hosts must re-enable themselves explicitly"; - Server_helpers.exec_with_new_task "Setting all hosts to dead and disabled" - (fun __context -> - List.iter (fun self -> - let uuid = Db.Host.get_uuid ~__context ~self in - let hostname = Db.Host.get_hostname ~__context ~self in - info "Host.enabled: on_database_engine_ready: setting host %s (%s) to disabled" uuid hostname; - Db.Host.set_enabled ~__context ~self ~value:false - ) - (Db.Host.get_all ~__context) - ); - Monitor.signal_database_state_valid () + info "Setting all hosts to dead and disabled. 
Hosts must re-enable themselves explicitly"; + Server_helpers.exec_with_new_task "Setting all hosts to dead and disabled" + (fun __context -> + List.iter (fun self -> + let uuid = Db.Host.get_uuid ~__context ~self in + let hostname = Db.Host.get_hostname ~__context ~self in + info "Host.enabled: on_database_engine_ready: setting host %s (%s) to disabled" uuid hostname; + Db.Host.set_enabled ~__context ~self ~value:false + ) + (Db.Host.get_all ~__context) + ); + Monitor.signal_database_state_valid () (*********************************************************************************************) (* Internal API calls to configure individual hosts *) (** Internal API call to prevent this node making an unsafe failover decision. - This call is idempotent. *) + This call is idempotent. *) let ha_disable_failover_decisions __context localhost = - debug "Disabling failover decisions"; - (* FIST *) - if Xapi_fist.disable_ha_disable_failover () then begin - error "FIST: ha_disable_failover_decisions"; - failwith "FIST: ha_disable_failover_decisions" - end; - Localdb.put Constants.ha_disable_failover_decisions "true" + debug "Disabling failover decisions"; + (* FIST *) + if Xapi_fist.disable_ha_disable_failover () then begin + error "FIST: ha_disable_failover_decisions"; + failwith "FIST: ha_disable_failover_decisions" + end; + Localdb.put Constants.ha_disable_failover_decisions "true" (** Internal API call to disarm localhost. - If the daemon is missing then we return success. Either fencing was previously disabled and the - daemon has shutdown OR the daemon has died and this node will fence shortly... - *) + If the daemon is missing then we return success. Either fencing was previously disabled and the + daemon has shutdown OR the daemon has died and this node will fence shortly... 
+*) let ha_disarm_fencing __context localhost = - try - let (_ : string) = call_script ha_disarm_fencing [] in () - with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> - info "ha_disarm_fencing: daemon has exited so returning success" + try + let (_ : string) = call_script ha_disarm_fencing [] in () + with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> + info "ha_disarm_fencing: daemon has exited so returning success" let ha_set_excluded __context localhost = - let (_ : string) = call_script ha_set_excluded [] in () + let (_ : string) = call_script ha_set_excluded [] in () (** Internal API call to stop the HA daemon. - This call is idempotent. *) + This call is idempotent. *) let ha_stop_daemon __context localhost = - Monitor.stop (); - let (_ : string) = call_script ha_stop_daemon [] in () + Monitor.stop (); + let (_ : string) = call_script ha_stop_daemon [] in () (** Emergency-mode API call to disarm localhost *) let emergency_ha_disable __context = - if Localdb.get Constants.ha_armed = "false" - then raise (Api_errors.Server_error(Api_errors.ha_not_enabled, [])); - - warn "Host.emergency_ha_disable: Disabling the HA subsystem on the local host only."; - Localdb.put Constants.ha_armed "false"; - - begin - try - ha_disarm_fencing __context (); - with Xha_error e -> - error "Host.emergency_ha_disable: ha_disarm_fencing failed with %s; continuing" (Xha_errno.to_string e) - end; - begin - try - ha_stop_daemon __context (); - with Xha_error e -> - error "Host.emergency_ha_disable: ha_stop_daemon failed with %s; continuing" (Xha_errno.to_string e) - end; - (* Might not be able to access the database to detach statefiles; however this isn't critical *) - () + if Localdb.get Constants.ha_armed = "false" + then raise (Api_errors.Server_error(Api_errors.ha_not_enabled, [])); + + warn "Host.emergency_ha_disable: Disabling the HA subsystem on the local host only."; + Localdb.put Constants.ha_armed "false"; + + begin + try + ha_disarm_fencing __context (); + 
with Xha_error e -> + error "Host.emergency_ha_disable: ha_disarm_fencing failed with %s; continuing" (Xha_errno.to_string e) + end; + begin + try + ha_stop_daemon __context (); + with Xha_error e -> + error "Host.emergency_ha_disable: ha_stop_daemon failed with %s; continuing" (Xha_errno.to_string e) + end; + (* Might not be able to access the database to detach statefiles; however this isn't critical *) + () (** Internal API call to release any HA resources after the system has - been shutdown. This call is idempotent. Modified for CA-48539 to - call vdi.deactivate before vdi.detach. *) + been shutdown. This call is idempotent. Modified for CA-48539 to + call vdi.deactivate before vdi.detach. *) let ha_release_resources __context localhost = - Monitor.stop (); - - (* Why aren't we calling Xha_statefile.detach_existing_statefiles? - Does Db.Pool.get_ha_statefiles return a different set of - statefiles than Xha_statefile.list_existing_statefiles? *) - - (* Deactivate and detach all statefile VDIs in the entire pool *) - let statefile_vdis = Db.Pool.get_ha_statefiles ~__context ~self:(Helpers.get_pool ~__context) - and deactivate_and_detach_vdi vdi_str = - let uuid = Db.VDI.get_uuid ~__context ~self:(Ref.of_string vdi_str) in - Helpers.log_exn_continue - (Printf.sprintf "detaching statefile VDI uuid: %s" uuid) - (fun () -> - Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid ; - Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) () - in List.iter deactivate_and_detach_vdi statefile_vdis ; - - (* Deactivate and detach any metadata VDIs *) - Helpers.log_exn_continue - (Printf.sprintf "deactivating and detaching metadata VDIs") - (fun () -> Xha_metadata_vdi.deactivate_and_detach_existing ~__context) (); - - (* At this point a restart won't enable the HA subsystem *) - Localdb.put Constants.ha_armed "false" + Monitor.stop (); + + (* Why aren't we calling Xha_statefile.detach_existing_statefiles? 
+ Does Db.Pool.get_ha_statefiles return a different set of + statefiles than Xha_statefile.list_existing_statefiles? *) + + (* Deactivate and detach all statefile VDIs in the entire pool *) + let statefile_vdis = Db.Pool.get_ha_statefiles ~__context ~self:(Helpers.get_pool ~__context) + and deactivate_and_detach_vdi vdi_str = + let uuid = Db.VDI.get_uuid ~__context ~self:(Ref.of_string vdi_str) in + Helpers.log_exn_continue + (Printf.sprintf "detaching statefile VDI uuid: %s" uuid) + (fun () -> + Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid ; + Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) () + in List.iter deactivate_and_detach_vdi statefile_vdis ; + + (* Deactivate and detach any metadata VDIs *) + Helpers.log_exn_continue + (Printf.sprintf "deactivating and detaching metadata VDIs") + (fun () -> Xha_metadata_vdi.deactivate_and_detach_existing ~__context) (); + + (* At this point a restart won't enable the HA subsystem *) + Localdb.put Constants.ha_armed "false" (** Internal API call which blocks until this node's xHA daemon spots the invalid statefile - and exits cleanly. If the daemon survives but the statefile access is lost then this function - will return an exception and the no-statefile shutdown can be attempted. - *) + and exits cleanly. If the daemon survives but the statefile access is lost then this function + will return an exception and the no-statefile shutdown can be attempted. 
+*) let ha_wait_for_shutdown_via_statefile __context localhost = - try - while true do - let liveset = query_liveset () in - - let hosts = liveset.Xha_interface.LiveSetInformation.hosts in - let me = Hashtbl.find hosts liveset.Xha_interface.LiveSetInformation.local_host_id in - (* If we have no statefile access or if it looks corrupted, fail the operation *) - if false - || not me.Xha_interface.LiveSetInformation.Host.state_file_access - || me.Xha_interface.LiveSetInformation.Host.state_file_corrupted - then raise (Api_errors.Server_error(Api_errors.ha_lost_statefile, [])); - - (* check to see if this node still has statefile access *) - Thread.delay 5. - done - with - | Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> - info "ha_wait_for_shutdown_via_statefile: daemon has exited so returning success" + try + while true do + let liveset = query_liveset () in + + let hosts = liveset.Xha_interface.LiveSetInformation.hosts in + let me = Hashtbl.find hosts liveset.Xha_interface.LiveSetInformation.local_host_id in + (* If we have no statefile access or if it looks corrupted, fail the operation *) + if false + || not me.Xha_interface.LiveSetInformation.Host.state_file_access + || me.Xha_interface.LiveSetInformation.Host.state_file_corrupted + then raise (Api_errors.Server_error(Api_errors.ha_lost_statefile, [])); + + (* check to see if this node still has statefile access *) + Thread.delay 5. 
+ done + with + | Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> + info "ha_wait_for_shutdown_via_statefile: daemon has exited so returning success" (** Attach the statefile VDIs and return the resulting list of paths in dom0 *) let attach_statefiles ~__context statevdis = - (* First GC any existing statefiles: these are not needed any more *) - info "Detaching any existing statefiles: these are not needed any more"; - Xha_statefile.detach_existing_statefiles ~__context; - - let paths = ref [] in - begin - let cur_vdi_str = ref "" in - try - List.iter - (fun vdi -> - cur_vdi_str := Ref.string_of vdi; - info "Attempting to permanently attach statefile VDI: %s" (Ref.string_of vdi); - paths := Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason:Xha_statefile.reason:: !paths) statevdis - with e -> - error "Caught exception attaching statefile: %s" (ExnHelper.string_of_exn e); - List.iter - (fun vdi -> - Helpers.log_exn_continue - (Printf.sprintf "detaching statefile: %s" (Ref.string_of vdi)) - (fun () -> Static_vdis.permanent_vdi_detach ~__context ~vdi) () - ) statevdis; - raise (Api_errors.Server_error(Api_errors.vdi_not_available, [!cur_vdi_str])) - end; - !paths + (* First GC any existing statefiles: these are not needed any more *) + info "Detaching any existing statefiles: these are not needed any more"; + Xha_statefile.detach_existing_statefiles ~__context; + + let paths = ref [] in + begin + let cur_vdi_str = ref "" in + try + List.iter + (fun vdi -> + cur_vdi_str := Ref.string_of vdi; + info "Attempting to permanently attach statefile VDI: %s" (Ref.string_of vdi); + paths := Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason:Xha_statefile.reason:: !paths) statevdis + with e -> + error "Caught exception attaching statefile: %s" (ExnHelper.string_of_exn e); + List.iter + (fun vdi -> + Helpers.log_exn_continue + (Printf.sprintf "detaching statefile: %s" (Ref.string_of vdi)) + (fun () -> Static_vdis.permanent_vdi_detach ~__context ~vdi) () + ) 
statevdis; + raise (Api_errors.Server_error(Api_errors.vdi_not_available, [!cur_vdi_str])) + end; + !paths (** Attach the metadata VDI and return the resulting path in dom0 *) let attach_metadata_vdi ~__context vdi = - info "Detaching any existing metadata volume: these are not needed anymore"; - Xha_metadata_vdi.detach_existing ~__context; - Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason:Xapi_globs.ha_metadata_vdi_reason + info "Detaching any existing metadata volume: these are not needed anymore"; + Xha_metadata_vdi.detach_existing ~__context; + Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason:Xapi_globs.ha_metadata_vdi_reason (** Write the local configfile *) let write_config_file ~__context statevdi_paths generation = - let local_heart_beat_interface = Xapi_inventory.lookup Xapi_inventory._management_interface in - (* Need to find the name of the physical interface, so xHA can monitor the bonding status (if appropriate). - Note that this interface isn't used for sending packets so VLANs don't matter: the physical NIC or bond device is all we need. 
*) - let localhost = Helpers.get_localhost ~__context in - let mgmt_pifs = List.filter (fun self -> Db.PIF.get_management ~__context ~self) (Db.Host.get_PIFs ~__context ~self:localhost) in - if mgmt_pifs = [] then failwith (Printf.sprintf "Cannot enable HA on host %s: there is no management interface for heartbeating" (Db.Host.get_hostname ~__context ~self:localhost)); - let mgmt_pif = List.hd mgmt_pifs in (* there should be only one in Orlando *) - let local_heart_beat_physical_interface = Db.PIF.get_device ~__context ~self:mgmt_pif in - - let local_state_file = List.hd statevdi_paths in - info "Using statefile: %s" local_state_file; - - let base_t = Timeouts.get_base_t ~__context in - let timeouts = Timeouts.derive base_t in - - (* Rewrite HA configuration files *) - let config = Xha_interface.DaemonConfiguration.create - ~state_file_interval:timeouts.Timeouts.state_file_interval - ~heart_beat_interval:timeouts.Timeouts.heart_beat_interval - ~state_file_timeout:timeouts.Timeouts.state_file_timeout - ~heart_beat_timeout:timeouts.Timeouts.heart_beat_timeout - ~state_file_watchdog_timeout:timeouts.Timeouts.state_file_watchdog_timeout - ~heart_beat_watchdog_timeout:timeouts.Timeouts.heart_beat_watchdog_timeout - ~boot_join_timeout:timeouts.Timeouts.boot_join_timeout - ~enable_join_timeout:timeouts.Timeouts.enable_join_timeout - ~xapi_healthcheck_interval:timeouts.Timeouts.xapi_healthcheck_interval - ~xapi_healthcheck_timeout:timeouts.Timeouts.xapi_healthcheck_timeout - ~xapi_restart_attempts:timeouts.Timeouts.xapi_restart_attempts - ~xapi_restart_timeout:timeouts.Timeouts.xapi_restart_timeout - ~common_udp_port:Xapi_globs.xha_udp_port - ~common_generation_uuid:(Uuid.uuid_of_string generation) - ~local_heart_beat_interface - ~local_heart_beat_physical_interface - ~local_state_file - ~__context - () in - - Unixext.write_string_to_file Xha_interface.DaemonConfiguration.filename - (Xha_interface.DaemonConfiguration.to_xml_string config); - debug "%s file written" 
Xha_interface.DaemonConfiguration.filename + let local_heart_beat_interface = Xapi_inventory.lookup Xapi_inventory._management_interface in + (* Need to find the name of the physical interface, so xHA can monitor the bonding status (if appropriate). + Note that this interface isn't used for sending packets so VLANs don't matter: the physical NIC or bond device is all we need. *) + let localhost = Helpers.get_localhost ~__context in + let mgmt_pifs = List.filter (fun self -> Db.PIF.get_management ~__context ~self) (Db.Host.get_PIFs ~__context ~self:localhost) in + if mgmt_pifs = [] then failwith (Printf.sprintf "Cannot enable HA on host %s: there is no management interface for heartbeating" (Db.Host.get_hostname ~__context ~self:localhost)); + let mgmt_pif = List.hd mgmt_pifs in (* there should be only one in Orlando *) + let local_heart_beat_physical_interface = Db.PIF.get_device ~__context ~self:mgmt_pif in + + let local_state_file = List.hd statevdi_paths in + info "Using statefile: %s" local_state_file; + + let base_t = Timeouts.get_base_t ~__context in + let timeouts = Timeouts.derive base_t in + + (* Rewrite HA configuration files *) + let config = Xha_interface.DaemonConfiguration.create + ~state_file_interval:timeouts.Timeouts.state_file_interval + ~heart_beat_interval:timeouts.Timeouts.heart_beat_interval + ~state_file_timeout:timeouts.Timeouts.state_file_timeout + ~heart_beat_timeout:timeouts.Timeouts.heart_beat_timeout + ~state_file_watchdog_timeout:timeouts.Timeouts.state_file_watchdog_timeout + ~heart_beat_watchdog_timeout:timeouts.Timeouts.heart_beat_watchdog_timeout + ~boot_join_timeout:timeouts.Timeouts.boot_join_timeout + ~enable_join_timeout:timeouts.Timeouts.enable_join_timeout + ~xapi_healthcheck_interval:timeouts.Timeouts.xapi_healthcheck_interval + ~xapi_healthcheck_timeout:timeouts.Timeouts.xapi_healthcheck_timeout + ~xapi_restart_attempts:timeouts.Timeouts.xapi_restart_attempts + ~xapi_restart_timeout:timeouts.Timeouts.xapi_restart_timeout + 
~common_udp_port:Xapi_globs.xha_udp_port + ~common_generation_uuid:(Uuid.uuid_of_string generation) + ~local_heart_beat_interface + ~local_heart_beat_physical_interface + ~local_state_file + ~__context + () in + + Unixext.write_string_to_file Xha_interface.DaemonConfiguration.filename + (Xha_interface.DaemonConfiguration.to_xml_string config); + debug "%s file written" Xha_interface.DaemonConfiguration.filename (** Internal API call to preconfigure localhost *) let preconfigure_host __context localhost statevdis metadata_vdi generation = - info "Host.preconfigure_ha host = %s; statevdis = [ %s ]; generation = %s" - (Ref.string_of localhost) (String.concat "; " (List.map Ref.string_of statevdis)) generation; - - (* FIST *) - if Xapi_fist.reconfigure_host () then begin - error "FIST: fist_reconfigure_host"; - failwith "FIST: fist_reconfigure_host" - end; - - (* Write name of cluster stack to the local DB. This determines which HA scripts we use. *) - let pool = Helpers.get_pool ~__context in - let cluster_stack = Db.Pool.get_ha_cluster_stack ~__context ~self:pool in - (try - let dir = Filename.concat !Xapi_globs.cluster_stack_root cluster_stack in - Unix.access dir [Unix.F_OK] - with _ -> - failwith ("cluster stack " ^ cluster_stack ^ " not installed")); - Localdb.put Constants.ha_cluster_stack cluster_stack; - - Db.Host.set_ha_statefiles ~__context ~self:localhost ~value:(List.map Ref.string_of statevdis); - - (* The master has already attached the statefile VDIs and written the - configuration file. 
*) - if not(Pool_role.is_master ()) then begin - let statefiles = attach_statefiles ~__context statevdis in - write_config_file ~__context statefiles generation; - - (* It's unnecessary to remember the path since this can be queried dynamically *) - ignore(attach_metadata_vdi ~__context metadata_vdi); - end; - - write_uuid_to_ip_mapping ~__context; - - let base_t = Timeouts.get_base_t ~__context in - Localdb.put Constants.ha_base_t (string_of_int base_t) + info "Host.preconfigure_ha host = %s; statevdis = [ %s ]; generation = %s" + (Ref.string_of localhost) (String.concat "; " (List.map Ref.string_of statevdis)) generation; + + (* FIST *) + if Xapi_fist.reconfigure_host () then begin + error "FIST: fist_reconfigure_host"; + failwith "FIST: fist_reconfigure_host" + end; + + (* Write name of cluster stack to the local DB. This determines which HA scripts we use. *) + let pool = Helpers.get_pool ~__context in + let cluster_stack = Db.Pool.get_ha_cluster_stack ~__context ~self:pool in + (try + let dir = Filename.concat !Xapi_globs.cluster_stack_root cluster_stack in + Unix.access dir [Unix.F_OK] + with _ -> + failwith ("cluster stack " ^ cluster_stack ^ " not installed")); + Localdb.put Constants.ha_cluster_stack cluster_stack; + + Db.Host.set_ha_statefiles ~__context ~self:localhost ~value:(List.map Ref.string_of statevdis); + + (* The master has already attached the statefile VDIs and written the + configuration file. 
*) + if not(Pool_role.is_master ()) then begin + let statefiles = attach_statefiles ~__context statevdis in + write_config_file ~__context statefiles generation; + + (* It's unnecessary to remember the path since this can be queried dynamically *) + ignore(attach_metadata_vdi ~__context metadata_vdi); + end; + + write_uuid_to_ip_mapping ~__context; + + let base_t = Timeouts.get_base_t ~__context in + Localdb.put Constants.ha_base_t (string_of_int base_t) let join_liveset __context host = - info "Host.ha_join_liveset host = %s" (Ref.string_of host); - let (_ : string) = call_script ha_start_daemon [] in - Localdb.put Constants.ha_disable_failover_decisions "false"; - Localdb.put Constants.ha_armed "true"; - info "Local flag ha_armed <- true"; - - (* If this host is the current master then it must assert its authority as master; - otherwise another host's heartbeat thread might conclude that the master has gone - and propose itself. This would lead the xHA notion of master to immediately diverge - from the XenAPI notion. *) - if Pool_role.is_master () then begin - if not (propose_master ()) - then failwith "failed to propose the current master as master"; - info "ha_propose_master succeeded; continuing"; - end else begin - (* If this host is a slave then we must wait to confirm that the master manages to - assert itself, otherwise our monitoring thread might attempt a hostile takeover *) - let master_address = Pool_role.get_master_address () in - let master_uuid = Uuid.uuid_of_string (uuid_of_host_address master_address) in - let master_found = ref false in - while not !master_found do - (* It takes a non-trivial amount of time for the master to assert itself: we might - as well wait here rather than enumerating all the if/then/else branches where we - should wait. 
*) - Thread.delay 5.; - let liveset = query_liveset () in - debug "Liveset: %s" (Xha_interface.LiveSetInformation.to_summary_string liveset); - if liveset.Xha_interface.LiveSetInformation.status = Xha_interface.LiveSetInformation.Status.Online then begin - (* 'master' is the node we believe should become the xHA-level master initially *) - let master = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts master_uuid in - if master.Xha_interface.LiveSetInformation.Host.master then begin - info "existing master has successfully asserted itself"; - master_found := true (* loop will terminate *) - end else begin - if false - || not master.Xha_interface.LiveSetInformation.Host.liveness - || master.Xha_interface.LiveSetInformation.Host.state_file_corrupted - || not master.Xha_interface.LiveSetInformation.Host.state_file_access - || master.Xha_interface.LiveSetInformation.Host.excluded then begin - error "Existing master has failed during HA enable process"; - failwith "Existing master failed during HA enable process" - end else debug "existing master has not yet asserted itself: looking again in 5s"; - end - end else debug "liveset is not yet online: looking again in 5s"; - done - end; - - debug "About to start the monitor"; - Monitor.start (); - Monitor.signal_database_state_valid () + info "Host.ha_join_liveset host = %s" (Ref.string_of host); + let (_ : string) = call_script ha_start_daemon [] in + Localdb.put Constants.ha_disable_failover_decisions "false"; + Localdb.put Constants.ha_armed "true"; + info "Local flag ha_armed <- true"; + + (* If this host is the current master then it must assert its authority as master; + otherwise another host's heartbeat thread might conclude that the master has gone + and propose itself. This would lead the xHA notion of master to immediately diverge + from the XenAPI notion. 
*) + if Pool_role.is_master () then begin + if not (propose_master ()) + then failwith "failed to propose the current master as master"; + info "ha_propose_master succeeded; continuing"; + end else begin + (* If this host is a slave then we must wait to confirm that the master manages to + assert itself, otherwise our monitoring thread might attempt a hostile takeover *) + let master_address = Pool_role.get_master_address () in + let master_uuid = Uuid.uuid_of_string (uuid_of_host_address master_address) in + let master_found = ref false in + while not !master_found do + (* It takes a non-trivial amount of time for the master to assert itself: we might + as well wait here rather than enumerating all the if/then/else branches where we + should wait. *) + Thread.delay 5.; + let liveset = query_liveset () in + debug "Liveset: %s" (Xha_interface.LiveSetInformation.to_summary_string liveset); + if liveset.Xha_interface.LiveSetInformation.status = Xha_interface.LiveSetInformation.Status.Online then begin + (* 'master' is the node we believe should become the xHA-level master initially *) + let master = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts master_uuid in + if master.Xha_interface.LiveSetInformation.Host.master then begin + info "existing master has successfully asserted itself"; + master_found := true (* loop will terminate *) + end else begin + if false + || not master.Xha_interface.LiveSetInformation.Host.liveness + || master.Xha_interface.LiveSetInformation.Host.state_file_corrupted + || not master.Xha_interface.LiveSetInformation.Host.state_file_access + || master.Xha_interface.LiveSetInformation.Host.excluded then begin + error "Existing master has failed during HA enable process"; + failwith "Existing master failed during HA enable process" + end else debug "existing master has not yet asserted itself: looking again in 5s"; + end + end else debug "liveset is not yet online: looking again in 5s"; + done + end; + + debug "About to start the 
monitor"; + Monitor.start (); + Monitor.signal_database_state_valid () (* The last proposal received *) let proposed_master : string option ref = ref None - (* The time the proposal was received. XXX need to be quite careful with timeouts to handle - the case where the proposed new master dies in the middle of the protocol. Once we believe - he has fenced himself then we can abort the transaction. *) +(* The time the proposal was received. XXX need to be quite careful with timeouts to handle + the case where the proposed new master dies in the middle of the protocol. Once we believe + he has fenced himself then we can abort the transaction. *) let proposed_master_time = ref 0. let proposed_master_m = Mutex.create () (* This should be called under proposed_master_m *) let rec propose_new_master_internal ~__context ~address ~manual = - (* Handy function to throw the right API error *) - let issue_abort reason = - raise (Api_errors.Server_error (Api_errors.ha_abort_new_master, [ reason ])) - in - match !proposed_master with - | Some x when address = x -> - proposed_master_time := Unix.gettimeofday () - | Some x -> begin - (* XXX: check if we're past the fencing time *) - let now = Unix.gettimeofday () in - let diff = now -. !proposed_master_time in - let ten_minutes = 10. *. 60. 
in (* TO TEST: change to 60 secs *) - - if diff > ten_minutes - then begin - proposed_master := None; - propose_new_master_internal ~__context ~address ~manual - end else - issue_abort (Printf.sprintf "Already agreed to commit host address '%s' at %s ('%f' secs ago)" - x (Date.to_string (Date.of_float !proposed_master_time)) diff) - end - | None -> - (* XXX no more automatic transititions *) - - proposed_master := Some address; - proposed_master_time := Unix.gettimeofday () + (* Handy function to throw the right API error *) + let issue_abort reason = + raise (Api_errors.Server_error (Api_errors.ha_abort_new_master, [ reason ])) + in + match !proposed_master with + | Some x when address = x -> + proposed_master_time := Unix.gettimeofday () + | Some x -> begin + (* XXX: check if we're past the fencing time *) + let now = Unix.gettimeofday () in + let diff = now -. !proposed_master_time in + let ten_minutes = 10. *. 60. in (* TO TEST: change to 60 secs *) + + if diff > ten_minutes + then begin + proposed_master := None; + propose_new_master_internal ~__context ~address ~manual + end else + issue_abort (Printf.sprintf "Already agreed to commit host address '%s' at %s ('%f' secs ago)" + x (Date.to_string (Date.of_float !proposed_master_time)) diff) + end + | None -> + (* XXX no more automatic transititions *) + + proposed_master := Some address; + proposed_master_time := Unix.gettimeofday () (* First phase of a two-phase commit of a new master *) let propose_new_master ~__context ~address ~manual = - Mutex.execute proposed_master_m - (fun () -> propose_new_master_internal ~__context ~address ~manual) + Mutex.execute proposed_master_m + (fun () -> propose_new_master_internal ~__context ~address ~manual) (* Second phase of a two-phase commit of a new master *) let commit_new_master ~__context ~address = - begin match !proposed_master with - | Some x when x <> address -> - let msg = Printf.sprintf "Received commit_new_master(%s) but previously received proposal for %s" 
address x in - error "%s" msg; - raise (Api_errors.Server_error(Api_errors.ha_abort_new_master, [ msg ])) - | None -> - let msg = Printf.sprintf "Received commit_new_master(%s) but never received a proposal" address in - error "%s" msg; - raise (Api_errors.Server_error(Api_errors.ha_abort_new_master, [ msg ])) - | Some _ -> debug "Setting new master address to: %s" address; - end; - - Mutex.execute proposed_master_m - (fun () -> - (* NB we might not be in emergency mode yet, so not identical to - Xapi_pool.emergency_reset_master *) - if Helpers.this_is_my_address ~__context address - then Xapi_pool_transition.become_master () - else Xapi_pool_transition.become_another_masters_slave address) + begin match !proposed_master with + | Some x when x <> address -> + let msg = Printf.sprintf "Received commit_new_master(%s) but previously received proposal for %s" address x in + error "%s" msg; + raise (Api_errors.Server_error(Api_errors.ha_abort_new_master, [ msg ])) + | None -> + let msg = Printf.sprintf "Received commit_new_master(%s) but never received a proposal" address in + error "%s" msg; + raise (Api_errors.Server_error(Api_errors.ha_abort_new_master, [ msg ])) + | Some _ -> debug "Setting new master address to: %s" address; + end; + + Mutex.execute proposed_master_m + (fun () -> + (* NB we might not be in emergency mode yet, so not identical to + Xapi_pool.emergency_reset_master *) + if Helpers.this_is_my_address ~__context address + then Xapi_pool_transition.become_master () + else Xapi_pool_transition.become_another_masters_slave address) let abort_new_master ~__context ~address = - Mutex.execute proposed_master_m - (fun () -> - if !proposed_master = None - then error "Received abort_new_master %s but we never saw the original proposal" address; - proposed_master := None) + Mutex.execute proposed_master_m + (fun () -> + if !proposed_master = None + then error "Received abort_new_master %s but we never saw the original proposal" address; + proposed_master := 
None) (*********************************************************************************************) @@ -1157,433 +1157,433 @@ let abort_new_master ~__context ~address = remaining nodes cannot survive (either they see the poisoned statefile or they don't see a full heartbeat partition) *) let disable_internal __context = - debug "Disabling HA on the Pool"; - - let pool = Helpers.get_pool ~__context in - - (* Find the HA metadata and statefile VDIs for later *) - let statefile_vdis = List.map Ref.of_string (Db.Pool.get_ha_statefiles ~__context ~self:pool) in - let metadata_vdis = List.map (fun x -> Db.VDI.get_by_uuid ~__context ~uuid:x.Static_vdis.uuid) (Xha_metadata_vdi.list_existing ()) in - - redo_log_ha_disabled_during_runtime __context; - - (* Steps from 8.6 Disabling HA - If the master has access to the state file (how do we determine this)? - * ha_set_pool_state(invalid) - If the master hasn't access to the state file but all hosts are available via heartbeat - * set the flag "can not be master and no VM failover decision on next boot" - * ha_disarm_fencing() - * ha_stop_daemon() - Otherwise we'll be fenced *) - - let hosts = Db.Host.get_all ~__context in - - (** Attempt the HA disable via the statefile, returning true if successful and false - otherwise. 
If false then we'll retry with the no-statefile procedure *) - let attempt_disable_through_statefile () = - info "I have statefile access -- setting pool state to invalid"; - - (* FIST *) - if Xapi_fist.disable_ha_via_statefile () then begin - error "FIST: attempt_disable_through_statefile"; - failwith "FIST: attempt_disable_through_statefile" - end; - - begin - try - let (_: string) = call_script ha_set_pool_state [ "invalid" ] in () - with e -> - error "Caught exception %s while setting pool state to invalid-- retrying with no-statefile procedure" (ExnHelper.string_of_exn e); - raise e - end; - - (* Normally all hosts would now see the invalid pool state and gracefully disarm fencing - and stop their HA daemons. If the statefile disappears for *all* hosts then the hosts - could remain alive by Survival Rule 2, if none of them (including us!) saw the invalid state. - - [XXX: can the HA daemon on this node fail to notice the invalid state?] - - We can prevent this by explicitly stopping our HA daemon now -- this will cause remaining - nodes to self-fence if the statefile disappears. *) - Helpers.log_exn_continue - "stopping HA daemon on the master after setting pool state to invalid" - (fun () -> ha_stop_daemon __context (Helpers.get_localhost ~__context)) (); - - (* No node may become the master automatically without the statefile so we can safely change - the Pool state to disabled *) - Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:0L; - Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; - info "Pool.ha_enabled <- false"; - - (* The rest of the shutdown is necessarily best-effort: errors are logged but the operation - will succeed anyway. Noone will perform any failover actions. 
*) - - Helpers.call_api_functions ~__context - (fun rpc session_id -> - (* Wait for each host to shutdown via the statefile *) - let errors = thread_iter_all_exns - (fun host -> - debug "Waiting for host '%s' ('%s') to see invalid statefile" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_wait_for_shutdown_via_statefile rpc session_id host - ) hosts in - (* Print all the errors that happened *) - List.iter - (fun (host, e) -> - error "Host '%s' ('%s') failed to diable HA via statefile; if node has statefile access it will disarm; if not it will self-fence (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors - ) in - - (** Attempt the HA disable without the statefile. *) - let attempt_disable_without_statefile () = - - (* This is the no-statefile procedure: *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - (* By disabling failover decisions (through ha_disable_failover_decisions) we prevent a - failure leaving some hosts with their fencing disabled, causing potential split-brain - and VM corruption. - We cannot continue unless all hosts have completed this operation. - Transient failures (due to temporary network blips) may cause this operation to fail - in which case the user will have to retry. Permanent network outtages will cause all - nodes to self-fence. 
*) - let errors = thread_iter_all_exns - (fun host -> - debug "Disabling all failover decisions on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_disable_failover_decisions rpc session_id host) hosts in - List.iter - (fun (host, e) -> - error "Host '%s' ('%s') failed to diable failover decisions: %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors; - if errors <> [] then raise (snd (List.hd errors)); - - (* From this point no host will attempt to become the new master so no split-brain. - - This also means that we own the pool database and can safely set ha_enabled to false, - knowing that, although each slave has a backup database where ha_enabled is still true, - they won't be able to rollback our change because they cannot become the master. - - NB even if we fail to disarm fencing on individuals then the worst that will happen - is that they will fail and fence themselves. When they come back they will either - resynchronise HA state with us and disarm themselves, or if we've failed the situation - is equivalent to a master failure without HA. 
*) - Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; - info "Pool.ha_enabled <- false"; - - let errors = thread_iter_all_exns - (fun host -> - debug "Disarming fencing on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_disarm_fencing rpc session_id host - ) hosts in - List.iter - (fun (host, e) -> - error "Failed to disarm fencing on host '%s' ('%s'); this means the host may well be about to fence itself even though HA is officially disabled on the Pool (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors; - - let errors = thread_iter_all_exns - (fun host -> - debug "Stopping HA daemon on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_stop_daemon rpc session_id host - ) hosts in - List.iter - (fun (host, e) -> - error "Failed to stop daemon on host '%s' ('%s') even though HA is officially disabled on the Pool (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors - ) in - - try - - let do_one_attempt () = - (* Have a go at disabling HA. If we're sure we've done it then return true. If we suffer a partial - failure (which may leave some nodes with their failover actions disabled) then return false. *) - let exn_to_bool f = try f (); true with _ -> false in - (* Check if the statefile exists and try that first. If it succeeds then we're done. If it fails or wasn't attempted - then we need to try the without-statefile procedure: *) - (if i_have_statefile_access () - then exn_to_bool attempt_disable_through_statefile - else false) - || (exn_to_bool attempt_disable_without_statefile) in - - (* CA-16296: if we temporarily lose access to the statefile and attempt the non-statefile procedure - we will fail if some nodes cannot be contacted to have their failover decision flag set. 
If the - statefile comes back then the pool can become stable again but with some nodes crippled by the - failover decision flag. If this partial failure happens we keep trying forever to disable HA. *) - while not(do_one_attempt ()) do - error "Suffered a partial failure during HA disable procedure. Will try again in 30s"; - Thread.delay 30. - done; - - (* Assuming all is well then we can release resources on all hosts *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let errors = thread_iter_all_exns - (fun host -> - debug "Releasing resources on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_release_resources rpc session_id host - ) hosts in - List.iter - (fun (host, e) -> - error "Failed to release HA resources on host '%s' ('%s') (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors - ); - (* Update the allowed operations on the statefile VDIs for tidiness *) - List.iter (fun vdi -> Xapi_vdi.update_allowed_operations ~__context ~self:vdi) (metadata_vdis @ statefile_vdis); - - with exn -> - error "Caught exception while disabling HA: %s" (ExnHelper.string_of_exn exn); - error "Pool.ha_enabled = %b [but some hosts may be out of sync]" (Db.Pool.get_ha_enabled ~__context ~self:pool); - raise exn + debug "Disabling HA on the Pool"; + + let pool = Helpers.get_pool ~__context in + + (* Find the HA metadata and statefile VDIs for later *) + let statefile_vdis = List.map Ref.of_string (Db.Pool.get_ha_statefiles ~__context ~self:pool) in + let metadata_vdis = List.map (fun x -> Db.VDI.get_by_uuid ~__context ~uuid:x.Static_vdis.uuid) (Xha_metadata_vdi.list_existing ()) in + + redo_log_ha_disabled_during_runtime __context; + + (* Steps from 8.6 Disabling HA + If the master has access to the state file (how do we determine this)? 
+ * ha_set_pool_state(invalid) + If the master hasn't access to the state file but all hosts are available via heartbeat + * set the flag "can not be master and no VM failover decision on next boot" + * ha_disarm_fencing() + * ha_stop_daemon() + Otherwise we'll be fenced *) + + let hosts = Db.Host.get_all ~__context in + + (** Attempt the HA disable via the statefile, returning true if successful and false + otherwise. If false then we'll retry with the no-statefile procedure *) + let attempt_disable_through_statefile () = + info "I have statefile access -- setting pool state to invalid"; + + (* FIST *) + if Xapi_fist.disable_ha_via_statefile () then begin + error "FIST: attempt_disable_through_statefile"; + failwith "FIST: attempt_disable_through_statefile" + end; + + begin + try + let (_: string) = call_script ha_set_pool_state [ "invalid" ] in () + with e -> + error "Caught exception %s while setting pool state to invalid-- retrying with no-statefile procedure" (ExnHelper.string_of_exn e); + raise e + end; + + (* Normally all hosts would now see the invalid pool state and gracefully disarm fencing + and stop their HA daemons. If the statefile disappears for *all* hosts then the hosts + could remain alive by Survival Rule 2, if none of them (including us!) saw the invalid state. + + [XXX: can the HA daemon on this node fail to notice the invalid state?] + + We can prevent this by explicitly stopping our HA daemon now -- this will cause remaining + nodes to self-fence if the statefile disappears. 
*) + Helpers.log_exn_continue + "stopping HA daemon on the master after setting pool state to invalid" + (fun () -> ha_stop_daemon __context (Helpers.get_localhost ~__context)) (); + + (* No node may become the master automatically without the statefile so we can safely change + the Pool state to disabled *) + Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:0L; + Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; + info "Pool.ha_enabled <- false"; + + (* The rest of the shutdown is necessarily best-effort: errors are logged but the operation + will succeed anyway. Noone will perform any failover actions. *) + + Helpers.call_api_functions ~__context + (fun rpc session_id -> + (* Wait for each host to shutdown via the statefile *) + let errors = thread_iter_all_exns + (fun host -> + debug "Waiting for host '%s' ('%s') to see invalid statefile" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_wait_for_shutdown_via_statefile rpc session_id host + ) hosts in + (* Print all the errors that happened *) + List.iter + (fun (host, e) -> + error "Host '%s' ('%s') failed to diable HA via statefile; if node has statefile access it will disarm; if not it will self-fence (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors + ) in + + (** Attempt the HA disable without the statefile. *) + let attempt_disable_without_statefile () = + + (* This is the no-statefile procedure: *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + (* By disabling failover decisions (through ha_disable_failover_decisions) we prevent a + failure leaving some hosts with their fencing disabled, causing potential split-brain + and VM corruption. + We cannot continue unless all hosts have completed this operation. + Transient failures (due to temporary network blips) may cause this operation to fail + in which case the user will have to retry. 
Permanent network outtages will cause all + nodes to self-fence. *) + let errors = thread_iter_all_exns + (fun host -> + debug "Disabling all failover decisions on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_disable_failover_decisions rpc session_id host) hosts in + List.iter + (fun (host, e) -> + error "Host '%s' ('%s') failed to diable failover decisions: %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors; + if errors <> [] then raise (snd (List.hd errors)); + + (* From this point no host will attempt to become the new master so no split-brain. + + This also means that we own the pool database and can safely set ha_enabled to false, + knowing that, although each slave has a backup database where ha_enabled is still true, + they won't be able to rollback our change because they cannot become the master. + + NB even if we fail to disarm fencing on individuals then the worst that will happen + is that they will fail and fence themselves. When they come back they will either + resynchronise HA state with us and disarm themselves, or if we've failed the situation + is equivalent to a master failure without HA. 
*) + Db.Pool.set_ha_enabled ~__context ~self:pool ~value:false; + info "Pool.ha_enabled <- false"; + + let errors = thread_iter_all_exns + (fun host -> + debug "Disarming fencing on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_disarm_fencing rpc session_id host + ) hosts in + List.iter + (fun (host, e) -> + error "Failed to disarm fencing on host '%s' ('%s'); this means the host may well be about to fence itself even though HA is officially disabled on the Pool (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors; + + let errors = thread_iter_all_exns + (fun host -> + debug "Stopping HA daemon on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_stop_daemon rpc session_id host + ) hosts in + List.iter + (fun (host, e) -> + error "Failed to stop daemon on host '%s' ('%s') even though HA is officially disabled on the Pool (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors + ) in + + try + + let do_one_attempt () = + (* Have a go at disabling HA. If we're sure we've done it then return true. If we suffer a partial + failure (which may leave some nodes with their failover actions disabled) then return false. *) + let exn_to_bool f = try f (); true with _ -> false in + (* Check if the statefile exists and try that first. If it succeeds then we're done. If it fails or wasn't attempted + then we need to try the without-statefile procedure: *) + (if i_have_statefile_access () + then exn_to_bool attempt_disable_through_statefile + else false) + || (exn_to_bool attempt_disable_without_statefile) in + + (* CA-16296: if we temporarily lose access to the statefile and attempt the non-statefile procedure + we will fail if some nodes cannot be contacted to have their failover decision flag set. 
If the + statefile comes back then the pool can become stable again but with some nodes crippled by the + failover decision flag. If this partial failure happens we keep trying forever to disable HA. *) + while not(do_one_attempt ()) do + error "Suffered a partial failure during HA disable procedure. Will try again in 30s"; + Thread.delay 30. + done; + + (* Assuming all is well then we can release resources on all hosts *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let errors = thread_iter_all_exns + (fun host -> + debug "Releasing resources on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_release_resources rpc session_id host + ) hosts in + List.iter + (fun (host, e) -> + error "Failed to release HA resources on host '%s' ('%s') (%s)" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors + ); + (* Update the allowed operations on the statefile VDIs for tidiness *) + List.iter (fun vdi -> Xapi_vdi.update_allowed_operations ~__context ~self:vdi) (metadata_vdis @ statefile_vdis); + + with exn -> + error "Caught exception while disabling HA: %s" (ExnHelper.string_of_exn exn); + error "Pool.ha_enabled = %b [but some hosts may be out of sync]" (Db.Pool.get_ha_enabled ~__context ~self:pool); + raise exn let disable __context = - let pool = Helpers.get_pool ~__context in - if not(Db.Pool.get_ha_enabled ~__context ~self:pool) - then raise (Api_errors.Server_error(Api_errors.ha_not_enabled, [])); - disable_internal __context + let pool = Helpers.get_pool ~__context in + if not(Db.Pool.get_ha_enabled ~__context ~self:pool) + then raise (Api_errors.Server_error(Api_errors.ha_not_enabled, [])); + disable_internal __context open Db_cache_types (* for the Manifest. Database. 
functions below *) let enable __context heartbeat_srs configuration = - debug "Enabling HA on the Pool."; - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool - then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); - - Pool_features.assert_enabled ~__context ~f:Features.HA; - - (* Check that all of our 'disallow_unplug' PIFs are currently attached *) - let unplugged_ununpluggable_pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "disallow_unplug", Literal "true"), - Eq (Field "currently_attached", Literal "false") - )) in - if List.length unplugged_ununpluggable_pifs > 0 then - raise (Api_errors.Server_error(Api_errors.required_pif_is_unplugged, - (List.map (fun pif -> Ref.string_of pif) unplugged_ununpluggable_pifs))); - - (* Check also that any PIFs with IP information set are currently attached - it's a non-fatal - error if they are, but we'll warn with a message *) - let pifs_with_ip_config = Db.PIF.get_records_where ~__context ~expr:( - Not (Eq (Field "ip_configuration_mode", Literal "None")) - ) in - let not_bond_slaves = List.filter (fun (_,pifr) -> not (Db.is_valid_ref __context pifr.API.pIF_bond_slave_of)) pifs_with_ip_config in - let without_disallow_unplug = List.filter (fun (_,pifr) -> not (pifr.API.pIF_disallow_unplug || pifr.API.pIF_management)) not_bond_slaves in - if List.length without_disallow_unplug > 0 then begin - let pifinfo = List.map (fun (pif,pifr) -> (Db.Host.get_name_label ~__context ~self:pifr.API.pIF_host, pif, pifr)) without_disallow_unplug in - let bodylines = - ["A possible network anomaly was found. The following hosts possibly have storage PIFs that are not dedicated:"] @ - (List.map (fun (hostname,pif,pifr) -> Printf.sprintf "%s: %s (uuid: %s)" hostname pifr.API.pIF_device pifr.API.pIF_uuid) pifinfo) - in - warn "Warning: A possible network anomaly was found. 
The following hosts possibly have storage PIFs that can be unplugged: %s" - (String.concat ", " bodylines); - let (name, priority) = Api_messages.ip_configured_pif_can_unplug in - ignore(Xapi_message.create ~__context ~name ~priority ~cls:`Pool ~obj_uuid:(Db.Pool.get_uuid ~__context ~self:(Helpers.get_pool ~__context)) - ~body:(String.concat "\n" bodylines)) - end; - - (* Fail if any host is offline. Otherwise we end up with an Xha_errno(bootjoin_timeout) *) - List.iter (fun host -> - let alive = - try - let hm = Db.Host.get_metrics ~__context ~self:host in - Db.Host_metrics.get_live ~__context ~self:hm - with _ -> false in - if not alive then raise (Api_errors.Server_error(Api_errors.host_offline, [ Ref.string_of host ])) - ) (Db.Host.get_all ~__context); - - let pool = Helpers.get_pool ~__context in - - let cluster_stack = Cluster_stack_constraints.choose_cluster_stack ~__context in - Db.Pool.set_ha_cluster_stack ~__context ~self:pool ~value:cluster_stack; - Localdb.put Constants.ha_cluster_stack cluster_stack; - - (* Steps from 8.7 Enabling HA in Marathon spec: - * 1. Bring up state file VDI(s) - * 2. Clear the flag "can not be master and no VM failover decision on next boot" - * 3. XAPI stops its internal heartbeats with other hosts in the pool - * 4. ha_set_pool_state(init) *) - - let statefile_vdis = ref [] in - let database_vdis = ref [] in - try - (* 1a. 
Create state file VDIs *) - let possible_srs = if heartbeat_srs = [] then Xha_statefile.list_srs_which_can_host_statefile ~__context ~cluster_stack else heartbeat_srs in - if List.length possible_srs = 0 - then raise (Api_errors.Server_error(Api_errors.cannot_create_state_file, [])); - - (* For the moment we'll create a state file in one compatible SR since the xHA component only handles one *) - let srs = [ List.hd possible_srs ] in - List.iter - (fun sr -> - let vdi = Xha_statefile.find_or_create ~__context ~sr ~cluster_stack in - statefile_vdis := vdi :: !statefile_vdis; - ) srs; - (* For storing the database, assume there is only one SR *) - let database_vdi = Xha_metadata_vdi.find_or_create ~__context ~sr:(List.hd srs) in - database_vdis := database_vdi :: !database_vdis; - - (* Record the statefile UUIDs in the Pool.ha_statefile set *) - Db.Pool.set_ha_statefiles ~__context ~self:pool ~value:(List.map Ref.string_of !statefile_vdis); - - (* Record the extra configuration in the Pool.ha_configuration map *) - Db.Pool.set_ha_configuration ~__context ~self:pool ~value:configuration; - - (* Update the Pool's planning configuration (ha_overcommitted, ha_plan_exists_for) *) - (* Start by assuming there is no ha_plan_for: this can be revised upwards later *) - Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:0L; - let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in - - let generation = Uuid.string_of_uuid (Uuid.make_uuid ()) in - - let hosts = Db.Host.get_all ~__context in - - (* This code always runs on the master *) - let statefiles = attach_statefiles ~__context (!statefile_vdis) in - write_config_file ~__context statefiles generation; - let (_: string) = call_script ha_set_pool_state [ "init" ] in - - (* It's unnecessary to remember the path since this can be queried dynamically *) - ignore(attach_metadata_vdi ~__context database_vdi); - - (* To make a progress bar we keep track of the total API calls *) - let task = 
Context.get_task_id __context in - let total_calls = List.length hosts * 3 in - let count_call = - let task_m = Mutex.create () in - let call_count = ref 0 in - fun () -> - Mutex.execute task_m - (fun () -> - incr call_count; - Db.Task.set_progress ~__context ~self:task ~value:(float_of_int !call_count /. (float_of_int total_calls)) - ) in - - Helpers.call_api_functions ~__context - (fun rpc session_id -> - (* ... *) - (* Tell each host to attach its statefile, write config files etc. Do not start the xHA daemon. *) - List.iter - (fun host -> - try - debug "Preconfiguring HA on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.preconfigure_ha rpc session_id host !statefile_vdis database_vdi generation; - count_call () - with e -> - error "Caught exception while calling Host.preconfigure_ha: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e); - (* Perform a disable since the pool HA state isn't consistent *) - error "Attempting to disable HA pool-wide"; - Helpers.log_exn_continue "Disabling HA after a failure during the configuration stage" disable_internal __context; - raise e - ) hosts; - - let errors = thread_iter_all_exns - (fun host -> - debug "host '%s' ('%s') will attempt to join the liveset" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.ha_join_liveset rpc session_id host; - count_call () - ) hosts in - List.iter - (fun (host, e) -> - error "Caught exception while calling Host.ha_join_liveset: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors; - if errors <> [] then begin - (* Perform a disable since the pool HA state isn't consistent *) - error "Attempting to disable HA pool-wide"; - Helpers.log_exn_continue "Disabling HA after a failure joining all hosts to the liveset" disable_internal __context; - raise (snd (List.hd errors)) - end; - - 
(* We have to set the HA enabled flag before forcing a database resynchronisation *) - Db.Pool.set_ha_enabled ~__context ~self:pool ~value:true; - debug "HA enabled"; - - (* Enable writing to the redo-log *) - redo_log_ha_enabled_during_runtime __context; - - (* ... *) - (* Make sure everyone's got a fresh database *) - let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Db_backend.make ())))) in - let errors = thread_iter_all_exns - (fun host -> - debug "Synchronising database with host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Client.Host.request_backup rpc session_id host generation true; - count_call () - ) hosts in - List.iter - (fun (host, e) -> - error "Caught exception while calling Host.request_backup: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) - ) errors; - if errors <> [] then begin - (* Perform a disable since the pool HA state isn't consistent *) - error "Attempting to disable HA pool-wide"; - Helpers.log_exn_continue "Disabling HA after a failure during enable" disable_internal __context; - raise (snd (List.hd errors)) - end; - - (* Update the allowed_operations on the HA volumes to prevent people thinking they can mess with them *) - List.iter (fun vdi -> Xapi_vdi.update_allowed_operations ~__context ~self:vdi) (!database_vdis @ !statefile_vdis); - ); (* end of api call *) - - with exn -> - debug "Caught exception while enabling HA: %s" (ExnHelper.string_of_exn exn); - (* We don't destroy the statefile VDIs, preferring to leave these around for the next - time enable is called. 
Hopefully any confused host which reads the statefile will - notice the invalid state and disable its HA *) - raise exn + debug "Enabling HA on the Pool."; + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool + then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + + Pool_features.assert_enabled ~__context ~f:Features.HA; + + (* Check that all of our 'disallow_unplug' PIFs are currently attached *) + let unplugged_ununpluggable_pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "disallow_unplug", Literal "true"), + Eq (Field "currently_attached", Literal "false") + )) in + if List.length unplugged_ununpluggable_pifs > 0 then + raise (Api_errors.Server_error(Api_errors.required_pif_is_unplugged, + (List.map (fun pif -> Ref.string_of pif) unplugged_ununpluggable_pifs))); + + (* Check also that any PIFs with IP information set are currently attached - it's a non-fatal + error if they are, but we'll warn with a message *) + let pifs_with_ip_config = Db.PIF.get_records_where ~__context ~expr:( + Not (Eq (Field "ip_configuration_mode", Literal "None")) + ) in + let not_bond_slaves = List.filter (fun (_,pifr) -> not (Db.is_valid_ref __context pifr.API.pIF_bond_slave_of)) pifs_with_ip_config in + let without_disallow_unplug = List.filter (fun (_,pifr) -> not (pifr.API.pIF_disallow_unplug || pifr.API.pIF_management)) not_bond_slaves in + if List.length without_disallow_unplug > 0 then begin + let pifinfo = List.map (fun (pif,pifr) -> (Db.Host.get_name_label ~__context ~self:pifr.API.pIF_host, pif, pifr)) without_disallow_unplug in + let bodylines = + ["A possible network anomaly was found. The following hosts possibly have storage PIFs that are not dedicated:"] @ + (List.map (fun (hostname,pif,pifr) -> Printf.sprintf "%s: %s (uuid: %s)" hostname pifr.API.pIF_device pifr.API.pIF_uuid) pifinfo) + in + warn "Warning: A possible network anomaly was found. 
The following hosts possibly have storage PIFs that can be unplugged: %s" + (String.concat ", " bodylines); + let (name, priority) = Api_messages.ip_configured_pif_can_unplug in + ignore(Xapi_message.create ~__context ~name ~priority ~cls:`Pool ~obj_uuid:(Db.Pool.get_uuid ~__context ~self:(Helpers.get_pool ~__context)) + ~body:(String.concat "\n" bodylines)) + end; + + (* Fail if any host is offline. Otherwise we end up with an Xha_errno(bootjoin_timeout) *) + List.iter (fun host -> + let alive = + try + let hm = Db.Host.get_metrics ~__context ~self:host in + Db.Host_metrics.get_live ~__context ~self:hm + with _ -> false in + if not alive then raise (Api_errors.Server_error(Api_errors.host_offline, [ Ref.string_of host ])) + ) (Db.Host.get_all ~__context); + + let pool = Helpers.get_pool ~__context in + + let cluster_stack = Cluster_stack_constraints.choose_cluster_stack ~__context in + Db.Pool.set_ha_cluster_stack ~__context ~self:pool ~value:cluster_stack; + Localdb.put Constants.ha_cluster_stack cluster_stack; + + (* Steps from 8.7 Enabling HA in Marathon spec: + * 1. Bring up state file VDI(s) + * 2. Clear the flag "can not be master and no VM failover decision on next boot" + * 3. XAPI stops its internal heartbeats with other hosts in the pool + * 4. ha_set_pool_state(init) *) + + let statefile_vdis = ref [] in + let database_vdis = ref [] in + try + (* 1a. 
Create state file VDIs *) + let possible_srs = if heartbeat_srs = [] then Xha_statefile.list_srs_which_can_host_statefile ~__context ~cluster_stack else heartbeat_srs in + if List.length possible_srs = 0 + then raise (Api_errors.Server_error(Api_errors.cannot_create_state_file, [])); + + (* For the moment we'll create a state file in one compatible SR since the xHA component only handles one *) + let srs = [ List.hd possible_srs ] in + List.iter + (fun sr -> + let vdi = Xha_statefile.find_or_create ~__context ~sr ~cluster_stack in + statefile_vdis := vdi :: !statefile_vdis; + ) srs; + (* For storing the database, assume there is only one SR *) + let database_vdi = Xha_metadata_vdi.find_or_create ~__context ~sr:(List.hd srs) in + database_vdis := database_vdi :: !database_vdis; + + (* Record the statefile UUIDs in the Pool.ha_statefile set *) + Db.Pool.set_ha_statefiles ~__context ~self:pool ~value:(List.map Ref.string_of !statefile_vdis); + + (* Record the extra configuration in the Pool.ha_configuration map *) + Db.Pool.set_ha_configuration ~__context ~self:pool ~value:configuration; + + (* Update the Pool's planning configuration (ha_overcommitted, ha_plan_exists_for) *) + (* Start by assuming there is no ha_plan_for: this can be revised upwards later *) + Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:0L; + let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in + + let generation = Uuid.string_of_uuid (Uuid.make_uuid ()) in + + let hosts = Db.Host.get_all ~__context in + + (* This code always runs on the master *) + let statefiles = attach_statefiles ~__context (!statefile_vdis) in + write_config_file ~__context statefiles generation; + let (_: string) = call_script ha_set_pool_state [ "init" ] in + + (* It's unnecessary to remember the path since this can be queried dynamically *) + ignore(attach_metadata_vdi ~__context database_vdi); + + (* To make a progress bar we keep track of the total API calls *) + let task = 
Context.get_task_id __context in + let total_calls = List.length hosts * 3 in + let count_call = + let task_m = Mutex.create () in + let call_count = ref 0 in + fun () -> + Mutex.execute task_m + (fun () -> + incr call_count; + Db.Task.set_progress ~__context ~self:task ~value:(float_of_int !call_count /. (float_of_int total_calls)) + ) in + + Helpers.call_api_functions ~__context + (fun rpc session_id -> + (* ... *) + (* Tell each host to attach its statefile, write config files etc. Do not start the xHA daemon. *) + List.iter + (fun host -> + try + debug "Preconfiguring HA on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.preconfigure_ha rpc session_id host !statefile_vdis database_vdi generation; + count_call () + with e -> + error "Caught exception while calling Host.preconfigure_ha: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e); + (* Perform a disable since the pool HA state isn't consistent *) + error "Attempting to disable HA pool-wide"; + Helpers.log_exn_continue "Disabling HA after a failure during the configuration stage" disable_internal __context; + raise e + ) hosts; + + let errors = thread_iter_all_exns + (fun host -> + debug "host '%s' ('%s') will attempt to join the liveset" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.ha_join_liveset rpc session_id host; + count_call () + ) hosts in + List.iter + (fun (host, e) -> + error "Caught exception while calling Host.ha_join_liveset: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors; + if errors <> [] then begin + (* Perform a disable since the pool HA state isn't consistent *) + error "Attempting to disable HA pool-wide"; + Helpers.log_exn_continue "Disabling HA after a failure joining all hosts to the liveset" disable_internal __context; + raise (snd (List.hd errors)) + end; + + 
(* We have to set the HA enabled flag before forcing a database resynchronisation *) + Db.Pool.set_ha_enabled ~__context ~self:pool ~value:true; + debug "HA enabled"; + + (* Enable writing to the redo-log *) + redo_log_ha_enabled_during_runtime __context; + + (* ... *) + (* Make sure everyone's got a fresh database *) + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Db_backend.make ())))) in + let errors = thread_iter_all_exns + (fun host -> + debug "Synchronising database with host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Client.Host.request_backup rpc session_id host generation true; + count_call () + ) hosts in + List.iter + (fun (host, e) -> + error "Caught exception while calling Host.request_backup: '%s' ('%s') %s" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host) (ExnHelper.string_of_exn e) + ) errors; + if errors <> [] then begin + (* Perform a disable since the pool HA state isn't consistent *) + error "Attempting to disable HA pool-wide"; + Helpers.log_exn_continue "Disabling HA after a failure during enable" disable_internal __context; + raise (snd (List.hd errors)) + end; + + (* Update the allowed_operations on the HA volumes to prevent people thinking they can mess with them *) + List.iter (fun vdi -> Xapi_vdi.update_allowed_operations ~__context ~self:vdi) (!database_vdis @ !statefile_vdis); + ); (* end of api call *) + + with exn -> + debug "Caught exception while enabling HA: %s" (ExnHelper.string_of_exn exn); + (* We don't destroy the statefile VDIs, preferring to leave these around for the next + time enable is called. 
Hopefully any confused host which reads the statefile will + notice the invalid state and disable its HA *) + raise exn (* Called before shutting down or rebooting a host *) let before_clean_shutdown_or_reboot ~__context ~host = - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then begin - - (* The XenServer HA interface spec recommends that we set this node to excluded - only after we disarm fencing and stop the HA daemon, since otherwise we'll self-fence - on the next watchdog timeout, which is too soon for a clean shutdown. - - One problem is that ha_set_excluded will fail if this node does not have statefile access, - which would leave us running with no fencing. The suggested solution is to first check - if we have statefile access and abort (to avoid killing the whole pool in the case - where everyone has lost the statefile). If we do have statefile access initially but - then we lose it and ha_set_excluded fails, manually fence ourselves. 
*) - - (* Safe early abort if we don't have statefile access *) - let liveset = query_liveset () in - let me = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts - liveset.Xha_interface.LiveSetInformation.local_host_id in - if false - || not(me.Xha_interface.LiveSetInformation.Host.state_file_access) - || me.Xha_interface.LiveSetInformation.Host.state_file_corrupted - then raise (Api_errors.Server_error(Api_errors.ha_lost_statefile, [])); - - (* From this point we will fence ourselves if any unexpected error occurs *) - begin try - begin - try ha_disarm_fencing __context host - with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> - info "Ignoring MTC_EXIT_DAEMON_IS_NOT_PRESENT error while disarming fencing" - end; - begin - try ha_stop_daemon __context host - with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> - info "Ignoring MTC_EXIT_DAEMON_IS_NOT_PRESENT error while stopping xHA daemon" - end; - - ha_set_excluded __context host; - info "This node has been marked as excluded. Proceeding to shutdown"; - with e -> - (* UNLIKELY to happen but we do our best to kill ourselves and do not return *) - error "Error past the commit-point while cleanly shutting down host: %s" (ExnHelper.string_of_exn e); - error "Host will self-fence via its own watchdog for safety"; - (* NB we don't use Xenctrl directly because in the SDK VM this is all fake... *) - ignore(Forkhelpers.execute_command_get_output !Xapi_globs.fence [ "yesreally" ]); - Thread.delay 60.; - error "Watchdog has not triggered after 60 seconds"; - (* Attempt to issue a reboot and kill the control stack *) - Xapi_fuse.light_fuse_and_reboot (); - info "Waiting for reboot"; - let start = Unix.gettimeofday () in - while true do - Thread.delay 300.; - info "Still waiting to reboot after %.2f seconds" (Unix.gettimeofday () -. 
start) - done - end; - List.iter Static_vdis.detach_only (Static_vdis.list()) - end + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then begin + + (* The XenServer HA interface spec recommends that we set this node to excluded + only after we disarm fencing and stop the HA daemon, since otherwise we'll self-fence + on the next watchdog timeout, which is too soon for a clean shutdown. + + One problem is that ha_set_excluded will fail if this node does not have statefile access, + which would leave us running with no fencing. The suggested solution is to first check + if we have statefile access and abort (to avoid killing the whole pool in the case + where everyone has lost the statefile). If we do have statefile access initially but + then we lose it and ha_set_excluded fails, manually fence ourselves. *) + + (* Safe early abort if we don't have statefile access *) + let liveset = query_liveset () in + let me = Hashtbl.find liveset.Xha_interface.LiveSetInformation.hosts + liveset.Xha_interface.LiveSetInformation.local_host_id in + if false + || not(me.Xha_interface.LiveSetInformation.Host.state_file_access) + || me.Xha_interface.LiveSetInformation.Host.state_file_corrupted + then raise (Api_errors.Server_error(Api_errors.ha_lost_statefile, [])); + + (* From this point we will fence ourselves if any unexpected error occurs *) + begin try + begin + try ha_disarm_fencing __context host + with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> + info "Ignoring MTC_EXIT_DAEMON_IS_NOT_PRESENT error while disarming fencing" + end; + begin + try ha_stop_daemon __context host + with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present -> + info "Ignoring MTC_EXIT_DAEMON_IS_NOT_PRESENT error while stopping xHA daemon" + end; + + ha_set_excluded __context host; + info "This node has been marked as excluded. 
Proceeding to shutdown"; + with e -> + (* UNLIKELY to happen but we do our best to kill ourselves and do not return *) + error "Error past the commit-point while cleanly shutting down host: %s" (ExnHelper.string_of_exn e); + error "Host will self-fence via its own watchdog for safety"; + (* NB we don't use Xenctrl directly because in the SDK VM this is all fake... *) + ignore(Forkhelpers.execute_command_get_output !Xapi_globs.fence [ "yesreally" ]); + Thread.delay 60.; + error "Watchdog has not triggered after 60 seconds"; + (* Attempt to issue a reboot and kill the control stack *) + Xapi_fuse.light_fuse_and_reboot (); + info "Waiting for reboot"; + let start = Unix.gettimeofday () in + while true do + Thread.delay 300.; + info "Still waiting to reboot after %.2f seconds" (Unix.gettimeofday () -. start) + done + end; + List.iter Static_vdis.detach_only (Static_vdis.list()) + end diff --git a/ocaml/xapi/xapi_ha_vm_failover.ml b/ocaml/xapi/xapi_ha_vm_failover.ml index 806ddff1bab..c9060a9413d 100644 --- a/ocaml/xapi/xapi_ha_vm_failover.ml +++ b/ocaml/xapi/xapi_ha_vm_failover.ml @@ -16,9 +16,9 @@ module D = Debug.Make(struct let name="xapi_ha_vm_failover" end) open D (* Return a list of (ref, record) pairs for all VMs which are marked as always_run *) -let all_protected_vms ~__context = - let vms = Db.VM.get_all_records ~__context in - List.filter (fun (_, vm_rec) -> Helpers.vm_should_always_run vm_rec.API.vM_ha_always_run vm_rec.API.vM_ha_restart_priority) vms +let all_protected_vms ~__context = + let vms = Db.VM.get_all_records ~__context in + List.filter (fun (_, vm_rec) -> Helpers.vm_should_always_run vm_rec.API.vM_ha_always_run vm_rec.API.vM_ha_restart_priority) vms (* Comparison function which can be used to sort a list of VM ref, record by order *) let by_order (vm_ref1,vm_rec1) (vm_ref2,vm_rec2) = @@ -33,42 +33,42 @@ let ($) x y = x y (* Planning code follows *) (* Compute the total memory required of a VM (Running or not) *) -let total_memory_of_vm 
~__context policy snapshot = +let total_memory_of_vm ~__context policy snapshot = let main, shadow = Memory_check.vm_compute_start_memory ~__context ~policy snapshot in - Int64.add main shadow + Int64.add main shadow (** Return a VM -> Host plan for the Host.evacuate code. We assume the VMs are all agile. The returned plan may be incomplete if there was not enough memory. *) -let compute_evacuation_plan ~__context total_hosts remaining_hosts vms_and_snapshots = - let hosts = List.map (fun host -> host, (Memory_check.host_compute_free_memory_with_maximum_compression ~__context ~host None)) remaining_hosts in +let compute_evacuation_plan ~__context total_hosts remaining_hosts vms_and_snapshots = + let hosts = List.map (fun host -> host, (Memory_check.host_compute_free_memory_with_maximum_compression ~__context ~host None)) remaining_hosts in let vms = List.map (fun (vm, snapshot) -> vm, total_memory_of_vm ~__context Memory_check.Dynamic_min snapshot) vms_and_snapshots in let config = { Binpack.hosts = hosts; vms = vms; placement = []; total_hosts = total_hosts; num_failures = 1 } in Binpack.check_configuration config; - debug "Planning configuration for offline agile VMs = %s" - (Binpack.string_of_configuration + debug "Planning configuration for offline agile VMs = %s" + (Binpack.string_of_configuration (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.Host.get_hostname ~__context ~self:x)) (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.VM.get_name_label ~__context ~self:x)) config); - debug "VMs to attempt to evacuate: [ %s ]" + debug "VMs to attempt to evacuate: [ %s ]" (String.concat "; " (List.map (fun (r, record) -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref r) record.API.vM_name_label) vms_and_snapshots)); let h = Binpack.choose_heuristic config in h.Binpack.get_specific_plan config (List.map fst vms_and_snapshots) -(** Passed to the planner to reason about other possible configurations, used to block 
operations which would +(** Passed to the planner to reason about other possible configurations, used to block operations which would destroy the HA VM restart plan. *) type configuration_change = { old_vms_leaving: (API.ref_host * (API.ref_VM * API.vM_t)) list; (** existing VMs which are leaving *) old_vms_arriving: (API.ref_host * (API.ref_VM * API.vM_t)) list; (** existing VMs which are arriving *) hosts_to_disable: API.ref_host list; (** hosts to pretend to disable *) num_failures: int option; (** new number of failures to consider *) - new_vms_to_protect: API.ref_VM list; (** new VMs to restart *) + new_vms_to_protect: API.ref_VM list; (** new VMs to restart *) } let no_configuration_change = { old_vms_leaving = []; old_vms_arriving = []; hosts_to_disable = []; num_failures = None; new_vms_to_protect = [] } -let string_of_configuration_change ~__context (x: configuration_change) = +let string_of_configuration_change ~__context (x: configuration_change) = let string_of_host h = Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref h) (Db.Host.get_name_label ~__context ~self:h) in Printf.sprintf "configuration_change = { old_vms_leaving = [ %s ]; new_vms_arriving = [ %s ]; hosts_to_disable = [ %s ]; num_failures = %s; new_vms = [ %s ] }" (String.concat "; " (List.map (fun (h, (vm_ref, vm_t)) -> Printf.sprintf "%s %s (%s)" (string_of_host h) (Helpers.short_string_of_ref vm_ref) vm_t.API.vM_name_label) x.old_vms_leaving)) @@ -81,188 +81,188 @@ let string_of_configuration_change ~__context (x: configuration_change) = otherwise a non-agile VM may 'move' between several hosts which it can actually run on, which is not what we need for the planner. 
*) let host_of_non_agile_vm ~__context all_hosts_and_snapshots_sorted (vm, snapshot) = - match (List.filter (fun (host, _) -> - try Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check:false (); true - with _ -> false) all_hosts_and_snapshots_sorted) with - | (host, host_snapshot) :: _ -> - (* Multiple hosts are possible because "not agile" means "not restartable on every host". It is - possible to unplug PBDs so that only a proper subset of hosts (not the singleton element) supports a VM. *) - debug "Non-agile VM %s (%s) considered pinned to Host %s (%s)" (Helpers.short_string_of_ref vm) snapshot.API.vM_name_label (Helpers.short_string_of_ref host) host_snapshot.API.host_hostname; - [ vm, host ] + match (List.filter (fun (host, _) -> + try Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check:false (); true + with _ -> false) all_hosts_and_snapshots_sorted) with + | (host, host_snapshot) :: _ -> + (* Multiple hosts are possible because "not agile" means "not restartable on every host". It is + possible to unplug PBDs so that only a proper subset of hosts (not the singleton element) supports a VM. 
*) + debug "Non-agile VM %s (%s) considered pinned to Host %s (%s)" (Helpers.short_string_of_ref vm) snapshot.API.vM_name_label (Helpers.short_string_of_ref host) host_snapshot.API.host_hostname; + [ vm, host ] | [] -> - warn "No host could support protected xHA VM: %s (%s)" (Helpers.short_string_of_ref vm) (snapshot.API.vM_name_label); - [] + warn "No host could support protected xHA VM: %s (%s)" (Helpers.short_string_of_ref vm) (snapshot.API.vM_name_label); + [] let get_live_set ~__context = - let all_hosts = Db.Host.get_all_records ~__context in - let live_hosts = List.filter (fun (rf,r) -> r.API.host_enabled - && (try Db.Host_metrics.get_live ~__context ~self:r.API.host_metrics with _ -> false)) - all_hosts in - List.map (fun (rf,_) -> rf) live_hosts + let all_hosts = Db.Host.get_all_records ~__context in + let live_hosts = List.filter (fun (rf,r) -> r.API.host_enabled + && (try Db.Host_metrics.get_live ~__context ~self:r.API.host_metrics with _ -> false)) + all_hosts in + List.map (fun (rf,_) -> rf) live_hosts (** Given the current number of host failures to consider (only useful for passing to the binpacker to influence its choice of heuristic), return an instantaneous VM restart plan which includes all protected offline VMs, and a planning configuration corresponding to the state of the world after the starts are complete, for use in further - planning. + planning. Returns: (VM restart plan, new planning configuration, true if some protected non-agile VMs exist) *) let compute_restart_plan ~__context ~all_protected_vms ~live_set ?(change=no_configuration_change) num_failures = - (* This function must be deterministic: for the same set of hosts and set of VMs it must produce the same output. - We rely partially on the binpacker enforcing its own ordering over hosts and vms, so it's not critical for us - to sort the result of Db.*.get_all calls generally. However the handling of non-agile VMs needs special care. 
*) - - (* We first must deal with protected but currently offline VMs: we need to simulate the start of these VMs before we can - ask any questions about future host failures, since we need to know on which hosts these VMs will end up. - Note this is only useful in the initial startup transient: assuming all protected VMs actually are restarted then - this code will do nothing. *) - - (* Note further that we simulate the start of offline protected VMs *using this function* (ie by the background HA - thread). If the user makes their own poor placement decisions via explicit VM.start/VM.start_on then the plan - may evaporate. This is no different to (eg) the user migrating a VM and breaking the plan. *) - - (* Note further that we consider the amount of host memory free using the current VM configurations (thanks to the - semantics of the Memory_check.host_compute_free_memory call) but *crucially* consider that VMs requiring a restart - will use their new memory_static_max: so we always use a live 'VM.get_record' and not a 'last_booted_record' *) - - (* Allow the num_failures to be overriden *) - let (num_failures: int) = Stdext.Opt.default num_failures change.num_failures in - - (* All the VMs to protect; these VMs may or may not be currently running anywhere: they will be offline when a host has - failed and possibly initially during the enable-ha transient. 
*) - let vms_to_ensure_running = all_protected_vms in - - (* Add in any extra VMs which aren't already protected *) - let extra_vms = List.map (fun vm -> vm, Db.VM.get_record ~__context ~self:vm) change.new_vms_to_protect in - let vms_to_ensure_running = vms_to_ensure_running @ extra_vms in - - (* For each leaving VM unset the resident_on (so 'is_accounted_for' returns false) *) - (* For each arriving VM set the resident_on again (so 'is_accounted_for' returns true) *) - (* For each arriving VM make sure we use the new VM configuration (eg new memory size) *) - (* NB host memory is adjusted later *) - let vms_to_ensure_running = List.map (fun (vm_ref, vm_t) -> - let leaving = List.filter (fun (_, (vm, _)) -> vm_ref = vm) change.old_vms_leaving in - let leaving_host = List.map (fun (host, (vm, _)) -> vm, host) leaving in - (* let leaving_snapshots = List.map snd leaving in *) - let arriving = List.filter (fun (_, (vm, _)) -> vm_ref = vm) change.old_vms_arriving in - let arriving_host = List.map (fun (host, (vm, _)) -> vm, host) arriving in - let arriving_snapshots = List.map snd arriving in - match List.mem_assoc vm_ref leaving_host, List.mem_assoc vm_ref arriving_host with - | _, true -> vm_ref, { (List.assoc vm_ref arriving_snapshots) with API.vM_resident_on = List.assoc vm_ref arriving_host } - | true, false -> vm_ref, { vm_t with API.vM_resident_on = Ref.null } - | _, _ -> vm_ref, vm_t) - vms_to_ensure_running in - - let all_hosts_and_snapshots = Db.Host.get_all_records ~__context in - let total_hosts = List.length all_hosts_and_snapshots in - (* Any deterministic ordering is fine here: *) - let all_hosts_and_snapshots = List.sort (fun (_, a) (_, b) -> compare a.API.host_uuid b.API.host_uuid) all_hosts_and_snapshots in - - let is_alive (rf, r) = - (* We exclude: (i) online disabled hosts; (ii) online proposed disabled hosts; and (iii) offline hosts *) - true - && r.API.host_enabled - && not (List.mem rf change.hosts_to_disable) - && (try Db.Host_metrics.get_live 
~__context ~self:r.API.host_metrics with _ -> false) - && (List.mem rf live_set) in - let live_hosts_and_snapshots, dead_hosts_and_snapshots = List.partition is_alive all_hosts_and_snapshots in - - let live_hosts = List.map fst live_hosts_and_snapshots (* and dead_hosts = List.map fst dead_hosts_and_snapshots *) in - - (* Any deterministic ordering is fine here: *) - let vms_to_ensure_running = List.sort (fun (_, a) (_, b) -> compare a.API.vM_uuid b.API.vM_uuid) vms_to_ensure_running in - - let agile_vms, not_agile_vms = Agility.partition_vm_ps_by_agile ~__context vms_to_ensure_running in - - (* If a VM is marked as resident on a live_host then it will already be accounted for in the host's current free memory. *) - let vm_accounted_to_host vm = - let vm_t = List.assoc vm vms_to_ensure_running in - if List.mem vm_t.API.vM_resident_on live_hosts - then Some vm_t.API.vM_resident_on - else - let scheduled = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in - if List.mem scheduled live_hosts - then Some scheduled else None in - - let string_of_vm vm = Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref vm) (List.assoc vm vms_to_ensure_running).API.vM_name_label in - let string_of_host host = - let name = (List.assoc host all_hosts_and_snapshots).API.host_name_label in - Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref host) name in - let string_of_plan p = String.concat "; " (List.map (fun (vm, host) -> Printf.sprintf "%s -> %s" (string_of_vm vm) (string_of_host host)) p) in - - debug "Protected VMs: [ %s ]" (String.concat "; " (List.map (fun (vm, _) -> string_of_vm vm) vms_to_ensure_running)); - - (* Current free memory on all hosts (does not include any for *offline* protected VMs ie those for which (vm_accounted_to_host vm) - returns None) Also apply the supplied counterfactual-reasoning changes (if any) *) - let hosts_and_memory = List.map (fun host -> - (* Ultra-conservative assumption: plan using VM static_max values for normal domains, - and 
dynamic_max for control domains. *) - let summary = Memory_check.get_host_memory_summary ~__context ~host in - let currently_free = Memory_check.host_compute_free_memory_with_policy~__context summary Memory_check.Static_max in - let sum = List.fold_left Int64.add 0L in - let arriving = List.filter (fun (h, _) -> h = host) change.old_vms_arriving in - let arriving_memory = sum (List.map (fun (_, (vm_ref, snapshot)) -> - total_memory_of_vm ~__context (if not $ Db.VM.get_is_control_domain ~__context ~self:vm_ref - then Memory_check.Static_max - else Memory_check.Dynamic_max) snapshot) arriving) in - let leaving = List.filter (fun (h, _) -> h = host) change.old_vms_leaving in - let leaving_memory = sum (List.map (fun (_, (vm_ref, snapshot)) -> total_memory_of_vm ~__context - (if not $ Db.VM.get_is_control_domain ~__context ~self:vm_ref - then Memory_check.Static_max - else Memory_check.Dynamic_max) snapshot) leaving) in - host, Int64.sub (Int64.add currently_free leaving_memory) arriving_memory) live_hosts in - - (* Memory required by all protected VMs *) - let vms_and_memory = List.map (fun (vm, snapshot) -> vm, total_memory_of_vm ~__context Memory_check.Static_max snapshot) vms_to_ensure_running in - - (* For each non-agile VM, consider it pinned it to one host (even if it /could/ run on several). Note that if it is - actually running somewhere else (very strange semi-agile situation) then it will be counted as overhead there and - plans will be made for it running on the host we choose. *) - let pinned = List.concat (List.map (host_of_non_agile_vm ~__context all_hosts_and_snapshots) not_agile_vms) in - - (* The restart plan for offline non-agile VMs is just the map VM -> pinned Host *) - let non_agile_restart_plan = List.filter (fun (vm, _) -> vm_accounted_to_host vm = None) pinned in - debug "Restart plan for non-agile offline VMs: [ %s ]" (string_of_plan non_agile_restart_plan); - - (* Update the host free memory to take this plan into account. 
Note we don't update the VM placement because that only - considers agile VMs. Non-agile VMs are treated as per-host overhead. *) - let hosts_and_memory = Binpack.account hosts_and_memory vms_and_memory non_agile_restart_plan in - - (* Now that we've considered the overhead of the non-agile (pinned) VMs, we can perform some binpacking of the agile VMs. *) - - let agile_vms_and_memory = List.map (fun (vm, _) -> vm, List.assoc vm vms_and_memory) agile_vms in - (* Compute the current placement for all agile VMs. VMs which are powered off currently are placed nowhere *) - let agile_vm_accounted_to_host = List.map (fun (vm, snapshot) -> vm, vm_accounted_to_host vm) agile_vms in - (* All these hosts are live and the VMs are running (or scheduled to be running): *) - let agile_vm_placement = List.concat (List.map (fun (vm, host) -> match host with Some h -> [ vm, h ] | _ -> []) agile_vm_accounted_to_host) in - (* These VMs are not running on any host (either in real life or only hypothetically) *) - let agile_vm_failed = List.concat (List.map (fun (vm, host) -> if host = None then [ vm ] else []) agile_vm_accounted_to_host) in - - let config = { Binpack.hosts = hosts_and_memory; vms = agile_vms_and_memory; placement = agile_vm_placement - ; total_hosts = total_hosts; num_failures = num_failures } in - Binpack.check_configuration config; - debug "Planning configuration for offline agile VMs = %s" (Binpack.string_of_configuration string_of_host string_of_vm config); - let h = Binpack.choose_heuristic config in - - (* Figure out how we could start as many of the agile VMs as possible *) - debug "Computing a specific plan for the failure of VMs: [ %s ]" (String.concat "; " (List.map string_of_vm agile_vm_failed)); - let agile_restart_plan = h.Binpack.get_specific_plan config agile_vm_failed in - debug "Restart plan for agile offline VMs: [ %s ]" (string_of_plan agile_restart_plan); - - let vms_restarted = List.map fst agile_restart_plan in - (* List the protected VMs which 
are not already running and weren't in the restart plan *) - let vms_not_restarted = List.map fst (List.filter (fun (vm, _) -> vm_accounted_to_host vm = None && not(List.mem vm vms_restarted)) vms_to_ensure_running) in - if vms_not_restarted <> [] - then warn "Some protected VMs could not be restarted: [ %s ]" (String.concat "; " (List.map string_of_vm vms_not_restarted)); - - (* Applying the plan means: - 1. subtract from each host the memory needed to start the VMs in the plan; and - 2. modifying the VM placement map to reflect the plan. *) - let config = Binpack.apply_plan config agile_restart_plan in - (* All agile VMs which were offline have all been 'restarted' provided vms_not_restarted <> [] - If vms_not_restarted = [] then some VMs will have been left out. *) - Binpack.check_configuration config; - debug "Planning configuration for future failures = %s" (Binpack.string_of_configuration string_of_host string_of_vm config); - non_agile_restart_plan @ agile_restart_plan, config, vms_not_restarted, not_agile_vms <> [] + (* This function must be deterministic: for the same set of hosts and set of VMs it must produce the same output. + We rely partially on the binpacker enforcing its own ordering over hosts and vms, so it's not critical for us + to sort the result of Db.*.get_all calls generally. However the handling of non-agile VMs needs special care. *) + + (* We first must deal with protected but currently offline VMs: we need to simulate the start of these VMs before we can + ask any questions about future host failures, since we need to know on which hosts these VMs will end up. + Note this is only useful in the initial startup transient: assuming all protected VMs actually are restarted then + this code will do nothing. *) + + (* Note further that we simulate the start of offline protected VMs *using this function* (ie by the background HA + thread). 
If the user makes their own poor placement decisions via explicit VM.start/VM.start_on then the plan + may evaporate. This is no different to (eg) the user migrating a VM and breaking the plan. *) + + (* Note further that we consider the amount of host memory free using the current VM configurations (thanks to the + semantics of the Memory_check.host_compute_free_memory call) but *crucially* consider that VMs requiring a restart + will use their new memory_static_max: so we always use a live 'VM.get_record' and not a 'last_booted_record' *) + + (* Allow the num_failures to be overriden *) + let (num_failures: int) = Stdext.Opt.default num_failures change.num_failures in + + (* All the VMs to protect; these VMs may or may not be currently running anywhere: they will be offline when a host has + failed and possibly initially during the enable-ha transient. *) + let vms_to_ensure_running = all_protected_vms in + + (* Add in any extra VMs which aren't already protected *) + let extra_vms = List.map (fun vm -> vm, Db.VM.get_record ~__context ~self:vm) change.new_vms_to_protect in + let vms_to_ensure_running = vms_to_ensure_running @ extra_vms in + + (* For each leaving VM unset the resident_on (so 'is_accounted_for' returns false) *) + (* For each arriving VM set the resident_on again (so 'is_accounted_for' returns true) *) + (* For each arriving VM make sure we use the new VM configuration (eg new memory size) *) + (* NB host memory is adjusted later *) + let vms_to_ensure_running = List.map (fun (vm_ref, vm_t) -> + let leaving = List.filter (fun (_, (vm, _)) -> vm_ref = vm) change.old_vms_leaving in + let leaving_host = List.map (fun (host, (vm, _)) -> vm, host) leaving in + (* let leaving_snapshots = List.map snd leaving in *) + let arriving = List.filter (fun (_, (vm, _)) -> vm_ref = vm) change.old_vms_arriving in + let arriving_host = List.map (fun (host, (vm, _)) -> vm, host) arriving in + let arriving_snapshots = List.map snd arriving in + match List.mem_assoc 
vm_ref leaving_host, List.mem_assoc vm_ref arriving_host with + | _, true -> vm_ref, { (List.assoc vm_ref arriving_snapshots) with API.vM_resident_on = List.assoc vm_ref arriving_host } + | true, false -> vm_ref, { vm_t with API.vM_resident_on = Ref.null } + | _, _ -> vm_ref, vm_t) + vms_to_ensure_running in + + let all_hosts_and_snapshots = Db.Host.get_all_records ~__context in + let total_hosts = List.length all_hosts_and_snapshots in + (* Any deterministic ordering is fine here: *) + let all_hosts_and_snapshots = List.sort (fun (_, a) (_, b) -> compare a.API.host_uuid b.API.host_uuid) all_hosts_and_snapshots in + + let is_alive (rf, r) = + (* We exclude: (i) online disabled hosts; (ii) online proposed disabled hosts; and (iii) offline hosts *) + true + && r.API.host_enabled + && not (List.mem rf change.hosts_to_disable) + && (try Db.Host_metrics.get_live ~__context ~self:r.API.host_metrics with _ -> false) + && (List.mem rf live_set) in + let live_hosts_and_snapshots, dead_hosts_and_snapshots = List.partition is_alive all_hosts_and_snapshots in + + let live_hosts = List.map fst live_hosts_and_snapshots (* and dead_hosts = List.map fst dead_hosts_and_snapshots *) in + + (* Any deterministic ordering is fine here: *) + let vms_to_ensure_running = List.sort (fun (_, a) (_, b) -> compare a.API.vM_uuid b.API.vM_uuid) vms_to_ensure_running in + + let agile_vms, not_agile_vms = Agility.partition_vm_ps_by_agile ~__context vms_to_ensure_running in + + (* If a VM is marked as resident on a live_host then it will already be accounted for in the host's current free memory. 
*) + let vm_accounted_to_host vm = + let vm_t = List.assoc vm vms_to_ensure_running in + if List.mem vm_t.API.vM_resident_on live_hosts + then Some vm_t.API.vM_resident_on + else + let scheduled = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in + if List.mem scheduled live_hosts + then Some scheduled else None in + + let string_of_vm vm = Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref vm) (List.assoc vm vms_to_ensure_running).API.vM_name_label in + let string_of_host host = + let name = (List.assoc host all_hosts_and_snapshots).API.host_name_label in + Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref host) name in + let string_of_plan p = String.concat "; " (List.map (fun (vm, host) -> Printf.sprintf "%s -> %s" (string_of_vm vm) (string_of_host host)) p) in + + debug "Protected VMs: [ %s ]" (String.concat "; " (List.map (fun (vm, _) -> string_of_vm vm) vms_to_ensure_running)); + + (* Current free memory on all hosts (does not include any for *offline* protected VMs ie those for which (vm_accounted_to_host vm) + returns None) Also apply the supplied counterfactual-reasoning changes (if any) *) + let hosts_and_memory = List.map (fun host -> + (* Ultra-conservative assumption: plan using VM static_max values for normal domains, + and dynamic_max for control domains. 
*) + let summary = Memory_check.get_host_memory_summary ~__context ~host in + let currently_free = Memory_check.host_compute_free_memory_with_policy~__context summary Memory_check.Static_max in + let sum = List.fold_left Int64.add 0L in + let arriving = List.filter (fun (h, _) -> h = host) change.old_vms_arriving in + let arriving_memory = sum (List.map (fun (_, (vm_ref, snapshot)) -> + total_memory_of_vm ~__context (if not $ Db.VM.get_is_control_domain ~__context ~self:vm_ref + then Memory_check.Static_max + else Memory_check.Dynamic_max) snapshot) arriving) in + let leaving = List.filter (fun (h, _) -> h = host) change.old_vms_leaving in + let leaving_memory = sum (List.map (fun (_, (vm_ref, snapshot)) -> total_memory_of_vm ~__context + (if not $ Db.VM.get_is_control_domain ~__context ~self:vm_ref + then Memory_check.Static_max + else Memory_check.Dynamic_max) snapshot) leaving) in + host, Int64.sub (Int64.add currently_free leaving_memory) arriving_memory) live_hosts in + + (* Memory required by all protected VMs *) + let vms_and_memory = List.map (fun (vm, snapshot) -> vm, total_memory_of_vm ~__context Memory_check.Static_max snapshot) vms_to_ensure_running in + + (* For each non-agile VM, consider it pinned it to one host (even if it /could/ run on several). Note that if it is + actually running somewhere else (very strange semi-agile situation) then it will be counted as overhead there and + plans will be made for it running on the host we choose. *) + let pinned = List.concat (List.map (host_of_non_agile_vm ~__context all_hosts_and_snapshots) not_agile_vms) in + + (* The restart plan for offline non-agile VMs is just the map VM -> pinned Host *) + let non_agile_restart_plan = List.filter (fun (vm, _) -> vm_accounted_to_host vm = None) pinned in + debug "Restart plan for non-agile offline VMs: [ %s ]" (string_of_plan non_agile_restart_plan); + + (* Update the host free memory to take this plan into account. 
Note we don't update the VM placement because that only + considers agile VMs. Non-agile VMs are treated as per-host overhead. *) + let hosts_and_memory = Binpack.account hosts_and_memory vms_and_memory non_agile_restart_plan in + + (* Now that we've considered the overhead of the non-agile (pinned) VMs, we can perform some binpacking of the agile VMs. *) + + let agile_vms_and_memory = List.map (fun (vm, _) -> vm, List.assoc vm vms_and_memory) agile_vms in + (* Compute the current placement for all agile VMs. VMs which are powered off currently are placed nowhere *) + let agile_vm_accounted_to_host = List.map (fun (vm, snapshot) -> vm, vm_accounted_to_host vm) agile_vms in + (* All these hosts are live and the VMs are running (or scheduled to be running): *) + let agile_vm_placement = List.concat (List.map (fun (vm, host) -> match host with Some h -> [ vm, h ] | _ -> []) agile_vm_accounted_to_host) in + (* These VMs are not running on any host (either in real life or only hypothetically) *) + let agile_vm_failed = List.concat (List.map (fun (vm, host) -> if host = None then [ vm ] else []) agile_vm_accounted_to_host) in + + let config = { Binpack.hosts = hosts_and_memory; vms = agile_vms_and_memory; placement = agile_vm_placement + ; total_hosts = total_hosts; num_failures = num_failures } in + Binpack.check_configuration config; + debug "Planning configuration for offline agile VMs = %s" (Binpack.string_of_configuration string_of_host string_of_vm config); + let h = Binpack.choose_heuristic config in + + (* Figure out how we could start as many of the agile VMs as possible *) + debug "Computing a specific plan for the failure of VMs: [ %s ]" (String.concat "; " (List.map string_of_vm agile_vm_failed)); + let agile_restart_plan = h.Binpack.get_specific_plan config agile_vm_failed in + debug "Restart plan for agile offline VMs: [ %s ]" (string_of_plan agile_restart_plan); + + let vms_restarted = List.map fst agile_restart_plan in + (* List the protected VMs which 
are not already running and weren't in the restart plan *) + let vms_not_restarted = List.map fst (List.filter (fun (vm, _) -> vm_accounted_to_host vm = None && not(List.mem vm vms_restarted)) vms_to_ensure_running) in + if vms_not_restarted <> [] + then warn "Some protected VMs could not be restarted: [ %s ]" (String.concat "; " (List.map string_of_vm vms_not_restarted)); + + (* Applying the plan means: + 1. subtract from each host the memory needed to start the VMs in the plan; and + 2. modifying the VM placement map to reflect the plan. *) + let config = Binpack.apply_plan config agile_restart_plan in + (* All agile VMs which were offline have all been 'restarted' provided vms_not_restarted <> [] + If vms_not_restarted = [] then some VMs will have been left out. *) + Binpack.check_configuration config; + debug "Planning configuration for future failures = %s" (Binpack.string_of_configuration string_of_host string_of_vm config); + non_agile_restart_plan @ agile_restart_plan, config, vms_not_restarted, not_agile_vms <> [] (** Returned by the plan_for_n_failures function *) type result = @@ -274,14 +274,14 @@ type result = and if so, what type. Note some protected VMs may currently be offline and we should first attempt to place those before considering future failures. This function also supports a limited counterfactual reasoning mode where: - 1. hosts can be given more/less free memory than they currently have in order to check that a proposed + 1. hosts can be given more/less free memory than they currently have in order to check that a proposed VM operation would not break the failover plan. 2. hosts can be omitted from the plan in order to check that a host can be disabled/shutdown without breaking the plan. 
*) let plan_for_n_failures ~__context ~all_protected_vms ?live_set ?(change = no_configuration_change) n = - let live_set = match live_set with None -> get_live_set ~__context | Some s -> s in + let live_set = match live_set with None -> get_live_set ~__context | Some s -> s in try (* 'changes' are applied by the compute_restart_plan function *) let plan, config, vms_not_restarted, non_agile_protected_vms_exist = compute_restart_plan ~__context ~all_protected_vms ~live_set ~change n in @@ -291,10 +291,10 @@ let plan_for_n_failures ~__context ~all_protected_vms ?live_set ?(change = no_co error "Even with no Host failures this Pool cannot start the configured protected VMs."; No_plan_exists end else begin - debug "plan_for_n_failures config = %s" - (Binpack.string_of_configuration - (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.Host.get_hostname ~__context ~self:x)) - (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.VM.get_name_label ~__context ~self:x)) config); + debug "plan_for_n_failures config = %s" + (Binpack.string_of_configuration + (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.Host.get_hostname ~__context ~self:x)) + (fun x -> Printf.sprintf "%s (%s)" (Helpers.short_string_of_ref x) (Db.VM.get_name_label ~__context ~self:x)) config); Binpack.check_configuration config; let h = Binpack.choose_heuristic config in match h.Binpack.plan_always_possible config, non_agile_protected_vms_exist with @@ -304,20 +304,20 @@ let plan_for_n_failures ~__context ~all_protected_vms ?live_set ?(change = no_co end with | e -> - error "Unexpected error in HA VM failover planning function: %s" (ExnHelper.string_of_exn e); - No_plan_exists + error "Unexpected error in HA VM failover planning function: %s" (ExnHelper.string_of_exn e); + No_plan_exists let compute_max_host_failures_to_tolerate ~__context ?live_set ?protected_vms () = let protected_vms = match protected_vms with - | None -> all_protected_vms 
~__context + | None -> all_protected_vms ~__context | Some vms -> vms in let nhosts = List.length (Db.Host.get_all ~__context) in (* We assume that if not(plan_exists(n)) then \forall.x>n not(plan_exists(n)) - although even if we screw this up it's not a disaster because all we need is a + although even if we screw this up it's not a disaster because all we need is a safe approximation (so ultimately "0" will do but we'd prefer higher) *) Helpers.bisect (fun n -> plan_for_n_failures ~__context ~all_protected_vms:protected_vms ?live_set (Int64.to_int n) = Plan_exists_for_all_VMs) 0L (Int64.of_int nhosts) -(* Make sure the pool is marked as overcommitted and the appropriate alert is generated. Return +(* Make sure the pool is marked as overcommitted and the appropriate alert is generated. Return true if something changed, false otherwise *) let mark_pool_as_overcommitted ~__context ~live_set = let pool = Helpers.get_pool ~__context in @@ -332,7 +332,7 @@ let mark_pool_as_overcommitted ~__context ~live_set = if max_failures < planned_for then Xapi_alert.add ~msg:Api_messages.ha_pool_drop_in_plan_exists_for ~cls:`Pool ~obj_uuid:(Db.Pool.get_uuid ~__context ~self:pool) ~body:(Int64.to_string max_failures); end; - + if not overcommitted then begin Db.Pool.set_ha_overcommitted ~__context ~self:pool ~value:true; @@ -343,8 +343,8 @@ let mark_pool_as_overcommitted ~__context ~live_set = danger of blocking for db.* calls to return *) let (name, priority) = Api_messages.ha_pool_overcommitted in let (_: 'a Ref.t) = Xapi_message.create ~__context ~name ~priority ~cls:`Pool ~obj_uuid - ~body:(Printf.sprintf "The failover tolerance for pool '%s' has dropped and the initially specified number of host failures to tolerate can no longer be guaranteed" - pool_name_label) in + ~body:(Printf.sprintf "The failover tolerance for pool '%s' has dropped and the initially specified number of host failures to tolerate can no longer be guaranteed" + pool_name_label) in (); (* Call a hook to 
allow someone the opportunity to bring more capacity online *) Xapi_hooks.pool_ha_overcommitted_hook ~__context @@ -354,9 +354,9 @@ let mark_pool_as_overcommitted ~__context ~live_set = (* Update the pool's HA fields *) let update_pool_status ~__context ?live_set () = - let live_set = match live_set with - | None -> get_live_set ~__context - | Some s -> s in + let live_set = match live_set with + | None -> get_live_set ~__context + | Some s -> s in let pool = Helpers.get_pool ~__context in let overcommitted = Db.Pool.get_ha_overcommitted ~__context ~self:pool in let to_tolerate = Db.Pool.get_ha_host_failures_to_tolerate ~__context ~self:pool in @@ -370,57 +370,57 @@ let update_pool_status ~__context ?live_set () = *) match plan_for_n_failures ~__context ~all_protected_vms ~live_set (Int64.to_int to_tolerate) with | Plan_exists_for_all_VMs -> - debug "HA failover plan exists for all protected VMs"; - Db.Pool.set_ha_overcommitted ~__context ~self:pool ~value:false; - debug "to_tolerate = %Ld planned_for = %Ld" to_tolerate planned_for; - if planned_for <> to_tolerate then Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:to_tolerate; - (* return true if something changed *) - overcommitted || (planned_for <> to_tolerate) + debug "HA failover plan exists for all protected VMs"; + Db.Pool.set_ha_overcommitted ~__context ~self:pool ~value:false; + debug "to_tolerate = %Ld planned_for = %Ld" to_tolerate planned_for; + if planned_for <> to_tolerate then Db.Pool.set_ha_plan_exists_for ~__context ~self:pool ~value:to_tolerate; + (* return true if something changed *) + overcommitted || (planned_for <> to_tolerate) | Plan_exists_excluding_non_agile_VMs -> - debug "HA failover plan exists for all protected VMs, excluding some non-agile VMs"; - mark_pool_as_overcommitted ~__context ~live_set; (* might define this as false later *) + debug "HA failover plan exists for all protected VMs, excluding some non-agile VMs"; + mark_pool_as_overcommitted ~__context ~live_set; 
(* might define this as false later *) | No_plan_exists -> - debug "No HA failover plan exists"; - mark_pool_as_overcommitted ~__context ~live_set + debug "No HA failover plan exists"; + mark_pool_as_overcommitted ~__context ~live_set -let assert_configuration_change_preserves_ha_plan ~__context c = +let assert_configuration_change_preserves_ha_plan ~__context c = debug "assert_configuration_change_preserves_ha_plan c = %s" (string_of_configuration_change ~__context c); (* Only block the operation if a plan exists now but would evaporate with the proposed changes. This prevents us blocking all operations should be suddenly become overcommitted eg through multiple host failures *) - let live_set = get_live_set ~__context in + let live_set = get_live_set ~__context in let pool = Helpers.get_pool ~__context in if Db.Pool.get_ha_enabled ~__context ~self:pool && not(Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) then begin let to_tolerate = Int64.to_int (Db.Pool.get_ha_plan_exists_for ~__context ~self:pool) in let all_protected_vms = all_protected_vms ~__context in match plan_for_n_failures ~__context ~all_protected_vms ~live_set to_tolerate with - | Plan_exists_excluding_non_agile_VMs - | No_plan_exists -> - debug "assert_configuration_change_preserves_ha_plan: no plan currently exists; cannot get worse" + | Plan_exists_excluding_non_agile_VMs + | No_plan_exists -> + debug "assert_configuration_change_preserves_ha_plan: no plan currently exists; cannot get worse" | Plan_exists_for_all_VMs -> begin - (* Does the plan break? 
*) - match plan_for_n_failures ~__context ~all_protected_vms ~live_set ~change:c to_tolerate with - | Plan_exists_for_all_VMs -> - debug "assert_configuration_change_preserves_ha_plan: plan exists after change" - | Plan_exists_excluding_non_agile_VMs - | No_plan_exists -> - debug "assert_configuration_change_preserves_ha_plan: proposed change breaks plan"; - raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) + (* Does the plan break? *) + match plan_for_n_failures ~__context ~all_protected_vms ~live_set ~change:c to_tolerate with + | Plan_exists_for_all_VMs -> + debug "assert_configuration_change_preserves_ha_plan: plan exists after change" + | Plan_exists_excluding_non_agile_VMs + | No_plan_exists -> + debug "assert_configuration_change_preserves_ha_plan: proposed change breaks plan"; + raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) end end -let assert_host_disable_preserves_ha_plan ~__context host = +let assert_host_disable_preserves_ha_plan ~__context host = assert_configuration_change_preserves_ha_plan ~__context { no_configuration_change with hosts_to_disable = [ host ] } let assert_vm_placement_preserves_ha_plan ~__context ?(leaving=[]) ?(arriving=[]) () = assert_configuration_change_preserves_ha_plan ~__context { no_configuration_change with old_vms_leaving = leaving; old_vms_arriving = arriving } -let assert_nfailures_change_preserves_ha_plan ~__context n = +let assert_nfailures_change_preserves_ha_plan ~__context n = assert_configuration_change_preserves_ha_plan ~__context { no_configuration_change with num_failures = Some n } -let assert_new_vm_preserves_ha_plan ~__context new_vm = +let assert_new_vm_preserves_ha_plan ~__context new_vm = assert_configuration_change_preserves_ha_plan ~__context { no_configuration_change with new_vms_to_protect = [ new_vm ] } (* If a VM fails to start then we remember this fact to avoid sending duplicate alerts. 
*) @@ -429,213 +429,213 @@ let restart_failed : (API.ref_VM, unit) Hashtbl.t = Hashtbl.create 10 (* We also limit the rate we attempt to retry starting the VM. *) let last_start_attempt : (API.ref_VM, float) Hashtbl.t = Hashtbl.create 10 -(* Takes the current live_set and number of hosts we're planning to handle, updates the host records in the database +(* Takes the current live_set and number of hosts we're planning to handle, updates the host records in the database and restarts any offline protected VMs *) let restart_auto_run_vms ~__context live_set n = - (* ensure we have live=false on the host_metrics for those hosts not in the live_set; and force state to Halted for - all VMs that are "running" or "paused" with resident_on set to one of the hosts that is now dead - *) - debug "restart_auto_run_vms called"; - let hosts = Db.Host.get_all ~__context in - (* Keep a list of all the VMs whose power-states we force to Halted to use later in the - 'best-effort' restart code. Note that due to the weakly consistent database this is not - an accurate way to determine 'failed' VMs but it will suffice for our 'best-effort' - category. 
*) - let reset_vms = ref [] in - let dead_hosts = ref [] in - List.iter (fun h -> - if not (List.mem h live_set) then begin - let hostname = Db.Host.get_hostname ~__context ~self:h in - debug "Setting host %s to dead" hostname; - (* Sample this before calling any hook scripts *) - let resident_on_vms = List.filter - (fun vm -> not (Db.VM.get_is_control_domain ~__context ~self:vm)) - (Db.Host.get_resident_VMs ~__context ~self:h) in - reset_vms := resident_on_vms @ !reset_vms; - - (* ensure live=false *) - begin - try - let h_metrics = Db.Host.get_metrics ~__context ~self:h in - let current = Db.Host_metrics.get_live ~__context ~self:h_metrics in - if current then begin - (* Fire off a ha_host_failed message if the host hasn't just shut itself down *) - let shutting_down = Stdext.Threadext.Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m (fun () -> !Xapi_globs.hosts_which_are_shutting_down) in - if not (List.exists (fun x -> x=h) shutting_down) then begin - let obj_uuid = Db.Host.get_uuid ~__context ~self:h in - let host_name = Db.Host.get_name_label ~__context ~self:h in - Xapi_alert.add ~msg:Api_messages.ha_host_failed ~cls:`Host ~obj_uuid - ~body:(Printf.sprintf "Server '%s' has failed" host_name); - end; - (* Call external host failed hook (allows a third-party to use power-fencing if desired) *) - Xapi_hooks.host_pre_declare_dead ~__context ~host:h ~reason:Xapi_hooks.reason__fenced; - Db.Host_metrics.set_live ~__context ~self:h_metrics ~value:false; (* since slave is fenced, it will not set this to true again itself *) - Xapi_host_helpers.update_allowed_operations ~__context ~self:h; - dead_hosts := h :: !dead_hosts; - end - with _ -> - () (* if exn assume h_metrics doesn't exist, then "live" is defined to be false implicitly, so do nothing *) - end - end) hosts; - - debug "Setting all VMs running or paused to Halted"; - (* ensure all vms resident_on this host running or paused have their powerstates reset *) - List.iter (fun vm -> - if 
Xapi_vm_lifecycle.is_live ~__context ~self:vm then - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted) - !reset_vms; - (* host_post_declare_dead may take a long time if the SR is locked *) - dead_hosts := List.rev !dead_hosts; - List.iter (fun h -> Xapi_hooks.host_post_declare_dead ~__context ~host:h ~reason:Xapi_hooks.reason__fenced) - !dead_hosts; - - (* If something has changed then we'd better refresh the pool status *) - if !reset_vms <> [] then ignore(update_pool_status ~__context ~live_set ()); - - (* At this point failed protected agile VMs are Halted, not resident_on anywhere *) - - let all_protected_vms = all_protected_vms ~__context in - - let plan, plan_is_complete = - try - if Xapi_fist.simulate_planner_failure () then failwith "fist_simulate_planner_failure"; - (* CA-23981: if the pool-pre-ha-vm-restart hook exists AND if we're about to auto-start some VMs then - call the script hook first and then recompute the plan aftwards. Note that these VMs may either - be protected or best-effort. For the protected ones we assume that these are included in the VM - restart plan-- we ignore the possibility that the planner may fail here (even through there is some - last-ditch code later to perform best-effort VM.starts). This is ok since it should never happen and - this particular hook is really a 'best-effort' integration point since it conflicts with the overcommit - protection. 
- For the best-effort VMs we call the script - when we have reset some VMs to halted (no guarantee there is enough resource but better safe than sorry) *) - let plan, config, vms_not_restarted, non_agile_protected_vms_exist = compute_restart_plan ~__context ~all_protected_vms ~live_set n in - let plan, config, vms_not_restarted, non_agile_protected_vms_exist = - if true - && Xapi_hooks.pool_pre_ha_vm_restart_hook_exists () - && (plan <> [] || !reset_vms <> []) then begin - (* We're about to soak up some resources for 'Level 1' VMs somewhere; before we do that give 'Level 2' VMs a shot *) - (* Whatever this script does we don't let it break our restart thread *) - begin - try - Xapi_hooks.pool_pre_ha_vm_restart_hook ~__context - with e -> - error "pool-pre-ha-vm-restart-hook failed: %s: continuing anyway" (ExnHelper.string_of_exn e) - end; - debug "Recomputing restart plan to take into account new state of the world after running the script"; - compute_restart_plan ~__context ~all_protected_vms ~live_set n - end else plan, config, vms_not_restarted, non_agile_protected_vms_exist (* nothing needs recomputing *) - in - - (* If we are undercommitted then vms_not_restarted = [] and plan will include all offline protected_vms *) - let plan_is_complete = vms_not_restarted = [] in - plan, plan_is_complete - with e -> - error "Caught unexpected exception in HA planner: %s" (ExnHelper.string_of_exn e); - [], false in - - (* Send at most one alert per protected VM failure *) - let consider_sending_failed_alert_for vm = - debug "We failed to restart protected VM %s: considering sending an alert" (Ref.string_of vm); - if not(Hashtbl.mem restart_failed vm) then begin - Hashtbl.replace restart_failed vm (); - let obj_uuid = Db.VM.get_uuid ~__context ~self:vm in - Xapi_alert.add ~msg:Api_messages.ha_protected_vm_restart_failed ~cls:`VM ~obj_uuid ~body:"" - end in - - (* execute the plan *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - - (* Helper function to 
start a VM somewhere. If the HA overcommit protection stops us then disable it and try once more. - Returns true if the VM was restarted and false otherwise. *) - let restart_vm vm ?host () = - let go () = - - if Xapi_fist.simulate_restart_failure () then begin - match Random.int 3 with - | 0 -> raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) - | 1 -> raise (Api_errors.Server_error("FIST: unexpected exception", [])) - | _ -> () - end; - - (* If we tried before and failed, don't retry again within 2 minutes *) - let attempt_restart = - if Hashtbl.mem last_start_attempt vm - then Unix.gettimeofday () -. (Hashtbl.find last_start_attempt vm) > 120. - else true in - - if attempt_restart then begin - Hashtbl.replace last_start_attempt vm (Unix.gettimeofday ()); - match host with - | None -> Client.Client.VM.start rpc session_id vm false true - | Some h -> Client.Client.VM.start_on rpc session_id vm h false true - end else failwith (Printf.sprintf "VM: %s restart attempt delayed for 120s" (Ref.string_of vm)) in - try - go (); - true - with - | Api_errors.Server_error(code, params) when code = Api_errors.ha_operation_would_break_failover_plan -> - (* This should never happen since the planning code would always allow the restart of a protected VM... 
*) - error "Caught exception HA_OPERATION_WOULD_BREAK_FAILOVER_PLAN: setting pool as overcommitted and retrying"; - ignore (mark_pool_as_overcommitted ~__context ~live_set : bool); - begin - try - go (); - true - with e -> - error "Caught exception trying to restart VM %s: %s" (Ref.string_of vm) (ExnHelper.string_of_exn e); - false - end - | e -> - error "Caught exception trying to restart VM %s: %s" (Ref.string_of vm) (ExnHelper.string_of_exn e); - false in - - (* Build a list of bools, one per Halted protected VM indicating whether we managed to start it or not *) - let started = - if not plan_is_complete then begin - (* If the Pool is overcommitted the restart priority will make the difference between a VM restart or not, - while if we're undercommitted the restart priority only affects the timing slightly. *) - let all = List.filter (fun (_, r) -> r.API.vM_power_state = `Halted) all_protected_vms in - let all = List.sort by_order all in - warn "Failed to find plan to restart all protected VMs: falling back to simple VM.start in priority order"; - List.map (fun (vm, _) -> vm, restart_vm vm ()) all - end else begin - (* Walk over the VMs in priority order, starting each on the planned host *) - let all = List.sort by_order (List.map (fun (vm, _) -> vm, Db.VM.get_record ~__context ~self:vm) plan) in - List.map (fun (vm, _) -> - vm, (if List.mem_assoc vm plan - then restart_vm vm ~host:(List.assoc vm plan) () - else false)) all - end in - (* Perform one final restart attempt of any that weren't started. 
*) - let started = List.map (fun (vm, started) -> match started with - | true -> vm, true - | false -> vm, restart_vm vm ()) started in - (* Send an alert for any failed VMs *) - List.iter (fun (vm, started) -> if not started then consider_sending_failed_alert_for vm) started; - - (* Forget about previously failed VMs which have gone *) - let vms_we_know_about = List.map fst started in - let gc_table tbl = - let vms_in_table = Hashtbl.fold (fun vm _ acc -> vm :: acc) tbl [] in - List.iter (fun vm -> if not(List.mem vm vms_we_know_about) then (debug "Forgetting VM: %s" (Ref.string_of vm); Hashtbl.remove tbl vm)) vms_in_table in - gc_table last_start_attempt; - gc_table restart_failed; - - (* Consider restarting the best-effort VMs we *think* have failed (but we might get this wrong -- - ok since this is 'best-effort'). NOTE we do not use the restart_vm function above as this will mark the - pool as overcommitted if an HA_OPERATION_WOULD_BREAK_FAILOVER_PLAN is received (although this should never - happen it's better safe than sorry) *) - List.iter - (fun vm -> - try - if Db.VM.get_power_state ~__context ~self:vm = `Halted - && Db.VM.get_ha_restart_priority ~__context ~self:vm = Constants.ha_restart_best_effort - then Client.Client.VM.start rpc session_id vm false true - with e -> - error "Failed to restart best-effort VM %s (%s): %s" - (Db.VM.get_uuid ~__context ~self:vm) - (Db.VM.get_name_label ~__context ~self:vm) - (ExnHelper.string_of_exn e)) !reset_vms - - ) + (* ensure we have live=false on the host_metrics for those hosts not in the live_set; and force state to Halted for + all VMs that are "running" or "paused" with resident_on set to one of the hosts that is now dead + *) + debug "restart_auto_run_vms called"; + let hosts = Db.Host.get_all ~__context in + (* Keep a list of all the VMs whose power-states we force to Halted to use later in the + 'best-effort' restart code. 
Note that due to the weakly consistent database this is not + an accurate way to determine 'failed' VMs but it will suffice for our 'best-effort' + category. *) + let reset_vms = ref [] in + let dead_hosts = ref [] in + List.iter (fun h -> + if not (List.mem h live_set) then begin + let hostname = Db.Host.get_hostname ~__context ~self:h in + debug "Setting host %s to dead" hostname; + (* Sample this before calling any hook scripts *) + let resident_on_vms = List.filter + (fun vm -> not (Db.VM.get_is_control_domain ~__context ~self:vm)) + (Db.Host.get_resident_VMs ~__context ~self:h) in + reset_vms := resident_on_vms @ !reset_vms; + + (* ensure live=false *) + begin + try + let h_metrics = Db.Host.get_metrics ~__context ~self:h in + let current = Db.Host_metrics.get_live ~__context ~self:h_metrics in + if current then begin + (* Fire off a ha_host_failed message if the host hasn't just shut itself down *) + let shutting_down = Stdext.Threadext.Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m (fun () -> !Xapi_globs.hosts_which_are_shutting_down) in + if not (List.exists (fun x -> x=h) shutting_down) then begin + let obj_uuid = Db.Host.get_uuid ~__context ~self:h in + let host_name = Db.Host.get_name_label ~__context ~self:h in + Xapi_alert.add ~msg:Api_messages.ha_host_failed ~cls:`Host ~obj_uuid + ~body:(Printf.sprintf "Server '%s' has failed" host_name); + end; + (* Call external host failed hook (allows a third-party to use power-fencing if desired) *) + Xapi_hooks.host_pre_declare_dead ~__context ~host:h ~reason:Xapi_hooks.reason__fenced; + Db.Host_metrics.set_live ~__context ~self:h_metrics ~value:false; (* since slave is fenced, it will not set this to true again itself *) + Xapi_host_helpers.update_allowed_operations ~__context ~self:h; + dead_hosts := h :: !dead_hosts; + end + with _ -> + () (* if exn assume h_metrics doesn't exist, then "live" is defined to be false implicitly, so do nothing *) + end + end) hosts; + + debug "Setting all VMs running 
or paused to Halted"; + (* ensure all vms resident_on this host running or paused have their powerstates reset *) + List.iter (fun vm -> + if Xapi_vm_lifecycle.is_live ~__context ~self:vm then + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted) + !reset_vms; + (* host_post_declare_dead may take a long time if the SR is locked *) + dead_hosts := List.rev !dead_hosts; + List.iter (fun h -> Xapi_hooks.host_post_declare_dead ~__context ~host:h ~reason:Xapi_hooks.reason__fenced) + !dead_hosts; + + (* If something has changed then we'd better refresh the pool status *) + if !reset_vms <> [] then ignore(update_pool_status ~__context ~live_set ()); + + (* At this point failed protected agile VMs are Halted, not resident_on anywhere *) + + let all_protected_vms = all_protected_vms ~__context in + + let plan, plan_is_complete = + try + if Xapi_fist.simulate_planner_failure () then failwith "fist_simulate_planner_failure"; + (* CA-23981: if the pool-pre-ha-vm-restart hook exists AND if we're about to auto-start some VMs then + call the script hook first and then recompute the plan aftwards. Note that these VMs may either + be protected or best-effort. For the protected ones we assume that these are included in the VM + restart plan-- we ignore the possibility that the planner may fail here (even through there is some + last-ditch code later to perform best-effort VM.starts). This is ok since it should never happen and + this particular hook is really a 'best-effort' integration point since it conflicts with the overcommit + protection. 
+ For the best-effort VMs we call the script + when we have reset some VMs to halted (no guarantee there is enough resource but better safe than sorry) *) + let plan, config, vms_not_restarted, non_agile_protected_vms_exist = compute_restart_plan ~__context ~all_protected_vms ~live_set n in + let plan, config, vms_not_restarted, non_agile_protected_vms_exist = + if true + && Xapi_hooks.pool_pre_ha_vm_restart_hook_exists () + && (plan <> [] || !reset_vms <> []) then begin + (* We're about to soak up some resources for 'Level 1' VMs somewhere; before we do that give 'Level 2' VMs a shot *) + (* Whatever this script does we don't let it break our restart thread *) + begin + try + Xapi_hooks.pool_pre_ha_vm_restart_hook ~__context + with e -> + error "pool-pre-ha-vm-restart-hook failed: %s: continuing anyway" (ExnHelper.string_of_exn e) + end; + debug "Recomputing restart plan to take into account new state of the world after running the script"; + compute_restart_plan ~__context ~all_protected_vms ~live_set n + end else plan, config, vms_not_restarted, non_agile_protected_vms_exist (* nothing needs recomputing *) + in + + (* If we are undercommitted then vms_not_restarted = [] and plan will include all offline protected_vms *) + let plan_is_complete = vms_not_restarted = [] in + plan, plan_is_complete + with e -> + error "Caught unexpected exception in HA planner: %s" (ExnHelper.string_of_exn e); + [], false in + + (* Send at most one alert per protected VM failure *) + let consider_sending_failed_alert_for vm = + debug "We failed to restart protected VM %s: considering sending an alert" (Ref.string_of vm); + if not(Hashtbl.mem restart_failed vm) then begin + Hashtbl.replace restart_failed vm (); + let obj_uuid = Db.VM.get_uuid ~__context ~self:vm in + Xapi_alert.add ~msg:Api_messages.ha_protected_vm_restart_failed ~cls:`VM ~obj_uuid ~body:"" + end in + + (* execute the plan *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + + (* Helper function to 
start a VM somewhere. If the HA overcommit protection stops us then disable it and try once more. + Returns true if the VM was restarted and false otherwise. *) + let restart_vm vm ?host () = + let go () = + + if Xapi_fist.simulate_restart_failure () then begin + match Random.int 3 with + | 0 -> raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) + | 1 -> raise (Api_errors.Server_error("FIST: unexpected exception", [])) + | _ -> () + end; + + (* If we tried before and failed, don't retry again within 2 minutes *) + let attempt_restart = + if Hashtbl.mem last_start_attempt vm + then Unix.gettimeofday () -. (Hashtbl.find last_start_attempt vm) > 120. + else true in + + if attempt_restart then begin + Hashtbl.replace last_start_attempt vm (Unix.gettimeofday ()); + match host with + | None -> Client.Client.VM.start rpc session_id vm false true + | Some h -> Client.Client.VM.start_on rpc session_id vm h false true + end else failwith (Printf.sprintf "VM: %s restart attempt delayed for 120s" (Ref.string_of vm)) in + try + go (); + true + with + | Api_errors.Server_error(code, params) when code = Api_errors.ha_operation_would_break_failover_plan -> + (* This should never happen since the planning code would always allow the restart of a protected VM... 
*) + error "Caught exception HA_OPERATION_WOULD_BREAK_FAILOVER_PLAN: setting pool as overcommitted and retrying"; + ignore (mark_pool_as_overcommitted ~__context ~live_set : bool); + begin + try + go (); + true + with e -> + error "Caught exception trying to restart VM %s: %s" (Ref.string_of vm) (ExnHelper.string_of_exn e); + false + end + | e -> + error "Caught exception trying to restart VM %s: %s" (Ref.string_of vm) (ExnHelper.string_of_exn e); + false in + + (* Build a list of bools, one per Halted protected VM indicating whether we managed to start it or not *) + let started = + if not plan_is_complete then begin + (* If the Pool is overcommitted the restart priority will make the difference between a VM restart or not, + while if we're undercommitted the restart priority only affects the timing slightly. *) + let all = List.filter (fun (_, r) -> r.API.vM_power_state = `Halted) all_protected_vms in + let all = List.sort by_order all in + warn "Failed to find plan to restart all protected VMs: falling back to simple VM.start in priority order"; + List.map (fun (vm, _) -> vm, restart_vm vm ()) all + end else begin + (* Walk over the VMs in priority order, starting each on the planned host *) + let all = List.sort by_order (List.map (fun (vm, _) -> vm, Db.VM.get_record ~__context ~self:vm) plan) in + List.map (fun (vm, _) -> + vm, (if List.mem_assoc vm plan + then restart_vm vm ~host:(List.assoc vm plan) () + else false)) all + end in + (* Perform one final restart attempt of any that weren't started. 
*) + let started = List.map (fun (vm, started) -> match started with + | true -> vm, true + | false -> vm, restart_vm vm ()) started in + (* Send an alert for any failed VMs *) + List.iter (fun (vm, started) -> if not started then consider_sending_failed_alert_for vm) started; + + (* Forget about previously failed VMs which have gone *) + let vms_we_know_about = List.map fst started in + let gc_table tbl = + let vms_in_table = Hashtbl.fold (fun vm _ acc -> vm :: acc) tbl [] in + List.iter (fun vm -> if not(List.mem vm vms_we_know_about) then (debug "Forgetting VM: %s" (Ref.string_of vm); Hashtbl.remove tbl vm)) vms_in_table in + gc_table last_start_attempt; + gc_table restart_failed; + + (* Consider restarting the best-effort VMs we *think* have failed (but we might get this wrong -- + ok since this is 'best-effort'). NOTE we do not use the restart_vm function above as this will mark the + pool as overcommitted if an HA_OPERATION_WOULD_BREAK_FAILOVER_PLAN is received (although this should never + happen it's better safe than sorry) *) + List.iter + (fun vm -> + try + if Db.VM.get_power_state ~__context ~self:vm = `Halted + && Db.VM.get_ha_restart_priority ~__context ~self:vm = Constants.ha_restart_best_effort + then Client.Client.VM.start rpc session_id vm false true + with e -> + error "Failed to restart best-effort VM %s (%s): %s" + (Db.VM.get_uuid ~__context ~self:vm) + (Db.VM.get_name_label ~__context ~self:vm) + (ExnHelper.string_of_exn e)) !reset_vms + + ) diff --git a/ocaml/xapi/xapi_ha_vm_failover.mli b/ocaml/xapi/xapi_ha_vm_failover.mli index cd718b44d52..cd28676ed28 100644 --- a/ocaml/xapi/xapi_ha_vm_failover.mli +++ b/ocaml/xapi/xapi_ha_vm_failover.mli @@ -13,9 +13,9 @@ *) (** * @group High Availability (HA) - *) - -val all_protected_vms : __context:Context.t -> (API.ref_VM * API.vM_t) list +*) + +val all_protected_vms : __context:Context.t -> (API.ref_VM * API.vM_t) list (** Take a set of live VMs and attempt to restart all protected VMs which have 
failed *) val restart_auto_run_vms : __context:Context.t -> API.ref_host list -> int -> unit @@ -24,19 +24,19 @@ val restart_auto_run_vms : __context:Context.t -> API.ref_host list -> int -> un val compute_evacuation_plan : __context:Context.t -> int -> API.ref_host list -> (API.ref_VM * API.vM_t) list -> (API.ref_VM * API.ref_host) list (** Abstract result of the background HA planning function *) -type result = +type result = | Plan_exists_for_all_VMs (** All protected VMs could be restarted *) | Plan_exists_excluding_non_agile_VMs (** Excluding 'trivial' failures due to non-agile VMs, all protected VMs could be restarted *) | No_plan_exists (** Not all protected VMs could be restarted *) -(** Passed to the planner to reason about other possible configurations, used to block operations which would +(** Passed to the planner to reason about other possible configurations, used to block operations which would destroy the HA VM restart plan. *) type configuration_change = { old_vms_leaving: (API.ref_host * (API.ref_VM * API.vM_t)) list; (** existing VMs which are leaving *) old_vms_arriving: (API.ref_host * (API.ref_VM * API.vM_t)) list; (** existing VMs which are arriving *) hosts_to_disable: API.ref_host list; (** hosts to pretend to disable *) num_failures: int option; (** new number of failures to consider *) - new_vms_to_protect: API.ref_VM list; (** new VMs to restart *) + new_vms_to_protect: API.ref_VM list; (** new VMs to restart *) } val no_configuration_change : configuration_change @@ -50,7 +50,7 @@ val plan_for_n_failures : __context:Context.t -> all_protected_vms:((API.ref_VM (** Compute the maximum plan size we can currently find *) val compute_max_host_failures_to_tolerate : __context:Context.t -> ?live_set:API.ref_host list -> ?protected_vms:((API.ref_VM * API.vM_t) list) -> unit -> int64 -(** HA admission control functions: aim is to block operations which would make us become overcommitted: *) +(** HA admission control functions: aim is to block 
operations which would make us become overcommitted: *) val assert_vm_placement_preserves_ha_plan : __context:Context.t -> ?leaving:(API.ref_host * (API.ref_VM * API.vM_t)) list -> ?arriving:(API.ref_host * (API.ref_VM * API.vM_t)) list -> unit -> unit val assert_host_disable_preserves_ha_plan : __context:Context.t -> API.ref_host -> unit val assert_nfailures_change_preserves_ha_plan : __context:Context.t -> int -> unit diff --git a/ocaml/xapi/xapi_hooks.ml b/ocaml/xapi/xapi_hooks.ml index 7f0d8c11191..3b6e1ad09b9 100644 --- a/ocaml/xapi/xapi_hooks.ml +++ b/ocaml/xapi/xapi_hooks.ml @@ -41,7 +41,7 @@ let exitcode_log_and_continue = 1 let list_individual_hooks ~script_name = let script_dir = Filename.concat !Xapi_globs.xapi_hooks_root script_name in - if (try Unix.access script_dir [Unix.F_OK]; true with _ -> false) + if (try Unix.access script_dir [Unix.F_OK]; true with _ -> false) then let scripts = Sys.readdir script_dir in Array.stable_sort compare scripts; @@ -53,58 +53,58 @@ let execute_hook ~__context ~script_name ~args ~reason = let scripts = list_individual_hooks ~script_name in let script_dir = Filename.concat !Xapi_globs.xapi_hooks_root script_name in - Array.iter - (fun script-> - try - debug "Executing hook '%s/%s' with args [ %s ]" script_name script (String.concat "; " args); - ignore (Forkhelpers.execute_command_get_output (Filename.concat script_dir script) args); - with - Forkhelpers.Spawn_internal_error (_,stdout,Unix.WEXITED i) (* i<>0 since that case does not generate exn *) -> - if i=exitcode_log_and_continue then - debug "Hook '%s/%s' with args [ %s ] logged '%s'" script_name script (String.concat "; " args) (String.escaped stdout) - else - raise (Api_errors.Server_error (Api_errors.xapi_hook_failed, [ script_name^"/"^script; reason; stdout; string_of_int i ]) - )) - scripts - -let execute_vm_hook ~__context ~reason ~vm = + Array.iter + (fun script-> + try + debug "Executing hook '%s/%s' with args [ %s ]" script_name script (String.concat "; " 
args); + ignore (Forkhelpers.execute_command_get_output (Filename.concat script_dir script) args); + with + Forkhelpers.Spawn_internal_error (_,stdout,Unix.WEXITED i) (* i<>0 since that case does not generate exn *) -> + if i=exitcode_log_and_continue then + debug "Hook '%s/%s' with args [ %s ] logged '%s'" script_name script (String.concat "; " args) (String.escaped stdout) + else + raise (Api_errors.Server_error (Api_errors.xapi_hook_failed, [ script_name^"/"^script; reason; stdout; string_of_int i ]) + )) + scripts + +let execute_vm_hook ~__context ~reason ~vm = let vmuuid = Db.VM.get_uuid ~__context ~self:vm in execute_hook ~__context ~args:[ "-vmuuid"; vmuuid ] ~reason -let execute_host_hook ~__context ~reason ~host = +let execute_host_hook ~__context ~reason ~host = let uuid = Db.Host.get_uuid ~__context ~self:host in execute_hook ~__context ~args:[ "-hostuuid"; uuid ] ~reason let execute_pool_hook ~__context ~reason = execute_hook ~__context ~args:[] ~reason -let host_pre_declare_dead ~__context ~host ~reason = +let host_pre_declare_dead ~__context ~host ~reason = execute_host_hook ~__context ~script_name:scriptname__host_pre_declare_dead ~reason ~host (* Called when host died -- !! hook code in here to abort outstanding forwarded ops *) let internal_host_dead_hook __context host = - info "Running host dead hook for %s" (Ref.string_of host); - (* reverse lookup host from metrics id; don't have backedge here... *) - let forwarded_tasks = - let open Db_filter_types in - Db.Task.get_refs_where ~__context - ~expr:(Eq (Field "forwarded_to", Literal (Ref.string_of host))) - in - List.iter - (fun task -> - let resources = Locking_helpers.Thread_state.get_acquired_resources_by_task task in - List.iter Locking_helpers.kill_resource resources - ) forwarded_tasks - -let host_post_declare_dead ~__context ~host ~reason = + info "Running host dead hook for %s" (Ref.string_of host); + (* reverse lookup host from metrics id; don't have backedge here... 
*) + let forwarded_tasks = + let open Db_filter_types in + Db.Task.get_refs_where ~__context + ~expr:(Eq (Field "forwarded_to", Literal (Ref.string_of host))) + in + List.iter + (fun task -> + let resources = Locking_helpers.Thread_state.get_acquired_resources_by_task task in + List.iter Locking_helpers.kill_resource resources + ) forwarded_tasks + +let host_post_declare_dead ~__context ~host ~reason = (* Cancel outstanding tasks first-- should release necessary locks *) internal_host_dead_hook __context host; execute_host_hook ~__context ~script_name:scriptname__host_post_declare_dead ~reason ~host -let pool_ha_overcommitted_hook ~__context = +let pool_ha_overcommitted_hook ~__context = execute_pool_hook ~__context ~script_name:scriptname__pool_ha_overcommitted ~reason:reason__none -let pool_pre_ha_vm_restart_hook ~__context = +let pool_pre_ha_vm_restart_hook ~__context = execute_pool_hook ~__context ~script_name:scriptname__pool_pre_ha_vm_restart ~reason:reason__none let pool_join_hook ~__context = diff --git a/ocaml/xapi/xapi_host.ml b/ocaml/xapi/xapi_host.ml index 75888284e80..7623d4170e0 100644 --- a/ocaml/xapi/xapi_host.ml +++ b/ocaml/xapi/xapi_host.ml @@ -45,127 +45,127 @@ let set_power_on_mode ~__context ~self ~power_on_mode ~power_on_config = Xapi_host_helpers.update_allowed_operations ~__context ~self (** Before we re-enable this host we make sure it's safe to do so. It isn't if: - + we're in the middle of an HA shutdown/reboot and have our fencing temporarily disabled. - + xapi hasn't properly started up yet. - + HA is enabled and this host has broken storage or networking which would cause protected VMs - to become non-agile - *) + + we're in the middle of an HA shutdown/reboot and have our fencing temporarily disabled. + + xapi hasn't properly started up yet. 
+ + HA is enabled and this host has broken storage or networking which would cause protected VMs + to become non-agile +*) let assert_safe_to_reenable ~__context ~self = - assert_startup_complete (); - let host_disabled_until_reboot = try bool_of_string (Localdb.get Constants.host_disabled_until_reboot) with _ -> false in - if host_disabled_until_reboot - then raise (Api_errors.Server_error(Api_errors.host_disabled_until_reboot, [])); - if Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) then begin - let pbds = Db.Host.get_PBDs ~__context ~self in - let unplugged_pbds = List.filter (fun pbd -> not(Db.PBD.get_currently_attached ~__context ~self:pbd)) pbds in - (* Make sure it is 'ok' to have these PBDs remain unplugged *) - List.iter (fun self -> Xapi_pbd.abort_if_storage_attached_to_protected_vms ~__context ~self) unplugged_pbds; - let pifs = Db.Host.get_PIFs ~__context ~self in - let unplugged_pifs = List.filter (fun pif -> not(Db.PIF.get_currently_attached ~__context ~self:pif)) pifs in - (* Make sure it is 'ok' to have these PIFs remain unplugged *) - List.iter (fun self -> Xapi_pif.abort_if_network_attached_to_protected_vms ~__context ~self) unplugged_pifs; - end + assert_startup_complete (); + let host_disabled_until_reboot = try bool_of_string (Localdb.get Constants.host_disabled_until_reboot) with _ -> false in + if host_disabled_until_reboot + then raise (Api_errors.Server_error(Api_errors.host_disabled_until_reboot, [])); + if Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) then begin + let pbds = Db.Host.get_PBDs ~__context ~self in + let unplugged_pbds = List.filter (fun pbd -> not(Db.PBD.get_currently_attached ~__context ~self:pbd)) pbds in + (* Make sure it is 'ok' to have these PBDs remain unplugged *) + List.iter (fun self -> Xapi_pbd.abort_if_storage_attached_to_protected_vms ~__context ~self) unplugged_pbds; + let pifs = Db.Host.get_PIFs ~__context ~self in + let unplugged_pifs = List.filter (fun pif -> 
not(Db.PIF.get_currently_attached ~__context ~self:pif)) pifs in + (* Make sure it is 'ok' to have these PIFs remain unplugged *) + List.iter (fun self -> Xapi_pif.abort_if_network_attached_to_protected_vms ~__context ~self) unplugged_pifs; + end let xen_bugtool = "/usr/sbin/xen-bugtool" let bugreport_upload ~__context ~host ~url ~options = let proxy = - if List.mem_assoc "http_proxy" options - then List.assoc "http_proxy" options - else try Unix.getenv "http_proxy" with _ -> "" in + if List.mem_assoc "http_proxy" options + then List.assoc "http_proxy" options + else try Unix.getenv "http_proxy" with _ -> "" in let cmd = Printf.sprintf "%s %s %s" !Xapi_globs.host_bugreport_upload url proxy in try - let stdout, stderr = Forkhelpers.execute_command_get_output !Xapi_globs.host_bugreport_upload [ url; proxy ] in - debug "%s succeeded with stdout=[%s] stderr=[%s]" cmd stdout stderr + let stdout, stderr = Forkhelpers.execute_command_get_output !Xapi_globs.host_bugreport_upload [ url; proxy ] in + debug "%s succeeded with stdout=[%s] stderr=[%s]" cmd stdout stderr with Forkhelpers.Spawn_internal_error(stderr, stdout, status) as e -> - debug "%s failed with stdout=[%s] stderr=[%s]" cmd stdout stderr; - (* Attempt to interpret curl's exit code (from curl(1)) *) - begin match status with - | Unix.WEXITED (1 | 3 | 4) -> - failwith "URL not recognised" - | Unix.WEXITED (5 | 6) -> - failwith "Failed to resolve proxy or host" - | Unix.WEXITED 7 -> - failwith "Failed to connect to host" - | Unix.WEXITED 9 -> - failwith "FTP access denied" - | _ -> raise e - end + debug "%s failed with stdout=[%s] stderr=[%s]" cmd stdout stderr; + (* Attempt to interpret curl's exit code (from curl(1)) *) + begin match status with + | Unix.WEXITED (1 | 3 | 4) -> + failwith "URL not recognised" + | Unix.WEXITED (5 | 6) -> + failwith "Failed to resolve proxy or host" + | Unix.WEXITED 7 -> + failwith "Failed to connect to host" + | Unix.WEXITED 9 -> + failwith "FTP access denied" + | _ -> raise e + 
end (** Check that a) there are no running VMs present on the host, b) there are no VBDs currently - attached to dom0, c) host is disabled. + attached to dom0, c) host is disabled. - This is approximately maintainance mode as defined by the gui. However, since - we haven't agreed on an exact definition of this mode, we'll not call this maintainance mode here, but we'll - use a synonym. According to http://thesaurus.com/browse/maintenance, bacon is a synonym - for maintainance, hence the name of the following function. + This is approximately maintainance mode as defined by the gui. However, since + we haven't agreed on an exact definition of this mode, we'll not call this maintainance mode here, but we'll + use a synonym. According to http://thesaurus.com/browse/maintenance, bacon is a synonym + for maintainance, hence the name of the following function. *) let assert_bacon_mode ~__context ~host = - if Db.Host.get_enabled ~__context ~self:host - then raise (Api_errors.Server_error (Api_errors.host_not_disabled, [])); - - let selfref = Ref.string_of host in - let vms = Db.VM.get_refs_where ~__context ~expr:(And(Eq (Field "resident_on", Literal (Ref.string_of host)), - Eq (Field "power_state", Literal "Running"))) in - (* We always expect a control domain to be resident on a host *) - (match List.filter (fun vm -> not (Db.VM.get_is_control_domain ~__context ~self:vm)) vms with - | [] -> () - | guest_vms -> - let vm_data = [selfref; "vm"; Ref.string_of (List.hd guest_vms)] in - raise (Api_errors.Server_error (Api_errors.host_in_use, vm_data))); - debug "Bacon test: VMs OK - %d running VMs" (List.length vms); - let control_domain_vbds = - List.filter (fun vm -> - Db.VM.get_resident_on ~__context ~self:vm = host - && Db.VM.get_is_control_domain ~__context ~self:vm - ) (Db.VM.get_all ~__context) - |> List.map (fun self -> Db.VM.get_VBDs ~__context ~self) - |> List.flatten - |> List.filter (fun self -> Db.VBD.get_currently_attached ~__context ~self) in - if List.length 
control_domain_vbds > 0 then - raise (Api_errors.Server_error ( - Api_errors.host_in_use, - [ selfref; "vbd"; List.hd (List.map Ref.string_of control_domain_vbds) ] - )); - debug "Bacon test: VBDs OK" + if Db.Host.get_enabled ~__context ~self:host + then raise (Api_errors.Server_error (Api_errors.host_not_disabled, [])); + + let selfref = Ref.string_of host in + let vms = Db.VM.get_refs_where ~__context ~expr:(And(Eq (Field "resident_on", Literal (Ref.string_of host)), + Eq (Field "power_state", Literal "Running"))) in + (* We always expect a control domain to be resident on a host *) + (match List.filter (fun vm -> not (Db.VM.get_is_control_domain ~__context ~self:vm)) vms with + | [] -> () + | guest_vms -> + let vm_data = [selfref; "vm"; Ref.string_of (List.hd guest_vms)] in + raise (Api_errors.Server_error (Api_errors.host_in_use, vm_data))); + debug "Bacon test: VMs OK - %d running VMs" (List.length vms); + let control_domain_vbds = + List.filter (fun vm -> + Db.VM.get_resident_on ~__context ~self:vm = host + && Db.VM.get_is_control_domain ~__context ~self:vm + ) (Db.VM.get_all ~__context) + |> List.map (fun self -> Db.VM.get_VBDs ~__context ~self) + |> List.flatten + |> List.filter (fun self -> Db.VBD.get_currently_attached ~__context ~self) in + if List.length control_domain_vbds > 0 then + raise (Api_errors.Server_error ( + Api_errors.host_in_use, + [ selfref; "vbd"; List.hd (List.map Ref.string_of control_domain_vbds) ] + )); + debug "Bacon test: VBDs OK" let signal_networking_change ~__context = - Helpers.update_pif_addresses ~__context; - Xapi_mgmt_iface.on_dom0_networking_change ~__context + Helpers.update_pif_addresses ~__context; + Xapi_mgmt_iface.on_dom0_networking_change ~__context let signal_cdrom_event ~__context params = let find_vdi_name sr name = - let ret = ref None in - let vdis = Db.SR.get_VDIs ~__context ~self:sr in - List.iter (fun vdi -> - if Db.VDI.get_location ~__context ~self:vdi = name then ret := Some vdi - ) vdis; - !ret - in + let 
ret = ref None in + let vdis = Db.SR.get_VDIs ~__context ~self:sr in + List.iter (fun vdi -> + if Db.VDI.get_location ~__context ~self:vdi = name then ret := Some vdi + ) vdis; + !ret + in let find_vdis name = - let srs = List.filter (fun sr -> - let ty = Db.SR.get_type ~__context ~self:sr in ty = "local" || ty = "udev" - ) (Db.SR.get_all ~__context) in - List.fold_left (fun acc o -> match o with Some x -> x :: acc | None -> acc) [] - (List.map (fun sr -> find_vdi_name sr name) srs) - in + let srs = List.filter (fun sr -> + let ty = Db.SR.get_type ~__context ~self:sr in ty = "local" || ty = "udev" + ) (Db.SR.get_all ~__context) in + List.fold_left (fun acc o -> match o with Some x -> x :: acc | None -> acc) [] + (List.map (fun sr -> find_vdi_name sr name) srs) + in let insert dev = - let vdis = find_vdis dev in - if List.length vdis = 1 then ( - let vdi = List.hd vdis in - debug "cdrom inserted notification in vdi %s" (Ref.string_of vdi); - let vbds = Db.VDI.get_VBDs ~__context ~self:vdi in - List.iter (fun vbd -> Xapi_xenops.vbd_insert ~__context ~self:vbd ~vdi) vbds - ) else - () - in + let vdis = find_vdis dev in + if List.length vdis = 1 then ( + let vdi = List.hd vdis in + debug "cdrom inserted notification in vdi %s" (Ref.string_of vdi); + let vbds = Db.VDI.get_VBDs ~__context ~self:vdi in + List.iter (fun vbd -> Xapi_xenops.vbd_insert ~__context ~self:vbd ~vdi) vbds + ) else + () + in try - match String.split ':' params with - | ["inserted";dev] -> insert dev - | "ejected"::_ -> () - | _ -> () + match String.split ':' params with + | ["inserted";dev] -> insert dev + | "ejected"::_ -> () + | _ -> () with _ -> - () + () let notify ~__context ~ty ~params = match ty with @@ -183,106 +183,106 @@ type per_vm_plan = let string_of_per_vm_plan p = match p with - | Migrate h -> - Ref.string_of h - | Error (e, t) -> - String.concat "," (e :: t) + | Migrate h -> + Ref.string_of h + | Error (e, t) -> + String.concat "," (e :: t) (** Return a table mapping VMs to 
'per_vm_plan' types indicating either a target - Host or a reason why the VM cannot be migrated. *) + Host or a reason why the VM cannot be migrated. *) let compute_evacuation_plan_no_wlb ~__context ~host = - let all_hosts = Db.Host.get_all ~__context in - let enabled_hosts = List.filter (fun self -> Db.Host.get_enabled ~__context ~self) all_hosts in - (* Only consider migrating to other enabled hosts (not this one obviously) *) - let target_hosts = List.filter (fun self -> self <> host) enabled_hosts in - - (* PR-1007: During a rolling pool upgrade, we are only allowed to - migrate VMs to hosts that have the same or higher version as - the source host. So as long as host versions aren't decreasing, - we're allowed to migrate VMs between hosts. *) - debug "evacuating host version: %s" - (Helpers.version_string_of ~__context (Helpers.LocalObject host)); - let target_hosts = List.filter - (fun target -> - debug "host %s version: %s" - (Db.Host.get_hostname ~__context ~self:target) - (Helpers.version_string_of ~__context (Helpers.LocalObject target)) ; - Helpers.host_versions_not_decreasing ~__context - ~host_from:(Helpers.LocalObject host) - ~host_to:(Helpers.LocalObject target)) - target_hosts - in - debug "evacuation target hosts are [%s]" - (String.concat "; " (List.map (fun h -> Db.Host.get_hostname ~__context ~self:h) target_hosts)) ; - - let all_vms = Db.Host.get_resident_VMs ~__context ~self:host in - let all_vms = List.map (fun self -> self, Db.VM.get_record ~__context ~self) all_vms in - let all_user_vms = List.filter (fun (_, record) -> not record.API.vM_is_control_domain) all_vms in - - let plans = Hashtbl.create 10 in - - if target_hosts = [] - then - begin - List.iter (fun (vm, _) -> - Hashtbl.add plans vm (Error (Api_errors.no_hosts_available, [ Ref.string_of vm ]))) - all_user_vms ; - plans - end - else - begin - - (* If HA is enabled we require that non-protected VMs are suspended. 
This gives us the property that - the result obtained by executing the evacuation plan and disabling the host looks the same (from the HA - planner's PoV) to the result obtained following a host failure and VM restart. *) - let pool = Helpers.get_pool ~__context in - let protected_vms, unprotected_vms = - if Db.Pool.get_ha_enabled ~__context ~self:pool - then List.partition (fun (vm, record) -> - Helpers.vm_should_always_run record.API.vM_ha_always_run record.API.vM_ha_restart_priority) - all_user_vms - else all_user_vms, [] in - List.iter (fun (vm, _) -> - Hashtbl.replace plans vm (Error (Api_errors.host_not_enough_free_memory, [ Ref.string_of vm ]))) - unprotected_vms; - let migratable_vms, unmigratable_vms = List.partition (fun (vm, record) -> - begin - try - List.iter (fun host -> - Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot:record - ~do_memory_check:false ()) - target_hosts; - true - with (Api_errors.Server_error (code, params)) -> Hashtbl.replace plans vm (Error (code, params)); false - end) protected_vms in - - (* Check for impediments before attempting to perform pool_migrate *) - List.iter - (fun (vm, _) -> - match Xapi_vm_lifecycle.get_operation_error ~__context ~self:vm ~op:`pool_migrate ~strict:true with - | None -> () - | Some (a,b) -> Hashtbl.replace plans vm (Error ( a, b)) - )all_user_vms; - - (* Compute the binpack which takes only memory size into account. We will check afterwards for storage - and network availability. 
*) - let plan = Xapi_ha_vm_failover.compute_evacuation_plan ~__context (List.length all_hosts) target_hosts migratable_vms in - (* Check if the plan was actually complete: if some VMs are missing it means there wasn't enough memory *) - let vms_handled = List.map fst plan in - let vms_missing = List.filter (fun x -> not(List.mem x vms_handled)) (List.map fst protected_vms) in - List.iter (fun vm -> Hashtbl.add plans vm (Error (Api_errors.host_not_enough_free_memory, [ Ref.string_of vm ]))) vms_missing; - - (* Now for each VM we did place, verify storage and network visibility. *) - List.iter (fun (vm, host) -> - let snapshot = List.assoc vm all_vms in - begin - try Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check:false () - with (Api_errors.Server_error (code, params)) -> Hashtbl.replace plans vm (Error (code, params)) - end; - if not(Hashtbl.mem plans vm) then Hashtbl.add plans vm (Migrate host) - ) plan; - plans - end + let all_hosts = Db.Host.get_all ~__context in + let enabled_hosts = List.filter (fun self -> Db.Host.get_enabled ~__context ~self) all_hosts in + (* Only consider migrating to other enabled hosts (not this one obviously) *) + let target_hosts = List.filter (fun self -> self <> host) enabled_hosts in + + (* PR-1007: During a rolling pool upgrade, we are only allowed to + migrate VMs to hosts that have the same or higher version as + the source host. So as long as host versions aren't decreasing, + we're allowed to migrate VMs between hosts. 
*) + debug "evacuating host version: %s" + (Helpers.version_string_of ~__context (Helpers.LocalObject host)); + let target_hosts = List.filter + (fun target -> + debug "host %s version: %s" + (Db.Host.get_hostname ~__context ~self:target) + (Helpers.version_string_of ~__context (Helpers.LocalObject target)) ; + Helpers.host_versions_not_decreasing ~__context + ~host_from:(Helpers.LocalObject host) + ~host_to:(Helpers.LocalObject target)) + target_hosts + in + debug "evacuation target hosts are [%s]" + (String.concat "; " (List.map (fun h -> Db.Host.get_hostname ~__context ~self:h) target_hosts)) ; + + let all_vms = Db.Host.get_resident_VMs ~__context ~self:host in + let all_vms = List.map (fun self -> self, Db.VM.get_record ~__context ~self) all_vms in + let all_user_vms = List.filter (fun (_, record) -> not record.API.vM_is_control_domain) all_vms in + + let plans = Hashtbl.create 10 in + + if target_hosts = [] + then + begin + List.iter (fun (vm, _) -> + Hashtbl.add plans vm (Error (Api_errors.no_hosts_available, [ Ref.string_of vm ]))) + all_user_vms ; + plans + end + else + begin + + (* If HA is enabled we require that non-protected VMs are suspended. This gives us the property that + the result obtained by executing the evacuation plan and disabling the host looks the same (from the HA + planner's PoV) to the result obtained following a host failure and VM restart. 
*) + let pool = Helpers.get_pool ~__context in + let protected_vms, unprotected_vms = + if Db.Pool.get_ha_enabled ~__context ~self:pool + then List.partition (fun (vm, record) -> + Helpers.vm_should_always_run record.API.vM_ha_always_run record.API.vM_ha_restart_priority) + all_user_vms + else all_user_vms, [] in + List.iter (fun (vm, _) -> + Hashtbl.replace plans vm (Error (Api_errors.host_not_enough_free_memory, [ Ref.string_of vm ]))) + unprotected_vms; + let migratable_vms, unmigratable_vms = List.partition (fun (vm, record) -> + begin + try + List.iter (fun host -> + Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot:record + ~do_memory_check:false ()) + target_hosts; + true + with (Api_errors.Server_error (code, params)) -> Hashtbl.replace plans vm (Error (code, params)); false + end) protected_vms in + + (* Check for impediments before attempting to perform pool_migrate *) + List.iter + (fun (vm, _) -> + match Xapi_vm_lifecycle.get_operation_error ~__context ~self:vm ~op:`pool_migrate ~strict:true with + | None -> () + | Some (a,b) -> Hashtbl.replace plans vm (Error ( a, b)) + )all_user_vms; + + (* Compute the binpack which takes only memory size into account. We will check afterwards for storage + and network availability. *) + let plan = Xapi_ha_vm_failover.compute_evacuation_plan ~__context (List.length all_hosts) target_hosts migratable_vms in + (* Check if the plan was actually complete: if some VMs are missing it means there wasn't enough memory *) + let vms_handled = List.map fst plan in + let vms_missing = List.filter (fun x -> not(List.mem x vms_handled)) (List.map fst protected_vms) in + List.iter (fun vm -> Hashtbl.add plans vm (Error (Api_errors.host_not_enough_free_memory, [ Ref.string_of vm ]))) vms_missing; + + (* Now for each VM we did place, verify storage and network visibility. 
*) + List.iter (fun (vm, host) -> + let snapshot = List.assoc vm all_vms in + begin + try Xapi_vm_helpers.assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check:false () + with (Api_errors.Server_error (code, params)) -> Hashtbl.replace plans vm (Error (code, params)) + end; + if not(Hashtbl.mem plans vm) then Hashtbl.add plans vm (Migrate host) + ) plan; + plans + end (* Old Miami style function with the strange error encoding *) let assert_can_evacuate ~__context ~host = @@ -299,17 +299,17 @@ let get_vms_which_prevent_evacuation ~__context ~self = let compute_evacuation_plan_wlb ~__context ~self = (* We treat xapi as primary when it comes to "hard" errors, i.e. those that aren't down to memory constraints. These are things like - VM_REQUIRES_SR or VM_LACKS_FEATURE_SUSPEND. + VM_REQUIRES_SR or VM_LACKS_FEATURE_SUSPEND. - We treat WLB as primary when it comes to placement of things that can actually move. WLB will return a list of migrations to perform, - and we pass those on. WLB will only return a partial set of migrations -- if there's not enough memory available, or if the VM can't - move, then it will simply omit that from the results. + We treat WLB as primary when it comes to placement of things that can actually move. WLB will return a list of migrations to perform, + and we pass those on. WLB will only return a partial set of migrations -- if there's not enough memory available, or if the VM can't + move, then it will simply omit that from the results. - So the algorithm is: - Record all the recommendations made by WLB. - Record all the non-memory errors from compute_evacuation_plan_no_wlb. These might overwrite recommendations by WLB, which is the - right thing to do because WLB doesn't know about all the HA corner cases (for example), but xapi does. - If there are any VMs left over, record them as HOST_NOT_ENOUGH_FREE_MEMORY, because we assume that WLB thinks they don't fit. 
+ So the algorithm is: + Record all the recommendations made by WLB. + Record all the non-memory errors from compute_evacuation_plan_no_wlb. These might overwrite recommendations by WLB, which is the + right thing to do because WLB doesn't know about all the HA corner cases (for example), but xapi does. + If there are any VMs left over, record them as HOST_NOT_ENOUGH_FREE_MEMORY, because we assume that WLB thinks they don't fit. *) let error_vms = compute_evacuation_plan_no_wlb ~__context ~host:self in @@ -317,142 +317,142 @@ let compute_evacuation_plan_wlb ~__context ~self = let recs = Hashtbl.create 31 in List.iter (fun (v, detail) -> - debug "WLB recommends VM evacuation: %s to %s" (Db.VM.get_name_label ~__context ~self:v) (String.concat "," detail); - - (* Sanity check - Note: if the vm being moved is dom0 then this is a power management rec and this check does not apply - *) - let resident_h = (Db.VM.get_resident_on ~__context ~self:v) in - let target_uuid = List.hd (List.tl detail) in - let target_host = Db.Host.get_by_uuid ~__context ~uuid:target_uuid in - if Db.Host.get_control_domain ~__context ~self:target_host != v && Db.Host.get_uuid ~__context ~self:resident_h = target_uuid - then - (* resident host and migration host are the same. Reject this plan *) - raise (Api_errors.Server_error - (Api_errors.wlb_malformed_response, - [Printf.sprintf "WLB recommends migrating VM %s to the same server it is being evacuated from." 
- (Db.VM.get_name_label ~__context ~self:v)])); - - match detail with - | ["WLB"; host_uuid; _] -> - Hashtbl.replace recs v (Migrate (Db.Host.get_by_uuid ~__context ~uuid:host_uuid)) - | _ -> - raise (Api_errors.Server_error - (Api_errors.wlb_malformed_response, ["WLB gave malformed details for VM evacuation."]))) vm_recoms; + debug "WLB recommends VM evacuation: %s to %s" (Db.VM.get_name_label ~__context ~self:v) (String.concat "," detail); + + (* Sanity check + Note: if the vm being moved is dom0 then this is a power management rec and this check does not apply + *) + let resident_h = (Db.VM.get_resident_on ~__context ~self:v) in + let target_uuid = List.hd (List.tl detail) in + let target_host = Db.Host.get_by_uuid ~__context ~uuid:target_uuid in + if Db.Host.get_control_domain ~__context ~self:target_host != v && Db.Host.get_uuid ~__context ~self:resident_h = target_uuid + then + (* resident host and migration host are the same. Reject this plan *) + raise (Api_errors.Server_error + (Api_errors.wlb_malformed_response, + [Printf.sprintf "WLB recommends migrating VM %s to the same server it is being evacuated from." 
+ (Db.VM.get_name_label ~__context ~self:v)])); + + match detail with + | ["WLB"; host_uuid; _] -> + Hashtbl.replace recs v (Migrate (Db.Host.get_by_uuid ~__context ~uuid:host_uuid)) + | _ -> + raise (Api_errors.Server_error + (Api_errors.wlb_malformed_response, ["WLB gave malformed details for VM evacuation."]))) vm_recoms; Hashtbl.iter (fun v detail -> - match detail with - | (Migrate _) -> - (* Skip migrations -- WLB is providing these *) - () - | (Error (e, _)) when e = Api_errors.host_not_enough_free_memory -> - (* Skip errors down to free memory -- we're letting WLB decide this *) - () - | (Error _) as p -> - debug "VM preventing evacuation: %s because %s" (Db.VM.get_name_label ~__context ~self:v) (string_of_per_vm_plan p); - Hashtbl.replace recs v detail) error_vms; + match detail with + | (Migrate _) -> + (* Skip migrations -- WLB is providing these *) + () + | (Error (e, _)) when e = Api_errors.host_not_enough_free_memory -> + (* Skip errors down to free memory -- we're letting WLB decide this *) + () + | (Error _) as p -> + debug "VM preventing evacuation: %s because %s" (Db.VM.get_name_label ~__context ~self:v) (string_of_per_vm_plan p); + Hashtbl.replace recs v detail) error_vms; let resident_vms = - List.filter (fun v -> (not (Db.VM.get_is_control_domain ~__context ~self:v)) && (not (Db.VM.get_is_a_template ~__context ~self:v))) - (Db.Host.get_resident_VMs ~__context ~self) in + List.filter (fun v -> (not (Db.VM.get_is_control_domain ~__context ~self:v)) && (not (Db.VM.get_is_a_template ~__context ~self:v))) + (Db.Host.get_resident_VMs ~__context ~self) in List.iter (fun vm -> - if not (Hashtbl.mem recs vm) then - (* Anything for which we don't have a recommendation from WLB, but which is agile, we treat as "not enough memory" *) - Hashtbl.replace recs vm (Error (Api_errors.host_not_enough_free_memory, [Ref.string_of vm]))) resident_vms; + if not (Hashtbl.mem recs vm) then + (* Anything for which we don't have a recommendation from WLB, but which is 
agile, we treat as "not enough memory" *) + Hashtbl.replace recs vm (Error (Api_errors.host_not_enough_free_memory, [Ref.string_of vm]))) resident_vms; Hashtbl.iter (fun vm detail -> - debug "compute_evacuation_plan_wlb: Key: %s Value %s" (Db.VM.get_name_label ~__context ~self:vm) (string_of_per_vm_plan detail)) recs; + debug "compute_evacuation_plan_wlb: Key: %s Value %s" (Db.VM.get_name_label ~__context ~self:vm) (string_of_per_vm_plan detail)) recs; recs let compute_evacuation_plan ~__context ~host = let oc = Db.Pool.get_other_config ~__context ~self:(Helpers.get_pool ~__context) in if ((List.exists (fun (k,v) -> k = "wlb_choose_host_disable" && (String.lowercase v = "true")) oc) - || not (Workload_balancing.check_wlb_enabled ~__context)) + || not (Workload_balancing.check_wlb_enabled ~__context)) then - begin - debug "Using wlb recommendations for choosing a host has been disabled or wlb is not available. Using original algorithm"; - compute_evacuation_plan_no_wlb ~__context ~host - end + begin + debug "Using wlb recommendations for choosing a host has been disabled or wlb is not available. Using original algorithm"; + compute_evacuation_plan_no_wlb ~__context ~host + end else - try - debug "Using WLB recommendations for host evacuation."; - compute_evacuation_plan_wlb ~__context ~self:host - with - | Api_errors.Server_error(error_type, error_detail) -> - debug "Encountered error when using wlb for choosing host \"%s: %s\". 
Using original algorithm" error_type (String.concat "" error_detail); - (try - let uuid = Db.Host.get_uuid ~__context ~self:host in - let message_body = - Printf.sprintf "Wlb consultation for Host '%s' failed (pool uuid: %s)" - (Db.Host.get_name_label ~__context ~self:host) - (Db.Pool.get_uuid ~__context ~self:(Helpers.get_pool ~__context)) - in - let (name, priority) = Api_messages.wlb_failed in - ignore(Xapi_message.create ~__context ~name ~priority ~cls:`Host ~obj_uuid:uuid ~body:message_body) - with _ -> ()); - compute_evacuation_plan_no_wlb ~__context ~host - | _ -> - debug "Encountered an unknown error when using wlb for choosing host. Using original algorithm"; - compute_evacuation_plan_no_wlb ~__context ~host + try + debug "Using WLB recommendations for host evacuation."; + compute_evacuation_plan_wlb ~__context ~self:host + with + | Api_errors.Server_error(error_type, error_detail) -> + debug "Encountered error when using wlb for choosing host \"%s: %s\". Using original algorithm" error_type (String.concat "" error_detail); + (try + let uuid = Db.Host.get_uuid ~__context ~self:host in + let message_body = + Printf.sprintf "Wlb consultation for Host '%s' failed (pool uuid: %s)" + (Db.Host.get_name_label ~__context ~self:host) + (Db.Pool.get_uuid ~__context ~self:(Helpers.get_pool ~__context)) + in + let (name, priority) = Api_messages.wlb_failed in + ignore(Xapi_message.create ~__context ~name ~priority ~cls:`Host ~obj_uuid:uuid ~body:message_body) + with _ -> ()); + compute_evacuation_plan_no_wlb ~__context ~host + | _ -> + debug "Encountered an unknown error when using wlb for choosing host. 
Using original algorithm"; + compute_evacuation_plan_no_wlb ~__context ~host let evacuate ~__context ~host = - let task = Context.get_task_id __context in - begin - let plans = compute_evacuation_plan ~__context ~host in - (* Check there are no errors in this list *) - Hashtbl.iter (fun vm plan -> - match plan with - | Error(code, params) -> raise (Api_errors.Server_error(code, params)) - | _ -> ()) - plans; - - (* Do it *) - let individual_progress = 1.0 /. float (Hashtbl.length plans) in - let migrate_vm vm plan = match plan with - | Migrate host -> - (try - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Client.VM.pool_migrate - ~rpc ~session_id ~vm ~host ~options:[ "live", "true" ]) - with - |Api_errors.Server_error(code, params) when code = Api_errors.vm_bad_power_state -> () - | e -> raise e - ); - let progress = Db.Task.get_progress ~__context ~self:task in - TaskHelper.set_progress ~__context (progress +. individual_progress) - | Error(code, params) -> (* should never happen *) - raise (Api_errors.Server_error(code, params)) - in - let () = Hashtbl.iter migrate_vm plans in - - (* Now check there are no VMs left *) - let vms = Db.Host.get_resident_VMs ~__context ~self:host in - let vms = - List.filter - (fun vm -> - not (Db.VM.get_is_control_domain ~__context ~self:vm)) - vms - in - assert (List.length vms = 0) - end + let task = Context.get_task_id __context in + begin + let plans = compute_evacuation_plan ~__context ~host in + (* Check there are no errors in this list *) + Hashtbl.iter (fun vm plan -> + match plan with + | Error(code, params) -> raise (Api_errors.Server_error(code, params)) + | _ -> ()) + plans; + + (* Do it *) + let individual_progress = 1.0 /. 
float (Hashtbl.length plans) in + let migrate_vm vm plan = match plan with + | Migrate host -> + (try + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Client.VM.pool_migrate + ~rpc ~session_id ~vm ~host ~options:[ "live", "true" ]) + with + |Api_errors.Server_error(code, params) when code = Api_errors.vm_bad_power_state -> () + | e -> raise e + ); + let progress = Db.Task.get_progress ~__context ~self:task in + TaskHelper.set_progress ~__context (progress +. individual_progress) + | Error(code, params) -> (* should never happen *) + raise (Api_errors.Server_error(code, params)) + in + let () = Hashtbl.iter migrate_vm plans in + + (* Now check there are no VMs left *) + let vms = Db.Host.get_resident_VMs ~__context ~self:host in + let vms = + List.filter + (fun vm -> + not (Db.VM.get_is_control_domain ~__context ~self:vm)) + vms + in + assert (List.length vms = 0) + end let retrieve_wlb_evacuate_recommendations ~__context ~self = let plans = compute_evacuation_plan_wlb ~__context ~self in Hashtbl.fold - (fun vm detail acc -> - let plan = match detail with - | Error (e, t) -> - e :: t - | Migrate h -> - ["WLB"; (Db.Host.get_uuid ~__context ~self:h)] - in - (vm, plan) :: acc) plans [] + (fun vm detail acc -> + let plan = match detail with + | Error (e, t) -> + e :: t + | Migrate h -> + ["WLB"; (Db.Host.get_uuid ~__context ~self:h)] + in + (vm, plan) :: acc) plans [] let restart_agent ~__context ~host = - let syslog_stdout = Forkhelpers.Syslog_WithKey ("Host.restart_agent") in - let pid = Forkhelpers.safe_close_and_exec None None None [] ~syslog_stdout !Xapi_globs.xe_toolstack_restart [] in - debug "Created process with pid: %d to perform xe-toolstack-restart" (Forkhelpers.getpid pid) + let syslog_stdout = Forkhelpers.Syslog_WithKey ("Host.restart_agent") in + let pid = Forkhelpers.safe_close_and_exec None None None [] ~syslog_stdout !Xapi_globs.xe_toolstack_restart [] in + debug "Created process with pid: %d to perform xe-toolstack-restart" 
(Forkhelpers.getpid pid) let shutdown_agent ~__context = debug "Host.restart_agent: Host agent will shutdown in 1s!!!!"; @@ -460,75 +460,75 @@ let shutdown_agent ~__context = let disable ~__context ~host = if Db.Host.get_enabled ~__context ~self:host then begin - info "Host.enabled: setting host %s (%s) to disabled because of user request" (Ref.string_of host) (Db.Host.get_hostname ~__context ~self:host); - Db.Host.set_enabled ~__context ~self:host ~value:false; - Xapi_host_helpers.user_requested_host_disable := true + info "Host.enabled: setting host %s (%s) to disabled because of user request" (Ref.string_of host) (Db.Host.get_hostname ~__context ~self:host); + Db.Host.set_enabled ~__context ~self:host ~value:false; + Xapi_host_helpers.user_requested_host_disable := true end let enable ~__context ~host = if not (Db.Host.get_enabled ~__context ~self:host) then begin - assert_safe_to_reenable ~__context ~self:host; - Xapi_host_helpers.user_requested_host_disable := false; - info "Host.enabled: setting host %s (%s) to enabled because of user request" (Ref.string_of host) (Db.Host.get_hostname ~__context ~self:host); - Db.Host.set_enabled ~__context ~self:host ~value:true; - (* Normally we schedule a plan recomputation when we successfully plug in our storage. In the case - when some of our storage was broken and required maintenance, we end up here, manually re-enabling - the host. If we're overcommitted then this might fix the problem. 
*) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.Pool.get_ha_overcommitted ~__context ~self:pool - then Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.Pool.ha_schedule_plan_recomputation rpc session_id) + assert_safe_to_reenable ~__context ~self:host; + Xapi_host_helpers.user_requested_host_disable := false; + info "Host.enabled: setting host %s (%s) to enabled because of user request" (Ref.string_of host) (Db.Host.get_hostname ~__context ~self:host); + Db.Host.set_enabled ~__context ~self:host ~value:true; + (* Normally we schedule a plan recomputation when we successfully plug in our storage. In the case + when some of our storage was broken and required maintenance, we end up here, manually re-enabling + the host. If we're overcommitted then this might fix the problem. *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool && Db.Pool.get_ha_overcommitted ~__context ~self:pool + then Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.Pool.ha_schedule_plan_recomputation rpc session_id) end let shutdown_and_reboot_common ~__context ~host label description operation cmd = - if Db.Host.get_enabled ~__context ~self:host - then raise (Api_errors.Server_error (Api_errors.host_not_disabled, [])); + if Db.Host.get_enabled ~__context ~self:host + then raise (Api_errors.Server_error (Api_errors.host_not_disabled, [])); Xapi_ha.before_clean_shutdown_or_reboot ~__context ~host; Remote_requests.stop_request_thread(); - (* Push the Host RRD to the master. Note there are no VMs running here so we don't have to worry about them. *) - if not(Pool_role.is_master ()) - then log_and_ignore_exn ( fun () -> Rrdd.send_host_rrd_to_master ~master_address:(Pool_role.get_master_address () )); - (* Also save the Host RRD to local disk for us to pick up when we return. Note there are no VMs running at this point. 
*) - log_and_ignore_exn Rrdd.backup_rrds; + (* Push the Host RRD to the master. Note there are no VMs running here so we don't have to worry about them. *) + if not(Pool_role.is_master ()) + then log_and_ignore_exn ( fun () -> Rrdd.send_host_rrd_to_master ~master_address:(Pool_role.get_master_address () )); + (* Also save the Host RRD to local disk for us to pick up when we return. Note there are no VMs running at this point. *) + log_and_ignore_exn Rrdd.backup_rrds; (* This prevents anyone actually re-enabling us until after reboot *) Localdb.put Constants.host_disabled_until_reboot "true"; (* This helps us distinguish between an HA fence and a reboot *) Localdb.put Constants.host_restarted_cleanly "true"; (* This tells the master that the shutdown is still ongoing: it can be used to continue - masking other operations even after this call return. + masking other operations even after this call return. - If xapi restarts then this task will be reset by the startup code, which is unfortunate - but the host will stay disabled provided host_disabled_until_reboot is still set... so - safe but ugly. *) + If xapi restarts then this task will be reset by the startup code, which is unfortunate + but the host will stay disabled provided host_disabled_until_reboot is still set... so + safe but ugly. *) Server_helpers.exec_with_new_task ~subtask_of:(Context.get_task_id __context) ~task_description:description ~task_in_database:true label (fun __newcontext -> - Db.Host.add_to_current_operations ~__context ~self:host ~key:(Ref.string_of (Context.get_task_id __newcontext)) ~value:operation; - (* Do the shutdown in a background thread with a delay to give this API call - a reasonable chance of succeeding. 
*) - ignore(Thread.create (fun () -> - Thread.delay 10.; - ignore(Sys.command cmd)) ())) + Db.Host.add_to_current_operations ~__context ~self:host ~key:(Ref.string_of (Context.get_task_id __newcontext)) ~value:operation; + (* Do the shutdown in a background thread with a delay to give this API call + a reasonable chance of succeeding. *) + ignore(Thread.create (fun () -> + Thread.delay 10.; + ignore(Sys.command cmd)) ())) let shutdown ~__context ~host = shutdown_and_reboot_common ~__context ~host "Host is shutting down" "Host is shutting down" - `shutdown "/sbin/shutdown -h now" + `shutdown "/sbin/shutdown -h now" let reboot ~__context ~host = shutdown_and_reboot_common ~__context ~host "Host is rebooting" "Host is rebooting" - `shutdown "/sbin/shutdown -r now" + `shutdown "/sbin/shutdown -r now" let power_on ~__context ~host = let result = Xapi_plugins.call_plugin (Context.get_session_id __context) - Constants.power_on_plugin Constants.power_on_fn - [ "remote_host_uuid", Db.Host.get_uuid ~__context ~self:host ] in + Constants.power_on_plugin Constants.power_on_fn + [ "remote_host_uuid", Db.Host.get_uuid ~__context ~self:host ] in if result <> "True" then failwith (Printf.sprintf "The host failed to power on.") let dmesg ~__context ~host = - let dbg = Context.string_of_task __context in - let open Xapi_xenops_queue in - let module Client = (val make_client (default_xenopsd ()): XENOPS) in - Client.HOST.get_console_data dbg + let dbg = Context.string_of_task __context in + let open Xapi_xenops_queue in + let module Client = (val make_client (default_xenopsd ()): XENOPS) in + Client.HOST.get_console_data dbg let dmesg_clear ~__context ~host = raise (Api_errors.Server_error (Api_errors.not_implemented, [ "dmesg_clear" ])) @@ -537,10 +537,10 @@ let get_log ~__context ~host = raise (Api_errors.Server_error (Api_errors.not_implemented, [ "get_log" ])) let send_debug_keys ~__context ~host ~keys = - let open Xapi_xenops_queue in - let module Client = (val make_client 
(default_xenopsd ()): XENOPS) in - let dbg = Context.string_of_task __context in - Client.HOST.send_debug_keys dbg keys + let open Xapi_xenops_queue in + let module Client = (val make_client (default_xenopsd ()): XENOPS) in + let dbg = Context.string_of_task __context in + Client.HOST.send_debug_keys dbg keys let list_methods ~__context = raise (Api_errors.Server_error (Api_errors.not_implemented, [ "list_method" ])) @@ -550,36 +550,36 @@ let is_slave ~__context ~host = not (Pool_role.is_master ()) let ask_host_if_it_is_a_slave ~__context ~host = let local_fn = is_slave ~host in Message_forwarding.do_op_on_localsession_nolivecheck ~local_fn ~__context - ~host (fun session_id rpc -> Client.Client.Pool.is_slave rpc session_id host) + ~host (fun session_id rpc -> Client.Client.Pool.is_slave rpc session_id host) let is_host_alive ~__context ~host = (* If the host is marked as not-live then assume we don't need to contact it to verify *) let should_contact_host = - try - let hm = Db.Host.get_metrics ~__context ~self:host in - Db.Host_metrics.get_live ~__context ~self:hm - with _ -> - true in + try + let hm = Db.Host.get_metrics ~__context ~self:host in + Db.Host_metrics.get_live ~__context ~self:hm + with _ -> + true in if should_contact_host then begin - debug "is_host_alive host=%s is marked as live in the database; asking host to make sure" (Ref.string_of host); - try - ignore(ask_host_if_it_is_a_slave ~__context ~host); - true - with e -> - warn "is_host_alive host=%s caught %s while querying host liveness: assuming dead" - (Ref.string_of host) (ExnHelper.string_of_exn e); - false + debug "is_host_alive host=%s is marked as live in the database; asking host to make sure" (Ref.string_of host); + try + ignore(ask_host_if_it_is_a_slave ~__context ~host); + true + with e -> + warn "is_host_alive host=%s caught %s while querying host liveness: assuming dead" + (Ref.string_of host) (ExnHelper.string_of_exn e); + false end else begin - debug "is_host_alive host=%s is 
marked as dead in the database; treating this as definitive." (Ref.string_of host); - false + debug "is_host_alive host=%s is marked as dead in the database; treating this as definitive." (Ref.string_of host); + false end let create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~external_auth_type ~external_auth_service_name ~external_auth_configuration ~license_params ~edition ~license_server ~local_cache_sr ~chipset_info ~ssl_legacy = let make_new_metrics_object ref = - Db.Host_metrics.create ~__context ~ref - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~live:false - ~memory_total:0L ~memory_free:0L ~last_updated:Date.never ~other_config:[] in + Db.Host_metrics.create ~__context ~ref + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) ~live:false + ~memory_total:0L ~memory_free:0L ~last_updated:Date.never ~other_config:[] in let name_description = "Default install" and host = Ref.make () in @@ -588,39 +588,39 @@ let create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~ex let host_is_us = (uuid=(Helpers.get_localhost_uuid ())) in Db.Host.create ~__context ~ref:host - ~current_operations:[] ~allowed_operations:[] - ~software_version:(Xapi_globs.software_version ()) - ~enabled:false - ~aPI_version_major:Xapi_globs.api_version_major - ~aPI_version_minor:Xapi_globs.api_version_minor - ~aPI_version_vendor:Xapi_globs.api_version_vendor - ~aPI_version_vendor_implementation:Xapi_globs.api_version_vendor_implementation - ~name_description ~name_label ~uuid ~other_config:[] - ~capabilities:[] - ~cpu_configuration:[] (* !!! FIXME hard coding *) - ~cpu_info:[] - ~chipset_info - ~memory_overhead:0L - ~sched_policy:"credit" (* !!! 
FIXME hard coding *) - ~supported_bootloaders:(List.map fst Xapi_globs.supported_bootloaders) - ~suspend_image_sr:Ref.null ~crash_dump_sr:Ref.null - ~logging:[] ~hostname ~address ~metrics - ~license_params ~boot_free_mem:0L - ~ha_statefiles:[] ~ha_network_peers:[] ~blobs:[] ~tags:[] - ~external_auth_type - ~external_auth_service_name - ~external_auth_configuration - ~edition ~license_server - ~bios_strings:[] - ~power_on_mode:"" - ~power_on_config:[] - ~local_cache_sr - ~ssl_legacy - ~guest_VCPUs_params:[] - ~display:`enabled - ~virtual_hardware_platform_versions:(if host_is_us then Xapi_globs.host_virtual_hardware_platform_versions else [0L]) - ~control_domain:Ref.null - ~patches_requiring_reboot:[] + ~current_operations:[] ~allowed_operations:[] + ~software_version:(Xapi_globs.software_version ()) + ~enabled:false + ~aPI_version_major:Xapi_globs.api_version_major + ~aPI_version_minor:Xapi_globs.api_version_minor + ~aPI_version_vendor:Xapi_globs.api_version_vendor + ~aPI_version_vendor_implementation:Xapi_globs.api_version_vendor_implementation + ~name_description ~name_label ~uuid ~other_config:[] + ~capabilities:[] + ~cpu_configuration:[] (* !!! FIXME hard coding *) + ~cpu_info:[] + ~chipset_info + ~memory_overhead:0L + ~sched_policy:"credit" (* !!! 
FIXME hard coding *) + ~supported_bootloaders:(List.map fst Xapi_globs.supported_bootloaders) + ~suspend_image_sr:Ref.null ~crash_dump_sr:Ref.null + ~logging:[] ~hostname ~address ~metrics + ~license_params ~boot_free_mem:0L + ~ha_statefiles:[] ~ha_network_peers:[] ~blobs:[] ~tags:[] + ~external_auth_type + ~external_auth_service_name + ~external_auth_configuration + ~edition ~license_server + ~bios_strings:[] + ~power_on_mode:"" + ~power_on_config:[] + ~local_cache_sr + ~ssl_legacy + ~guest_VCPUs_params:[] + ~display:`enabled + ~virtual_hardware_platform_versions:(if host_is_us then Xapi_globs.host_virtual_hardware_platform_versions else [0L]) + ~control_domain:Ref.null + ~patches_requiring_reboot:[] ; (* If the host we're creating is us, make sure its set to live *) Db.Host_metrics.set_last_updated ~__context ~self:metrics ~value:(Date.of_float (Unix.gettimeofday ())); @@ -629,11 +629,11 @@ let create ~__context ~uuid ~name_label ~name_description ~hostname ~address ~ex let precheck_destroy_declare_dead ~__context ~self call = (* Fail if the host is still online: the user should either isolate the machine from the network - or use Pool.eject. *) + or use Pool.eject. 
*) let hostname = Db.Host.get_hostname ~__context ~self in if is_host_alive ~__context ~host:self then begin - error "Host.%s successfully contacted host %s; host is not offline; refusing to %s" call hostname call; - raise (Api_errors.Server_error(Api_errors.host_is_live, [ Ref.string_of self ])) + error "Host.%s successfully contacted host %s; host is not offline; refusing to %s" call hostname call; + raise (Api_errors.Server_error(Api_errors.host_is_live, [ Ref.string_of self ])) end; (* This check is probably redundant since the Pool master should always be 'alive': *) @@ -670,16 +670,16 @@ let destroy ~__context ~self = Pool_features.update_pool_features ~__context let declare_dead ~__context ~host = - precheck_destroy_declare_dead ~__context ~self:host "declare_dead"; - - let my_control_domains, my_regular_vms = get_resident_vms ~__context ~self:host in + precheck_destroy_declare_dead ~__context ~self:host "declare_dead"; - Helpers.call_api_functions ~__context (fun rpc session_id -> - List.iter (fun vm -> Client.Client.VM.power_state_reset rpc session_id vm) my_regular_vms); + let my_control_domains, my_regular_vms = get_resident_vms ~__context ~self:host in - Db.Host.set_enabled ~__context ~self:host ~value:false; + Helpers.call_api_functions ~__context (fun rpc session_id -> + List.iter (fun vm -> Client.Client.VM.power_state_reset rpc session_id vm) my_regular_vms); - Xapi_hooks.host_post_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__user + Db.Host.set_enabled ~__context ~self:host ~value:false; + + Xapi_hooks.host_post_declare_dead ~__context ~host ~reason:Xapi_hooks.reason__user let ha_disable_failover_decisions ~__context ~host = Xapi_ha.ha_disable_failover_decisions __context host let ha_disarm_fencing ~__context ~host = Xapi_ha.ha_disarm_fencing __context host @@ -688,22 +688,22 @@ let ha_release_resources ~__context ~host = Xapi_ha.ha_release_resources __conte let ha_wait_for_shutdown_via_statefile ~__context ~host = 
Xapi_ha.ha_wait_for_shutdown_via_statefile __context host let ha_xapi_healthcheck ~__context = (* Consider checking the status of various internal tasks / tickling locks but for now assume - that, since we got here unharmed, all is well.*) + that, since we got here unharmed, all is well.*) not(Xapi_fist.fail_healthcheck ()) let preconfigure_ha ~__context ~host ~statefiles ~metadata_vdi ~generation = - Xapi_ha.preconfigure_host __context host statefiles metadata_vdi generation + Xapi_ha.preconfigure_host __context host statefiles metadata_vdi generation let ha_join_liveset ~__context ~host = try - Xapi_ha.join_liveset __context host + Xapi_ha.join_liveset __context host with | Xha_scripts.Xha_error Xha_errno.Mtc_exit_bootjoin_timeout -> - error "HA enable failed with BOOTJOIN_TIMEOUT"; - raise (Api_errors.Server_error(Api_errors.ha_failed_to_form_liveset, [])) + error "HA enable failed with BOOTJOIN_TIMEOUT"; + raise (Api_errors.Server_error(Api_errors.ha_failed_to_form_liveset, [])) | Xha_scripts.Xha_error Xha_errno.Mtc_exit_can_not_access_statefile -> - error "HA enable failed with CAN_NOT_ACCESS_STATEFILE"; - raise (Api_errors.Server_error(Api_errors.ha_host_cannot_access_statefile, [])) + error "HA enable failed with CAN_NOT_ACCESS_STATEFILE"; + raise (Api_errors.Server_error(Api_errors.ha_host_cannot_access_statefile, [])) let propose_new_master ~__context ~address ~manual = Xapi_ha.propose_new_master __context address manual let commit_new_master ~__context ~address = Xapi_ha.commit_new_master __context address @@ -723,8 +723,8 @@ let request_backup ~__context ~host ~generation ~force = then failwith "Forwarded to the wrong host"; if Pool_role.is_master () then begin debug "Requesting database backup on master: Using direct sync"; - let connections = Db_conn_store.read_db_connections () in - Db_cache_impl.sync connections (Db_ref.get_database (Db_backend.make ())) + let connections = Db_conn_store.read_db_connections () in + Db_cache_impl.sync connections 
(Db_ref.get_database (Db_backend.make ())) end else begin let master_address = Helpers.get_main_ip_address () in Pool_db_backup.fetch_database_backup ~master_address:master_address ~pool_secret:!Xapi_globs.pool_secret @@ -740,81 +740,81 @@ let request_config_file_sync ~__context ~host ~hash = (* Host parameter will just be me, as message forwarding layer ensures this call has been forwarded correctly *) let syslog_reconfigure ~__context ~host = - let localhost = Helpers.get_localhost ~__context in - let logging = Db.Host.get_logging ~__context ~self:localhost in - - let destination = try List.assoc "syslog_destination" logging with _ -> "" in - let flag = match destination with - | "" -> - "--noremote" - | _ -> - "--remote="^destination - in - - let args = [| !Xapi_globs.xe_syslog_reconfigure; flag |] in - ignore (Unixext.spawnvp args.(0) args) + let localhost = Helpers.get_localhost ~__context in + let logging = Db.Host.get_logging ~__context ~self:localhost in + + let destination = try List.assoc "syslog_destination" logging with _ -> "" in + let flag = match destination with + | "" -> + "--noremote" + | _ -> + "--remote="^destination + in + + let args = [| !Xapi_globs.xe_syslog_reconfigure; flag |] in + ignore (Unixext.spawnvp args.(0) args) let get_management_interface ~__context ~host = - let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "management", Literal "true") - )) in - match pifs with - | [] -> - raise Not_found - | pif :: _ -> - pif + let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "management", Literal "true") + )) in + match pifs with + | [] -> + raise Not_found + | pif :: _ -> + pif let change_management_interface ~__context interface primary_address_type = - debug "Changing management interface"; - Xapi_mgmt_iface.change interface primary_address_type; - Xapi_mgmt_iface.run ~__context ~mgmt_enabled:true; - 
(* once the inventory file has been rewritten to specify new interface, sync up db with - state of world.. *) - Xapi_mgmt_iface.on_dom0_networking_change ~__context + debug "Changing management interface"; + Xapi_mgmt_iface.change interface primary_address_type; + Xapi_mgmt_iface.run ~__context ~mgmt_enabled:true; + (* once the inventory file has been rewritten to specify new interface, sync up db with + state of world.. *) + Xapi_mgmt_iface.on_dom0_networking_change ~__context let local_management_reconfigure ~__context ~interface = (* Only let this one through if we are in emergency mode, otherwise use - Host.management_reconfigure *) + Host.management_reconfigure *) if not !Xapi_globs.slave_emergency_mode then raise (Api_errors.Server_error (Api_errors.pool_not_in_emergency_mode, [])); change_management_interface ~__context interface (Record_util.primary_address_type_of_string (Xapi_inventory.lookup Xapi_inventory._management_address_type ~default:"ipv4")) let management_reconfigure ~__context ~pif = - (* Disallow if HA is enabled *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then - raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); - - (* Plugging a bond slave is not allowed *) - if Db.PIF.get_bond_slave_of ~__context ~self:pif <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of pif])); - - let net = Db.PIF.get_network ~__context ~self:pif in - let bridge = Db.Network.get_bridge ~__context ~self:net in - let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self:pif in - if Db.PIF.get_managed ~__context ~self:pif = true then begin - if primary_address_type = `IPv4 && Db.PIF.get_ip_configuration_mode ~__context ~self:pif = `None then - raise (Api_errors.Server_error(Api_errors.pif_has_no_network_configuration, [])) - else if primary_address_type = `IPv6 && Db.PIF.get_ipv6_configuration_mode ~__context ~self:pif = `None then - raise 
(Api_errors.Server_error(Api_errors.pif_has_no_v6_network_configuration, [])) - else try - let mgmt_pif = get_management_interface ~__context ~host:(Helpers.get_localhost ~__context) in - let mgmt_address_type = Db.PIF.get_primary_address_type ~__context ~self:mgmt_pif in - if primary_address_type <> mgmt_address_type then - raise (Api_errors.Server_error(Api_errors.pif_incompatible_primary_address_type, [])); - with _ -> - () (* no current management interface *) - end; - - if Db.PIF.get_management ~__context ~self:pif then - debug "PIF %s is already marked as a management PIF; taking no action" (Ref.string_of pif) - else begin - Xapi_network.attach_internal ~management_interface:true ~__context ~self:net (); - change_management_interface ~__context bridge primary_address_type; - Xapi_pif.update_management_flags ~__context ~host:(Helpers.get_localhost ~__context) - end + (* Disallow if HA is enabled *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then + raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + + (* Plugging a bond slave is not allowed *) + if Db.PIF.get_bond_slave_of ~__context ~self:pif <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of pif])); + + let net = Db.PIF.get_network ~__context ~self:pif in + let bridge = Db.Network.get_bridge ~__context ~self:net in + let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self:pif in + if Db.PIF.get_managed ~__context ~self:pif = true then begin + if primary_address_type = `IPv4 && Db.PIF.get_ip_configuration_mode ~__context ~self:pif = `None then + raise (Api_errors.Server_error(Api_errors.pif_has_no_network_configuration, [])) + else if primary_address_type = `IPv6 && Db.PIF.get_ipv6_configuration_mode ~__context ~self:pif = `None then + raise (Api_errors.Server_error(Api_errors.pif_has_no_v6_network_configuration, [])) + else try + let mgmt_pif = get_management_interface 
~__context ~host:(Helpers.get_localhost ~__context) in + let mgmt_address_type = Db.PIF.get_primary_address_type ~__context ~self:mgmt_pif in + if primary_address_type <> mgmt_address_type then + raise (Api_errors.Server_error(Api_errors.pif_incompatible_primary_address_type, [])); + with _ -> + () (* no current management interface *) + end; + + if Db.PIF.get_management ~__context ~self:pif then + debug "PIF %s is already marked as a management PIF; taking no action" (Ref.string_of pif) + else begin + Xapi_network.attach_internal ~management_interface:true ~__context ~self:net (); + change_management_interface ~__context bridge primary_address_type; + Xapi_pif.update_management_flags ~__context ~host:(Helpers.get_localhost ~__context) + end let management_disable ~__context = (* Disallow if HA is enabled *) @@ -855,78 +855,78 @@ let serialize_host_enable_disable_extauth = Mutex.create() let set_hostname_live ~__context ~host ~hostname = Mutex.execute serialize_host_enable_disable_extauth (fun () -> - let current_auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - (* the AD/Likewise extauth plugin is incompatible with a hostname change *) - (if current_auth_type = Extauth.auth_type_AD_Likewise then - let current_service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in - raise (Api_errors.Server_error(Api_errors.auth_already_enabled, [current_auth_type;current_service_name])) - ); - (* hostname is valid if contains only alpha, decimals, and hyphen - (for hyphens, only in middle position) *) - let is_invalid_hostname hostname = - let len = String.length hostname in - let i = ref 0 in - let valid = ref true in - let range = [ 'a', 'z'; 'A', 'Z'; '0', '9'; '-', '-'; '.', '.' 
] in - while !valid && (!i < len) - do - begin try - ignore (List.find (fun (r1, r2) -> r1 <= hostname.[!i] && hostname.[!i] <= r2) range) - with Not_found -> - valid := false - end; - incr i; - done; - if hostname.[0] = '-' || hostname.[len - 1] = '-' then - true - else - (not !valid) - in - if String.length hostname = 0 then - raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname empty" ])); - if String.length hostname > 255 then - raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname is too long" ])); - if is_invalid_hostname hostname then - raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname contains invalid characters" ])); - ignore(Forkhelpers.execute_command_get_output !Xapi_globs.set_hostname [hostname]); - Debug.invalidate_hostname_cache (); - Db.Host.set_hostname ~__context ~self:host ~value:hostname; - Helpers.update_domain_zero_name ~__context host hostname - ) + let current_auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + (* the AD/Likewise extauth plugin is incompatible with a hostname change *) + (if current_auth_type = Extauth.auth_type_AD_Likewise then + let current_service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in + raise (Api_errors.Server_error(Api_errors.auth_already_enabled, [current_auth_type;current_service_name])) + ); + (* hostname is valid if contains only alpha, decimals, and hyphen + (for hyphens, only in middle position) *) + let is_invalid_hostname hostname = + let len = String.length hostname in + let i = ref 0 in + let valid = ref true in + let range = [ 'a', 'z'; 'A', 'Z'; '0', '9'; '-', '-'; '.', '.' 
] in + while !valid && (!i < len) + do + begin try + ignore (List.find (fun (r1, r2) -> r1 <= hostname.[!i] && hostname.[!i] <= r2) range) + with Not_found -> + valid := false + end; + incr i; + done; + if hostname.[0] = '-' || hostname.[len - 1] = '-' then + true + else + (not !valid) + in + if String.length hostname = 0 then + raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname empty" ])); + if String.length hostname > 255 then + raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname is too long" ])); + if is_invalid_hostname hostname then + raise (Api_errors.Server_error (Api_errors.host_name_invalid, [ "hostname contains invalid characters" ])); + ignore(Forkhelpers.execute_command_get_output !Xapi_globs.set_hostname [hostname]); + Debug.invalidate_hostname_cache (); + Db.Host.set_hostname ~__context ~self:host ~value:hostname; + Helpers.update_domain_zero_name ~__context host hostname + ) let m_ssl_legacy = Mutex.create () let set_stunnel_legacy ~__context legacy = - debug "Setting stunnel legacy runtime config to %b" legacy; - Stunnel.set_legacy_protocol_and_ciphersuites_allowed legacy; - debug "Resetting long running stunnel clients e.g. master connection"; - Master_connection.force_connection_reset (); - debug "Resetting long running stunnel server proxy"; - Xapi_mgmt_iface.reconfigure_stunnel ~__context; - info "Updating stunnel legacy inventory to %b." legacy; - Xapi_inventory.update Xapi_inventory._stunnel_legacy (string_of_bool legacy) + debug "Setting stunnel legacy runtime config to %b" legacy; + Stunnel.set_legacy_protocol_and_ciphersuites_allowed legacy; + debug "Resetting long running stunnel clients e.g. master connection"; + Master_connection.force_connection_reset (); + debug "Resetting long running stunnel server proxy"; + Xapi_mgmt_iface.reconfigure_stunnel ~__context; + info "Updating stunnel legacy inventory to %b." 
legacy; + Xapi_inventory.update Xapi_inventory._stunnel_legacy (string_of_bool legacy) let set_ssl_legacy ~__context ~self ~value = - (* Use the mutex to ensure inventory and DB are consistent. *) - Mutex.execute m_ssl_legacy (fun () -> - let old = Db.Host.get_ssl_legacy ~__context ~self in - if old <> value then ( - info "set_ssl_legacy %B where old=%B" value old; - Db.Host.set_ssl_legacy ~__context ~self ~value; - set_stunnel_legacy ~__context value - ) - ) + (* Use the mutex to ensure inventory and DB are consistent. *) + Mutex.execute m_ssl_legacy (fun () -> + let old = Db.Host.get_ssl_legacy ~__context ~self in + if old <> value then ( + info "set_ssl_legacy %B where old=%B" value old; + Db.Host.set_ssl_legacy ~__context ~self ~value; + set_stunnel_legacy ~__context value + ) + ) let is_in_emergency_mode ~__context = !Xapi_globs.slave_emergency_mode let compute_free_memory ~__context ~host = - (*** XXX: Use a more appropriate free memory calculation here. *) - Memory_check.host_compute_free_memory_with_maximum_compression - ~dump_stats:false ~__context ~host None + (*** XXX: Use a more appropriate free memory calculation here. 
*) + Memory_check.host_compute_free_memory_with_maximum_compression + ~dump_stats:false ~__context ~host None let compute_memory_overhead ~__context ~host = - Memory_check.host_compute_memory_overhead ~__context ~host + Memory_check.host_compute_memory_overhead ~__context ~host let get_data_sources ~__context ~host = List.map Rrdd_helper.to_API_data_source (Rrdd.query_possible_host_dss ()) @@ -946,47 +946,47 @@ let create_new_blob ~__context ~host ~name ~mime_type ~public = let extauth_hook_script_name = Extauth.extauth_hook_script_name (* this special extauth plugin call is only used inside host.enable/disable extauth to avoid deadlock with the mutex *) let call_extauth_plugin_nomutex ~__context ~host ~fn ~args = - let plugin = extauth_hook_script_name in - debug "Calling extauth plugin %s in host %s with event %s and params %s" plugin (Db.Host.get_name_label ~__context ~self:host) fn (List.fold_left (fun a (b,y)->a^"("^b^"="^y^"),") "" args); - Xapi_plugins.call_plugin (Context.get_session_id __context) plugin fn args + let plugin = extauth_hook_script_name in + debug "Calling extauth plugin %s in host %s with event %s and params %s" plugin (Db.Host.get_name_label ~__context ~self:host) fn (List.fold_left (fun a (b,y)->a^"("^b^"="^y^"),") "" args); + Xapi_plugins.call_plugin (Context.get_session_id __context) plugin fn args (* this is the generic extauth plugin call available to xapi users that avoids concurrency problems *) let call_extauth_plugin ~__context ~host ~fn ~args = - Mutex.execute serialize_host_enable_disable_extauth (fun () -> - call_extauth_plugin_nomutex ~__context ~host ~fn ~args - ) + Mutex.execute serialize_host_enable_disable_extauth (fun () -> + call_extauth_plugin_nomutex ~__context ~host ~fn ~args + ) (* this is the generic plugin call available to xapi users *) let call_plugin ~__context ~host ~plugin ~fn ~args = - if plugin = extauth_hook_script_name - then call_extauth_plugin ~__context ~host ~fn ~args - else Xapi_plugins.call_plugin 
(Context.get_session_id __context) plugin fn args + if plugin = extauth_hook_script_name + then call_extauth_plugin ~__context ~host ~fn ~args + else Xapi_plugins.call_plugin (Context.get_session_id __context) plugin fn args (* this is the generic extension call available to xapi users *) let call_extension ~__context ~host ~call = - let rpc = Jsonrpc.call_of_string call in - let response = Xapi_extensions.call_extension rpc in - Jsonrpc.string_of_response response + let rpc = Jsonrpc.call_of_string call in + let response = Xapi_extensions.call_extension rpc in + Jsonrpc.string_of_response response let has_extension ~__context ~host ~name = - try - let (_: string) = Xapi_extensions.find_extension name in - true - with _ -> - false + try + let (_: string) = Xapi_extensions.find_extension name in + true + with _ -> + false let sync_data ~__context ~host = Xapi_sync.sync_host __context host (* Nb, no attempt to wrap exceptions yet *) let backup_rrds ~__context ~host ~delay = - Xapi_periodic_scheduler.add_to_queue "RRD backup" Xapi_periodic_scheduler.OneShot - delay (fun _ -> - log_and_ignore_exn (Rrdd.backup_rrds ~remote_address:(try Some (Pool_role.get_master_address ()) with _ -> None)); - log_and_ignore_exn (fun () -> - List.iter (fun sr -> - Xapi_sr.maybe_copy_sr_rrds ~__context ~sr - ) (Helpers.get_all_plugged_srs ~__context) - ) - ) + Xapi_periodic_scheduler.add_to_queue "RRD backup" Xapi_periodic_scheduler.OneShot + delay (fun _ -> + log_and_ignore_exn (Rrdd.backup_rrds ~remote_address:(try Some (Pool_role.get_master_address ()) with _ -> None)); + log_and_ignore_exn (fun () -> + List.iter (fun sr -> + Xapi_sr.maybe_copy_sr_rrds ~__context ~sr + ) (Helpers.get_all_plugged_srs ~__context) + ) + ) let get_servertime ~__context ~host = Date.of_float (Unix.gettimeofday ()) @@ -995,15 +995,15 @@ let get_server_localtime ~__context ~host = let gmt_time= Unix.gettimeofday () in let local_time = Unix.localtime gmt_time in Date.of_string - ( - Printf.sprintf 
"%04d%02d%02dT%02d:%02d:%02d" - (local_time.Unix.tm_year+1900) - (local_time.Unix.tm_mon+1) - local_time.Unix.tm_mday - local_time.Unix.tm_hour - local_time.Unix.tm_min - local_time.Unix.tm_sec - ) + ( + Printf.sprintf "%04d%02d%02dT%02d:%02d:%02d" + (local_time.Unix.tm_year+1900) + (local_time.Unix.tm_mon+1) + local_time.Unix.tm_mday + local_time.Unix.tm_hour + local_time.Unix.tm_min + local_time.Unix.tm_sec + ) let enable_binary_storage ~__context ~host = Unixext.mkdir_safe Xapi_globs.xapi_blob_location 0o700; @@ -1047,45 +1047,45 @@ let get_server_certificate ~__context ~host = (* CA-24856: detect non-homogeneous external-authentication config in pool *) let detect_nonhomogeneous_external_auth_in_host ~__context ~host = - Helpers.call_api_functions ~__context (fun rpc session_id -> - let pool = List.hd (Client.Client.Pool.get_all rpc session_id) in - let master = Client.Client.Pool.get_master rpc session_id pool in - let master_rec = Client.Client.Host.get_record rpc session_id master in - let host_rec = Client.Client.Host.get_record rpc session_id host in - - (* if this host being verified is the master, then we need to verify homogeneity for all slaves in the pool *) - if (host_rec.API.host_uuid = master_rec.API.host_uuid) - then - Client.Client.Pool.detect_nonhomogeneous_external_auth rpc session_id pool - - else (* this host is a slave, let's check if it is homogeneous to the master *) - - let master_external_auth_type = master_rec.API.host_external_auth_type in - let master_external_auth_service_name = master_rec.API.host_external_auth_service_name in - let host_external_auth_type = host_rec.API.host_external_auth_type in - let host_external_auth_service_name = host_rec.API.host_external_auth_service_name in - if (host_external_auth_type <> master_external_auth_type - || - host_external_auth_service_name <> master_external_auth_service_name - ) then begin - (* ... 
this slave has non-homogeneous external-authentication data *) - debug "Detected non-homogeneous external authentication in host %s: host_auth_type=%s, host_service_name=%s, master_auth_type=%s, master_service_name=%s" - (Ref.string_of host) host_external_auth_type host_external_auth_service_name - master_external_auth_type master_external_auth_service_name; - (* raise alert about this non-homogeneous slave in the pool *) - let host_uuid = host_rec.API.host_uuid in - let (name, priority) = Api_messages.auth_external_pool_non_homogeneous in - ignore( - Client.Client.Message.create ~rpc ~session_id ~name ~priority - ~cls:`Host ~obj_uuid:host_uuid ~body:( - "host_external_auth_type="^host_external_auth_type^ - ", host_external_auth_service_name="^host_external_auth_service_name^ - ", master_external_auth_type="^master_external_auth_type^ - ", master_external_auth_service_name="^master_external_auth_service_name - ) - ) - end - ) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let pool = List.hd (Client.Client.Pool.get_all rpc session_id) in + let master = Client.Client.Pool.get_master rpc session_id pool in + let master_rec = Client.Client.Host.get_record rpc session_id master in + let host_rec = Client.Client.Host.get_record rpc session_id host in + + (* if this host being verified is the master, then we need to verify homogeneity for all slaves in the pool *) + if (host_rec.API.host_uuid = master_rec.API.host_uuid) + then + Client.Client.Pool.detect_nonhomogeneous_external_auth rpc session_id pool + + else (* this host is a slave, let's check if it is homogeneous to the master *) + + let master_external_auth_type = master_rec.API.host_external_auth_type in + let master_external_auth_service_name = master_rec.API.host_external_auth_service_name in + let host_external_auth_type = host_rec.API.host_external_auth_type in + let host_external_auth_service_name = host_rec.API.host_external_auth_service_name in + if (host_external_auth_type <> 
master_external_auth_type + || + host_external_auth_service_name <> master_external_auth_service_name + ) then begin + (* ... this slave has non-homogeneous external-authentication data *) + debug "Detected non-homogeneous external authentication in host %s: host_auth_type=%s, host_service_name=%s, master_auth_type=%s, master_service_name=%s" + (Ref.string_of host) host_external_auth_type host_external_auth_service_name + master_external_auth_type master_external_auth_service_name; + (* raise alert about this non-homogeneous slave in the pool *) + let host_uuid = host_rec.API.host_uuid in + let (name, priority) = Api_messages.auth_external_pool_non_homogeneous in + ignore( + Client.Client.Message.create ~rpc ~session_id ~name ~priority + ~cls:`Host ~obj_uuid:host_uuid ~body:( + "host_external_auth_type="^host_external_auth_type^ + ", host_external_auth_service_name="^host_external_auth_service_name^ + ", master_external_auth_type="^master_external_auth_type^ + ", master_external_auth_service_name="^master_external_auth_service_name + ) + ) + end + ) (* CP-717: Enables external auth/directory service on a single host within the pool with specified config, *) (* type and service_name. 
Fails if an auth/directory service is already enabled for this host (must disable first).*) @@ -1101,323 +1101,323 @@ open Extauth let enable_external_auth ~__context ~host ~config ~service_name ~auth_type = - (* CP-825: Serialize execution of host-enable-extauth and host-disable-extauth *) - (* we need to protect against concurrent access to the host.external_auth_type variable *) - Mutex.execute serialize_host_enable_disable_extauth (fun () -> - - let host_name_label = Db.Host.get_name_label ~__context ~self:host in - let current_auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - let current_service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in - debug "current external_auth_type is %s" current_auth_type; - if (current_auth_type <> "") then - begin (* if auth_type is already defined, then we cannot set up a new one *) - let msg = (Printf.sprintf "external authentication %s service %s is already enabled" current_auth_type current_service_name) in - debug "Failed to enable external authentication type %s for service name %s in host %s: %s" auth_type service_name host_name_label msg; - raise (Api_errors.Server_error(Api_errors.auth_already_enabled, [current_auth_type;current_service_name])) - end - else if auth_type = "" then - begin (* we must error out here, because we never enable an _empty_ external auth_type *) - let msg = "" in - debug "Failed while enabling unknown external authentication type %s for service name %s in host %s" msg service_name host_name_label; - raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) - end - else - begin (* if no auth_type is currently defined (it is an empty string), then we can set up a new one *) - - (* we try to use the configuration to set up the new external authentication service *) - try - (* we persist as much set up configuration now as we can *) - Db.Host.set_external_auth_service_name ~__context ~self:host ~value:service_name; - (* the ext_auth.on_enable 
dispatcher called below will store the configuration params, and also *) - (* filter out any one-time credentials such as the administrator password, so we *) - (* should not call here 'host.set_external_auth_configuration ~config' *) - - (* use the special 'named dispatcher' function to call an extauth plugin function even though we have *) - (* not yet set up the external_auth_type value that will enable generic access to the extauth plugin. *) - (Ext_auth.nd(auth_type)).on_enable config; - (* from this point on, we have successfully enabled the external authentication services. *) - - (* Up to this point, we cannot call external auth functions via extauth's generic dispatcher d(). *) - Db.Host.set_external_auth_type ~__context ~self:host ~value:auth_type; - (* From this point on, anyone can call external auth functions via extauth.ml's generic dispatcher d(), which depends on the value of external_auth_type. *) - (* This enables all functions to the external authentication and directory service that xapi makes available to the user, *) - (* such as external login, subject id/info queries, group membership etc *) - - (* CP-709: call extauth hook-script after extauth.enable *) - (* we must not fork, intead block until the script has returned *) - (* so that at most one enable-external-auth event script is running at any one time in the same host *) - (* we use its local variation without mutex, otherwise we will deadlock *) - let call_plugin_fn () = call_extauth_plugin_nomutex ~__context ~host - ~fn:Extauth.event_name_after_extauth_enable - ~args:(Extauth.get_event_params ~__context host) - in ignore(Extauth.call_extauth_hook_script_in_host_wrapper ~__context host Extauth.event_name_after_extauth_enable ~call_plugin_fn); - - debug "external authentication service type %s for service name %s enabled successfully in host %s" auth_type service_name host_name_label; - Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true; - - (* CA-24856: detect 
non-homogeneous external-authentication config in this host *) - detect_nonhomogeneous_external_auth_in_host ~__context ~host; - - with - | Extauth.Unknown_extauth_type msg -> (* unknown plugin *) - begin - (* we rollback to the original xapi configuration *) - Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; - Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; - debug "Failed while enabling unknown external authentication type %s for service name %s in host %s" msg service_name host_name_label; - raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) - end - | Auth_signature.Auth_service_error (errtag,msg) -> (* plugin returned some error *) - (* we rollback to the original xapi configuration *) - Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; - Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; - debug "Failed while enabling external authentication type %s for service name %s in host %s" msg service_name host_name_label; - raise (Api_errors.Server_error(Api_errors.auth_enable_failed^(Auth_signature.suffix_of_tag errtag), [msg])) - | e -> (* unknown failure, just-enabled plugin might be in an inconsistent state *) - begin - (* we rollback to the original xapi configuration *) - Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; - Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; - debug "Failed while enabling external authentication type %s for service name %s in host %s" auth_type service_name host_name_label; - raise (Api_errors.Server_error(Api_errors.auth_enable_failed, [ExnHelper.string_of_exn e])) - end - end - ) + (* CP-825: Serialize execution of host-enable-extauth and host-disable-extauth *) + (* we need to protect against concurrent access to the host.external_auth_type variable *) + Mutex.execute serialize_host_enable_disable_extauth 
(fun () -> + + let host_name_label = Db.Host.get_name_label ~__context ~self:host in + let current_auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + let current_service_name = Db.Host.get_external_auth_service_name ~__context ~self:host in + debug "current external_auth_type is %s" current_auth_type; + if (current_auth_type <> "") then + begin (* if auth_type is already defined, then we cannot set up a new one *) + let msg = (Printf.sprintf "external authentication %s service %s is already enabled" current_auth_type current_service_name) in + debug "Failed to enable external authentication type %s for service name %s in host %s: %s" auth_type service_name host_name_label msg; + raise (Api_errors.Server_error(Api_errors.auth_already_enabled, [current_auth_type;current_service_name])) + end + else if auth_type = "" then + begin (* we must error out here, because we never enable an _empty_ external auth_type *) + let msg = "" in + debug "Failed while enabling unknown external authentication type %s for service name %s in host %s" msg service_name host_name_label; + raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) + end + else + begin (* if no auth_type is currently defined (it is an empty string), then we can set up a new one *) + + (* we try to use the configuration to set up the new external authentication service *) + try + (* we persist as much set up configuration now as we can *) + Db.Host.set_external_auth_service_name ~__context ~self:host ~value:service_name; + (* the ext_auth.on_enable dispatcher called below will store the configuration params, and also *) + (* filter out any one-time credentials such as the administrator password, so we *) + (* should not call here 'host.set_external_auth_configuration ~config' *) + + (* use the special 'named dispatcher' function to call an extauth plugin function even though we have *) + (* not yet set up the external_auth_type value that will enable generic access to the extauth 
plugin. *) + (Ext_auth.nd(auth_type)).on_enable config; + (* from this point on, we have successfully enabled the external authentication services. *) + + (* Up to this point, we cannot call external auth functions via extauth's generic dispatcher d(). *) + Db.Host.set_external_auth_type ~__context ~self:host ~value:auth_type; + (* From this point on, anyone can call external auth functions via extauth.ml's generic dispatcher d(), which depends on the value of external_auth_type. *) + (* This enables all functions to the external authentication and directory service that xapi makes available to the user, *) + (* such as external login, subject id/info queries, group membership etc *) + + (* CP-709: call extauth hook-script after extauth.enable *) + (* we must not fork, instead block until the script has returned *) + (* so that at most one enable-external-auth event script is running at any one time in the same host *) + (* we use its local variation without mutex, otherwise we will deadlock *) + let call_plugin_fn () = call_extauth_plugin_nomutex ~__context ~host + ~fn:Extauth.event_name_after_extauth_enable + ~args:(Extauth.get_event_params ~__context host) + in ignore(Extauth.call_extauth_hook_script_in_host_wrapper ~__context host Extauth.event_name_after_extauth_enable ~call_plugin_fn); + + debug "external authentication service type %s for service name %s enabled successfully in host %s" auth_type service_name host_name_label; + Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true; + + (* CA-24856: detect non-homogeneous external-authentication config in this host *) + detect_nonhomogeneous_external_auth_in_host ~__context ~host; + + with + | Extauth.Unknown_extauth_type msg -> (* unknown plugin *) + begin + (* we rollback to the original xapi configuration *) + Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; + Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; + debug "Failed 
while enabling unknown external authentication type %s for service name %s in host %s" msg service_name host_name_label; + raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg])) + end + | Auth_signature.Auth_service_error (errtag,msg) -> (* plugin returned some error *) + (* we rollback to the original xapi configuration *) + Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; + Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; + debug "Failed while enabling external authentication type %s for service name %s in host %s" msg service_name host_name_label; + raise (Api_errors.Server_error(Api_errors.auth_enable_failed^(Auth_signature.suffix_of_tag errtag), [msg])) + | e -> (* unknown failure, just-enabled plugin might be in an inconsistent state *) + begin + (* we rollback to the original xapi configuration *) + Db.Host.set_external_auth_type ~__context ~self:host ~value:current_auth_type; + Db.Host.set_external_auth_service_name ~__context ~self:host ~value:current_service_name; + debug "Failed while enabling external authentication type %s for service name %s in host %s" auth_type service_name host_name_label; + raise (Api_errors.Server_error(Api_errors.auth_enable_failed, [ExnHelper.string_of_exn e])) + end + end + ) (* CP-718: Disables external auth/directory service for host *) let disable_external_auth_common ?during_pool_eject:(during_pool_eject=false) ~__context ~host ~config = - (* CP-825: Serialize execution of host-enable-extauth and host-disable-extauth *) - (* we need to protect against concurrent access to the host.external_auth_type variable *) - Mutex.execute serialize_host_enable_disable_extauth (fun () -> - - let host_name_label = Db.Host.get_name_label ~__context ~self:host in - let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - if (auth_type = "") then - begin (* nothing to do, external authentication is already disabled *) - let msg = "external 
authentication service is already disabled" in - debug "Failed to disable external authentication in host %s: %s" host_name_label msg; - (* we do not raise an exception here. for our purposes, there's nothing wrong*) - (* disabling an already disabled authentication plugin *) - end - else (* this is the case when auth_type <> "" *) - begin - (* CP-709: call extauth hook-script before extauth.disable *) - (* we must not fork, instead block until the script has returned, so that the script is able *) - (* to obtain auth_type and other information from the metadata and there is at most one *) - (* disable-external-auth event script running at any one time in the same host *) - (* we use its local variation without mutex, otherwise we will deadlock *) - let call_plugin_fn () = call_extauth_plugin_nomutex ~__context ~host - ~fn:Extauth.event_name_before_extauth_disable - ~args:(Extauth.get_event_params ~__context host) - in ignore(Extauth.call_extauth_hook_script_in_host_wrapper ~__context host Extauth.event_name_before_extauth_disable ~call_plugin_fn); - - (* 1. first, we try to call the external auth plugin to disable the external authentication service *) - let plugin_disable_failure = - (try - (Ext_auth.d()).on_disable config; - None (* OK, on_disable succeeded *) - with - | Auth_signature.Auth_service_error (errtag,msg) -> - begin - debug "Failed while calling on_disable event of external authentication plugin in host %s: %s" host_name_label msg; - Some (Api_errors.Server_error(Api_errors.auth_disable_failed^(Auth_signature.suffix_of_tag errtag), [msg])) - end - | e -> (*absorb any exception*) - begin - debug "Failed while calling on_disable event of external authentication plugin in host %s: %s" host_name_label (ExnHelper.string_of_exn e); - Some (Api_errors.Server_error(Api_errors.auth_disable_failed, [ExnHelper.string_of_exn e])) - end - ) in - - (* 2. 
then, if no exception was raised, we always remove our persistent extauth configuration *) - Db.Host.set_external_auth_type ~__context ~self:host ~value:""; - Db.Host.set_external_auth_service_name ~__context ~self:host ~value:""; - debug "external authentication service disabled successfully in host %s" host_name_label; - (* 2.1 if we are still trying to initialize the external auth service in the xapi.on_xapi_initialize thread, we should stop now *) - Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true;(* succeeds because there's no need to initialize anymore *) - - (* 3. CP-703: we always revalidate all sessions after the external authentication has been disabled *) - (* so that all sessions that were externally authenticated will be destroyed *) - debug "calling revalidate_all_sessions after disabling external auth for host %s" (host_name_label); - Xapi_session.revalidate_all_sessions ~__context; - - if (not during_pool_eject) then (* CA-28168 *) - begin - (* CA-24856: detect non-homogeneous external-authentication config in this host *) - detect_nonhomogeneous_external_auth_in_host ~__context ~host; - end; - - match plugin_disable_failure with - | None -> () - | Some e -> if not during_pool_eject - then raise e (* bubble up plugin's on_disable exception *) - else () (* we do not want to stop pool_eject *) - end - ) + (* CP-825: Serialize execution of host-enable-extauth and host-disable-extauth *) + (* we need to protect against concurrent access to the host.external_auth_type variable *) + Mutex.execute serialize_host_enable_disable_extauth (fun () -> + + let host_name_label = Db.Host.get_name_label ~__context ~self:host in + let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + if (auth_type = "") then + begin (* nothing to do, external authentication is already disabled *) + let msg = "external authentication service is already disabled" in + debug "Failed to disable external authentication in host %s: %s" host_name_label 
msg; + (* we do not raise an exception here. for our purposes, there's nothing wrong with *) + (* disabling an already disabled authentication plugin *) + end + else (* this is the case when auth_type <> "" *) + begin + (* CP-709: call extauth hook-script before extauth.disable *) + (* we must not fork, instead block until the script has returned, so that the script is able *) + (* to obtain auth_type and other information from the metadata and there is at most one *) + (* disable-external-auth event script running at any one time in the same host *) + (* we use its local variation without mutex, otherwise we will deadlock *) + let call_plugin_fn () = call_extauth_plugin_nomutex ~__context ~host + ~fn:Extauth.event_name_before_extauth_disable + ~args:(Extauth.get_event_params ~__context host) + in ignore(Extauth.call_extauth_hook_script_in_host_wrapper ~__context host Extauth.event_name_before_extauth_disable ~call_plugin_fn); + + (* 1. first, we try to call the external auth plugin to disable the external authentication service *) + let plugin_disable_failure = + (try + (Ext_auth.d()).on_disable config; + None (* OK, on_disable succeeded *) + with + | Auth_signature.Auth_service_error (errtag,msg) -> + begin + debug "Failed while calling on_disable event of external authentication plugin in host %s: %s" host_name_label msg; + Some (Api_errors.Server_error(Api_errors.auth_disable_failed^(Auth_signature.suffix_of_tag errtag), [msg])) + end + | e -> (*absorb any exception*) + begin + debug "Failed while calling on_disable event of external authentication plugin in host %s: %s" host_name_label (ExnHelper.string_of_exn e); + Some (Api_errors.Server_error(Api_errors.auth_disable_failed, [ExnHelper.string_of_exn e])) + end + ) in + + (* 2. 
then, if no exception was raised, we always remove our persistent extauth configuration *) + Db.Host.set_external_auth_type ~__context ~self:host ~value:""; + Db.Host.set_external_auth_service_name ~__context ~self:host ~value:""; + debug "external authentication service disabled successfully in host %s" host_name_label; + (* 2.1 if we are still trying to initialize the external auth service in the xapi.on_xapi_initialize thread, we should stop now *) + Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded := true;(* succeeds because there's no need to initialize anymore *) + + (* 3. CP-703: we always revalidate all sessions after the external authentication has been disabled *) + (* so that all sessions that were externally authenticated will be destroyed *) + debug "calling revalidate_all_sessions after disabling external auth for host %s" (host_name_label); + Xapi_session.revalidate_all_sessions ~__context; + + if (not during_pool_eject) then (* CA-28168 *) + begin + (* CA-24856: detect non-homogeneous external-authentication config in this host *) + detect_nonhomogeneous_external_auth_in_host ~__context ~host; + end; + + match plugin_disable_failure with + | None -> () + | Some e -> if not during_pool_eject + then raise e (* bubble up plugin's on_disable exception *) + else () (* we do not want to stop pool_eject *) + end + ) let disable_external_auth ~__context ~host ~config = - disable_external_auth_common ~during_pool_eject:false ~__context ~host ~config + disable_external_auth_common ~during_pool_eject:false ~__context ~host ~config let attach_static_vdis ~__context ~host ~vdi_reason_map = let attach (vdi, reason) = - let static_vdis = Static_vdis_list.list () in - let check v = - (v.Static_vdis_list.uuid = Db.VDI.get_uuid ~__context ~self:vdi && - v.Static_vdis_list.currently_attached) in - if not (List.exists check static_vdis) then - Pervasiveext.ignore_string (Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason) + let static_vdis = 
Static_vdis_list.list () in + let check v = + (v.Static_vdis_list.uuid = Db.VDI.get_uuid ~__context ~self:vdi && + v.Static_vdis_list.currently_attached) in + if not (List.exists check static_vdis) then + Pervasiveext.ignore_string (Static_vdis.permanent_vdi_attach ~__context ~vdi ~reason) in List.iter attach vdi_reason_map let detach_static_vdis ~__context ~host ~vdis = let detach vdi = - let static_vdis = Static_vdis_list.list () in - let check v = - (v.Static_vdis_list.uuid = Db.VDI.get_uuid ~__context ~self:vdi) in - if List.exists check static_vdis then - Static_vdis.permanent_vdi_detach ~__context ~vdi; + let static_vdis = Static_vdis_list.list () in + let check v = + (v.Static_vdis_list.uuid = Db.VDI.get_uuid ~__context ~self:vdi) in + if List.exists check static_vdis then + Static_vdis.permanent_vdi_detach ~__context ~vdi; in List.iter detach vdis let update_pool_secret ~__context ~host ~pool_secret = - Unixext.write_string_to_file !Xapi_globs.pool_secret_path pool_secret + Unixext.write_string_to_file !Xapi_globs.pool_secret_path pool_secret let set_localdb_key ~__context ~host ~key ~value = - Localdb.put key value; - debug "Local-db key '%s' has been set to '%s'" key value + Localdb.put key value; + debug "Local-db key '%s' has been set to '%s'" key value (* Licensing *) let copy_license_to_db ~__context ~host ~features ~additional = - let restrict_kvpairs = Features.to_assoc_list features in - let license_params = additional @ restrict_kvpairs in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - (* This will trigger a pool sku/restrictions recomputation *) - Client.Client.Host.set_license_params rpc session_id !Xapi_globs.localhost_ref license_params) + let restrict_kvpairs = Features.to_assoc_list features in + let license_params = additional @ restrict_kvpairs in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + (* This will trigger a pool sku/restrictions recomputation *) + Client.Client.Host.set_license_params rpc 
session_id !Xapi_globs.localhost_ref license_params) let set_license_params ~__context ~self ~value = - Db.Host.set_license_params ~__context ~self ~value; - Pool_features.update_pool_features ~__context + Db.Host.set_license_params ~__context ~self ~value; + Pool_features.update_pool_features ~__context let apply_edition_internal ~__context ~host ~edition ~additional = - let edition', features, additional = - V6client.apply_edition ~__context edition additional - in - Db.Host.set_edition ~__context ~self:host ~value:edition'; - copy_license_to_db ~__context ~host ~features ~additional + let edition', features, additional = + V6client.apply_edition ~__context edition additional + in + Db.Host.set_edition ~__context ~self:host ~value:edition'; + copy_license_to_db ~__context ~host ~features ~additional let apply_edition ~__context ~host ~edition ~force = - (* if HA is enabled do not allow the edition to be changed *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then - raise (Api_errors.Server_error (Api_errors.ha_is_enabled, [])) - else - let additional = if force then ["force", "true"] else [] in - apply_edition_internal ~__context ~host ~edition ~additional + (* if HA is enabled do not allow the edition to be changed *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then + raise (Api_errors.Server_error (Api_errors.ha_is_enabled, [])) + else + let additional = if force then ["force", "true"] else [] in + apply_edition_internal ~__context ~host ~edition ~additional let license_add ~__context ~host ~contents = - let license = - try - Base64.decode contents - with _ -> - error "Base64 decoding of supplied license has failed"; - raise Api_errors.(Server_error(license_processing_error, [])) - in - let tmp = "/tmp/new_license" in - finally - (fun () -> - begin try - Unixext.write_string_to_file tmp license - with _ -> - let s = "Failed to write temporary file." 
in - raise Api_errors.(Server_error(internal_error, [s])) - end; - let edition', features, additional = V6client.apply_edition ~__context "" ["license_file", tmp] in - Db.Host.set_edition ~__context ~self:host ~value:edition'; - copy_license_to_db ~__context ~host ~features ~additional - ) - (fun () -> - (* The license will have been moved to a standard location if it was valid, and - * should be removed otherwise -> always remove the file at the tmp path, if any. *) - Unixext.unlink_safe tmp - ) + let license = + try + Base64.decode contents + with _ -> + error "Base64 decoding of supplied license has failed"; + raise Api_errors.(Server_error(license_processing_error, [])) + in + let tmp = "/tmp/new_license" in + finally + (fun () -> + begin try + Unixext.write_string_to_file tmp license + with _ -> + let s = "Failed to write temporary file." in + raise Api_errors.(Server_error(internal_error, [s])) + end; + let edition', features, additional = V6client.apply_edition ~__context "" ["license_file", tmp] in + Db.Host.set_edition ~__context ~self:host ~value:edition'; + copy_license_to_db ~__context ~host ~features ~additional + ) + (fun () -> + (* The license will have been moved to a standard location if it was valid, and + * should be removed otherwise -> always remove the file at the tmp path, if any. 
*) + Unixext.unlink_safe tmp + ) let license_remove ~__context ~host = - let edition', features, additional = - V6client.apply_edition ~__context "" ["license_file", ""] in - Db.Host.set_edition ~__context ~self:host ~value:edition'; - copy_license_to_db ~__context ~host ~features ~additional + let edition', features, additional = + V6client.apply_edition ~__context "" ["license_file", ""] in + Db.Host.set_edition ~__context ~self:host ~value:edition'; + copy_license_to_db ~__context ~host ~features ~additional (* Supplemental packs *) let refresh_pack_info ~__context ~host = - debug "Refreshing software_version"; - Create_misc.create_software_version ~__context + debug "Refreshing software_version"; + Create_misc.create_software_version ~__context (* Network reset *) let reset_networking ~__context ~host = - debug "Resetting networking"; - (* This is only ever done on the master, so using "Db.*.get_all " is ok. *) - let local_pifs = List.filter (fun pif -> Db.PIF.get_host ~__context ~self:pif = host) (Db.PIF.get_all ~__context) in - let bond_is_local bond = - List.fold_left (fun a pif -> Db.Bond.get_master ~__context ~self:bond = pif || a) false local_pifs - in - let vlan_is_local vlan = - List.fold_left (fun a pif -> Db.VLAN.get_untagged_PIF ~__context ~self:vlan = pif || a) false local_pifs - in - let tunnel_is_local tunnel = - List.fold_left (fun a pif -> Db.Tunnel.get_access_PIF ~__context ~self:tunnel = pif || a) false local_pifs - in - let bonds = List.filter bond_is_local (Db.Bond.get_all ~__context) in - List.iter (fun bond -> debug "destroying bond %s" (Db.Bond.get_uuid ~__context ~self:bond); - Db.Bond.destroy ~__context ~self:bond) bonds; - let vlans = List.filter vlan_is_local (Db.VLAN.get_all ~__context) in - List.iter (fun vlan -> debug "destroying VLAN %s" (Db.VLAN.get_uuid ~__context ~self:vlan); - Db.VLAN.destroy ~__context ~self:vlan) vlans; - let tunnels = List.filter tunnel_is_local (Db.Tunnel.get_all ~__context) in - List.iter (fun tunnel -> 
debug "destroying tunnel %s" (Db.Tunnel.get_uuid ~__context ~self:tunnel); - Db.Tunnel.destroy ~__context ~self:tunnel) tunnels; - List.iter (fun self -> debug "destroying PIF %s" (Db.PIF.get_uuid ~__context ~self); - if Db.PIF.get_physical ~__context ~self = true || Db.PIF.get_bond_master_of ~__context ~self <> [] then begin - let metrics = Db.PIF.get_metrics ~__context ~self in - Db.PIF_metrics.destroy ~__context ~self:metrics - end; - Db.PIF.destroy ~__context ~self; - ) local_pifs + debug "Resetting networking"; + (* This is only ever done on the master, so using "Db.*.get_all " is ok. *) + let local_pifs = List.filter (fun pif -> Db.PIF.get_host ~__context ~self:pif = host) (Db.PIF.get_all ~__context) in + let bond_is_local bond = + List.fold_left (fun a pif -> Db.Bond.get_master ~__context ~self:bond = pif || a) false local_pifs + in + let vlan_is_local vlan = + List.fold_left (fun a pif -> Db.VLAN.get_untagged_PIF ~__context ~self:vlan = pif || a) false local_pifs + in + let tunnel_is_local tunnel = + List.fold_left (fun a pif -> Db.Tunnel.get_access_PIF ~__context ~self:tunnel = pif || a) false local_pifs + in + let bonds = List.filter bond_is_local (Db.Bond.get_all ~__context) in + List.iter (fun bond -> debug "destroying bond %s" (Db.Bond.get_uuid ~__context ~self:bond); + Db.Bond.destroy ~__context ~self:bond) bonds; + let vlans = List.filter vlan_is_local (Db.VLAN.get_all ~__context) in + List.iter (fun vlan -> debug "destroying VLAN %s" (Db.VLAN.get_uuid ~__context ~self:vlan); + Db.VLAN.destroy ~__context ~self:vlan) vlans; + let tunnels = List.filter tunnel_is_local (Db.Tunnel.get_all ~__context) in + List.iter (fun tunnel -> debug "destroying tunnel %s" (Db.Tunnel.get_uuid ~__context ~self:tunnel); + Db.Tunnel.destroy ~__context ~self:tunnel) tunnels; + List.iter (fun self -> debug "destroying PIF %s" (Db.PIF.get_uuid ~__context ~self); + if Db.PIF.get_physical ~__context ~self = true || Db.PIF.get_bond_master_of ~__context ~self <> [] then begin + 
let metrics = Db.PIF.get_metrics ~__context ~self in + Db.PIF_metrics.destroy ~__context ~self:metrics + end; + Db.PIF.destroy ~__context ~self; + ) local_pifs (* Local storage caching *) let enable_local_storage_caching ~__context ~host ~sr = - assert_bacon_mode ~__context ~host; - let ty = Db.SR.get_type ~__context ~self:sr in - let pbds = Db.SR.get_PBDs ~__context ~self:sr in - let shared = Db.SR.get_shared ~__context ~self:sr in - let has_required_capability = - let caps = Sm.features_of_driver ty in - List.mem_assoc Smint.Sr_supports_local_caching caps - in - debug "shared: %b. List.length pbds: %d. has_required_capability: %b" shared (List.length pbds) has_required_capability; - if (shared=false) && (List.length pbds = 1) && has_required_capability then begin - let pbd_host = Db.PBD.get_host ~__context ~self:(List.hd pbds) in - if pbd_host <> host then raise (Api_errors.Server_error (Api_errors.host_cannot_see_SR,[Ref.string_of host; Ref.string_of sr])); - let old_sr = Db.Host.get_local_cache_sr ~__context ~self:host in - if old_sr <> Ref.null then Db.SR.set_local_cache_enabled ~__context ~self:old_sr ~value:false; - Db.Host.set_local_cache_sr ~__context ~self:host ~value:sr; - Db.SR.set_local_cache_enabled ~__context ~self:sr ~value:true; - log_and_ignore_exn (fun () -> Rrdd.set_cache_sr ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr)); - end else begin - raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported,[])) - end + assert_bacon_mode ~__context ~host; + let ty = Db.SR.get_type ~__context ~self:sr in + let pbds = Db.SR.get_PBDs ~__context ~self:sr in + let shared = Db.SR.get_shared ~__context ~self:sr in + let has_required_capability = + let caps = Sm.features_of_driver ty in + List.mem_assoc Smint.Sr_supports_local_caching caps + in + debug "shared: %b. List.length pbds: %d. 
has_required_capability: %b" shared (List.length pbds) has_required_capability; + if (shared=false) && (List.length pbds = 1) && has_required_capability then begin + let pbd_host = Db.PBD.get_host ~__context ~self:(List.hd pbds) in + if pbd_host <> host then raise (Api_errors.Server_error (Api_errors.host_cannot_see_SR,[Ref.string_of host; Ref.string_of sr])); + let old_sr = Db.Host.get_local_cache_sr ~__context ~self:host in + if old_sr <> Ref.null then Db.SR.set_local_cache_enabled ~__context ~self:old_sr ~value:false; + Db.Host.set_local_cache_sr ~__context ~self:host ~value:sr; + Db.SR.set_local_cache_enabled ~__context ~self:sr ~value:true; + log_and_ignore_exn (fun () -> Rrdd.set_cache_sr ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr)); + end else begin + raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported,[])) + end let disable_local_storage_caching ~__context ~host = - assert_bacon_mode ~__context ~host; - let sr = Db.Host.get_local_cache_sr ~__context ~self:host in - Db.Host.set_local_cache_sr ~__context ~self:host ~value:Ref.null; - log_and_ignore_exn Rrdd.unset_cache_sr; - try Db.SR.set_local_cache_enabled ~__context ~self:sr ~value:false with _ -> () + assert_bacon_mode ~__context ~host; + let sr = Db.Host.get_local_cache_sr ~__context ~self:host in + Db.Host.set_local_cache_sr ~__context ~self:host ~value:Ref.null; + log_and_ignore_exn Rrdd.unset_cache_sr; + try Db.SR.set_local_cache_enabled ~__context ~self:sr ~value:false with _ -> () (* Here's how we do VLAN resyncing: We take a VLAN master and record (i) the Network it is on; (ii) its VLAN tag; @@ -1429,233 +1429,233 @@ let disable_local_storage_caching ~__context ~host = PIF (e.g. if the master has eth0.25 and we don't have eth0) then we do nothing. 
*) let sync_vlans ~__context ~host = - let master = !Xapi_globs.localhost_ref in - let master_vlan_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of master)), - Not (Eq (Field "VLAN_master_of", Literal (Ref.string_of Ref.null))) - )) in - let slave_vlan_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Not (Eq (Field "VLAN_master_of", Literal (Ref.string_of Ref.null))) - )) in - - let get_network_of_pif_underneath_vlan vlan_pif = - let vlan = Db.PIF.get_VLAN_master_of ~__context ~self:vlan_pif in - let pif_underneath_vlan = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in - Db.PIF.get_network ~__context ~self:pif_underneath_vlan - in - - let maybe_create_vlan (master_pif_ref, master_pif_rec) = - (* Check to see if the slave has any existing pif(s) that for the specified device, network, vlan... *) - let existing_pif = List.filter (fun (slave_pif_ref, slave_pif_record) -> - (* Is slave VLAN PIF that we're considering (slave_pif_ref) the one that corresponds - * to the master_pif we're considering (master_pif_ref)? *) - true - && slave_pif_record.API.pIF_network = master_pif_rec.API.pIF_network - && slave_pif_record.API.pIF_VLAN = master_pif_rec.API.pIF_VLAN - && ((get_network_of_pif_underneath_vlan slave_pif_ref) = - (get_network_of_pif_underneath_vlan master_pif_ref)) - ) slave_vlan_pifs in - (* if I don't have any such pif(s) then make one: *) - if List.length existing_pif = 0 - then - begin - (* On the master, we find the pif, p, that underlies the VLAN - * (e.g. 
"eth0" underlies "eth0.25") and then find the network that p's on: *) - let network_of_pif_underneath_vlan_on_master = get_network_of_pif_underneath_vlan master_pif_ref in - let pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "network", Literal (Ref.string_of network_of_pif_underneath_vlan_on_master)) - )) in - match pifs with - | [] -> - (* We have no PIF on which to make the VLAN; do nothing *) - () - | [(pif_ref, pif_rec)] -> - (* This is the PIF on which we want to base our VLAN record; let's make it *) - debug "Creating VLAN %Ld on slave" master_pif_rec.API.pIF_VLAN; - ignore (Xapi_vlan.create_internal ~__context ~host ~tagged_PIF:pif_ref - ~tag:master_pif_rec.API.pIF_VLAN ~network:master_pif_rec.API.pIF_network - ~device:pif_rec.API.pIF_device) - | _ -> - (* This should never happen since we should never have more than one of _our_ pifs - * on the same network *) - () - end - in - (* For each of the master's PIFs, create a corresponding one on the slave if necessary *) - List.iter maybe_create_vlan master_vlan_pifs + let master = !Xapi_globs.localhost_ref in + let master_vlan_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of master)), + Not (Eq (Field "VLAN_master_of", Literal (Ref.string_of Ref.null))) + )) in + let slave_vlan_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Not (Eq (Field "VLAN_master_of", Literal (Ref.string_of Ref.null))) + )) in + + let get_network_of_pif_underneath_vlan vlan_pif = + let vlan = Db.PIF.get_VLAN_master_of ~__context ~self:vlan_pif in + let pif_underneath_vlan = Db.VLAN.get_tagged_PIF ~__context ~self:vlan in + Db.PIF.get_network ~__context ~self:pif_underneath_vlan + in + + let maybe_create_vlan (master_pif_ref, master_pif_rec) = + (* Check to see if the slave has any existing pif(s) that for the specified device, network, vlan... 
*) + let existing_pif = List.filter (fun (slave_pif_ref, slave_pif_record) -> + (* Is slave VLAN PIF that we're considering (slave_pif_ref) the one that corresponds + * to the master_pif we're considering (master_pif_ref)? *) + true + && slave_pif_record.API.pIF_network = master_pif_rec.API.pIF_network + && slave_pif_record.API.pIF_VLAN = master_pif_rec.API.pIF_VLAN + && ((get_network_of_pif_underneath_vlan slave_pif_ref) = + (get_network_of_pif_underneath_vlan master_pif_ref)) + ) slave_vlan_pifs in + (* if I don't have any such pif(s) then make one: *) + if List.length existing_pif = 0 + then + begin + (* On the master, we find the pif, p, that underlies the VLAN + * (e.g. "eth0" underlies "eth0.25") and then find the network that p's on: *) + let network_of_pif_underneath_vlan_on_master = get_network_of_pif_underneath_vlan master_pif_ref in + let pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "network", Literal (Ref.string_of network_of_pif_underneath_vlan_on_master)) + )) in + match pifs with + | [] -> + (* We have no PIF on which to make the VLAN; do nothing *) + () + | [(pif_ref, pif_rec)] -> + (* This is the PIF on which we want to base our VLAN record; let's make it *) + debug "Creating VLAN %Ld on slave" master_pif_rec.API.pIF_VLAN; + ignore (Xapi_vlan.create_internal ~__context ~host ~tagged_PIF:pif_ref + ~tag:master_pif_rec.API.pIF_VLAN ~network:master_pif_rec.API.pIF_network + ~device:pif_rec.API.pIF_device) + | _ -> + (* This should never happen since we should never have more than one of _our_ pifs + * on the same network *) + () + end + in + (* For each of the master's PIFs, create a corresponding one on the slave if necessary *) + List.iter maybe_create_vlan master_vlan_pifs let sync_tunnels ~__context ~host = - let master = !Xapi_globs.localhost_ref in - - let master_tunnel_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of 
master)), - Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) - )) in - let slave_tunnel_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) - )) in - - let get_network_of_transport_pif access_pif = - match Db.PIF.get_tunnel_access_PIF_of ~__context ~self:access_pif with - | [tunnel] -> - let transport_pif = Db.Tunnel.get_transport_PIF ~__context ~self:tunnel in - Db.PIF.get_network ~__context ~self:transport_pif - | _ -> failwith (Printf.sprintf "PIF %s has no tunnel_access_PIF_of" (Ref.string_of access_pif)) - in - - let maybe_create_tunnel_for_me (master_pif_ref, master_pif_rec) = - (* check to see if I have any existing pif(s) that for the specified device, network, vlan... *) - let existing_pif = List.filter (fun (_, slave_pif_record) -> - (* Is the slave's tunnel access PIF that we're considering (slave_pif_ref) - * the one that corresponds to the master's tunnel access PIF we're considering (master_pif_ref)? 
*) - slave_pif_record.API.pIF_network = master_pif_rec.API.pIF_network - ) slave_tunnel_pifs in - (* If the slave doesn't have any such PIF then make one: *) - if List.length existing_pif = 0 - then - begin - (* On the master, we find the network the tunnel transport PIF is on *) - let network_of_transport_pif_on_master = get_network_of_transport_pif master_pif_ref in - let pifs = Db.PIF.get_records_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "network", Literal (Ref.string_of network_of_transport_pif_on_master)) - )) in - match pifs with - | [] -> - (* we have no PIF on which to make the tunnel; do nothing *) - () - | [(pif_ref,_)] -> - (* this is the PIF on which we want as transport PIF; let's make it *) - ignore (Xapi_tunnel.create_internal ~__context ~transport_PIF:pif_ref - ~network:master_pif_rec.API.pIF_network ~host) - | _ -> - (* This should never happen cos we should never have more than one of _our_ pifs - * on the same nework *) - () - end - in - (* for each of the master's pifs, create a corresponding one on this host if necessary *) - List.iter maybe_create_tunnel_for_me master_tunnel_pifs + let master = !Xapi_globs.localhost_ref in + + let master_tunnel_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of master)), + Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) + )) in + let slave_tunnel_pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) + )) in + + let get_network_of_transport_pif access_pif = + match Db.PIF.get_tunnel_access_PIF_of ~__context ~self:access_pif with + | [tunnel] -> + let transport_pif = Db.Tunnel.get_transport_PIF ~__context ~self:tunnel in + Db.PIF.get_network ~__context ~self:transport_pif + | _ -> failwith (Printf.sprintf "PIF %s has no tunnel_access_PIF_of" (Ref.string_of access_pif)) + in + + let 
maybe_create_tunnel_for_me (master_pif_ref, master_pif_rec) = + (* check to see if I have any existing pif(s) that for the specified device, network, vlan... *) + let existing_pif = List.filter (fun (_, slave_pif_record) -> + (* Is the slave's tunnel access PIF that we're considering (slave_pif_ref) + * the one that corresponds to the master's tunnel access PIF we're considering (master_pif_ref)? *) + slave_pif_record.API.pIF_network = master_pif_rec.API.pIF_network + ) slave_tunnel_pifs in + (* If the slave doesn't have any such PIF then make one: *) + if List.length existing_pif = 0 + then + begin + (* On the master, we find the network the tunnel transport PIF is on *) + let network_of_transport_pif_on_master = get_network_of_transport_pif master_pif_ref in + let pifs = Db.PIF.get_records_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "network", Literal (Ref.string_of network_of_transport_pif_on_master)) + )) in + match pifs with + | [] -> + (* we have no PIF on which to make the tunnel; do nothing *) + () + | [(pif_ref,_)] -> + (* this is the PIF on which we want as transport PIF; let's make it *) + ignore (Xapi_tunnel.create_internal ~__context ~transport_PIF:pif_ref + ~network:master_pif_rec.API.pIF_network ~host) + | _ -> + (* This should never happen cos we should never have more than one of _our_ pifs + * on the same nework *) + () + end + in + (* for each of the master's pifs, create a corresponding one on this host if necessary *) + List.iter maybe_create_tunnel_for_me master_tunnel_pifs let sync_pif_currently_attached ~__context ~host ~bridges = - (* Produce internal lookup tables *) - let networks = Db.Network.get_all_records ~__context in - let pifs = Db.PIF.get_records_where ~__context ~expr:( - Eq (Field "host", Literal (Ref.string_of host)) - ) in - - let network_to_bridge = List.map (fun (net, net_r) -> net, net_r.API.network_bridge) networks in - - (* PIF -> bridge option: None means "dangling PIF" *) 
- let pif_to_bridge = - (* Create a list pairing each PIF with the bridge for the network - that it is on *) - List.map (fun (pif, pif_r) -> - let net = pif_r.API.pIF_network in - let bridge = - if List.mem_assoc net network_to_bridge then - Some (List.assoc net network_to_bridge) - else - None - in pif, bridge - ) pifs in - - (* Perform the database resynchronisation *) - List.iter - (fun (pif, pif_r) -> - let bridge = List.assoc pif pif_to_bridge in - let currently_attached = Opt.default false (Opt.map (fun x -> List.mem x bridges) bridge) in - if pif_r.API.pIF_currently_attached <> currently_attached then begin - Db.PIF.set_currently_attached ~__context ~self:pif ~value:currently_attached; - debug "PIF %s currently_attached <- %b" (Ref.string_of pif) currently_attached; - end; - ) pifs + (* Produce internal lookup tables *) + let networks = Db.Network.get_all_records ~__context in + let pifs = Db.PIF.get_records_where ~__context ~expr:( + Eq (Field "host", Literal (Ref.string_of host)) + ) in + + let network_to_bridge = List.map (fun (net, net_r) -> net, net_r.API.network_bridge) networks in + + (* PIF -> bridge option: None means "dangling PIF" *) + let pif_to_bridge = + (* Create a list pairing each PIF with the bridge for the network + that it is on *) + List.map (fun (pif, pif_r) -> + let net = pif_r.API.pIF_network in + let bridge = + if List.mem_assoc net network_to_bridge then + Some (List.assoc net network_to_bridge) + else + None + in pif, bridge + ) pifs in + + (* Perform the database resynchronisation *) + List.iter + (fun (pif, pif_r) -> + let bridge = List.assoc pif pif_to_bridge in + let currently_attached = Opt.default false (Opt.map (fun x -> List.mem x bridges) bridge) in + if pif_r.API.pIF_currently_attached <> currently_attached then begin + Db.PIF.set_currently_attached ~__context ~self:pif ~value:currently_attached; + debug "PIF %s currently_attached <- %b" (Ref.string_of pif) currently_attached; + end; + ) pifs let migrate_receive 
~__context ~host ~network ~options = - Xapi_vm_migrate.assert_licensed_storage_motion ~__context ; - - let session_id = Context.get_session_id __context in - let session_rec = Db.Session.get_record ~__context ~self:session_id in - let new_session_id = Xapi_session.login_no_password ~__context ~uname:None ~host - ~pool:session_rec.API.session_pool - ~is_local_superuser:session_rec.API.session_is_local_superuser - ~subject:session_rec.API.session_subject - ~auth_user_sid:session_rec.API.session_auth_user_sid - ~auth_user_name:session_rec.API.session_auth_user_name - ~rbac_permissions:session_rec.API.session_rbac_permissions in - let new_session_id = (Ref.string_of new_session_id) in - let pifs = Db.Network.get_PIFs ~__context ~self:network in - let pif = - try List.find (fun x -> host = Db.PIF.get_host ~__context ~self:x) pifs - with Not_found -> - raise (Api_errors.Server_error(Api_errors.host_cannot_attach_network,[Ref.string_of host; Ref.string_of network])) - in - let ip = Db.PIF.get_IP ~__context ~self:pif in - if String.length ip = 0 then begin - match Db.PIF.get_ip_configuration_mode ~__context ~self:pif with - | `None -> raise (Api_errors.Server_error(Api_errors.pif_has_no_network_configuration,[Ref.string_of pif])) - | `DHCP -> raise (Api_errors.Server_error(Api_errors.interface_has_no_ip,[Ref.string_of pif])) - | _ -> failwith "No IP address on PIF" - end; - let sm_url = Printf.sprintf "http://%s/services/SM?session_id=%s" ip new_session_id in - let xenops_url = Printf.sprintf "http://%s/services/xenops?session_id=%s" ip new_session_id in - let master_address = try Pool_role.get_master_address () with Pool_role.This_host_is_a_master -> - Opt.unbox (Helpers.get_management_ip_addr ~__context) in - - let master_url = Printf.sprintf "http://%s/" master_address in - [ Xapi_vm_migrate._sm, sm_url; - Xapi_vm_migrate._host, Ref.string_of host; - Xapi_vm_migrate._xenops, xenops_url; - Xapi_vm_migrate._session_id, new_session_id; - Xapi_vm_migrate._master, 
master_url; - ] + Xapi_vm_migrate.assert_licensed_storage_motion ~__context ; + + let session_id = Context.get_session_id __context in + let session_rec = Db.Session.get_record ~__context ~self:session_id in + let new_session_id = Xapi_session.login_no_password ~__context ~uname:None ~host + ~pool:session_rec.API.session_pool + ~is_local_superuser:session_rec.API.session_is_local_superuser + ~subject:session_rec.API.session_subject + ~auth_user_sid:session_rec.API.session_auth_user_sid + ~auth_user_name:session_rec.API.session_auth_user_name + ~rbac_permissions:session_rec.API.session_rbac_permissions in + let new_session_id = (Ref.string_of new_session_id) in + let pifs = Db.Network.get_PIFs ~__context ~self:network in + let pif = + try List.find (fun x -> host = Db.PIF.get_host ~__context ~self:x) pifs + with Not_found -> + raise (Api_errors.Server_error(Api_errors.host_cannot_attach_network,[Ref.string_of host; Ref.string_of network])) + in + let ip = Db.PIF.get_IP ~__context ~self:pif in + if String.length ip = 0 then begin + match Db.PIF.get_ip_configuration_mode ~__context ~self:pif with + | `None -> raise (Api_errors.Server_error(Api_errors.pif_has_no_network_configuration,[Ref.string_of pif])) + | `DHCP -> raise (Api_errors.Server_error(Api_errors.interface_has_no_ip,[Ref.string_of pif])) + | _ -> failwith "No IP address on PIF" + end; + let sm_url = Printf.sprintf "http://%s/services/SM?session_id=%s" ip new_session_id in + let xenops_url = Printf.sprintf "http://%s/services/xenops?session_id=%s" ip new_session_id in + let master_address = try Pool_role.get_master_address () with Pool_role.This_host_is_a_master -> + Opt.unbox (Helpers.get_management_ip_addr ~__context) in + + let master_url = Printf.sprintf "http://%s/" master_address in + [ Xapi_vm_migrate._sm, sm_url; + Xapi_vm_migrate._host, Ref.string_of host; + Xapi_vm_migrate._xenops, xenops_url; + Xapi_vm_migrate._session_id, new_session_id; + Xapi_vm_migrate._master, master_url; + ] let 
update_display ~__context ~host ~action = - let open Xapi_host_display in - let db_current = Db.Host.get_display ~__context ~self:host in - let db_new, actual_action = match db_current, action with - | `enabled, `enable -> `enabled, None - | `disable_on_reboot, `enable -> `enabled, Some `enable - | `disabled, `enable -> `enable_on_reboot, Some `enable - | `enable_on_reboot, `enable -> `enable_on_reboot, None - | `enabled, `disable -> `disable_on_reboot, Some `disable - | `disable_on_reboot, `disable -> `disable_on_reboot, None - | `disabled, `disable -> `disabled, None - | `enable_on_reboot, `disable -> `disabled, Some `disable - in - begin - match actual_action with - | None -> () - | Some `disable -> disable () - | Some `enable -> enable () - end; - if db_new <> db_current - then Db.Host.set_display ~__context ~self:host ~value:db_new; - db_new + let open Xapi_host_display in + let db_current = Db.Host.get_display ~__context ~self:host in + let db_new, actual_action = match db_current, action with + | `enabled, `enable -> `enabled, None + | `disable_on_reboot, `enable -> `enabled, Some `enable + | `disabled, `enable -> `enable_on_reboot, Some `enable + | `enable_on_reboot, `enable -> `enable_on_reboot, None + | `enabled, `disable -> `disable_on_reboot, Some `disable + | `disable_on_reboot, `disable -> `disable_on_reboot, None + | `disabled, `disable -> `disabled, None + | `enable_on_reboot, `disable -> `disabled, Some `disable + in + begin + match actual_action with + | None -> () + | Some `disable -> disable () + | Some `enable -> enable () + end; + if db_new <> db_current + then Db.Host.set_display ~__context ~self:host ~value:db_new; + db_new let enable_display ~__context ~host = - update_display ~__context ~host ~action:`enable + update_display ~__context ~host ~action:`enable let disable_display ~__context ~host = - if not (Pool_features.is_enabled ~__context Features.Integrated_GPU) - then raise Api_errors.(Server_error (feature_restricted, [])); - 
update_display ~__context ~host ~action:`disable + if not (Pool_features.is_enabled ~__context Features.Integrated_GPU) + then raise Api_errors.(Server_error (feature_restricted, [])); + update_display ~__context ~host ~action:`disable let sync_display ~__context ~host= - if !Xapi_globs.on_system_boot then begin - let status = match Xapi_host_display.status () with - | `enabled | `unknown -> `enabled - | `disabled -> `disabled - in - if status = `disabled - then Xapi_pci.disable_system_display_device (); - Db.Host.set_display ~__context ~self:host ~value:status - end + if !Xapi_globs.on_system_boot then begin + let status = match Xapi_host_display.status () with + | `enabled | `unknown -> `enabled + | `disabled -> `disabled + in + if status = `disabled + then Xapi_pci.disable_system_display_device (); + Db.Host.set_display ~__context ~self:host ~value:status + end let apply_guest_agent_config ~__context ~host = - let pool = Helpers.get_pool ~__context in - let config = Db.Pool.get_guest_agent_config ~__context ~self:pool in - Xapi_xenops.apply_guest_agent_config ~__context config + let pool = Helpers.get_pool ~__context in + let config = Db.Pool.get_guest_agent_config ~__context ~self:pool in + Xapi_xenops.apply_guest_agent_config ~__context config diff --git a/ocaml/xapi/xapi_host.mli b/ocaml/xapi/xapi_host.mli index fdbc1955124..2f78b7c2dac 100644 --- a/ocaml/xapi/xapi_host.mli +++ b/ocaml/xapi/xapi_host.mli @@ -13,7 +13,7 @@ *) (** Module that defines API functions for Host objects * @group XenAPI functions - *) +*) (** {2 (Fill in Title!)} *) @@ -23,16 +23,16 @@ val set_emergency_mode_error : string -> string list -> unit by the CLI, indicating stuff like: failure to get a management IP address, the master doesn't recognise us etc. 
*) -val local_assert_healthy : __context:'a -> unit +val local_assert_healthy : __context:'a -> unit val set_power_on_mode : __context:Context.t -> self:[ `host ] Ref.t -> power_on_mode: string -> power_on_config:(string * string) list -> unit - + val bugreport_upload : __context:'a -> host:'b -> url:string -> options:(string * string) list -> unit - + val signal_networking_change : __context:Context.t -> unit val signal_cdrom_event : __context:Context.t -> string -> unit val notify : __context:Context.t -> ty:string -> params:string -> unit @@ -62,7 +62,7 @@ val send_debug_keys : __context:Context.t -> host:'b -> keys:string -> unit val list_methods : __context:'a -> 'b val is_slave : __context:'a -> host:'b -> bool -(** Contact the host and return whether it is a slave or not. +(** Contact the host and return whether it is a slave or not. If the host is dead then one of the xmlrpcclient exceptions will be thrown *) val ask_host_if_it_is_a_slave : __context:Context.t -> host:API.ref_host -> bool @@ -225,11 +225,11 @@ val detach_static_vdis : (** {2 Local Database} *) (** Set a key in the Local DB of the host. 
*) -val set_localdb_key : __context:Context.t -> host:API.ref_host -> key:string -> value:string -> unit +val set_localdb_key : __context:Context.t -> host:API.ref_host -> key:string -> value:string -> unit (** {2 Secrets} *) - + val update_pool_secret : __context:'a -> host:'b -> pool_secret:string -> unit @@ -244,13 +244,13 @@ val refresh_pack_info : __context:Context.t -> host:API.ref_host -> unit (** Called by post-floodgate slaves to update the database AND recompute the pool_sku on the master *) val set_license_params : - __context:Context.t -> - self:[ `host ] Ref.t -> value:(string * string) list -> unit + __context:Context.t -> + self:[ `host ] Ref.t -> value:(string * string) list -> unit val copy_license_to_db : - __context:Context.t -> - host:[ `host ] Ref.t -> - features:Features.feature list -> additional:(string * string) list -> unit + __context:Context.t -> + host:[ `host ] Ref.t -> + features:Features.feature list -> additional:(string * string) list -> unit val license_add : __context:Context.t -> host:API.ref_host -> contents:string -> unit @@ -264,10 +264,10 @@ val license_remove : __context:Context.t -> host:API.ref_host -> unit * connection details in host.license_server have been amended. 
*) val apply_edition : __context:Context.t -> host:API.ref_host -> edition:string -> force:bool -> unit val apply_edition_internal : __context:Context.t -> host:API.ref_host -> - edition:string -> additional:(string * string) list -> unit + edition:string -> additional:(string * string) list -> unit (** {2 CPU Feature Masking} *) - + (** Control the local caching behaviour of the host *) val enable_local_storage_caching : __context:Context.t -> host:API.ref_host -> sr:API.ref_SR -> unit val disable_local_storage_caching : __context:Context.t -> host:API.ref_host -> unit diff --git a/ocaml/xapi/xapi_host_backup.ml b/ocaml/xapi/xapi_host_backup.ml index c224c61231c..eaf8c7b855b 100644 --- a/ocaml/xapi/xapi_host_backup.ml +++ b/ocaml/xapi/xapi_host_backup.ml @@ -13,7 +13,7 @@ *) (** * @group Host Management - *) +*) open Stdext open Http @@ -25,97 +25,97 @@ module D = Debug.Make(struct let name="xapi" end) open D let host_backup_handler_core ~__context s = - match - (with_logfile_fd "host-backup" - (fun log_fd -> - let pid = safe_close_and_exec None (Some s) (Some log_fd) [] !Xapi_globs.host_backup [] in + match + (with_logfile_fd "host-backup" + (fun log_fd -> + let pid = safe_close_and_exec None (Some s) (Some log_fd) [] !Xapi_globs.host_backup [] in - let waitpid () = - match Forkhelpers.waitpid_nohang pid with - | 0, _ -> false - | _, Unix.WEXITED 0 -> true - | _, Unix.WEXITED n -> raise (Subprocess_failed n) - | _, _ -> raise (Subprocess_failed 0) - in + let waitpid () = + match Forkhelpers.waitpid_nohang pid with + | 0, _ -> false + | _, Unix.WEXITED 0 -> true + | _, Unix.WEXITED n -> raise (Subprocess_failed n) + | _, _ -> raise (Subprocess_failed 0) + in - let t = ref (0.0) in + let t = ref (0.0) in - while not (waitpid ()) do - Thread.delay 2.0; - t := !t -. 0.1; - let progress = 0.9 *. (1.0 -. 
(exp !t)) in - TaskHelper.set_progress ~__context progress - done - ) - ) - with - | Success(log,()) -> - debug "host_backup succeeded - returned: %s" log; - () - | Failure(log,e) -> - debug "host_backup failed - host_backup returned: %s" log; - raise (Api_errors.Server_error (Api_errors.backup_script_failed, [log])) + while not (waitpid ()) do + Thread.delay 2.0; + t := !t -. 0.1; + let progress = 0.9 *. (1.0 -. (exp !t)) in + TaskHelper.set_progress ~__context progress + done + ) + ) + with + | Success(log,()) -> + debug "host_backup succeeded - returned: %s" log; + () + | Failure(log,e) -> + debug "host_backup failed - host_backup returned: %s" log; + raise (Api_errors.Server_error (Api_errors.backup_script_failed, [log])) let host_backup_handler (req: Request.t) s _ = - req.Request.close <- true; - Xapi_http.with_context "Downloading host backup" req s - (fun __context -> - Http_svr.headers s (Http.http_200_ok ()); - host_backup_handler_core ~__context s - ) + req.Request.close <- true; + Xapi_http.with_context "Downloading host backup" req s + (fun __context -> + Http_svr.headers s (Http.http_200_ok ()); + host_backup_handler_core ~__context s + ) (** Helper function to prevent double-closes of file descriptors - TODO: this function was copied from util/sha1sum.ml, and should - really go in a shared lib somewhere + TODO: this function was copied from util/sha1sum.ml, and should + really go in a shared lib somewhere *) let close to_close fd = - if List.mem fd !to_close then Unix.close fd; - to_close := List.filter (fun x -> fd <> x) !to_close + if List.mem fd !to_close then Unix.close fd; + to_close := List.filter (fun x -> fd <> x) !to_close let host_restore_handler (req: Request.t) s _ = - req.Request.close <- true; - Xapi_http.with_context "Uploading host backup" req s - (fun __context -> - Http_svr.headers s (Http.http_200_ok ()); + req.Request.close <- true; + Xapi_http.with_context "Uploading host backup" req s + (fun __context -> + Http_svr.headers s 
(Http.http_200_ok ()); - let out_pipe, in_pipe = Unix.pipe () in - Unix.set_close_on_exec in_pipe; - let to_close = ref [ out_pipe; in_pipe ] in - let close = close to_close in - (* Lets be paranoid about closing fds *) + let out_pipe, in_pipe = Unix.pipe () in + Unix.set_close_on_exec in_pipe; + let to_close = ref [ out_pipe; in_pipe ] in + let close = close to_close in + (* Lets be paranoid about closing fds *) - finally - (fun () -> - (* XXX: ideally need to log this stuff *) - let result = with_logfile_fd "host-restore-log" - (fun log_fd -> - let pid = safe_close_and_exec (Some out_pipe) (Some log_fd) (Some log_fd) [] !Xapi_globs.host_restore [] in + finally + (fun () -> + (* XXX: ideally need to log this stuff *) + let result = with_logfile_fd "host-restore-log" + (fun log_fd -> + let pid = safe_close_and_exec (Some out_pipe) (Some log_fd) (Some log_fd) [] !Xapi_globs.host_restore [] in - close out_pipe; + close out_pipe; - finally - (fun () -> - debug "Host restore: reading backup..."; - let copied_bytes = match req.Request.content_length with - | Some i -> - debug "got content-length of %s" (Int64.to_string i); - Unixext.copy_file ~limit:i s in_pipe - | None -> Unixext.copy_file s in_pipe - in - debug "Host restore: read %s bytes of backup..." - (Int64.to_string copied_bytes) - ) - (fun () -> - close in_pipe; - waitpid_fail_if_bad_exit pid - ) - ) - in + finally + (fun () -> + debug "Host restore: reading backup..."; + let copied_bytes = match req.Request.content_length with + | Some i -> + debug "got content-length of %s" (Int64.to_string i); + Unixext.copy_file ~limit:i s in_pipe + | None -> Unixext.copy_file s in_pipe + in + debug "Host restore: read %s bytes of backup..." 
+ (Int64.to_string copied_bytes) + ) + (fun () -> + close in_pipe; + waitpid_fail_if_bad_exit pid + ) + ) + in - match result with - | Success _ -> debug "restore script exited successfully" - | Failure (log, exn) -> - debug "host-restore script failed with output: %s" log; - raise (Api_errors.Server_error (Api_errors.restore_script_failed, [log])) ) - (fun () -> List.iter close !to_close) - ) + match result with + | Success _ -> debug "restore script exited successfully" + | Failure (log, exn) -> + debug "host-restore script failed with output: %s" log; + raise (Api_errors.Server_error (Api_errors.restore_script_failed, [log])) ) + (fun () -> List.iter close !to_close) + ) diff --git a/ocaml/xapi/xapi_host_cpu.ml b/ocaml/xapi/xapi_host_cpu.ml index ccf26ab360b..0b8567acea0 100644 --- a/ocaml/xapi/xapi_host_cpu.ml +++ b/ocaml/xapi/xapi_host_cpu.ml @@ -13,8 +13,8 @@ *) (** * @group Host Management - *) - +*) + module D=Debug.Make(struct let name="xapi" end) open D diff --git a/ocaml/xapi/xapi_host_crashdump.ml b/ocaml/xapi/xapi_host_crashdump.ml index 9161e2f32b0..607fc8aa168 100644 --- a/ocaml/xapi/xapi_host_crashdump.ml +++ b/ocaml/xapi/xapi_host_crashdump.ml @@ -32,82 +32,82 @@ let du = "/usr/bin/du" let crash_dir = "/var/crash" let delete_crashdump_dir filename = - let path = Filename.concat crash_dir filename in - try - let stat = Unix.stat path in - match stat.Unix.st_kind with - | Unix.S_DIR -> - (* crash dumps are directories *) - let cmd = Printf.sprintf "%s -rf %s" rm path in - let output = Helpers.get_process_output cmd in - if output <> "" then warn "Output from %s: %s" cmd output - | _ -> - error "Crashdump path %s refers to something other than a directory!" 
path; - with e -> - error "Caught exception while deleting crashdump at path %s (%s)" filename (ExnHelper.string_of_exn e); - raise e + let path = Filename.concat crash_dir filename in + try + let stat = Unix.stat path in + match stat.Unix.st_kind with + | Unix.S_DIR -> + (* crash dumps are directories *) + let cmd = Printf.sprintf "%s -rf %s" rm path in + let output = Helpers.get_process_output cmd in + if output <> "" then warn "Output from %s: %s" cmd output + | _ -> + error "Crashdump path %s refers to something other than a directory!" path; + with e -> + error "Caught exception while deleting crashdump at path %s (%s)" filename (ExnHelper.string_of_exn e); + raise e (* Called once on host boot to resync the crash directory with the database *) let resynchronise ~__context ~host = - debug "Xapi_host_crashdump.resynchronise"; - let all_refs = Db.Host.get_crashdumps ~__context ~self:host in - let db_filenames = List.map - (fun self -> - Db.Host_crashdump.get_filename ~__context ~self) all_refs in + debug "Xapi_host_crashdump.resynchronise"; + let all_refs = Db.Host.get_crashdumps ~__context ~self:host in + let db_filenames = List.map + (fun self -> + Db.Host_crashdump.get_filename ~__context ~self) all_refs in - let real_filenames = - List.filter (fun filename -> - let stat = Unix.stat (Filename.concat crash_dir filename) in - stat.Unix.st_kind = Unix.S_DIR (*only directories are marked as crashdumps*) - ) - (try Array.to_list (Sys.readdir crash_dir) with _ -> []) in - let gone_away = List.set_difference db_filenames real_filenames - and arrived = List.set_difference real_filenames db_filenames in + let real_filenames = + List.filter (fun filename -> + let stat = Unix.stat (Filename.concat crash_dir filename) in + stat.Unix.st_kind = Unix.S_DIR (*only directories are marked as crashdumps*) + ) + (try Array.to_list (Sys.readdir crash_dir) with _ -> []) in + let gone_away = List.set_difference db_filenames real_filenames + and arrived = List.set_difference 
real_filenames db_filenames in - let was_shutdown_cleanly = try bool_of_string (Localdb.get Constants.host_restarted_cleanly) with _ -> false in - Localdb.put Constants.host_restarted_cleanly "false"; - (* If HA is enabled AND no crashdump appeared AND we weren't shutdown cleanly then assume it was a fence. *) - let ha_is_enabled = - try Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) - with _ -> false (* on first boot no-pool=>exn, but on first boot HA is never enabled *) in - begin - if ha_is_enabled && (arrived = []) && not was_shutdown_cleanly && !Xapi_globs.on_system_boot - then Xapi_alert.add ~msg:Api_messages.ha_host_was_fenced ~cls:`Host ~obj_uuid:(Db.Host.get_uuid ~__context ~self:host) ~body:"" - end; + let was_shutdown_cleanly = try bool_of_string (Localdb.get Constants.host_restarted_cleanly) with _ -> false in + Localdb.put Constants.host_restarted_cleanly "false"; + (* If HA is enabled AND no crashdump appeared AND we weren't shutdown cleanly then assume it was a fence. 
*) + let ha_is_enabled = + try Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) + with _ -> false (* on first boot no-pool=>exn, but on first boot HA is never enabled *) in + begin + if ha_is_enabled && (arrived = []) && not was_shutdown_cleanly && !Xapi_globs.on_system_boot + then Xapi_alert.add ~msg:Api_messages.ha_host_was_fenced ~cls:`Host ~obj_uuid:(Db.Host.get_uuid ~__context ~self:host) ~body:"" + end; - let table = List.combine db_filenames all_refs in - List.iter (fun filename -> - debug "Deleting record corresponding to old crashdump %s" filename; - let r = List.assoc filename table in - Db.Host_crashdump.destroy ~__context ~self:r) gone_away; - List.iter (fun filename -> - debug "Adding record corresponding to new crashdump %s" filename; - let cmd = Printf.sprintf "%s --bytes -s %s/%s" du crash_dir filename in - let size = match String.split_f String.isspace (Helpers.get_process_output cmd) with - | size :: _ -> Int64.of_string size - | _ -> (-1L) in - let timestamp = - let open Unix in - try Scanf.sscanf filename "%04d%02d%02d-%02d%02d%02d-UTC" - (fun year mon tm_mday tm_hour tm_min tm_sec -> - fst ( mktime {tm_year=year-1900; tm_mon=mon-1; tm_mday; tm_hour; tm_min; tm_sec; tm_wday=0; tm_yday=0; tm_isdst=false})) - with _ -> - (Unix.stat (Filename.concat crash_dir filename)).Unix.st_ctime in - let timestamp = Date.of_float timestamp in - let r = Ref.make () and uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.Host_crashdump.create ~__context ~ref:r ~uuid ~other_config:[] - ~host ~timestamp ~size ~filename) arrived + let table = List.combine db_filenames all_refs in + List.iter (fun filename -> + debug "Deleting record corresponding to old crashdump %s" filename; + let r = List.assoc filename table in + Db.Host_crashdump.destroy ~__context ~self:r) gone_away; + List.iter (fun filename -> + debug "Adding record corresponding to new crashdump %s" filename; + let cmd = Printf.sprintf "%s --bytes -s %s/%s" du crash_dir filename in + let 
size = match String.split_f String.isspace (Helpers.get_process_output cmd) with + | size :: _ -> Int64.of_string size + | _ -> (-1L) in + let timestamp = + let open Unix in + try Scanf.sscanf filename "%04d%02d%02d-%02d%02d%02d-UTC" + (fun year mon tm_mday tm_hour tm_min tm_sec -> + fst ( mktime {tm_year=year-1900; tm_mon=mon-1; tm_mday; tm_hour; tm_min; tm_sec; tm_wday=0; tm_yday=0; tm_isdst=false})) + with _ -> + (Unix.stat (Filename.concat crash_dir filename)).Unix.st_ctime in + let timestamp = Date.of_float timestamp in + let r = Ref.make () and uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.Host_crashdump.create ~__context ~ref:r ~uuid ~other_config:[] + ~host ~timestamp ~size ~filename) arrived let destroy ~__context ~self = - let filename = Db.Host_crashdump.get_filename ~__context ~self in - finally - (fun () -> delete_crashdump_dir filename) - (fun () -> Db.Host_crashdump.destroy ~__context ~self) + let filename = Db.Host_crashdump.get_filename ~__context ~self in + finally + (fun () -> delete_crashdump_dir filename) + (fun () -> Db.Host_crashdump.destroy ~__context ~self) let upload ~__context ~self ~url ~options = - let filename = Db.Host_crashdump.get_filename ~__context ~self in - let url = if url = "" then (upload_url filename) else url in - do_upload "host-crash-upload" (crash_dir ^ "/" ^ filename) url options + let filename = Db.Host_crashdump.get_filename ~__context ~self in + let url = if url = "" then (upload_url filename) else url in + do_upload "host-crash-upload" (crash_dir ^ "/" ^ filename) url options diff --git a/ocaml/xapi/xapi_host_display.ml b/ocaml/xapi/xapi_host_display.ml index 2e175bd6e64..56ebe059580 100644 --- a/ocaml/xapi/xapi_host_display.ml +++ b/ocaml/xapi/xapi_host_display.ml @@ -15,17 +15,17 @@ let script = "/opt/xensource/libexec/host-display" let call_script ~command = - let (stdout, _) = Forkhelpers.execute_command_get_output script [command] - in String.trim stdout + let (stdout, _) = 
Forkhelpers.execute_command_get_output script [command] + in String.trim stdout let disable () = - let (_: string) = call_script "disable" in () + let (_: string) = call_script "disable" in () let enable () = - let (_: string) = call_script "enable" in () + let (_: string) = call_script "enable" in () let status () = - match call_script "status" with - | "disabled" -> `disabled - | "enabled" -> `enabled - | _ -> `unknown + match call_script "status" with + | "disabled" -> `disabled + | "enabled" -> `enabled + | _ -> `unknown diff --git a/ocaml/xapi/xapi_host_helpers.ml b/ocaml/xapi/xapi_host_helpers.ml index 834f4668c12..2265702e71a 100644 --- a/ocaml/xapi/xapi_host_helpers.ml +++ b/ocaml/xapi/xapi_host_helpers.ml @@ -13,7 +13,7 @@ *) (** Common code between the fake and real servers for dealing with Hosts. * @group Host Management - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -25,10 +25,10 @@ open Record_util (* for host_operation_to_string *) open Threadext let all_operations = [ `provision; `evacuate; `reboot; `shutdown; - `vm_start; `vm_resume; `vm_migrate; `power_on ] + `vm_start; `vm_resume; `vm_migrate; `power_on ] (** Returns a table of operations -> API error options (None if the operation would be ok) *) -let valid_operations ~__context record _ref' = +let valid_operations ~__context record _ref' = let _ref = Ref.string_of _ref' in let current_ops = List.map snd record.Db_actions.host_current_operations in @@ -36,60 +36,60 @@ let valid_operations ~__context record _ref' = List.iter (fun x -> Hashtbl.replace table x None) all_operations; let set_errors (code: string) (params: string list) (ops: API.host_allowed_operations_set) = List.iter (fun op -> - if Hashtbl.find table op = None - then Hashtbl.replace table op (Some(code, params))) ops in + if Hashtbl.find table op = None + then Hashtbl.replace table op (Some(code, params))) ops in (* Operations are divided into two groups: - 1. 
those that create new VMs: `provision, `vm_resume, `vm_migrate - 2. those that remove VMs: `evacuate, `reboot, `shutdown *) + 1. those that create new VMs: `provision, `vm_resume, `vm_migrate + 2. those that remove VMs: `evacuate, `reboot, `shutdown *) let is_creating_new x = List.mem x [ `provision; `vm_resume; `vm_migrate ] in let is_removing x = List.mem x [ `evacuate; `reboot; `shutdown ] in let creating_new = List.fold_left (fun acc op -> acc || (is_creating_new op)) false current_ops in let removing = List.fold_left (fun acc op -> acc || (is_removing op)) false current_ops in List.iter - (fun op -> - if is_creating_new op && removing || (is_removing op && creating_new) - then set_errors Api_errors.other_operation_in_progress - [ "host"; _ref; host_operation_to_string (List.hd current_ops) ] - [ op ] - ) (List.filter (fun x -> x <> `power_on) all_operations); + (fun op -> + if is_creating_new op && removing || (is_removing op && creating_new) + then set_errors Api_errors.other_operation_in_progress + [ "host"; _ref; host_operation_to_string (List.hd current_ops) ] + [ op ] + ) (List.filter (fun x -> x <> `power_on) all_operations); (* reboot and shutdown cannot run concurrently *) if List.mem `reboot current_ops then set_errors Api_errors.other_operation_in_progress - [ "host"; _ref; host_operation_to_string `reboot ] [ `shutdown ]; + [ "host"; _ref; host_operation_to_string `reboot ] [ `shutdown ]; if List.mem `shutdown current_ops then set_errors Api_errors.other_operation_in_progress - [ "host"; _ref; host_operation_to_string `shutdown ] [ `reboot ]; + [ "host"; _ref; host_operation_to_string `shutdown ] [ `reboot ]; (* Prevent more than one provision happening at a time to prevent extreme dom0 load (in the case of the debian template). Once the template becomes a 'real' template we can relax this. 
*) if List.mem `provision current_ops then set_errors Api_errors.other_operation_in_progress - [ "host"; _ref; host_operation_to_string `provision ] - [ `provision ]; + [ "host"; _ref; host_operation_to_string `provision ] + [ `provision ]; (* The host must be disabled before reboots or shutdowns are permitted *) if record.Db_actions.host_enabled then set_errors Api_errors.host_not_disabled [] [ `reboot; `shutdown ]; (* The host must be (thought to be down) before power_on is possible *) - begin - try - if Db.Host_metrics.get_live ~__context ~self:record.Db_actions.host_metrics - then set_errors Api_errors.host_is_live [ _ref ] [ `power_on ] - with _ -> () + begin + try + if Db.Host_metrics.get_live ~__context ~self:record.Db_actions.host_metrics + then set_errors Api_errors.host_is_live [ _ref ] [ `power_on ] + with _ -> () end; (* The host power_on_mode must be not disabled *) - begin - try + begin + try if record.Db_actions.host_power_on_mode = "" then set_errors Api_errors.host_power_on_mode_disabled [] [ `power_on ] - with _ -> () + with _ -> () end; (* The power-on-host plugin must be available before power_on is possible *) - begin + begin try Unix.access (Filename.concat !Xapi_globs.xapi_plugins_root Constants.power_on_plugin) [ Unix.X_OK ] with _ -> set_errors Api_errors.xenapi_missing_plugin [ Constants.power_on_plugin ] [ `power_on ] end; @@ -97,7 +97,7 @@ let valid_operations ~__context record _ref' = (* Check where there are any attached clustered SRs. If so: * - Only one host may be down at a time; * - No hosts may go down if the SR is "recovering". 
- *) + *) let plugged_srs = Helpers.get_all_plugged_srs ~__context in let plugged_clustered_srs = List.filter (fun self -> Db.SR.get_clustered ~__context ~self) plugged_srs in if plugged_clustered_srs <> [] then begin @@ -116,8 +116,8 @@ let valid_operations ~__context record _ref' = (* All other operations may be parallelised *) table - -let throw_error table op = + +let throw_error table op = if not(Hashtbl.mem table op) then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_host_helpers.assert_operation_valid unknown operation: %s" (host_operation_to_string op) ])); @@ -125,7 +125,7 @@ let throw_error table op = | Some (code, params) -> raise (Api_errors.Server_error(code, params)) | None -> () -let assert_operation_valid ~__context ~self ~(op:API.host_allowed_operations) = +let assert_operation_valid ~__context ~self ~(op:API.host_allowed_operations) = let all = Db.Host.get_record_internal ~__context ~self in let table = valid_operations ~__context all self in throw_error table op @@ -157,12 +157,12 @@ let shutdown ~__context ~host = () let reboot ~__context ~host = () -let update_host_metrics ~__context ~host ~memory_total ~memory_free = +let update_host_metrics ~__context ~host ~memory_total ~memory_free = (* If HA is enabled then we don't set the live flag at all. If the node is marked as shutting down then we ignore the heartbeats. 
*) let pool = Helpers.get_pool ~__context in let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:pool in - let shutting_down = + let shutting_down = Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m (fun () -> List.mem host !Xapi_globs.hosts_which_are_shutting_down) in let should_set_live = not ha_enabled && not shutting_down in @@ -177,12 +177,12 @@ let update_host_metrics ~__context ~host ~memory_total ~memory_free = if should_set_live then begin Db.Host_metrics.set_live ~__context ~self:m ~value:true; update_allowed_operations ~__context ~self:host - end + end end else warn "Host %s has invalid Host_metrics object reference" (Ref.string_of host) -(* When the Host.shutdown and Host.reboot calls return to the master, the slave is - shutting down asycnronously. We immediately set the Host_metrics.live to false +(* When the Host.shutdown and Host.reboot calls return to the master, the slave is + shutting down asycnronously. We immediately set the Host_metrics.live to false and add the host to the global list of known-dying hosts. *) let mark_host_as_dead ~__context ~host ~reason = Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m @@ -209,76 +209,76 @@ let startup_complete = ref false let startup_complete_m = Mutex.create () let signal_startup_complete () = - Mutex.execute startup_complete_m (fun () -> startup_complete := true) + Mutex.execute startup_complete_m (fun () -> startup_complete := true) let assert_startup_complete () = - Mutex.execute startup_complete_m - (fun () -> if not (!startup_complete) then - raise (Api_errors.Server_error (Api_errors.host_still_booting, []))) + Mutex.execute startup_complete_m + (fun () -> if not (!startup_complete) then + raise (Api_errors.Server_error (Api_errors.host_still_booting, []))) let consider_enabling_host_nolock ~__context = - debug "Xapi_host_helpers.consider_enabling_host_nolock called"; - (* If HA is enabled only consider marking the host as enabled if all the storage plugs in successfully. 
- Disabled hosts are excluded from the HA planning calculations. Otherwise a host may boot, - fail to plug in a PBD and cause all protected VMs to suddenly become non-agile. *) - let ha_enabled = try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false in - let localhost = Helpers.get_localhost ~__context in - let pbds = Db.Host.get_PBDs ~__context ~self:localhost in - Storage_access.resynchronise_pbds ~__context ~pbds; - let all_pbds_ok = List.fold_left (&&) true (List.map (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds) in - - if not !user_requested_host_disable && (not ha_enabled || all_pbds_ok) then begin - (* If we were in the middle of a shutdown or reboot with HA enabled but somehow we failed - and xapi restarted, make sure we don't automatically re-enable ourselves. This is to avoid - letting a machine with no fencing touch any VMs. Once the host reboots we can safely clear - the flag 'host_disabled_until_reboot' *) - let pool = Helpers.get_pool ~__context in - if !Xapi_globs.on_system_boot then begin - debug "Host.enabled: system has just restarted: setting localhost to enabled"; - Db.Host.set_enabled ~__context ~self:localhost ~value:true; - Localdb.put Constants.host_disabled_until_reboot "false"; - (* Start processing pending VM powercycle events *) - Local_work_queue.start_vm_lifecycle_queue (); - end else begin - if try bool_of_string (Localdb.get Constants.host_disabled_until_reboot) with _ -> false then begin - debug "Host.enabled: system not just rebooted but host_disabled_until_reboot still set. 
Leaving host disabled"; - end else begin - debug "Host.enabled: system not just rebooted && host_disabled_until_reboot not set: setting localhost to enabled"; - Db.Host.set_enabled ~__context ~self:localhost ~value:true; - (* Start processing pending VM powercycle events *) - Local_work_queue.start_vm_lifecycle_queue (); - end - end; - (* If Host has been enabled and HA is also enabled then tell the master to recompute its plan *) - if Db.Host.get_enabled ~__context ~self:localhost && (Db.Pool.get_ha_enabled ~__context ~self:pool) - then Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.Pool.ha_schedule_plan_recomputation rpc session_id) - end; - signal_startup_complete () + debug "Xapi_host_helpers.consider_enabling_host_nolock called"; + (* If HA is enabled only consider marking the host as enabled if all the storage plugs in successfully. + Disabled hosts are excluded from the HA planning calculations. Otherwise a host may boot, + fail to plug in a PBD and cause all protected VMs to suddenly become non-agile. *) + let ha_enabled = try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false in + let localhost = Helpers.get_localhost ~__context in + let pbds = Db.Host.get_PBDs ~__context ~self:localhost in + Storage_access.resynchronise_pbds ~__context ~pbds; + let all_pbds_ok = List.fold_left (&&) true (List.map (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds) in + + if not !user_requested_host_disable && (not ha_enabled || all_pbds_ok) then begin + (* If we were in the middle of a shutdown or reboot with HA enabled but somehow we failed + and xapi restarted, make sure we don't automatically re-enable ourselves. This is to avoid + letting a machine with no fencing touch any VMs. 
Once the host reboots we can safely clear + the flag 'host_disabled_until_reboot' *) + let pool = Helpers.get_pool ~__context in + if !Xapi_globs.on_system_boot then begin + debug "Host.enabled: system has just restarted: setting localhost to enabled"; + Db.Host.set_enabled ~__context ~self:localhost ~value:true; + Localdb.put Constants.host_disabled_until_reboot "false"; + (* Start processing pending VM powercycle events *) + Local_work_queue.start_vm_lifecycle_queue (); + end else begin + if try bool_of_string (Localdb.get Constants.host_disabled_until_reboot) with _ -> false then begin + debug "Host.enabled: system not just rebooted but host_disabled_until_reboot still set. Leaving host disabled"; + end else begin + debug "Host.enabled: system not just rebooted && host_disabled_until_reboot not set: setting localhost to enabled"; + Db.Host.set_enabled ~__context ~self:localhost ~value:true; + (* Start processing pending VM powercycle events *) + Local_work_queue.start_vm_lifecycle_queue (); + end + end; + (* If Host has been enabled and HA is also enabled then tell the master to recompute its plan *) + if Db.Host.get_enabled ~__context ~self:localhost && (Db.Pool.get_ha_enabled ~__context ~self:pool) + then Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.Pool.ha_schedule_plan_recomputation rpc session_id) + end; + signal_startup_complete () (** Attempt to minimise the number of times we call consider_enabling_host_nolock *) let consider_enabling_host = - At_least_once_more.make "consider_enabling_host" - (fun () -> - Server_helpers.exec_with_new_task "consider_enabling_host" - (fun __context -> consider_enabling_host_nolock __context) - ) + At_least_once_more.make "consider_enabling_host" + (fun () -> + Server_helpers.exec_with_new_task "consider_enabling_host" + (fun __context -> consider_enabling_host_nolock __context) + ) let consider_enabling_host_request ~__context = At_least_once_more.again consider_enabling_host let 
consider_enabling_host ~__context = - debug "Xapi_host_helpers.consider_enabling_host called"; - consider_enabling_host_request ~__context + debug "Xapi_host_helpers.consider_enabling_host called"; + consider_enabling_host_request ~__context module Host_requires_reboot = struct - let m = Mutex.create () + let m = Mutex.create () - let get () = - Mutex.execute m (fun () -> - try Unix.access Xapi_globs.requires_reboot_file [Unix.F_OK]; true with _ -> false - ) + let get () = + Mutex.execute m (fun () -> + try Unix.access Xapi_globs.requires_reboot_file [Unix.F_OK]; true with _ -> false + ) - let set () = - Mutex.execute m (fun () -> - Unixext.touch_file Xapi_globs.requires_reboot_file - ) + let set () = + Mutex.execute m (fun () -> + Unixext.touch_file Xapi_globs.requires_reboot_file + ) end diff --git a/ocaml/xapi/xapi_host_patch.ml b/ocaml/xapi/xapi_host_patch.ml index 5b1e24d19b4..5fcb96445dc 100644 --- a/ocaml/xapi/xapi_host_patch.ml +++ b/ocaml/xapi/xapi_host_patch.ml @@ -13,14 +13,14 @@ *) (** * @group Host Management - *) - +*) + module D = Debug.Make(struct let name="xapi" end) open D -let destroy ~__context ~self = +let destroy ~__context ~self = Db.Host_patch.destroy ~__context ~self let apply ~__context ~self = - raise (Api_errors.Server_error (Api_errors.message_deprecated, + raise (Api_errors.Server_error (Api_errors.message_deprecated, [])) diff --git a/ocaml/xapi/xapi_http.ml b/ocaml/xapi/xapi_http.ml index d93d3c385a9..0e1a9ddc7c3 100644 --- a/ocaml/xapi/xapi_http.ml +++ b/ocaml/xapi/xapi_http.ml @@ -16,7 +16,7 @@ module D = Debug.Make(struct let name="xapi" end) open D -let validate_session __context session_id realm = +let validate_session __context session_id realm = try let (_: string) = Db.Session.get_uuid ~__context ~self:session_id in () with _ -> @@ -25,18 +25,18 @@ let validate_session __context session_id realm = (* Talk to the master over the network. 
NB we deliberately use the network rather than the unix domain socket because we don't want to accidentally bypass the authentication *) -let inet_rpc xml = - let version = "1.1" and path = "/" in - let http = 80 and https = !Xapi_globs.https_port in - (* Bypass SSL for localhost, this works even if the management interface - is disabled. *) - let open Xmlrpc_client in - let transport = - if Pool_role.is_master () - then TCP("127.0.0.1", http) - else SSL(SSL.make (), Pool_role.get_master_address (), https) in - let http = xmlrpc ~version path in - XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http xml +let inet_rpc xml = + let version = "1.1" and path = "/" in + let http = 80 and https = !Xapi_globs.https_port in + (* Bypass SSL for localhost, this works even if the management interface + is disabled. *) + let open Xmlrpc_client in + let transport = + if Pool_role.is_master () + then TCP("127.0.0.1", http) + else SSL(SSL.make (), Pool_role.get_master_address (), https) in + let http = xmlrpc ~version path in + XMLRPC_protocol.rpc ~srcstr:"xapi" ~dststr:"xapi" ~transport ~http xml open Client @@ -48,42 +48,42 @@ let get_session_id (req: Request.t) = let all = req.Request.cookie @ req.Request.query in if List.mem_assoc "session_id" all then - let session_id = (Ref.of_string (List.assoc "session_id" all)) in - session_id - else - Ref.null + let session_id = (Ref.of_string (List.assoc "session_id" all)) in + session_id + else + Ref.null let append_to_master_audit_log __context action line = (* http actions are not automatically written to the master's audit log *) (* it is necessary to do that manually from the slaves *) - if Stdext.Xstringext.String.startswith - Datamodel.rbac_http_permission_prefix - action - then - if Pool_role.is_slave () - then begin - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Pool.audit_log_append ~rpc ~session_id ~line - ) - end + if Stdext.Xstringext.String.startswith + 
Datamodel.rbac_http_permission_prefix + action + then + if Pool_role.is_slave () + then begin + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Pool.audit_log_append ~rpc ~session_id ~line + ) + end (* static call-back from rbac-audit into append_to_master_audit_log function *) (* used to avoid cycle dependency between xapi_http and rbac_audit *) let init_fn_append_to_master_audit_log = - if !Rbac_audit.fn_append_to_master_audit_log = None - then Rbac_audit.fn_append_to_master_audit_log := Some(append_to_master_audit_log) + if !Rbac_audit.fn_append_to_master_audit_log = None + then Rbac_audit.fn_append_to_master_audit_log := Some(append_to_master_audit_log) let rbac_audit_params_of (req: Request.t) = - let all = req.Request.cookie @ req.Request.query in - List.fold_right (fun (n,v) (acc_n,acc_v) -> - (n::acc_n, - (Rpc.String v)::acc_v - ) - ) - all - ([],[]) + let all = req.Request.cookie @ req.Request.query in + List.fold_right (fun (n,v) (acc_n,acc_v) -> + (n::acc_n, + (Rpc.String v)::acc_v + ) + ) + all + ([],[]) let assert_credentials_ok realm ?(http_action=realm) ?(fn=Rbac.nofn) (req: Request.t) ic = let http_permission = Datamodel.rbac_http_permission_prefix ^ http_action in @@ -92,33 +92,33 @@ let assert_credentials_ok realm ?(http_action=realm) ?(fn=Rbac.nofn) (req: Reque if List.mem_assoc "subtask_of" all then Some (Ref.of_string (List.assoc "subtask_of" all)) else None in - let task_id = - if List.mem_assoc "task_id" all - then Some (Ref.of_string (List.assoc "task_id" all)) - else None - in - let rbac_raise permission msg exc = + let task_id = + if List.mem_assoc "task_id" all + then Some (Ref.of_string (List.assoc "task_id" all)) + else None + in + let rbac_raise permission msg exc = (match task_id with - | None -> () - | Some task_id -> - TaskHelper.failed - ~__context:(Context.from_forwarded_task task_id) - (Api_errors.Server_error (Api_errors.rbac_permission_denied,[permission;msg])) + | None -> () + | Some task_id -> + 
TaskHelper.failed + ~__context:(Context.from_forwarded_task task_id) + (Api_errors.Server_error (Api_errors.rbac_permission_denied,[permission;msg])) ); raise exc - in - let rbac_task_desc = "handler" in - let rbac_check session_id = + in + let rbac_task_desc = "handler" in + let rbac_check session_id = (try Rbac.check_with_new_task session_id http_permission ~fn - ~args:(rbac_audit_params_of req) - ~task_desc:rbac_task_desc - with - | Api_errors.Server_error (err,[perm;msg]) - when err = Api_errors.rbac_permission_denied - -> rbac_raise perm msg Http.Forbidden - | e -> rbac_raise http_permission (ExnHelper.string_of_exn e) e - ) - in + ~args:(rbac_audit_params_of req) + ~task_desc:rbac_task_desc + with + | Api_errors.Server_error (err,[perm;msg]) + when err = Api_errors.rbac_permission_denied + -> rbac_raise perm msg Http.Forbidden + | e -> rbac_raise http_permission (ExnHelper.string_of_exn e) e + ) + in if Context.is_unix_socket ic then () (* Connections from unix-domain socket implies you're root on the box, ergo everything is OK *) else @@ -127,44 +127,44 @@ let assert_credentials_ok realm ?(http_action=realm) ?(fn=Rbac.nofn) (req: Reque (* Session ref has been passed in - check that it's OK *) begin Server_helpers.exec_with_new_task ?subtask_of "xapi_http_session_check" (fun __context -> - let session_id = (Ref.of_string (List.assoc "session_id" all)) in - (try validate_session __context session_id realm; - with _ -> raise (Http.Unauthorised realm)); - rbac_check session_id; - ); + let session_id = (Ref.of_string (List.assoc "session_id" all)) in + (try validate_session __context session_id realm; + with _ -> raise (Http.Unauthorised realm)); + rbac_check session_id; + ); end else if List.mem_assoc "pool_secret" all then begin - if List.assoc "pool_secret" all = !Xapi_globs.pool_secret then - fn () - else - raise (Http.Unauthorised realm) + if List.assoc "pool_secret" all = !Xapi_globs.pool_secret then + fn () + else + raise (Http.Unauthorised realm) end else 
begin match req.Http.Request.auth with - | Some (Http.Basic(username, password)) -> - begin - let session_id = try - Client.Session.login_with_password inet_rpc username password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent - with _ -> raise (Http.Unauthorised realm) - in - Stdext.Pervasiveext.finally - (fun ()-> rbac_check session_id) - (fun ()->(try Client.Session.logout inet_rpc session_id with _ -> ())) - end - | Some (Http.UnknownAuth x) -> - raise (Failure (Printf.sprintf "Unknown authorization header: %s" x)) - | _ -> begin - debug "No header credentials during http connection to %s" realm; - raise (Http.Unauthorised realm) end + | Some (Http.Basic(username, password)) -> + begin + let session_id = try + Client.Session.login_with_password inet_rpc username password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent + with _ -> raise (Http.Unauthorised realm) + in + Stdext.Pervasiveext.finally + (fun ()-> rbac_check session_id) + (fun ()->(try Client.Session.logout inet_rpc session_id with _ -> ())) + end + | Some (Http.UnknownAuth x) -> + raise (Failure (Printf.sprintf "Unknown authorization header: %s" x)) + | _ -> begin + debug "No header credentials during http connection to %s" realm; + raise (Http.Unauthorised realm) end end -let with_context ?(dummy=false) label (req: Request.t) (s: Unix.file_descr) f = +let with_context ?(dummy=false) label (req: Request.t) (s: Unix.file_descr) f = let all = req.Request.cookie @ req.Request.query in let task_id = - if List.mem_assoc "task_id" all + if List.mem_assoc "task_id" all then Some (Ref.of_string (List.assoc "task_id" all)) else None in let subtask_of = @@ -173,55 +173,55 @@ let with_context ?(dummy=false) label (req: Request.t) (s: Unix.file_descr) f = else None in let localhost = Server_helpers.exec_with_new_task "with_context" (fun __context -> Helpers.get_localhost ~__context) in try - let session_id,must_logout = + let session_id,must_logout = if Context.is_unix_socket s then 
Client.Session.slave_login inet_rpc localhost !Xapi_globs.pool_secret, true - else + else if List.mem_assoc "session_id" all then Ref.of_string (List.assoc "session_id" all), false - else - if List.mem_assoc "pool_secret" all - then Client.Session.slave_login inet_rpc localhost (List.assoc "pool_secret" all), true - else begin - match req.Http.Request.auth with - | Some (Http.Basic(username, password)) -> - begin - try - Client.Session.login_with_password inet_rpc username password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent, true - with Api_errors.Server_error(code, params) when code = Api_errors.session_authentication_failed -> - raise (Http.Unauthorised label) - end - | Some (Http.UnknownAuth x) -> - raise (Failure (Printf.sprintf "Unknown authorization header: %s" x)) - | _ -> raise (Http.Unauthorised label) - end + else + if List.mem_assoc "pool_secret" all + then Client.Session.slave_login inet_rpc localhost (List.assoc "pool_secret" all), true + else begin + match req.Http.Request.auth with + | Some (Http.Basic(username, password)) -> + begin + try + Client.Session.login_with_password inet_rpc username password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent, true + with Api_errors.Server_error(code, params) when code = Api_errors.session_authentication_failed -> + raise (Http.Unauthorised label) + end + | Some (Http.UnknownAuth x) -> + raise (Failure (Printf.sprintf "Unknown authorization header: %s" x)) + | _ -> raise (Http.Unauthorised label) + end in Stdext.Pervasiveext.finally (fun () -> - let login_perform_logout __context = - validate_session __context session_id label; - if not must_logout then Xapi_session.consider_touching_session inet_rpc session_id (); - f __context - in - begin match task_id with - | None -> Server_helpers.exec_with_new_task ?subtask_of ~session_id ~task_in_database:(not dummy) ~origin:(Context.Http(req,s)) label login_perform_logout - | Some task_id -> Server_helpers.exec_with_forwarded_task ~session_id 
~origin:(Context.Http(req,s)) task_id login_perform_logout - end + let login_perform_logout __context = + validate_session __context session_id label; + if not must_logout then Xapi_session.consider_touching_session inet_rpc session_id (); + f __context + in + begin match task_id with + | None -> Server_helpers.exec_with_new_task ?subtask_of ~session_id ~task_in_database:(not dummy) ~origin:(Context.Http(req,s)) label login_perform_logout + | Some task_id -> Server_helpers.exec_with_forwarded_task ~session_id ~origin:(Context.Http(req,s)) task_id login_perform_logout + end ) - (fun () -> - if must_logout - then Helpers.log_exn_continue "Logging out" - (fun session_id -> Client.Session.logout inet_rpc session_id) session_id + (fun () -> + if must_logout + then Helpers.log_exn_continue "Logging out" + (fun session_id -> Client.Session.logout inet_rpc session_id) session_id ) - with Http.Unauthorised s as e -> - let fail __context = + with Http.Unauthorised s as e -> + let fail __context = TaskHelper.failed ~__context (Api_errors.Server_error(Api_errors.session_authentication_failed, [])) in debug "No authentication provided to http handler: returning 401 unauthorised"; (* Fail the task *) begin match task_id with - | None -> Server_helpers.exec_with_new_task ~task_in_database:(not dummy) label fail - | Some task_id -> Server_helpers.exec_with_forwarded_task task_id fail + | None -> Server_helpers.exec_with_new_task ~task_in_database:(not dummy) label fail + | Some task_id -> Server_helpers.exec_with_forwarded_task task_id fail end; req.Request.close <- true; raise e @@ -229,90 +229,90 @@ let with_context ?(dummy=false) label (req: Request.t) (s: Unix.file_descr) f = (* Other exceptions are dealt with by the Http_svr module's exception handler *) let server = - let server = Http_svr.Server.empty () in - Http_svr.Server.enable_fastpath server; - server - + let server = Http_svr.Server.empty () in + Http_svr.Server.enable_fastpath server; + server + let http_request = 
Http.Request.make ~user_agent:Xapi_globs.xapi_user_agent let bind inetaddr = - let description = match inetaddr with - | Unix.ADDR_INET(ip, port) -> Printf.sprintf "INET %s:%d" (Unix.string_of_inet_addr ip) port - | Unix.ADDR_UNIX path -> Printf.sprintf "UNIX %s" path in - (* Sometimes we see failures which we hope are transient. If this - happens then we'll retry a couple of times before failing. *) - let start = Unix.gettimeofday () in - let timeout = 30.0 in (* 30s *) - let rec bind' () = - try - Some (Http_svr.bind ~listen_backlog:Xapi_globs.listen_backlog inetaddr description) - with - | Unix.Unix_error(code, _, _) when code = Unix.EAFNOSUPPORT -> - info "Kernel does not support IPv6"; - None - | Unix.Unix_error(code, _, _) -> - debug "While binding %s: %s" description (Unix.error_message code); - if Unix.gettimeofday () -. start < timeout then begin - Thread.delay 5.; - bind' () - end else - None - in - match bind' () with - | None -> failwith (Printf.sprintf "Failed to bind: %s" description) - | Some s -> - info "Successfully bound socket to: %s" description; - s + let description = match inetaddr with + | Unix.ADDR_INET(ip, port) -> Printf.sprintf "INET %s:%d" (Unix.string_of_inet_addr ip) port + | Unix.ADDR_UNIX path -> Printf.sprintf "UNIX %s" path in + (* Sometimes we see failures which we hope are transient. If this + happens then we'll retry a couple of times before failing. *) + let start = Unix.gettimeofday () in + let timeout = 30.0 in (* 30s *) + let rec bind' () = + try + Some (Http_svr.bind ~listen_backlog:Xapi_globs.listen_backlog inetaddr description) + with + | Unix.Unix_error(code, _, _) when code = Unix.EAFNOSUPPORT -> + info "Kernel does not support IPv6"; + None + | Unix.Unix_error(code, _, _) -> + debug "While binding %s: %s" description (Unix.error_message code); + if Unix.gettimeofday () -. 
start < timeout then begin + Thread.delay 5.; + bind' () + end else + None + in + match bind' () with + | None -> failwith (Printf.sprintf "Failed to bind: %s" description) + | Some s -> + info "Successfully bound socket to: %s" description; + s let add_handler (name, handler) = let action = - try List.assoc name Datamodel.http_actions - with Not_found -> - (* This should only affect developers: *) - error "HTTP handler %s not registered in ocaml/idl/datamodel.ml" name; - failwith (Printf.sprintf "Unregistered HTTP handler: %s" name) in + try List.assoc name Datamodel.http_actions + with Not_found -> + (* This should only affect developers: *) + error "HTTP handler %s not registered in ocaml/idl/datamodel.ml" name; + failwith (Printf.sprintf "Unregistered HTTP handler: %s" name) in let check_rbac = Rbac.is_rbac_enabled_for_http_action name in - let h = match handler with - | Http_svr.BufIO callback -> - Http_svr.BufIO (fun req ic context -> - (try - if check_rbac - then (* rbac checks *) - (try - assert_credentials_ok name req ~fn:(fun () -> callback req ic context) (Buf_io.fd_of ic) - with e -> - debug "Leaving RBAC-handler in xapi_http after: %s" (ExnHelper.string_of_exn e); - raise e - ) - else (* no rbac checks *) - callback req ic context - with - | Api_errors.Server_error(name, params) as e -> - error "Unhandled Api_errors.Server_error(%s, [ %s ])" name (String.concat "; " params); - raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) - ) - ) - | Http_svr.FdIO callback -> - Http_svr.FdIO (fun req ic context -> - (try - (if check_rbac then assert_credentials_ok name req ic); (* session and rbac checks *) - callback req ic context - with - | Api_errors.Server_error(name, params) as e -> - error "Unhandled Api_errors.Server_error(%s, [ %s ])" name (String.concat "; " params); - raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) - ) - ) - in + let h = match handler with + | Http_svr.BufIO callback -> + Http_svr.BufIO (fun req ic context -> + (try + 
if check_rbac + then (* rbac checks *) + (try + assert_credentials_ok name req ~fn:(fun () -> callback req ic context) (Buf_io.fd_of ic) + with e -> + debug "Leaving RBAC-handler in xapi_http after: %s" (ExnHelper.string_of_exn e); + raise e + ) + else (* no rbac checks *) + callback req ic context + with + | Api_errors.Server_error(name, params) as e -> + error "Unhandled Api_errors.Server_error(%s, [ %s ])" name (String.concat "; " params); + raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) + ) + ) + | Http_svr.FdIO callback -> + Http_svr.FdIO (fun req ic context -> + (try + (if check_rbac then assert_credentials_ok name req ic); (* session and rbac checks *) + callback req ic context + with + | Api_errors.Server_error(name, params) as e -> + error "Unhandled Api_errors.Server_error(%s, [ %s ])" name (String.concat "; " params); + raise (Http_svr.Generic_error (ExnHelper.string_of_exn e)) + ) + ) + in - match action with (meth, uri, sdk, sdkargs, roles, sub_actions) -> - let ty = match meth with - Datamodel.Get -> Http.Get - | Datamodel.Put -> Http.Put - | Datamodel.Post -> Http.Post - | Datamodel.Connect -> Http.Connect - | Datamodel.Options -> Http.Options - in Http_svr.Server.add_handler server ty uri h + match action with (meth, uri, sdk, sdkargs, roles, sub_actions) -> + let ty = match meth with + Datamodel.Get -> Http.Get + | Datamodel.Put -> Http.Put + | Datamodel.Post -> Http.Post + | Datamodel.Connect -> Http.Connect + | Datamodel.Options -> Http.Options + in Http_svr.Server.add_handler server ty uri h diff --git a/ocaml/xapi/xapi_local_session.ml b/ocaml/xapi/xapi_local_session.ml index 1b758d847ee..52cb02cd9e6 100644 --- a/ocaml/xapi/xapi_local_session.ml +++ b/ocaml/xapi/xapi_local_session.ml @@ -14,7 +14,7 @@ (** Code to handle local sessions, used so that slaves can communicate even when the master is down. 
*) -type t = { +type t = { r: API.ref_session; pool: bool; last_active: Stdext.Date.iso8601 } @@ -26,19 +26,19 @@ let table = Hashtbl.create 10 let get_all ~__context = Mutex.execute m (fun () -> Hashtbl.fold (fun k v acc -> k :: acc) table []) -let create ~__context ~pool = +let create ~__context ~pool = let r = Ref.make () in let session = { r = r; pool = pool; last_active = Stdext.Date.of_float (Unix.gettimeofday ()) } in Mutex.execute m (fun () -> Hashtbl.replace table r session); r -let get_record ~__context ~self = +let get_record ~__context ~self = Mutex.execute m (fun () -> Hashtbl.find table self) -let destroy ~__context ~self = +let destroy ~__context ~self = Mutex.execute m (fun () -> Hashtbl.remove table self) -let local_session_hook ~__context ~session_id = +let local_session_hook ~__context ~session_id = try ignore(get_record ~__context ~self:session_id); true with _ -> false - + diff --git a/ocaml/xapi/xapi_local_session.mli b/ocaml/xapi/xapi_local_session.mli index ffb7749f7d3..8aea8c54968 100644 --- a/ocaml/xapi/xapi_local_session.mli +++ b/ocaml/xapi/xapi_local_session.mli @@ -13,7 +13,7 @@ *) (** Represents local sessions, for use in emergency mode *) -type t = { +type t = { r: API.ref_session; pool: bool; last_active: Stdext.Date.iso8601 } diff --git a/ocaml/xapi/xapi_logs_download.ml b/ocaml/xapi/xapi_logs_download.ml index 3f76fd7e9ae..a73efbcffba 100644 --- a/ocaml/xapi/xapi_logs_download.ml +++ b/ocaml/xapi/xapi_logs_download.ml @@ -21,8 +21,8 @@ let logs_download_handler (req: Request.t) s _ = debug "running logs-download handler"; Xapi_http.with_context "Downloading host logs" req s (fun __context -> - Http_svr.headers s (Http.http_200_ok ()); - - debug "send the http headers"; - let pid = safe_close_and_exec None (Some s) None [] !Xapi_globs.logs_download [] in - waitpid_fail_if_bad_exit pid) + Http_svr.headers s (Http.http_200_ok ()); + + debug "send the http headers"; + let pid = safe_close_and_exec None (Some s) None [] 
!Xapi_globs.logs_download [] in + waitpid_fail_if_bad_exit pid) diff --git a/ocaml/xapi/xapi_main.ml b/ocaml/xapi/xapi_main.ml index 6e0323b73dc..3644c09fd62 100644 --- a/ocaml/xapi/xapi_main.ml +++ b/ocaml/xapi/xapi_main.ml @@ -13,25 +13,25 @@ *) (** * @group Main Loop and Start-up - *) +*) open Xapi let _ = - Debug.set_facility Syslog.Local5; + Debug.set_facility Syslog.Local5; - init_args(); (* need to read args to find out whether to daemonize or not *) - Xcp_service.maybe_daemonize (); + init_args(); (* need to read args to find out whether to daemonize or not *) + Xcp_service.maybe_daemonize (); (* Disable logging for the module requested in the config *) List.iter (fun m -> - D.debug "Disabling logging for: %s" m; - Debug.disable m - ) !Xapi_globs.disable_logging_for; + D.debug "Disabling logging for: %s" m; + Debug.disable m + ) !Xapi_globs.disable_logging_for; Stdext.Unixext.pidfile_write "/var/run/xapi.pid"; - (* chdir to /var/lib/xcp/debug so that's where xapi coredumps go + (* chdir to /var/lib/xcp/debug so that's where xapi coredumps go (in the unlikely event that there are any ;) *) Stdext.Unixext.mkdir_rec (Filename.concat "/var/lib/xcp" "debug") 0o700; Unix.chdir (Filename.concat "/var/lib/xcp" "debug"); diff --git a/ocaml/xapi/xapi_message.ml b/ocaml/xapi/xapi_message.ml index 05ef27f6976..b809cc49ba7 100644 --- a/ocaml/xapi/xapi_message.ml +++ b/ocaml/xapi/xapi_message.ml @@ -13,26 +13,26 @@ *) (** Module that defines API functions for Message objects * @group XenAPI functions - *) +*) + - (** Message store *) -(* We use a filesystem based 'database': +(* We use a filesystem based 'database': * Base directory: /var/lib/xcp/blobs/messages * All messages go in there, filename=timestamp - * + * * Symlinks are created to the messages for fast indexing: * /var/lib/xcp/blobs/messages/VM// -> message * /var/lib/xcp/blobs/messages/uuid/ -> message * /var/lib/xcp/blobs/messages/ref/ -> message - *) +*) open Stdext -open Listext +open Listext open 
Xstringext open Threadext - + module D = Debug.Make(struct let name="xapi" end) open D @@ -46,56 +46,56 @@ let in_memory_cache_length = ref 0 let in_memory_cache_length_max = 512 let in_memory_cache_length_default = 256 -let class_to_string cls = - match cls with - | `VM -> "VM" - | `Host -> "Host" - | `SR -> "SR" - | `Pool -> "Pool" - | `VMPP -> "VMPP" - | _ -> "unknown" - -let string_to_class str = - match str with - | "VM" -> `VM - | "Host" -> `Host - | "SR" -> `SR - | "Pool" -> `Pool - | "VMPP" -> `VMPP - | _ -> failwith "Bad type" +let class_to_string cls = + match cls with + | `VM -> "VM" + | `Host -> "Host" + | `SR -> "SR" + | `Pool -> "Pool" + | `VMPP -> "VMPP" + | _ -> "unknown" + +let string_to_class str = + match str with + | "VM" -> `VM + | "Host" -> `Host + | "SR" -> `SR + | "Pool" -> `Pool + | "VMPP" -> `VMPP + | _ -> failwith "Bad type" (* We use the timestamp to name the file. For consistency, use this function *) let timestamp_to_string f = Printf.sprintf "%0.5f" f - + (************* Marshalling/unmarshalling functions ************) let to_xml output _ref gen message = - let tag n next () = - Xmlm.output output (`El_start (("",n),[])); - List.iter (fun x -> x ()) next; - Xmlm.output output `El_end + let tag n next () = + Xmlm.output output (`El_start (("",n),[])); + List.iter (fun x -> x ()) next; + Xmlm.output output `El_end in let data dat () = Xmlm.output output (`Data dat) in Xmlm.output output (`Dtd None); let message_subtags = [ - tag "ref" [ data (Ref.string_of _ref) ]; - tag "name" [ data message.API.message_name ]; - tag "priority" [ data (Int64.to_string message.API.message_priority) ]; - tag "cls" [data (class_to_string message.API.message_cls) ]; - tag "obj_uuid" [data message.API.message_obj_uuid ]; - tag "timestamp" [data (Date.to_string message.API.message_timestamp) ]; - tag "uuid" [data message.API.message_uuid]; - tag "body" [data message.API.message_body] + tag "ref" [ data (Ref.string_of _ref) ]; + tag "name" [ data 
message.API.message_name ]; + tag "priority" [ data (Int64.to_string message.API.message_priority) ]; + tag "cls" [data (class_to_string message.API.message_cls) ]; + tag "obj_uuid" [data message.API.message_obj_uuid ]; + tag "timestamp" [data (Date.to_string message.API.message_timestamp) ]; + tag "uuid" [data message.API.message_uuid]; + tag "body" [data message.API.message_body] ] in let message_subtags = match gen with - | Some g -> - (tag "generation" [data (Int64.to_string g) ])::message_subtags - | None -> - message_subtags + | Some g -> + (tag "generation" [data (Int64.to_string g) ])::message_subtags + | None -> + message_subtags in tag "message" message_subtags () @@ -103,34 +103,34 @@ let to_xml output _ref gen message = let of_xml input = let current_elt = ref "" in let message = ref { - API.message_name=""; - API.message_priority=0L; - API.message_cls=`VM; - API.message_obj_uuid=""; - API.message_timestamp=Date.never; - API.message_body=""; - API.message_uuid = ""} + API.message_name=""; + API.message_priority=0L; + API.message_cls=`VM; + API.message_obj_uuid=""; + API.message_timestamp=Date.never; + API.message_body=""; + API.message_uuid = ""} in let _ref = ref "" in let gen = ref 0L in let rec f () = match Xmlm.input input with - | `El_start ((ns,tag),attr) -> current_elt := tag; f () - | `El_end -> current_elt := ""; if Xmlm.eoi input then () else f () - | `Data dat -> - begin match !current_elt with - | "name" -> message := {!message with API.message_name=dat} - | "priority" -> message := {!message with API.message_priority=Int64.of_string dat} - | "cls" -> message := {!message with API.message_cls=string_to_class dat} - | "obj_uuid" -> message := {!message with API.message_obj_uuid=dat} - | "timestamp" -> message := {!message with API.message_timestamp=Date.of_string dat} - | "uuid" -> message := {!message with API.message_uuid=dat} - | "body" -> message := {!message with API.message_body=dat} - | "generation" -> gen := Int64.of_string dat; - | 
"ref" -> _ref := dat - | _ -> failwith "Bad XML!" - end; - f () - | `Dtd _ -> f () + | `El_start ((ns,tag),attr) -> current_elt := tag; f () + | `El_end -> current_elt := ""; if Xmlm.eoi input then () else f () + | `Data dat -> + begin match !current_elt with + | "name" -> message := {!message with API.message_name=dat} + | "priority" -> message := {!message with API.message_priority=Int64.of_string dat} + | "cls" -> message := {!message with API.message_cls=string_to_class dat} + | "obj_uuid" -> message := {!message with API.message_obj_uuid=dat} + | "timestamp" -> message := {!message with API.message_timestamp=Date.of_string dat} + | "uuid" -> message := {!message with API.message_uuid=dat} + | "body" -> message := {!message with API.message_body=dat} + | "generation" -> gen := Int64.of_string dat; + | "ref" -> _ref := dat + | _ -> failwith "Bad XML!" + end; + f () + | `Dtd _ -> f () in try f (); @@ -140,40 +140,40 @@ let of_xml input = raise e let export_xml messages = - let size = 500 * (List.length messages) in - let buf = Buffer.create size in - let output = Xmlm.make_output (`Buffer buf) in - List.iter (function | r,m -> to_xml output r None m) messages ; - Buffer.contents buf + let size = 500 * (List.length messages) in + let buf = Buffer.create size in + let output = Xmlm.make_output (`Buffer buf) in + List.iter (function | r,m -> to_xml output r None m) messages ; + Buffer.contents buf let import_xml xml_in = - let split_xml = - let ob = Buffer.create 600 in - (* let i = Xmlm.make_input (`String (0, xml)) in *) - let o = Xmlm.make_output (`Buffer ob) in - let rec pull xml_in o depth = - Xmlm.output o (Xmlm.peek xml_in); - match Xmlm.input xml_in with - | `El_start _ -> pull xml_in o (depth + 1) - | `El_end -> if depth = 1 then () else pull xml_in o (depth - 1) - | `Data _ -> pull xml_in o depth - | `Dtd _ -> pull xml_in o depth - in - - let out = ref [] in - while not (Xmlm.eoi xml_in) do - pull xml_in o 0 ; - out := (Buffer.contents ob) :: !out ; - 
Buffer.clear ob - done ; - !out in - - let rec loop = function - | [] -> [] - | m :: ms -> - let im = Xmlm.make_input (`String (0, m)) in - (of_xml im) :: loop ms - in loop split_xml + let split_xml = + let ob = Buffer.create 600 in + (* let i = Xmlm.make_input (`String (0, xml)) in *) + let o = Xmlm.make_output (`Buffer ob) in + let rec pull xml_in o depth = + Xmlm.output o (Xmlm.peek xml_in); + match Xmlm.input xml_in with + | `El_start _ -> pull xml_in o (depth + 1) + | `El_end -> if depth = 1 then () else pull xml_in o (depth - 1) + | `Data _ -> pull xml_in o depth + | `Dtd _ -> pull xml_in o depth + in + + let out = ref [] in + while not (Xmlm.eoi xml_in) do + pull xml_in o 0 ; + out := (Buffer.contents ob) :: !out ; + Buffer.clear ob + done ; + !out in + + let rec loop = function + | [] -> [] + | m :: ms -> + let im = Xmlm.make_input (`String (0, m)) in + (of_xml im) :: loop ms + in loop split_xml (********** Symlink functions *************) @@ -193,39 +193,39 @@ let gen_symlink () = (** Returns a list of tuples - (directory, filename) *) let symlinks _ref gen message basefilename = - let symlinks = - [(class_symlink message.API.message_cls message.API.message_obj_uuid, None); - (uuid_symlink (), Some message.API.message_uuid); - (ref_symlink (), Some (Ref.string_of _ref))] in - let symlinks = - match gen with - | Some gen -> - (gen_symlink (), Some (Int64.to_string gen)) :: symlinks - | None -> - symlinks - in - List.map (fun (dir,fnameopt) -> - let newfname = match fnameopt with - | None -> basefilename - | Some f -> f - in - (dir,dir ^ "/" ^ newfname)) - symlinks + let symlinks = + [(class_symlink message.API.message_cls message.API.message_obj_uuid, None); + (uuid_symlink (), Some message.API.message_uuid); + (ref_symlink (), Some (Ref.string_of _ref))] in + let symlinks = + match gen with + | Some gen -> + (gen_symlink (), Some (Int64.to_string gen)) :: symlinks + | None -> + symlinks + in + List.map (fun (dir,fnameopt) -> + let newfname = match fnameopt 
with + | None -> basefilename + | Some f -> f + in + (dir,dir ^ "/" ^ newfname)) + symlinks (** Check to see if the UUID is valid. This should not use get_by_uuid as - this causes spurious exceptions to be logged... *) + this causes spurious exceptions to be logged... *) let check_uuid ~__context ~cls ~uuid = try - (match cls with - | `VM -> ignore(Db.VM.get_by_uuid ~__context ~uuid) - | `Host -> ignore(Db.Host.get_by_uuid ~__context ~uuid) - | `SR -> ignore(Db.SR.get_by_uuid ~__context ~uuid) - | `Pool -> ignore(Db.Pool.get_by_uuid ~__context ~uuid) - | `VMPP -> ignore(Db.VMPP.get_by_uuid ~__context ~uuid) - ); - true + (match cls with + | `VM -> ignore(Db.VM.get_by_uuid ~__context ~uuid) + | `Host -> ignore(Db.Host.get_by_uuid ~__context ~uuid) + | `SR -> ignore(Db.SR.get_by_uuid ~__context ~uuid) + | `Pool -> ignore(Db.Pool.get_by_uuid ~__context ~uuid) + | `VMPP -> ignore(Db.VMPP.get_by_uuid ~__context ~uuid) + ); + true with _ -> - false + false (*********** Thread_queue to exec the message script hook ***********) @@ -239,16 +239,16 @@ let message_to_string (_ref,message) = let handle_message ~__context message = try - if not (Pool_features.is_enabled ~__context Features.Email) - then info "Email alerting is restricted by current license: not generating email" - else begin - if Sys.file_exists !Xapi_globs.xapi_message_script then begin - let output, log = Forkhelpers.execute_command_get_output !Xapi_globs.xapi_message_script [message] in - debug "Executed message hook: output='%s' log='%s'" output log - end else info "%s not found, skipping" !Xapi_globs.xapi_message_script - end + if not (Pool_features.is_enabled ~__context Features.Email) + then info "Email alerting is restricted by current license: not generating email" + else begin + if Sys.file_exists !Xapi_globs.xapi_message_script then begin + let output, log = Forkhelpers.execute_command_get_output !Xapi_globs.xapi_message_script [message] in + debug "Executed message hook: output='%s' log='%s'" output 
log + end else info "%s not found, skipping" !Xapi_globs.xapi_message_script + end with e -> - error "Unexpected exception in message hook %s: %s" !Xapi_globs.xapi_message_script (ExnHelper.string_of_exn e) + error "Unexpected exception in message hook %s: %s" !Xapi_globs.xapi_message_script (ExnHelper.string_of_exn e) let start_message_hook_thread ~__context () = queue_push := (Thread_queue.make ~name:"email message queue" ~max_q_length:100 (handle_message ~__context)).Thread_queue.push_fn @@ -257,162 +257,162 @@ let start_message_hook_thread ~__context () = (********************************************************************) let cache_insert _ref message gen = - Mutex.execute in_memory_cache_mutex (fun () -> - in_memory_cache := - (gen,_ref,message) :: !in_memory_cache ; - - in_memory_cache_length := - !in_memory_cache_length + 1 ; - - if !in_memory_cache_length > in_memory_cache_length_max then begin - in_memory_cache := Listext.List.take - in_memory_cache_length_default - !in_memory_cache ; - in_memory_cache_length := in_memory_cache_length_default ; - debug "Pruning in-memory cache of messages: Length=%d (%d)" - !in_memory_cache_length - (List.length !in_memory_cache) - end) + Mutex.execute in_memory_cache_mutex (fun () -> + in_memory_cache := + (gen,_ref,message) :: !in_memory_cache ; + + in_memory_cache_length := + !in_memory_cache_length + 1 ; + + if !in_memory_cache_length > in_memory_cache_length_max then begin + in_memory_cache := Listext.List.take + in_memory_cache_length_default + !in_memory_cache ; + in_memory_cache_length := in_memory_cache_length_default ; + debug "Pruning in-memory cache of messages: Length=%d (%d)" + !in_memory_cache_length + (List.length !in_memory_cache) + end) let cache_remove _ref = - Mutex.execute in_memory_cache_mutex (fun () -> - let (to_delete,to_keep) = List.partition (function | _ , _ref', _ -> _ref' = _ref) !in_memory_cache in - if List.length to_delete > 1 then - error "Internal error: Repeated reference in messages 
in_memory_cache"; - in_memory_cache := to_keep; - in_memory_cache_length := List.length to_keep) + Mutex.execute in_memory_cache_mutex (fun () -> + let (to_delete,to_keep) = List.partition (function | _ , _ref', _ -> _ref' = _ref) !in_memory_cache in + if List.length to_delete > 1 then + error "Internal error: Repeated reference in messages in_memory_cache"; + in_memory_cache := to_keep; + in_memory_cache_length := List.length to_keep) (** Write: write message to disk. Returns boolean indicating whether - message was written *) + message was written *) let write ~__context ~_ref ~message = - (* Check if a message with _ref has already been written *) - let message_exists () = - let file = (ref_symlink ()) ^ "/" ^ (Ref.string_of _ref) in - try Unix.access file [Unix.F_OK] ; true with _ -> false in - - let message_gen () = - let fn = (ref_symlink ()) ^ "/" ^ (Ref.string_of _ref) in - let ic = open_in fn in - let xi = Xmlm.make_input (`Channel ic) in - let (gen,_,_) = Pervasiveext.finally - (fun () -> of_xml xi) - (fun () -> close_in ic) in - gen - in - - let gen = ref 0L in - - Db_lock.with_lock (fun () -> - let t = Context.database_of __context in - Db_ref.update_database t (fun db -> - gen := Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db); - Db_cache_types.Database.increment db)); - - Unixext.mkdir_rec message_dir 0o700; - let timestamp = ref (Date.to_float (message.API.message_timestamp)) in - - if message_exists () then (Some (message_gen ())) - else try Mutex.execute event_mutex (fun () -> - let fd, basefilename, filename = - (* Try 10, no wait, 11 times to create message file *) - let rec doit n = - if n>10 then failwith "Couldn't create a file" else begin - let basefilename = timestamp_to_string !timestamp in - let filename = message_dir ^ "/" ^ basefilename in - try - let fd = Unix.openfile filename - [Unix.O_RDWR; Unix.O_CREAT; Unix.O_EXCL] 0o600 in - (* Set file's timestamp to message timestamp *) - Unix.utimes filename !timestamp 
!timestamp ; - fd, basefilename, filename - with _ -> begin - (* We may be copying messages from another - pool, in which case we may have - filename collision (unlikely, but - possible). So increment the filename - and try again, but leave the original - timestamp in the message untouched. *) - timestamp := !timestamp +. 0.00001 ; - doit (n+1) - end - end - in doit 0 - in - - (* Write the message to file *) - let oc = Unix.out_channel_of_descr fd in - let output = Xmlm.make_output (`Channel oc) in - to_xml output _ref (Some !gen) message; - close_out oc; - - (* Message now written, let's symlink it in various places *) - let symlinks = symlinks _ref (Some !gen) message basefilename in - List.iter (fun (dir,newpath) -> - Unixext.mkdir_rec dir 0o700; - Unix.symlink filename newpath) symlinks; - - (* Insert a written message into in_memory_cache *) - cache_insert _ref message !gen; - - (* Emit a create event (with the old event API). If the message - hasn't been written, we may want to also emit a del even, for - consistency (since the reference for the message will never be - valid again. *) - let rpc = API.rpc_of_message_t message in - Xapi_event.event_add ~snapshot:rpc "message" "add" (Ref.string_of _ref); - let (_: bool) = (!queue_push) message.API.message_name (message_to_string (_ref,message)) in - (*Xapi_event.event_add ~snapshot:xml "message" "del" (Ref.string_of _ref);*) - - Some !gen - ) - with _ -> None - - -(** create: Create a new message, and write to disk. Returns null ref - if write failed, or message ref otherwise. 
*) -let create ~__context ~name ~priority ~cls ~obj_uuid ~body = - debug "Message.create %s %Ld %s %s" name priority - (class_to_string cls) obj_uuid; - - - (if not (Encodings.UTF8_XML.is_valid body) - then raise (Api_errors.Server_error - (Api_errors.invalid_value, ["UTF8 expected"]))) ; - (if not (check_uuid ~__context ~cls ~uuid:obj_uuid) - then raise (Api_errors.Server_error - (Api_errors.uuid_invalid, [class_to_string cls; obj_uuid]))) ; + (* Check if a message with _ref has already been written *) + let message_exists () = + let file = (ref_symlink ()) ^ "/" ^ (Ref.string_of _ref) in + try Unix.access file [Unix.F_OK] ; true with _ -> false in + + let message_gen () = + let fn = (ref_symlink ()) ^ "/" ^ (Ref.string_of _ref) in + let ic = open_in fn in + let xi = Xmlm.make_input (`Channel ic) in + let (gen,_,_) = Pervasiveext.finally + (fun () -> of_xml xi) + (fun () -> close_in ic) in + gen + in - let _ref = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in + let gen = ref 0L in - let timestamp = Mutex.execute event_mutex (fun () -> - Unix.gettimeofday ()) in + Db_lock.with_lock (fun () -> + let t = Context.database_of __context in + Db_ref.update_database t (fun db -> + gen := Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db); + Db_cache_types.Database.increment db)); + + Unixext.mkdir_rec message_dir 0o700; + let timestamp = ref (Date.to_float (message.API.message_timestamp)) in + + if message_exists () then (Some (message_gen ())) + else try Mutex.execute event_mutex (fun () -> + let fd, basefilename, filename = + (* Try 10, no wait, 11 times to create message file *) + let rec doit n = + if n>10 then failwith "Couldn't create a file" else begin + let basefilename = timestamp_to_string !timestamp in + let filename = message_dir ^ "/" ^ basefilename in + try + let fd = Unix.openfile filename + [Unix.O_RDWR; Unix.O_CREAT; Unix.O_EXCL] 0o600 in + (* Set file's timestamp to message timestamp *) + Unix.utimes filename 
!timestamp !timestamp ; + fd, basefilename, filename + with _ -> begin + (* We may be copying messages from another + pool, in which case we may have + filename collision (unlikely, but + possible). So increment the filename + and try again, but leave the original + timestamp in the message untouched. *) + timestamp := !timestamp +. 0.00001 ; + doit (n+1) + end + end + in doit 0 + in + + (* Write the message to file *) + let oc = Unix.out_channel_of_descr fd in + let output = Xmlm.make_output (`Channel oc) in + to_xml output _ref (Some !gen) message; + close_out oc; + + (* Message now written, let's symlink it in various places *) + let symlinks = symlinks _ref (Some !gen) message basefilename in + List.iter (fun (dir,newpath) -> + Unixext.mkdir_rec dir 0o700; + Unix.symlink filename newpath) symlinks; + + (* Insert a written message into in_memory_cache *) + cache_insert _ref message !gen; + + (* Emit a create event (with the old event API). If the message + hasn't been written, we may want to also emit a del even, for + consistency (since the reference for the message will never be + valid again. *) + let rpc = API.rpc_of_message_t message in + Xapi_event.event_add ~snapshot:rpc "message" "add" (Ref.string_of _ref); + let (_: bool) = (!queue_push) message.API.message_name (message_to_string (_ref,message)) in + (*Xapi_event.event_add ~snapshot:xml "message" "del" (Ref.string_of _ref);*) + + Some !gen + ) + with _ -> None - (* During rolling upgrade, upgraded master might have a alerts grading - system different from the not yet upgraded slaves, during that process we - transform the priority of received messages as a special case. 
*) - let priority = - if Helpers.rolling_upgrade_in_progress ~__context && List.mem_assoc name !Api_messages.msgList then - List.assoc name !Api_messages.msgList - else priority in - let message = {API.message_name=name; - API.message_uuid=uuid; - API.message_priority=priority; - API.message_cls=cls; - API.message_obj_uuid=obj_uuid; - API.message_timestamp=Date.of_float timestamp; - API.message_body=body;} - in +(** create: Create a new message, and write to disk. Returns null ref + if write failed, or message ref otherwise. *) +let create ~__context ~name ~priority ~cls ~obj_uuid ~body = + debug "Message.create %s %Ld %s %s" name priority + (class_to_string cls) obj_uuid; + + + (if not (Encodings.UTF8_XML.is_valid body) + then raise (Api_errors.Server_error + (Api_errors.invalid_value, ["UTF8 expected"]))) ; + (if not (check_uuid ~__context ~cls ~uuid:obj_uuid) + then raise (Api_errors.Server_error + (Api_errors.uuid_invalid, [class_to_string cls; obj_uuid]))) ; + + let _ref = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + + let timestamp = Mutex.execute event_mutex (fun () -> + Unix.gettimeofday ()) in + + (* During rolling upgrade, upgraded master might have a alerts grading + system different from the not yet upgraded slaves, during that process we + transform the priority of received messages as a special case. 
*) + let priority = + if Helpers.rolling_upgrade_in_progress ~__context && List.mem_assoc name !Api_messages.msgList then + List.assoc name !Api_messages.msgList + else priority in + + let message = {API.message_name=name; + API.message_uuid=uuid; + API.message_priority=priority; + API.message_cls=cls; + API.message_obj_uuid=obj_uuid; + API.message_timestamp=Date.of_float timestamp; + API.message_body=body;} + in - (* Write the message to disk *) - let gen = write ~__context ~_ref ~message in + (* Write the message to disk *) + let gen = write ~__context ~_ref ~message in - (* Return the message ref, or Ref.null if the message wasn't written *) - match gen with - | Some _ -> _ref - | None -> Ref.null + (* Return the message ref, or Ref.null if the message wasn't written *) + match gen with + | Some _ -> _ref + | None -> Ref.null let deleted : (Generation.t * API.ref_message) list ref = ref [0L, Ref.null] @@ -423,32 +423,32 @@ let destroy_real __context basefilename = let filename = message_dir ^ "/" ^ basefilename in let ic = open_in filename in let (gen,_ref,message) = Pervasiveext.finally - (fun () -> of_xml (Xmlm.make_input (`Channel ic))) - (fun () -> close_in ic) + (fun () -> of_xml (Xmlm.make_input (`Channel ic))) + (fun () -> close_in ic) in let symlinks = symlinks _ref (Some gen) message basefilename in List.iter (fun (dir,newpath) -> - Unixext.unlink_safe newpath) symlinks; + Unixext.unlink_safe newpath) symlinks; Unixext.unlink_safe filename; let rpc = API.rpc_of_message_t message in let gen = ref 0L in Db_lock.with_lock (fun () -> - let t = Context.database_of __context in - Db_ref.update_database t (fun db -> - gen := Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db); - Db_cache_types.Database.increment db)); + let t = Context.database_of __context in + Db_ref.update_database t (fun db -> + gen := Db_cache_types.Manifest.generation (Db_cache_types.Database.manifest db); + Db_cache_types.Database.increment db)); Mutex.execute 
event_mutex - (fun () -> - deleted := (!gen, _ref) :: !deleted; - ndeleted := !ndeleted + 1; - if !ndeleted > 1024 - then - (deleted := Listext.List.take 512 !deleted; - ndeleted := 512) - ); + (fun () -> + deleted := (!gen, _ref) :: !deleted; + ndeleted := !ndeleted + 1; + if !ndeleted > 1024 + then + (deleted := Listext.List.take 512 !deleted; + ndeleted := 512) + ); cache_remove _ref; Xapi_event.event_add ~snapshot:rpc "message" "del" (Ref.string_of _ref) @@ -456,20 +456,20 @@ let destroy ~__context ~self = (* Find the original message so we know where the symlinks will be *) let symlinkfname = (ref_symlink ()) ^ "/" ^ (Ref.string_of self) in let fullpath = - try Unix.readlink symlinkfname - with _ -> begin - let allfiles = List.map (fun file -> message_dir ^ "/" ^ file) (Array.to_list (Sys.readdir message_dir)) in - let allmsgs = List.filter (fun file -> not (Sys.is_directory file)) allfiles in - try - List.find (fun msg_fname -> - try - let ic = open_in msg_fname in - let (_,_ref,_) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in - if _ref = self then true else false - with _ -> false - ) allmsgs - with _ -> raise (Api_errors.Server_error (Api_errors.handle_invalid, [Datamodel._message; Ref.string_of self])) - end + try Unix.readlink symlinkfname + with _ -> begin + let allfiles = List.map (fun file -> message_dir ^ "/" ^ file) (Array.to_list (Sys.readdir message_dir)) in + let allmsgs = List.filter (fun file -> not (Sys.is_directory file)) allfiles in + try + List.find (fun msg_fname -> + try + let ic = open_in msg_fname in + let (_,_ref,_) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in + if _ref = self then true else false + with _ -> false + ) allmsgs + with _ -> raise (Api_errors.Server_error (Api_errors.handle_invalid, [Datamodel._message; Ref.string_of self])) + end in let basefilename = List.hd (List.rev (String.split '/' fullpath)) in destroy_real 
__context basefilename @@ -478,66 +478,66 @@ let destroy ~__context ~self = (* Gc the messages - leave only the number of messages defined in 'Xapi_globs.message_limit' *) let gc ~__context = if (try (Unix.access message_dir [Unix.F_OK]; true) with _ -> false) then - begin - let allmsg = List.filter_map - (fun msg -> - try - Some (float_of_string msg, msg) - with _ -> - None) - (Array.to_list (Sys.readdir message_dir)) - in - if List.length allmsg > Xapi_globs.message_limit then - begin - warn "Messages have reached over the limit %d" Xapi_globs.message_limit; - let sorted = List.sort (fun (t1,_) (t2,_) -> compare t1 t2) allmsg in - let n = List.length sorted in - let to_reap = n - Xapi_globs.message_limit in - let rec reap_one i msgs = - if i=to_reap then () else - begin - begin - try destroy_real __context (snd (List.hd msgs)) - with e -> - debug "Failed to destroy message %s" (snd (List.hd msgs)); - debug "Caught exception %s" (Printexc.to_string e) - end; - reap_one (i+1) (List.tl msgs) - end - in - reap_one 0 sorted - end - end + begin + let allmsg = List.filter_map + (fun msg -> + try + Some (float_of_string msg, msg) + with _ -> + None) + (Array.to_list (Sys.readdir message_dir)) + in + if List.length allmsg > Xapi_globs.message_limit then + begin + warn "Messages have reached over the limit %d" Xapi_globs.message_limit; + let sorted = List.sort (fun (t1,_) (t2,_) -> compare t1 t2) allmsg in + let n = List.length sorted in + let to_reap = n - Xapi_globs.message_limit in + let rec reap_one i msgs = + if i=to_reap then () else + begin + begin + try destroy_real __context (snd (List.hd msgs)) + with e -> + debug "Failed to destroy message %s" (snd (List.hd msgs)); + debug "Caught exception %s" (Printexc.to_string e) + end; + reap_one (i+1) (List.tl msgs) + end + in + reap_one 0 sorted + end + end let get_real_inner dir filter name_filter = try - let allmsgs = Array.to_list (Sys.readdir dir) in - let messages = List.filter name_filter allmsgs in - let messages = 
List.filter_map (fun msg_fname -> - let filename = dir ^ "/" ^ msg_fname in - try - let ic = open_in filename in - let (gen,_ref,msg) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in - if filter msg then Some (gen,_ref,msg) else None - with _ -> None) messages - in - List.sort (fun (t1,r1,m1) (t2,r2,m2) -> - let r = compare t2 t1 in - if r <> 0 then r else compare (Date.to_float m2.API.message_timestamp) (Date.to_float m1.API.message_timestamp)) messages + let allmsgs = Array.to_list (Sys.readdir dir) in + let messages = List.filter name_filter allmsgs in + let messages = List.filter_map (fun msg_fname -> + let filename = dir ^ "/" ^ msg_fname in + try + let ic = open_in filename in + let (gen,_ref,msg) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in + if filter msg then Some (gen,_ref,msg) else None + with _ -> None) messages + in + List.sort (fun (t1,r1,m1) (t2,r2,m2) -> + let r = compare t2 t1 in + if r <> 0 then r else compare (Date.to_float m2.API.message_timestamp) (Date.to_float m1.API.message_timestamp)) messages with _ -> [] (* Message directory missing *) let since_name_filter since name = - try - float_of_string name > since - with _ -> false + try + float_of_string name > since + with _ -> false let get_from_generation gen = - if gen > 0L - then get_real_inner (gen_symlink ()) (fun x -> true) (fun n -> try Int64.of_string n > gen with _ -> false) - else get_real_inner message_dir (fun _ -> true) (fun n -> try ignore(float_of_string n); true with _ -> false) + if gen > 0L + then get_real_inner (gen_symlink ()) (fun x -> true) (fun n -> try Int64.of_string n > gen with _ -> false) + else get_real_inner message_dir (fun _ -> true) (fun n -> try ignore(float_of_string n); true with _ -> false) let get_real dir filter since = - List.map (fun (_,r,m) -> (r,m)) (get_real_inner dir filter (since_name_filter since)) + List.map (fun (_,r,m) -> (r,m)) 
(get_real_inner dir filter (since_name_filter since)) let get ~__context ~cls ~obj_uuid ~since = (* Read in all the messages for a particular object *) @@ -550,59 +550,59 @@ let get_since ~__context ~since = get_real message_dir (fun _ -> true) (Date.to_float since) let get_since_for_events ~__context since = - let cached_result = Mutex.execute in_memory_cache_mutex - (fun () -> - match List.rev !in_memory_cache with - | (last_in_memory, _, _) :: _ when last_in_memory < since -> - Some (List.filter_map - (fun (gen,_ref,msg) -> - if gen > since then Some (gen, Xapi_event.Message.Create (_ref, msg)) else None) - !in_memory_cache) - | (last_in_memory, _, _) :: _ -> - debug "get_since_for_events: last_in_memory (%Ld) > since (%Ld): Using slow message lookup" last_in_memory since; - None - | _ -> - warn "get_since_for_events: no in_memory_cache!"; - None) - in - let result = match cached_result with - | Some x -> x - | None -> - List.map (fun (ts,x,y) -> (ts, Xapi_event.Message.Create (x,y))) (get_from_generation since) - in - let delete_results = Mutex.execute deleted_mutex (fun () -> - let deleted = List.filter (fun (deltime,_ref) -> deltime > since) !deleted in - List.map (fun (ts , _ref) -> (ts,Xapi_event.Message.Del _ref)) deleted) in - let all_results = result @ delete_results in - let newsince = List.fold_left (fun acc (ts,m) -> max ts acc) since all_results in - (newsince, List.map snd all_results) + let cached_result = Mutex.execute in_memory_cache_mutex + (fun () -> + match List.rev !in_memory_cache with + | (last_in_memory, _, _) :: _ when last_in_memory < since -> + Some (List.filter_map + (fun (gen,_ref,msg) -> + if gen > since then Some (gen, Xapi_event.Message.Create (_ref, msg)) else None) + !in_memory_cache) + | (last_in_memory, _, _) :: _ -> + debug "get_since_for_events: last_in_memory (%Ld) > since (%Ld): Using slow message lookup" last_in_memory since; + None + | _ -> + warn "get_since_for_events: no in_memory_cache!"; + None) + in + let result = 
match cached_result with + | Some x -> x + | None -> + List.map (fun (ts,x,y) -> (ts, Xapi_event.Message.Create (x,y))) (get_from_generation since) + in + let delete_results = Mutex.execute deleted_mutex (fun () -> + let deleted = List.filter (fun (deltime,_ref) -> deltime > since) !deleted in + List.map (fun (ts , _ref) -> (ts,Xapi_event.Message.Del _ref)) deleted) in + let all_results = result @ delete_results in + let newsince = List.fold_left (fun acc (ts,m) -> max ts acc) since all_results in + (newsince, List.map snd all_results) let get_by_uuid ~__context ~uuid = try - let message_filename = (uuid_symlink ()) ^ "/" ^ uuid in - let ic = open_in message_filename in - let (_,_ref,_) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in - _ref + let message_filename = (uuid_symlink ()) ^ "/" ^ uuid in + let ic = open_in message_filename in + let (_,_ref,_) = Pervasiveext.finally (fun () -> of_xml (Xmlm.make_input (`Channel ic))) (fun () -> close_in ic) in + _ref with - _ -> raise (Api_errors.Server_error (Api_errors.uuid_invalid, [ "message"; uuid ])) + _ -> raise (Api_errors.Server_error (Api_errors.uuid_invalid, [ "message"; uuid ])) let get_all ~__context = try - let allmsgs = Array.to_list (Sys.readdir (ref_symlink ())) in - List.map (fun r -> Ref.of_string r) allmsgs + let allmsgs = Array.to_list (Sys.readdir (ref_symlink ())) in + List.map (fun r -> Ref.of_string r) allmsgs with _ -> [] let get_record ~__context ~self = try - let symlinkfname = (ref_symlink ()) ^ "/" ^ (Ref.string_of self) in - let fullpath = Unix.readlink symlinkfname in - let ic = open_in fullpath in - let (_,_ref,message) = Pervasiveext.finally - (fun () -> of_xml (Xmlm.make_input (`Channel ic))) - (fun () -> close_in ic) - in message + let symlinkfname = (ref_symlink ()) ^ "/" ^ (Ref.string_of self) in + let fullpath = Unix.readlink symlinkfname in + let ic = open_in fullpath in + let (_,_ref,message) = Pervasiveext.finally + (fun () -> 
of_xml (Xmlm.make_input (`Channel ic))) + (fun () -> close_in ic) + in message with _ -> - raise (Api_errors.Server_error (Api_errors.handle_invalid, ["message";(Ref.string_of self)])) + raise (Api_errors.Server_error (Api_errors.handle_invalid, ["message";(Ref.string_of self)])) let get_all_records ~__context = get_real message_dir (fun _ -> true) (0.0) @@ -611,87 +611,87 @@ let get_all_records_where ~__context ~expr = get_real message_dir (fun _ -> true) (0.0) let repopulate_cache () = - Mutex.execute in_memory_cache_mutex (fun () -> - let messages = get_real_inner message_dir (fun _ -> true) (fun n -> try ignore(float_of_string n); true with _ -> false) in - let last_256 = List.take 256 messages in - in_memory_cache := last_256; - let get_ts (ts,_,m) = Printf.sprintf "%Ld (%s)" ts (Date.to_string m.API.message_timestamp) in - debug "Constructing in-memory-cache: most length=%d" (List.length last_256); - (try debug "newest=%s oldest=%s" (get_ts (List.hd last_256)) (get_ts (List.hd (List.rev last_256))) with _ -> ()); - in_memory_cache_length := List.length !in_memory_cache) + Mutex.execute in_memory_cache_mutex (fun () -> + let messages = get_real_inner message_dir (fun _ -> true) (fun n -> try ignore(float_of_string n); true with _ -> false) in + let last_256 = List.take 256 messages in + in_memory_cache := last_256; + let get_ts (ts,_,m) = Printf.sprintf "%Ld (%s)" ts (Date.to_string m.API.message_timestamp) in + debug "Constructing in-memory-cache: most length=%d" (List.length last_256); + (try debug "newest=%s oldest=%s" (get_ts (List.hd last_256)) (get_ts (List.hd (List.rev last_256))) with _ -> ()); + in_memory_cache_length := List.length !in_memory_cache) let register_event_hook () = - repopulate_cache (); - Xapi_event.Message.get_since_for_events := get_since_for_events + repopulate_cache (); + Xapi_event.Message.get_since_for_events := get_since_for_events (** Handler for PUTing messages to a host. 
- Query params: { cls=, uuid= } *) + Query params: { cls=, uuid= } *) let handler (req: Http.Request.t) fd _ = - let query = req.Http.Request.query in - req.Http.Request.close <- true ; - debug "Xapi_message.handler: receiving messages" ; - - let check_query param = - if not (List.mem_assoc param query) then begin - error "Xapi_message.handler: HTTP request for message lacked %s parameter" param ; - Http_svr.headers fd (Http.http_400_badrequest ()) ; - failwith (Printf.sprintf "Xapi_message.handler: Missing %s parameter" param) - end in - - (* Check query for required params *) - check_query "uuid" ; check_query "cls" ; - - Xapi_http.with_context ~dummy:true "Xapi_message.handler" req fd - (fun __context -> try - (* Redirect if we're not master *) - if not (Pool_role.is_master ()) - then - let url = Printf.sprintf "https://%s%s?%s" - (Pool_role.get_master_address ()) - req.Http.Request.uri - (String.concat "&" - (List.map (fun (a,b) -> a^"="^b) query)) in - Http_svr.headers fd (Http.http_302_redirect url) ; - - else - (* Get and check query parameters *) - let uuid = List.assoc "uuid" query - and cls = List.assoc "cls" query in - let cls = try string_to_class cls with _ -> - failwith ("Xapi_message.handler: Bad class " ^ cls) in - if not (check_uuid ~__context ~cls ~uuid) then - failwith ("Xapi_message.handler: Bad uuid " ^ uuid) ; - - (* Tell client we're good to receive *) - Http_svr.headers fd (Http.http_200_ok ()) ; - - (* Read messages in, and write to filesystem *) - let xml_in = Xmlm.make_input - (`Channel (Unix.in_channel_of_descr fd)) in - let messages = import_xml xml_in in - List.iter (function (_,r,m) -> ignore (write ~__context ~_ref:r ~message:m)) messages ; - - (* Flush cache and reload *) - repopulate_cache () ; - - with e -> error "Xapi_message.handler: caught exception '%s'" - (ExnHelper.string_of_exn e) - ) + let query = req.Http.Request.query in + req.Http.Request.close <- true ; + debug "Xapi_message.handler: receiving messages" ; + + let 
check_query param = + if not (List.mem_assoc param query) then begin + error "Xapi_message.handler: HTTP request for message lacked %s parameter" param ; + Http_svr.headers fd (Http.http_400_badrequest ()) ; + failwith (Printf.sprintf "Xapi_message.handler: Missing %s parameter" param) + end in + + (* Check query for required params *) + check_query "uuid" ; check_query "cls" ; + + Xapi_http.with_context ~dummy:true "Xapi_message.handler" req fd + (fun __context -> try + (* Redirect if we're not master *) + if not (Pool_role.is_master ()) + then + let url = Printf.sprintf "https://%s%s?%s" + (Pool_role.get_master_address ()) + req.Http.Request.uri + (String.concat "&" + (List.map (fun (a,b) -> a^"="^b) query)) in + Http_svr.headers fd (Http.http_302_redirect url) ; + + else + (* Get and check query parameters *) + let uuid = List.assoc "uuid" query + and cls = List.assoc "cls" query in + let cls = try string_to_class cls with _ -> + failwith ("Xapi_message.handler: Bad class " ^ cls) in + if not (check_uuid ~__context ~cls ~uuid) then + failwith ("Xapi_message.handler: Bad uuid " ^ uuid) ; + + (* Tell client we're good to receive *) + Http_svr.headers fd (Http.http_200_ok ()) ; + + (* Read messages in, and write to filesystem *) + let xml_in = Xmlm.make_input + (`Channel (Unix.in_channel_of_descr fd)) in + let messages = import_xml xml_in in + List.iter (function (_,r,m) -> ignore (write ~__context ~_ref:r ~message:m)) messages ; + + (* Flush cache and reload *) + repopulate_cache () ; + + with e -> error "Xapi_message.handler: caught exception '%s'" + (ExnHelper.string_of_exn e) + ) (* Export messages and send to another host/pool over http. 
*) let send_messages ~__context ~cls ~obj_uuid ~session_id ~remote_address = - let msgs = get ~__context ~cls ~obj_uuid ~since:(Date.of_float 0.0) in - let body = export_xml msgs in - let query = [ "session_id", Ref.string_of session_id - ; "cls", "VM" - ; "uuid", obj_uuid ] in - let subtask_of = Context.string_of_task __context in - let request = Xapi_http.http_request ~subtask_of ~query ~body - Http.Put Constants.message_put_uri in - let open Xmlrpc_client in - let transport = SSL(SSL.make (), remote_address, !Xapi_globs.https_port) in - with_transport transport - (with_http request - (fun (rsp, fd) -> - if rsp.Http.Response.code <> "200" - then error "Error transferring messages")) + let msgs = get ~__context ~cls ~obj_uuid ~since:(Date.of_float 0.0) in + let body = export_xml msgs in + let query = [ "session_id", Ref.string_of session_id + ; "cls", "VM" + ; "uuid", obj_uuid ] in + let subtask_of = Context.string_of_task __context in + let request = Xapi_http.http_request ~subtask_of ~query ~body + Http.Put Constants.message_put_uri in + let open Xmlrpc_client in + let transport = SSL(SSL.make (), remote_address, !Xapi_globs.https_port) in + with_transport transport + (with_http request + (fun (rsp, fd) -> + if rsp.Http.Response.code <> "200" + then error "Error transferring messages")) diff --git a/ocaml/xapi/xapi_mgmt_iface.ml b/ocaml/xapi/xapi_mgmt_iface.ml index f2b5567c838..481b27d647f 100644 --- a/ocaml/xapi/xapi_mgmt_iface.ml +++ b/ocaml/xapi/xapi_mgmt_iface.ml @@ -24,7 +24,7 @@ open D let himn_addr = ref None (* Stores a key into the table in Http_srv which identifies the server thread bound - to the management IP. *) + to the management IP. 
*) let management_interface_server = ref [] let listening_all = ref false let listening_localhost = ref false @@ -33,175 +33,175 @@ let stunnel_accept = ref None let management_m = Mutex.create () let update_mh_info interface = - let (_: string*string) = Forkhelpers.execute_command_get_output !Xapi_globs.update_mh_info_script [ interface ] in - () + let (_: string*string) = Forkhelpers.execute_command_get_output !Xapi_globs.update_mh_info_script [ interface ] in + () let stunnel_m = Mutex.create () let restart_stunnel_nomutex ~__context ~accept = - info "Restarting stunnel (accepting connections on %s)" accept; - let back_compat ~__context = - if Stunnel.is_legacy_protocol_and_ciphersuites_allowed () - then [ "back_compat_6_5" ] - else [] - in - let xapissl_args = [ "restart"; accept ] @ (back_compat ~__context) in - let (_ : Thread.t) = Thread.create (fun () -> - Mutex.execute management_m (fun () -> - Forkhelpers.execute_command_get_output !Xapi_globs.xapissl_path xapissl_args - ) - ) () in - () + info "Restarting stunnel (accepting connections on %s)" accept; + let back_compat ~__context = + if Stunnel.is_legacy_protocol_and_ciphersuites_allowed () + then [ "back_compat_6_5" ] + else [] + in + let xapissl_args = [ "restart"; accept ] @ (back_compat ~__context) in + let (_ : Thread.t) = Thread.create (fun () -> + Mutex.execute management_m (fun () -> + Forkhelpers.execute_command_get_output !Xapi_globs.xapissl_path xapissl_args + ) + ) () in + () let restart_stunnel ~__context ~accept = - Mutex.execute stunnel_m (fun () -> - stunnel_accept := Some accept; - restart_stunnel_nomutex ~__context ~accept - ) + Mutex.execute stunnel_m (fun () -> + stunnel_accept := Some accept; + restart_stunnel_nomutex ~__context ~accept + ) let reconfigure_stunnel ~__context = - Mutex.execute stunnel_m (fun () -> - match !stunnel_accept with - | None -> () (* We've not yet started stunnel; no action needed *) - | Some accept -> restart_stunnel_nomutex ~__context ~accept - ) + 
Mutex.execute stunnel_m (fun () -> + match !stunnel_accept with + | None -> () (* We've not yet started stunnel; no action needed *) + | Some accept -> restart_stunnel_nomutex ~__context ~accept + ) let stop () = - debug "Shutting down the old management interface (if any)"; - List.iter (fun i -> Http_svr.stop i) !management_interface_server; - management_interface_server := []; - listening_all := false; - listening_localhost := false; - listening_himn := false + debug "Shutting down the old management interface (if any)"; + List.iter (fun i -> Http_svr.stop i) !management_interface_server; + management_interface_server := []; + listening_all := false; + listening_localhost := false; + listening_himn := false (* Even though xapi listens on all IP addresses, there is still an interface appointed as * _the_ management interface. Slaves in a pool use the IP address of this interface to connect * the pool master. *) let start ~__context ?addr () = - let socket, accept = - match addr with - | None -> - info "Starting new server (listening on all IP addresses)"; - begin - try (* Is it IPv6 ? *) - let addr = Unix.inet6_addr_any in - Xapi_http.bind (Unix.ADDR_INET(addr, Xapi_globs.http_port)), - ":::443" - with _ -> (* No. *) - let addr = Unix.inet_addr_any in - Xapi_http.bind (Unix.ADDR_INET(addr, Xapi_globs.http_port)), - "443" - end - | Some ip -> - info "Starting new server (listening on %s)" ip; - let addr = Unix.inet_addr_of_string ip in - let sockaddr = Unix.ADDR_INET(addr, Xapi_globs.http_port) in - Xapi_http.bind sockaddr, - match Unix.domain_of_sockaddr sockaddr with - | Unix.PF_INET6 -> "::1:443" - | _ -> "127.0.0.1:443" - in - Http_svr.start Xapi_http.server socket; - management_interface_server := socket :: !management_interface_server; - - restart_stunnel ~__context ~accept; - if Pool_role.is_master () && !listening_all then begin - (* NB if we synchronously bring up the management interface on a master with a blank - database this can fail... 
this is ok because the database will be synchronised later *) - Server_helpers.exec_with_new_task "refreshing consoles" - (fun __context -> - Dbsync_master.set_master_ip ~__context; - Dbsync_master.refresh_console_urls ~__context) - end + let socket, accept = + match addr with + | None -> + info "Starting new server (listening on all IP addresses)"; + begin + try (* Is it IPv6 ? *) + let addr = Unix.inet6_addr_any in + Xapi_http.bind (Unix.ADDR_INET(addr, Xapi_globs.http_port)), + ":::443" + with _ -> (* No. *) + let addr = Unix.inet_addr_any in + Xapi_http.bind (Unix.ADDR_INET(addr, Xapi_globs.http_port)), + "443" + end + | Some ip -> + info "Starting new server (listening on %s)" ip; + let addr = Unix.inet_addr_of_string ip in + let sockaddr = Unix.ADDR_INET(addr, Xapi_globs.http_port) in + Xapi_http.bind sockaddr, + match Unix.domain_of_sockaddr sockaddr with + | Unix.PF_INET6 -> "::1:443" + | _ -> "127.0.0.1:443" + in + Http_svr.start Xapi_http.server socket; + management_interface_server := socket :: !management_interface_server; + + restart_stunnel ~__context ~accept; + if Pool_role.is_master () && !listening_all then begin + (* NB if we synchronously bring up the management interface on a master with a blank + database this can fail... 
this is ok because the database will be synchronised later *) + Server_helpers.exec_with_new_task "refreshing consoles" + (fun __context -> + Dbsync_master.set_master_ip ~__context; + Dbsync_master.refresh_console_urls ~__context) + end let change interface primary_address_type = - Xapi_inventory.update Xapi_inventory._management_interface interface; - Xapi_inventory.update Xapi_inventory._management_address_type - (Record_util.primary_address_type_to_string primary_address_type); - update_mh_info interface + Xapi_inventory.update Xapi_inventory._management_interface interface; + Xapi_inventory.update Xapi_inventory._management_address_type + (Record_util.primary_address_type_to_string primary_address_type); + update_mh_info interface let run ~__context ~mgmt_enabled = - Mutex.execute management_m (fun () -> - if mgmt_enabled then begin - if not !listening_all then begin - stop (); - start ~__context (); - listening_all := true - end - end else begin - if !listening_all then - stop (); - if not !listening_localhost then begin - start ~__context ~addr:"127.0.0.1" (); - listening_localhost := true - end; - Opt.iter (fun addr -> - if not !listening_himn then begin - start ~__context ~addr (); - listening_himn := true - end - ) !himn_addr; - end - ) + Mutex.execute management_m (fun () -> + if mgmt_enabled then begin + if not !listening_all then begin + stop (); + start ~__context (); + listening_all := true + end + end else begin + if !listening_all then + stop (); + if not !listening_localhost then begin + start ~__context ~addr:"127.0.0.1" (); + listening_localhost := true + end; + Opt.iter (fun addr -> + if not !listening_himn then begin + start ~__context ~addr (); + listening_himn := true + end + ) !himn_addr; + end + ) let enable_himn ~__context ~addr = - Mutex.execute management_m (fun () -> - himn_addr := Some addr; - ); - run ~__context ~mgmt_enabled:!listening_all + Mutex.execute management_m (fun () -> + himn_addr := Some addr; + ); + run ~__context 
~mgmt_enabled:!listening_all let rebind ~__context = - run ~__context ~mgmt_enabled:!listening_all + run ~__context ~mgmt_enabled:!listening_all let management_ip_mutex = Mutex.create () let management_ip_cond = Condition.create () let wait_for_management_ip ~__context = - let ip = ref (match Helpers.get_management_ip_addr ~__context with Some x -> x | None -> "") in - Mutex.execute management_ip_mutex - (fun () -> begin while !ip = "" do - Condition.wait management_ip_cond management_ip_mutex; - ip := (match Helpers.get_management_ip_addr ~__context with Some x -> x | None -> "") - done; end); - !ip + let ip = ref (match Helpers.get_management_ip_addr ~__context with Some x -> x | None -> "") in + Mutex.execute management_ip_mutex + (fun () -> begin while !ip = "" do + Condition.wait management_ip_cond management_ip_mutex; + ip := (match Helpers.get_management_ip_addr ~__context with Some x -> x | None -> "") + done; end); + !ip let on_dom0_networking_change ~__context = - debug "Checking to see if hostname or management IP has changed"; - (* Need to update: - 1 Host.hostname - 2 Host.address - 3. 
Console URIs *) - let new_hostname = Helpers.reget_hostname () in - let localhost = Helpers.get_localhost ~__context in - if Db.Host.get_hostname ~__context ~self:localhost <> new_hostname then begin - debug "Changing Host.hostname in database to: %s" new_hostname; - Db.Host.set_hostname ~__context ~self:localhost ~value:new_hostname - end; - if List.mem - (Db.Host.get_name_label ~__context ~self:localhost) - ["localhost"; "localhost.localdomain"] then - Db.Host.set_name_label ~__context ~self:localhost ~value:new_hostname; - begin match Helpers.get_management_ip_addr ~__context with - | Some ip -> - if Db.Host.get_address ~__context ~self:localhost <> ip then begin - debug "Changing Host.address in database to: %s" ip; - Db.Host.set_address ~__context ~self:localhost ~value:ip; - debug "Refreshing console URIs"; - Dbsync_master.refresh_console_urls ~__context - end - | None -> - if Db.Host.get_address ~__context ~self:localhost <> "" then begin - debug "Changing Host.address in database to: '' (host has no management IP address)"; - Db.Host.set_address ~__context ~self:localhost ~value:"" - end - end; - Helpers.update_domain_zero_name ~__context localhost new_hostname; - (* Running update-issue service on best effort basis *) - try - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) - with _ -> (); - debug "Signalling anyone waiting for the management IP address to change"; - Mutex.execute management_ip_mutex - (fun () -> Condition.broadcast management_ip_cond) + debug "Checking to see if hostname or management IP has changed"; + (* Need to update: + 1 Host.hostname + 2 Host.address + 3. 
Console URIs *) + let new_hostname = Helpers.reget_hostname () in + let localhost = Helpers.get_localhost ~__context in + if Db.Host.get_hostname ~__context ~self:localhost <> new_hostname then begin + debug "Changing Host.hostname in database to: %s" new_hostname; + Db.Host.set_hostname ~__context ~self:localhost ~value:new_hostname + end; + if List.mem + (Db.Host.get_name_label ~__context ~self:localhost) + ["localhost"; "localhost.localdomain"] then + Db.Host.set_name_label ~__context ~self:localhost ~value:new_hostname; + begin match Helpers.get_management_ip_addr ~__context with + | Some ip -> + if Db.Host.get_address ~__context ~self:localhost <> ip then begin + debug "Changing Host.address in database to: %s" ip; + Db.Host.set_address ~__context ~self:localhost ~value:ip; + debug "Refreshing console URIs"; + Dbsync_master.refresh_console_urls ~__context + end + | None -> + if Db.Host.get_address ~__context ~self:localhost <> "" then begin + debug "Changing Host.address in database to: '' (host has no management IP address)"; + Db.Host.set_address ~__context ~self:localhost ~value:"" + end + end; + Helpers.update_domain_zero_name ~__context localhost new_hostname; + (* Running update-issue service on best effort basis *) + try + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) + with _ -> (); + debug "Signalling anyone waiting for the management IP address to change"; + Mutex.execute management_ip_mutex + (fun () -> Condition.broadcast management_ip_cond) diff --git a/ocaml/xapi/xapi_mgmt_iface.mli b/ocaml/xapi/xapi_mgmt_iface.mli index 6f5e3736421..65ea31c2556 100644 --- a/ocaml/xapi/xapi_mgmt_iface.mli +++ b/ocaml/xapi/xapi_mgmt_iface.mli @@ -13,7 +13,7 @@ *) (** Controlling the management interface. 
* @group Networking - *) +*) (** Local IP address of the HIMN (if any) *) val himn_addr : string option ref diff --git a/ocaml/xapi/xapi_network.ml b/ocaml/xapi/xapi_network.ml index fdc9de9cd67..e4460533637 100644 --- a/ocaml/xapi/xapi_network.ml +++ b/ocaml/xapi/xapi_network.ml @@ -24,100 +24,100 @@ open Network let internal_bridge_m = Mutex.create () let create_internal_bridge ~__context ~bridge ~uuid ~persist = - let dbg = Context.string_of_task __context in - let current = Net.Bridge.get_all dbg () in - if List.mem bridge current then - (* No serialisation needed in this case *) - debug "Internal bridge %s exists" bridge - else - (* Atomic test-and-set process *) - Mutex.execute internal_bridge_m (fun () -> - let current = Net.Bridge.get_all dbg () in - if not(List.mem bridge current) then begin - let other_config = ["network-uuids", uuid] in - debug "Creating internal bridge %s (uuid:%s)" bridge uuid; - Net.Bridge.create dbg ~name:bridge ~other_config (); - end - ); - Net.Bridge.set_persistent dbg ~name:bridge ~value:persist + let dbg = Context.string_of_task __context in + let current = Net.Bridge.get_all dbg () in + if List.mem bridge current then + (* No serialisation needed in this case *) + debug "Internal bridge %s exists" bridge + else + (* Atomic test-and-set process *) + Mutex.execute internal_bridge_m (fun () -> + let current = Net.Bridge.get_all dbg () in + if not(List.mem bridge current) then begin + let other_config = ["network-uuids", uuid] in + debug "Creating internal bridge %s (uuid:%s)" bridge uuid; + Net.Bridge.create dbg ~name:bridge ~other_config (); + end + ); + Net.Bridge.set_persistent dbg ~name:bridge ~value:persist let set_himn_ip ~__context bridge other_config = - let open Network_interface in - let dbg = Context.string_of_task __context in - try - let ip = List.assoc "ip_begin" other_config in - let netmask = List.assoc "netmask" other_config in - let persist = try List.assoc "persist" other_config |> bool_of_string with _ -> false 
in - let ipv4_conf = - (Static4 [Unix.inet_addr_of_string ip, netmask_to_prefixlen netmask]) in - Net.Interface.set_ipv4_conf dbg bridge ipv4_conf; - Xapi_mgmt_iface.enable_himn ~__context ~addr:ip; - Net.Interface.set_persistent dbg bridge persist; - with Not_found -> - error "Cannot setup host internal management network: no other-config:ip_begin or other-config:netmask" + let open Network_interface in + let dbg = Context.string_of_task __context in + try + let ip = List.assoc "ip_begin" other_config in + let netmask = List.assoc "netmask" other_config in + let persist = try List.assoc "persist" other_config |> bool_of_string with _ -> false in + let ipv4_conf = + (Static4 [Unix.inet_addr_of_string ip, netmask_to_prefixlen netmask]) in + Net.Interface.set_ipv4_conf dbg bridge ipv4_conf; + Xapi_mgmt_iface.enable_himn ~__context ~addr:ip; + Net.Interface.set_persistent dbg bridge persist; + with Not_found -> + error "Cannot setup host internal management network: no other-config:ip_begin or other-config:netmask" let check_himn ~__context = - let nets = Db.Network.get_all_records ~__context in - let mnets = - List.filter (fun (_, n) -> - let oc = n.API.network_other_config in - (List.mem_assoc Xapi_globs.is_guest_installer_network oc) - && (List.assoc Xapi_globs.is_guest_installer_network oc = "true") - ) nets - in - match mnets with - | [] -> () - | (_, rc) :: _ -> - let dbg = Context.string_of_task __context in - let bridges = Net.Bridge.get_all dbg () in - if List.mem rc.API.network_bridge bridges then - set_himn_ip ~__context rc.API.network_bridge rc.API.network_other_config + let nets = Db.Network.get_all_records ~__context in + let mnets = + List.filter (fun (_, n) -> + let oc = n.API.network_other_config in + (List.mem_assoc Xapi_globs.is_guest_installer_network oc) + && (List.assoc Xapi_globs.is_guest_installer_network oc = "true") + ) nets + in + match mnets with + | [] -> () + | (_, rc) :: _ -> + let dbg = Context.string_of_task __context in + let bridges 
= Net.Bridge.get_all dbg () in + if List.mem rc.API.network_bridge bridges then + set_himn_ip ~__context rc.API.network_bridge rc.API.network_other_config let attach_internal ?(management_interface=false) ~__context ~self () = - let host = Helpers.get_localhost ~__context in - let net = Db.Network.get_record ~__context ~self in - let local_pifs = Xapi_network_attach_helpers.get_local_pifs ~__context ~network:self ~host in - let persist = try List.mem_assoc "persist" net.API.network_other_config with _ -> false in + let host = Helpers.get_localhost ~__context in + let net = Db.Network.get_record ~__context ~self in + let local_pifs = Xapi_network_attach_helpers.get_local_pifs ~__context ~network:self ~host in + let persist = try List.mem_assoc "persist" net.API.network_other_config with _ -> false in - (* Ensure internal bridge exists and is up. external bridges will be - brought up by call to interface-reconfigure. *) - if List.length(local_pifs) = 0 then create_internal_bridge ~__context - ~bridge:net.API.network_bridge ~uuid:net.API.network_uuid ~persist; + (* Ensure internal bridge exists and is up. external bridges will be + brought up by call to interface-reconfigure. *) + if List.length(local_pifs) = 0 then create_internal_bridge ~__context + ~bridge:net.API.network_bridge ~uuid:net.API.network_uuid ~persist; - (* Check if we're a Host-Internal Management Network (HIMN) (a.k.a. guest-installer network) *) - if (List.mem_assoc Xapi_globs.is_guest_installer_network net.API.network_other_config) - && (List.assoc Xapi_globs.is_guest_installer_network net.API.network_other_config = "true") then - set_himn_ip ~__context net.API.network_bridge net.API.network_other_config; + (* Check if we're a Host-Internal Management Network (HIMN) (a.k.a. 
guest-installer network) *) + if (List.mem_assoc Xapi_globs.is_guest_installer_network net.API.network_other_config) + && (List.assoc Xapi_globs.is_guest_installer_network net.API.network_other_config = "true") then + set_himn_ip ~__context net.API.network_bridge net.API.network_other_config; - (* Create the new PIF. - NB if we're doing this as part of a management-interface-reconfigure then - we might be just about to loose our current management interface... *) - List.iter (fun pif -> - let uuid = Db.PIF.get_uuid ~__context ~self:pif in - if Db.PIF.get_managed ~__context ~self:pif then - if Db.PIF.get_currently_attached ~__context ~self:pif = false || management_interface then begin - Xapi_network_attach_helpers.assert_no_slave ~__context pif; - debug "Trying to attach PIF: %s" uuid; - Nm.bring_pif_up ~__context ~management_interface pif - end else - if management_interface then - info "PIF %s is the management interface, but it is not managed by xapi. \ - The bridge and IP must be configured through other means." uuid - else - info "PIF %s is needed by a VM, but not managed by xapi. \ - The bridge must be configured through other means." uuid - ) local_pifs + (* Create the new PIF. + NB if we're doing this as part of a management-interface-reconfigure then + we might be just about to loose our current management interface... *) + List.iter (fun pif -> + let uuid = Db.PIF.get_uuid ~__context ~self:pif in + if Db.PIF.get_managed ~__context ~self:pif then + if Db.PIF.get_currently_attached ~__context ~self:pif = false || management_interface then begin + Xapi_network_attach_helpers.assert_no_slave ~__context pif; + debug "Trying to attach PIF: %s" uuid; + Nm.bring_pif_up ~__context ~management_interface pif + end else + if management_interface then + info "PIF %s is the management interface, but it is not managed by xapi. \ + The bridge and IP must be configured through other means." uuid + else + info "PIF %s is needed by a VM, but not managed by xapi. 
\ + The bridge must be configured through other means." uuid + ) local_pifs let detach ~__context bridge_name = - let dbg = Context.string_of_task __context in - if Net.Interface.exists dbg ~name:bridge_name then begin - List.iter (fun iface -> - D.warn "Untracked interface %s exists on bridge %s: deleting" iface bridge_name; - Net.Interface.bring_down dbg ~name:iface; - Net.Bridge.remove_port dbg ~bridge:bridge_name ~name:iface - ) (Net.Bridge.get_interfaces dbg ~name:bridge_name); - Net.Bridge.destroy dbg ~name:bridge_name () - end + let dbg = Context.string_of_task __context in + if Net.Interface.exists dbg ~name:bridge_name then begin + List.iter (fun iface -> + D.warn "Untracked interface %s exists on bridge %s: deleting" iface bridge_name; + Net.Interface.bring_down dbg ~name:iface; + Net.Bridge.remove_port dbg ~bridge:bridge_name ~name:iface + ) (Net.Bridge.get_interfaces dbg ~name:bridge_name); + Net.Bridge.destroy dbg ~name:bridge_name () + end let attach ~__context ~network ~host = attach_internal ~__context ~self:network () @@ -125,37 +125,37 @@ let active_vifs_to_networks : (API.ref_VIF, API.ref_network) Hashtbl.t = Hashtbl let active_vifs_to_networks_m = Mutex.create () let register_vif ~__context vif = - let network = Db.VIF.get_network ~__context ~self:vif in - Mutex.execute active_vifs_to_networks_m - (fun () -> - debug "register_vif vif=%s network=%s" (Ref.string_of vif) (Ref.string_of network); - Hashtbl.replace active_vifs_to_networks vif network - ) + let network = Db.VIF.get_network ~__context ~self:vif in + Mutex.execute active_vifs_to_networks_m + (fun () -> + debug "register_vif vif=%s network=%s" (Ref.string_of vif) (Ref.string_of network); + Hashtbl.replace active_vifs_to_networks vif network + ) let deregister_vif ~__context vif = - let network = Db.VIF.get_network ~__context ~self:vif in - let bridge = Db.Network.get_bridge ~__context ~self:network in - let internal_only = Db.Network.get_PIFs ~__context ~self:network = [] in - 
Mutex.execute active_vifs_to_networks_m - (fun () -> - Hashtbl.remove active_vifs_to_networks vif; - (* If a network has PIFs, then we create/destroy when the PIFs are plugged/unplugged. - If a network is entirely internal, then we remove it after we've stopped using it - *unless* someone else is still using it. *) - if internal_only then begin - (* Are there any more vifs using this network? *) - let others = Hashtbl.fold (fun v n acc -> if n = network then v :: acc else acc) - active_vifs_to_networks [] in - debug "deregister_vif vif=%s network=%s remaining vifs = [ %s ]" (Ref.string_of vif) (Ref.string_of network) (String.concat "; " (List.map Helpers.short_string_of_ref others)); - if others = [] then begin - let dbg = Context.string_of_task __context in - let ifs = Net.Bridge.get_interfaces dbg ~name:bridge in - if ifs = [] - then detach ~__context bridge - else error "Cannot remove bridge %s: other interfaces still present [ %s ]" bridge (String.concat "; " ifs) - end - end - ) + let network = Db.VIF.get_network ~__context ~self:vif in + let bridge = Db.Network.get_bridge ~__context ~self:network in + let internal_only = Db.Network.get_PIFs ~__context ~self:network = [] in + Mutex.execute active_vifs_to_networks_m + (fun () -> + Hashtbl.remove active_vifs_to_networks vif; + (* If a network has PIFs, then we create/destroy when the PIFs are plugged/unplugged. + If a network is entirely internal, then we remove it after we've stopped using it + *unless* someone else is still using it. *) + if internal_only then begin + (* Are there any more vifs using this network? 
*) + let others = Hashtbl.fold (fun v n acc -> if n = network then v :: acc else acc) + active_vifs_to_networks [] in + debug "deregister_vif vif=%s network=%s remaining vifs = [ %s ]" (Ref.string_of vif) (Ref.string_of network) (String.concat "; " (List.map Helpers.short_string_of_ref others)); + if others = [] then begin + let dbg = Context.string_of_task __context in + let ifs = Net.Bridge.get_interfaces dbg ~name:bridge in + if ifs = [] + then detach ~__context bridge + else error "Cannot remove bridge %s: other interfaces still present [ %s ]" bridge (String.concat "; " ifs) + end + end + ) let counter = ref 0 let mutex = Mutex.create () @@ -170,114 +170,114 @@ let pool_introduce ~__context ~name_label ~name_description ~mTU ~other_config ~ r let create ~__context ~name_label ~name_description ~mTU ~other_config ~tags = - Mutex.execute mutex (fun () -> - let networks = Db.Network.get_all ~__context in - let bridges = List.map (fun self -> Db.Network.get_bridge ~__context ~self) networks in - let mTU = if mTU <= 0L then 1500L else mTU in - let rec loop () = - let name = stem ^ (string_of_int !counter) in - incr counter; - if List.mem name bridges then loop () - else - let r = Ref.make () and uuid = Uuid.make_uuid () in - Db.Network.create ~__context ~ref:r ~uuid:(Uuid.to_string uuid) - ~current_operations:[] ~allowed_operations:[] - ~name_label ~name_description ~mTU ~bridge:name - ~other_config ~blobs:[] ~tags ~default_locking_mode:`unlocked ~assigned_ips:[]; - r in - loop ()) + Mutex.execute mutex (fun () -> + let networks = Db.Network.get_all ~__context in + let bridges = List.map (fun self -> Db.Network.get_bridge ~__context ~self) networks in + let mTU = if mTU <= 0L then 1500L else mTU in + let rec loop () = + let name = stem ^ (string_of_int !counter) in + incr counter; + if List.mem name bridges then loop () + else + let r = Ref.make () and uuid = Uuid.make_uuid () in + Db.Network.create ~__context ~ref:r ~uuid:(Uuid.to_string uuid) + 
~current_operations:[] ~allowed_operations:[] + ~name_label ~name_description ~mTU ~bridge:name + ~other_config ~blobs:[] ~tags ~default_locking_mode:`unlocked ~assigned_ips:[]; + r in + loop ()) let destroy ~__context ~self = - let vifs = Db.Network.get_VIFs ~__context ~self in - let connected = List.filter (fun self -> - Db.VIF.get_currently_attached ~__context ~self || Db.VIF.get_reserved ~__context ~self - ) vifs in - if connected <> [] then - raise (Api_errors.Server_error (Api_errors.network_contains_vif,List.map Ref.string_of connected)); - let pifs = Db.Network.get_PIFs ~__context ~self in - if pifs <> [] then - (raise (Api_errors.Server_error (Api_errors.network_contains_pif,List.map Ref.string_of pifs))); - (* CA-43250: don't let people remove the internal management network *) - let oc = Db.Network.get_other_config ~__context ~self in - if List.mem_assoc Xapi_globs.is_host_internal_management_network oc - && (try bool_of_string (List.assoc Xapi_globs.is_host_internal_management_network oc) with _ -> false) - then raise (Api_errors.Server_error (Api_errors.cannot_destroy_system_network, [ Ref.string_of self ])); + let vifs = Db.Network.get_VIFs ~__context ~self in + let connected = List.filter (fun self -> + Db.VIF.get_currently_attached ~__context ~self || Db.VIF.get_reserved ~__context ~self + ) vifs in + if connected <> [] then + raise (Api_errors.Server_error (Api_errors.network_contains_vif,List.map Ref.string_of connected)); + let pifs = Db.Network.get_PIFs ~__context ~self in + if pifs <> [] then + (raise (Api_errors.Server_error (Api_errors.network_contains_pif,List.map Ref.string_of pifs))); + (* CA-43250: don't let people remove the internal management network *) + let oc = Db.Network.get_other_config ~__context ~self in + if List.mem_assoc Xapi_globs.is_host_internal_management_network oc + && (try bool_of_string (List.assoc Xapi_globs.is_host_internal_management_network oc) with _ -> false) + then raise (Api_errors.Server_error 
(Api_errors.cannot_destroy_system_network, [ Ref.string_of self ])); - (* destroy all the VIFs now rather than wait for the GC thread. *) - List.iter (fun vif -> - Helpers.log_exn_continue (Printf.sprintf "destroying VIF: %s" (Ref.string_of vif)) - (fun vif -> Db.VIF.destroy ~__context ~self:vif) vif - ) vifs; - Db.Network.destroy ~__context ~self + (* destroy all the VIFs now rather than wait for the GC thread. *) + List.iter (fun vif -> + Helpers.log_exn_continue (Printf.sprintf "destroying VIF: %s" (Ref.string_of vif)) + (fun vif -> Db.VIF.destroy ~__context ~self:vif) vif + ) vifs; + Db.Network.destroy ~__context ~self let create_new_blob ~__context ~network ~name ~mime_type ~public = - let blob = Xapi_blob.create ~__context ~mime_type ~public in - Db.Network.add_to_blobs ~__context ~self:network ~key:name ~value:blob; - blob + let blob = Xapi_blob.create ~__context ~mime_type ~public in + Db.Network.add_to_blobs ~__context ~self:network ~key:name ~value:blob; + blob let set_default_locking_mode ~__context ~network ~value = - (* Get all VIFs which are attached and associated with this network. *) - let open Db_filter_types in - match Db.VIF.get_records_where ~__context - ~expr:(And ( - (Eq (Field "network", Literal (Ref.string_of network))), - (Eq (Field "currently_attached", Literal "true")) - )) - with - | [] -> Db.Network.set_default_locking_mode ~__context ~self:network ~value - | (vif,_)::_ -> raise (Api_errors.Server_error (Api_errors.vif_in_use, [Ref.string_of network; Ref.string_of vif])) + (* Get all VIFs which are attached and associated with this network. 
*) + let open Db_filter_types in + match Db.VIF.get_records_where ~__context + ~expr:(And ( + (Eq (Field "network", Literal (Ref.string_of network))), + (Eq (Field "currently_attached", Literal "true")) + )) + with + | [] -> Db.Network.set_default_locking_mode ~__context ~self:network ~value + | (vif,_)::_ -> raise (Api_errors.Server_error (Api_errors.vif_in_use, [Ref.string_of network; Ref.string_of vif])) let string_of_exn = function - | Api_errors.Server_error(code, params) -> Printf.sprintf "%s [ %s ]" code (String.concat "; " params) - | e -> Printexc.to_string e + | Api_errors.Server_error(code, params) -> Printf.sprintf "%s [ %s ]" code (String.concat "; " params) + | e -> Printexc.to_string e (* Networking helper functions for VMs and VIFs *) let attach_for_vif ~__context ~vif () = - register_vif ~__context vif; - let network = Db.VIF.get_network ~__context ~self:vif in - attach_internal ~__context ~self:network (); - Xapi_udhcpd.maybe_add_lease ~__context vif + register_vif ~__context vif; + let network = Db.VIF.get_network ~__context ~self:vif in + attach_internal ~__context ~self:network (); + Xapi_udhcpd.maybe_add_lease ~__context vif let attach_for_vm ~__context ~host ~vm = - List.iter - (fun vif -> - attach_for_vif ~__context ~vif () - ) (Db.VM.get_VIFs ~__context ~self:vm) + List.iter + (fun vif -> + attach_for_vif ~__context ~vif () + ) (Db.VM.get_VIFs ~__context ~self:vm) let detach_for_vm ~__context ~host ~vm = - try - List.iter - (fun vif -> - deregister_vif ~__context vif - ) (Db.VM.get_VIFs ~__context ~self:vm) - with e -> - error "Caught %s while detaching networks" (string_of_exn e) + try + List.iter + (fun vif -> + deregister_vif ~__context vif + ) (Db.VM.get_VIFs ~__context ~self:vm) + with e -> + error "Caught %s while detaching networks" (string_of_exn e) let with_networks_attached_for_vm ~__context ?host ~vm f = - begin match host with - | None -> (* use local host *) - attach_for_vm ~__context ~host:(Helpers.get_localhost ~__context) 
~vm - | Some host -> - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Network.attach_for_vm ~rpc ~session_id ~host ~vm - ) - end; - try - f () - with e -> - info "Caught %s: detaching networks" (string_of_exn e); - begin - try - match host with - | None -> (* use local host *) - detach_for_vm ~__context ~host:(Helpers.get_localhost ~__context) ~vm - | Some host -> - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Network.detach_for_vm ~rpc ~session_id ~host ~vm - ) - with e -> - error "Caught %s while detaching networks" (string_of_exn e) - end; - raise e + begin match host with + | None -> (* use local host *) + attach_for_vm ~__context ~host:(Helpers.get_localhost ~__context) ~vm + | Some host -> + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Network.attach_for_vm ~rpc ~session_id ~host ~vm + ) + end; + try + f () + with e -> + info "Caught %s: detaching networks" (string_of_exn e); + begin + try + match host with + | None -> (* use local host *) + detach_for_vm ~__context ~host:(Helpers.get_localhost ~__context) ~vm + | Some host -> + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Network.detach_for_vm ~rpc ~session_id ~host ~vm + ) + with e -> + error "Caught %s while detaching networks" (string_of_exn e) + end; + raise e diff --git a/ocaml/xapi/xapi_network.mli b/ocaml/xapi/xapi_network.mli index db208272ebc..e1e6957619c 100644 --- a/ocaml/xapi/xapi_network.mli +++ b/ocaml/xapi/xapi_network.mli @@ -13,23 +13,23 @@ *) (** Module that defines API functions for Network objects * @group Networking - *) +*) (** -{i Network} objects are used to interconnect multiple VIFs and PIFs within a resource pool. 
-{ul -{- PIFs and VIFs can be attached to a Network.} -{- A Network that spans multiple hosts has physical links (cables + hub/switch) between the PIFs that are attached to it.} -{- Two VMs are connected iff: both have a VIF on the same Network AND (they are on the same host OR their hosts both have a PIF on this Network).} -{- Within the boundaries of a host, a Network is represented by a bridge if it is brought up. A Network spanning multiple hosts has a bridge on each host.} -{- PIFs on a single host are all on different networks, and each PIF is associated to exactly one network. Hence, all PIFs on a network are on different hosts.} -{- A Network is called {i internal} if there are no PIFs associated with it. VMs that have VIFs on such a network, which is necessarily confined to a single host to be of use, are still able to communicate with each other.} -{- A Network is associated with any number of VIFs, and zero or one PIFs per host.} -{- Networks for physical interfaces are created automatically (when the PIF is created). The name of the bridge for such a network is derived from the device name of the interface. E.g. interface [eth0] is always associated with bridge [xenbr0].} -{- When a PIF or a VIF on a network is plugged, also the associated bridge is ensured to be up.} -{- There is a special case {i guest installer network}, which is used by the internal p2v tool. It is not associated with a PIF, but does some "special magic stuff". It will probably disappear eventually together with the p2v stuff, which is not really used anyway.} -} -Note: It is currently assumed that all PIFs that are associated with a certain Network are physically connected, but this is not checked or enforced anywhere. This means that if a system admin connects the cables in a wrong way, things may be broken. Moreover, if two PIFs are of different Networks, this does not mean that they are not on the same physical network. 
Ideally, Networks objects should be constructed and maintained automatically by xapi based the actual physical connections. + {i Network} objects are used to interconnect multiple VIFs and PIFs within a resource pool. + {ul + {- PIFs and VIFs can be attached to a Network.} + {- A Network that spans multiple hosts has physical links (cables + hub/switch) between the PIFs that are attached to it.} + {- Two VMs are connected iff: both have a VIF on the same Network AND (they are on the same host OR their hosts both have a PIF on this Network).} + {- Within the boundaries of a host, a Network is represented by a bridge if it is brought up. A Network spanning multiple hosts has a bridge on each host.} + {- PIFs on a single host are all on different networks, and each PIF is associated to exactly one network. Hence, all PIFs on a network are on different hosts.} + {- A Network is called {i internal} if there are no PIFs associated with it. VMs that have VIFs on such a network, which is necessarily confined to a single host to be of use, are still able to communicate with each other.} + {- A Network is associated with any number of VIFs, and zero or one PIFs per host.} + {- Networks for physical interfaces are created automatically (when the PIF is created). The name of the bridge for such a network is derived from the device name of the interface. E.g. interface [eth0] is always associated with bridge [xenbr0].} + {- When a PIF or a VIF on a network is plugged, also the associated bridge is ensured to be up.} + {- There is a special case {i guest installer network}, which is used by the internal p2v tool. It is not associated with a PIF, but does some "special magic stuff". It will probably disappear eventually together with the p2v stuff, which is not really used anyway.} + } + Note: It is currently assumed that all PIFs that are associated with a certain Network are physically connected, but this is not checked or enforced anywhere. 
This means that if a system admin connects the cables in a wrong way, things may be broken. Moreover, if two PIFs are of different Networks, this does not mean that they are not on the same physical network. Ideally, Networks objects should be constructed and maintained automatically by xapi based the actual physical connections. *) (** This function is called when xapi starts and management is disabled. It ensures @@ -37,13 +37,13 @@ Note: It is currently assumed that all PIFs that are associated with a certain N val check_himn : __context:Context.t -> unit (** Instantiate the bridge associated to this network on the localhost, and bring - up the PIFs on the localhost that are on this network, provided it wouldn't - destroy existing Networks (e.g. slaves of a bond) in use by something (VIF or management interface). - Note special-case handling of new management interfaces: we skip the - check for the existing management interface (essential otherwise switching - from a bond slave to a bond master would fail) and we make sure to call - {!Nm.bring_pif_up} with the [management_interface] argument so it can make sure - the default gateway is set up correctly *) + up the PIFs on the localhost that are on this network, provided it wouldn't + destroy existing Networks (e.g. slaves of a bond) in use by something (VIF or management interface). 
+ Note special-case handling of new management interfaces: we skip the + check for the existing management interface (essential otherwise switching + from a bond slave to a bond master would fail) and we make sure to call + {!Nm.bring_pif_up} with the [management_interface] argument so it can make sure + the default gateway is set up correctly *) val attach_internal : ?management_interface:bool -> __context:Context.t -> self:[ `network ] Ref.t -> unit -> unit @@ -92,33 +92,33 @@ val create_new_blob : name:string -> mime_type:string -> public:bool -> [ `blob ] Ref.t val set_default_locking_mode : - __context:Context.t -> - network:[ `network ] Ref.t -> - value:API.network_default_locking_mode -> unit + __context:Context.t -> + network:[ `network ] Ref.t -> + value:API.network_default_locking_mode -> unit (** {2 Networking helper functions for VMs and VIFs} *) val attach_for_vif : - __context:Context.t -> - vif:[ `VIF ] Ref.t -> - unit -> - unit + __context:Context.t -> + vif:[ `VIF ] Ref.t -> + unit -> + unit val attach_for_vm : - __context:Context.t -> - host:[ `host ] Ref.t -> - vm:[ `VM ] Ref.t -> - unit + __context:Context.t -> + host:[ `host ] Ref.t -> + vm:[ `VM ] Ref.t -> + unit val detach_for_vm : - __context:Context.t -> - host:[ `host ] Ref.t -> - vm:[ `VM ] Ref.t -> - unit + __context:Context.t -> + host:[ `host ] Ref.t -> + vm:[ `VM ] Ref.t -> + unit val with_networks_attached_for_vm : - __context:Context.t -> - ?host:[ `host ] Ref.t -> - vm:[ `VM ] Ref.t -> - (unit -> 'a) -> - 'a + __context:Context.t -> + ?host:[ `host ] Ref.t -> + vm:[ `VM ] Ref.t -> + (unit -> 'a) -> + 'a diff --git a/ocaml/xapi/xapi_network_attach_helpers.ml b/ocaml/xapi/xapi_network_attach_helpers.ml index 36991f7e235..83a830ef27e 100644 --- a/ocaml/xapi/xapi_network_attach_helpers.ml +++ b/ocaml/xapi/xapi_network_attach_helpers.ml @@ -38,7 +38,7 @@ let assert_network_has_no_vifs_in_use_on_me ~__context ~host ~network = then begin debug "Network contains VIF with attach in 
progress"; raise (Api_errors.Server_error(Api_errors.vif_in_use, [ Ref.string_of network; Ref.string_of self ])) - end + end | _ -> ()) ops; if Db.VIF.get_currently_attached ~__context ~self then @@ -54,89 +54,89 @@ let assert_network_has_no_vifs_in_use_on_me ~__context ~host ~network = (* nice triple negative ;) *) let assert_pif_disallow_unplug_not_set ~__context pif = if (Db.PIF.get_disallow_unplug ~__context ~self:pif) then - raise (Api_errors.Server_error(Api_errors.pif_does_not_allow_unplug, [ Ref.string_of pif ])) + raise (Api_errors.Server_error(Api_errors.pif_does_not_allow_unplug, [ Ref.string_of pif ])) let get_local_pifs ~__context ~network ~host = - (* There should be at most one local PIF by construction *) - Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "network", Literal (Ref.string_of network)), - Eq (Field "host", Literal (Ref.string_of host)) - )) + (* There should be at most one local PIF by construction *) + Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "network", Literal (Ref.string_of network)), + Eq (Field "host", Literal (Ref.string_of host)) + )) (* Plugging a bond slave is not allowed *) let assert_no_slave ~__context pif = - if Db.PIF.get_bond_slave_of ~__context ~self:pif <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of pif])) + if Db.PIF.get_bond_slave_of ~__context ~self:pif <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of pif])) let assert_can_attach_network_on_host ~__context ~self ~host = let local_pifs = get_local_pifs ~__context ~network:self ~host in List.iter (fun pif -> assert_no_slave ~__context pif) local_pifs let assert_can_see_named_networks ~__context ~vm ~host reqd_nets = - let is_network_available_on host net = - (* has the network been actualised by one or more PIFs? 
*) - let pifs = Db.Network.get_PIFs ~__context ~self:net in - if pifs <> [] then begin - (* network is only available if one of *) - (* the PIFs connects to the target host *) - let hosts = - List.map (fun self -> Db.PIF.get_host ~__context ~self) pifs in - List.mem host hosts - end else begin - let other_config = Db.Network.get_other_config ~__context ~self:net in - if List.mem_assoc Xapi_globs.assume_network_is_shared other_config && (List.assoc Xapi_globs.assume_network_is_shared other_config = "true") then begin - debug "other_config:%s is set on Network %s" Xapi_globs.assume_network_is_shared (Ref.string_of net); - true - end else begin - (* find all the VIFs on this network and whose VM's are running. *) - (* XXX: in many environments this will perform O (Vms) calls to *) - (* VM.getRecord. *) - let vifs = Db.Network.get_VIFs ~__context ~self:net in - let vms = List.map (fun self -> Db.VIF.get_VM ~__context ~self) vifs in - let vms = List.map (fun self -> Db.VM.get_record ~__context ~self) vms in - let vms = List.filter (fun vm -> vm.API.vM_power_state = `Running) vms in - let hosts = List.map (fun vm -> vm.API.vM_resident_on) vms in - (* either not pinned to any host OR pinned to this host already *) - hosts = [] || (List.mem host hosts) - end - end - in + let is_network_available_on host net = + (* has the network been actualised by one or more PIFs? 
*) + let pifs = Db.Network.get_PIFs ~__context ~self:net in + if pifs <> [] then begin + (* network is only available if one of *) + (* the PIFs connects to the target host *) + let hosts = + List.map (fun self -> Db.PIF.get_host ~__context ~self) pifs in + List.mem host hosts + end else begin + let other_config = Db.Network.get_other_config ~__context ~self:net in + if List.mem_assoc Xapi_globs.assume_network_is_shared other_config && (List.assoc Xapi_globs.assume_network_is_shared other_config = "true") then begin + debug "other_config:%s is set on Network %s" Xapi_globs.assume_network_is_shared (Ref.string_of net); + true + end else begin + (* find all the VIFs on this network and whose VM's are running. *) + (* XXX: in many environments this will perform O (Vms) calls to *) + (* VM.getRecord. *) + let vifs = Db.Network.get_VIFs ~__context ~self:net in + let vms = List.map (fun self -> Db.VIF.get_VM ~__context ~self) vifs in + let vms = List.map (fun self -> Db.VM.get_record ~__context ~self) vms in + let vms = List.filter (fun vm -> vm.API.vM_power_state = `Running) vms in + let hosts = List.map (fun vm -> vm.API.vM_resident_on) vms in + (* either not pinned to any host OR pinned to this host already *) + hosts = [] || (List.mem host hosts) + end + end + in - let avail_nets = List.filter (is_network_available_on host) reqd_nets in - let not_available = List.set_difference reqd_nets avail_nets in + let avail_nets = List.filter (is_network_available_on host) reqd_nets in + let not_available = List.set_difference reqd_nets avail_nets in - List.iter - (fun net -> warn "Host %s cannot see Network %s" - (Helpers.checknull - (fun () -> Db.Host.get_name_label ~__context ~self:host)) - (Helpers.checknull - (fun () -> Db.Network.get_name_label ~__context ~self:net))) - not_available; - if not_available <> [] then - raise (Api_errors.Server_error (Api_errors.vm_requires_net, [ - Ref.string_of vm; - Ref.string_of (List.hd not_available) - ])); + List.iter + (fun net -> 
warn "Host %s cannot see Network %s" + (Helpers.checknull + (fun () -> Db.Host.get_name_label ~__context ~self:host)) + (Helpers.checknull + (fun () -> Db.Network.get_name_label ~__context ~self:net))) + not_available; + if not_available <> [] then + raise (Api_errors.Server_error (Api_errors.vm_requires_net, [ + Ref.string_of vm; + Ref.string_of (List.hd not_available) + ])); - (* Also, for each of the available networks, we need to ensure that we can bring it - * up on the specified host; i.e. it doesn't need an enslaved PIF. *) - List.iter - (fun network-> - try - assert_can_attach_network_on_host - ~__context - ~self:network - ~host - (* throw exception more appropriate to this context: *) - with exn -> - debug - "Caught exception while checking if network %s could be attached on host %s:%s" - (Ref.string_of network) - (Ref.string_of host) - (ExnHelper.string_of_exn exn); - raise (Api_errors.Server_error ( - Api_errors.host_cannot_attach_network, [ - Ref.string_of host; Ref.string_of network ])) - ) - avail_nets + (* Also, for each of the available networks, we need to ensure that we can bring it + * up on the specified host; i.e. it doesn't need an enslaved PIF. 
*) + List.iter + (fun network-> + try + assert_can_attach_network_on_host + ~__context + ~self:network + ~host + (* throw exception more appropriate to this context: *) + with exn -> + debug + "Caught exception while checking if network %s could be attached on host %s:%s" + (Ref.string_of network) + (Ref.string_of host) + (ExnHelper.string_of_exn exn); + raise (Api_errors.Server_error ( + Api_errors.host_cannot_attach_network, [ + Ref.string_of host; Ref.string_of network ])) + ) + avail_nets diff --git a/ocaml/xapi/xapi_network_attach_helpers.mli b/ocaml/xapi/xapi_network_attach_helpers.mli index f7edea88a72..ab7293c3ac5 100644 --- a/ocaml/xapi/xapi_network_attach_helpers.mli +++ b/ocaml/xapi/xapi_network_attach_helpers.mli @@ -13,13 +13,13 @@ *) (** Assertion helpers used when attaching a network * @group Networking - *) - +*) + (** Raises an exception if the network has VIFs in use on the host *) val assert_network_has_no_vifs_in_use_on_me : __context:Context.t -> - host:[ `host ] Ref.t -> network:[ `network ] Ref.t -> unit - + host:[ `host ] Ref.t -> network:[ `network ] Ref.t -> unit + (** Raises an exception when the [disallow_unplug] flag is set *) val assert_pif_disallow_unplug_not_set : __context:Context.t -> [ `PIF ] Ref.t -> unit diff --git a/ocaml/xapi/xapi_pbd.ml b/ocaml/xapi/xapi_pbd.ml index 79a6a612ef9..e056c1384bf 100644 --- a/ocaml/xapi/xapi_pbd.ml +++ b/ocaml/xapi/xapi_pbd.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for PBD objects * @group XenAPI functions - *) +*) open Db_filter open Db_filter_types @@ -22,27 +22,27 @@ module D=Debug.Make(struct let name="xapi_pbd" end) open D let assert_no_srmaster_key dev_cfg = - let k = "SRmaster" in - if List.mem_assoc k dev_cfg - then raise (Api_errors.Server_error (Api_errors.value_not_supported, - [k; List.assoc k dev_cfg; "This key is for internal use only"])) + let k = "SRmaster" in + if List.mem_assoc k dev_cfg + then raise (Api_errors.Server_error (Api_errors.value_not_supported, + 
[k; List.assoc k dev_cfg; "This key is for internal use only"])) let create_common ~__context ~host ~sR ~device_config ~currently_attached ~other_config = - let pbds = Db.SR.get_PBDs ~__context ~self:sR in - if List.exists (fun pbd -> Db.PBD.get_host ~__context ~self:pbd = host) pbds - then raise (Api_errors.Server_error (Api_errors.pbd_exists, - [ Ref.string_of sR - ; Ref.string_of host - ; Ref.string_of (List.find (fun pbd -> Db.PBD.get_host ~__context ~self:pbd = host) pbds) - ])); - (* This field should never be present in the record itself *) - assert_no_srmaster_key device_config; - (* Make sure each PBD has a unique secret in the database *) - let dev_cfg = Xapi_secret.duplicate_passwds ~__context device_config in - let ref = Ref.make() in - let uuid = Uuid.to_string (Uuid.make_uuid()) in - Db.PBD.create ~__context ~ref ~uuid ~host ~sR ~device_config:dev_cfg ~currently_attached ~other_config:[]; - ref + let pbds = Db.SR.get_PBDs ~__context ~self:sR in + if List.exists (fun pbd -> Db.PBD.get_host ~__context ~self:pbd = host) pbds + then raise (Api_errors.Server_error (Api_errors.pbd_exists, + [ Ref.string_of sR + ; Ref.string_of host + ; Ref.string_of (List.find (fun pbd -> Db.PBD.get_host ~__context ~self:pbd = host) pbds) + ])); + (* This field should never be present in the record itself *) + assert_no_srmaster_key device_config; + (* Make sure each PBD has a unique secret in the database *) + let dev_cfg = Xapi_secret.duplicate_passwds ~__context device_config in + let ref = Ref.make() in + let uuid = Uuid.to_string (Uuid.make_uuid()) in + Db.PBD.create ~__context ~ref ~uuid ~host ~sR ~device_config:dev_cfg ~currently_attached ~other_config:[]; + ref let create ~__context ~host ~sR ~device_config ~other_config = create_common ~__context ~host ~sR ~device_config ~currently_attached:false ~other_config @@ -54,18 +54,18 @@ let create_thishost ~__context ~sR ~device_config ~currently_attached = let get_active_vdis_by_pbd ~__context ~self = let sr = 
Db.PBD.get_SR ~__context ~self in let host = Db.PBD.get_host ~__context ~self in - let vms = Db.VM.get_records_where ~__context - ~expr:(Eq(Field "resident_on", Literal (Ref.string_of host))) in + let vms = Db.VM.get_records_where ~__context + ~expr:(Eq(Field "resident_on", Literal (Ref.string_of host))) in let vbds = List.flatten (List.map (fun (vm,vmr) -> vmr.API.vM_VBDs) vms) in let vbds_r = List.map (fun self -> Db.VBD.get_record_internal ~__context ~self) vbds in let active_vbds = List.filter - (fun r -> - (r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved) && not(r.Db_actions.vBD_empty)) vbds_r in - + (fun r -> + (r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved) && not(r.Db_actions.vBD_empty)) vbds_r in + let vdis = List.map (fun r -> r.Db_actions.vBD_VDI) active_vbds in let vdis_in_sr = List.filter (fun vdi -> sr=Db.VDI.get_SR ~__context ~self:vdi) vdis in vdis_in_sr - + (* CA-16480: abort if unplugging this PBD would cause a protected VM to become non-agile *) let abort_if_storage_attached_to_protected_vms ~__context ~self = let pool = Helpers.get_pool ~__context in @@ -76,131 +76,131 @@ let abort_if_storage_attached_to_protected_vms ~__context ~self = let protected_vms = List.filter (fun (_, record) -> Helpers.is_xha_protected_r record) vms in List.iter (fun (vm_ref, vm_record) -> - let vbds = vm_record.API.vM_VBDs in - List.iter - (fun vbd -> - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - if List.mem vdi vdis then begin - warn "PBD.unplug will make protected VM %s not agile since it has a VBD attached to VDI %s" (Ref.string_of vm_ref) (Ref.string_of vdi); - raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) - end - ) vbds + let vbds = vm_record.API.vM_VBDs in + List.iter + (fun vbd -> + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + if List.mem vdi vdis then begin + warn "PBD.unplug will make protected VM %s not agile since it has a VBD attached to VDI %s" (Ref.string_of vm_ref) 
(Ref.string_of vdi); + raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) + end + ) vbds ) protected_vms end (* Split all metadata VDIs in an SR into two lists - metadata VDIs of this pool, and metadata VDIs of a foreign pool. *) let partition_metadata_vdis_by_pool ~__context ~sr = - let pool = Helpers.get_pool ~__context in - let metadata_vdis = List.filter - (fun vdi -> Db.VDI.get_type ~__context ~self:vdi = `metadata) - (Db.SR.get_VDIs ~__context ~self:sr) - in - List.partition - (fun vdi -> Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool) - metadata_vdis + let pool = Helpers.get_pool ~__context in + let metadata_vdis = List.filter + (fun vdi -> Db.VDI.get_type ~__context ~self:vdi = `metadata) + (Db.SR.get_VDIs ~__context ~self:sr) + in + List.partition + (fun vdi -> Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool) + metadata_vdis let check_sharing_constraint ~__context ~sr = - if not(Db.SR.get_shared ~__context ~self:sr) then begin - let pbds = Db.SR.get_PBDs ~__context ~self:sr in - (* Filter out the attached PBDs which aren't connected to this host *) - let me = Helpers.get_localhost ~__context in - let others = List.filter (fun self -> - Db.PBD.get_currently_attached ~__context ~self && - Db.PBD.get_host ~__context ~self <> me) pbds in - if others <> [] - then raise (Api_errors.Server_error(Api_errors.sr_not_sharable, - [ Ref.string_of sr; Ref.string_of (Db.PBD.get_host ~__context ~self:(List.hd others)) ])) - end + if not(Db.SR.get_shared ~__context ~self:sr) then begin + let pbds = Db.SR.get_PBDs ~__context ~self:sr in + (* Filter out the attached PBDs which aren't connected to this host *) + let me = Helpers.get_localhost ~__context in + let others = List.filter (fun self -> + Db.PBD.get_currently_attached ~__context ~self && + Db.PBD.get_host ~__context ~self <> me) pbds in + if others <> [] + then raise (Api_errors.Server_error(Api_errors.sr_not_sharable, + [ Ref.string_of sr; Ref.string_of 
(Db.PBD.get_host ~__context ~self:(List.hd others)) ])) + end module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) let plug ~__context ~self = - (* It's possible to end up with a PBD being plugged after "unbind" is - called if SR.create races with a PBD.plug (see Storage_access.create_sr) - Since "bind" is idempotent it is safe to always call it. *) - let query_result = Storage_access.bind ~__context ~pbd:self in - let currently_attached = Db.PBD.get_currently_attached ~__context ~self in - if not currently_attached then - begin - let sr = Db.PBD.get_SR ~__context ~self in - check_sharing_constraint ~__context ~sr; - let dbg = Ref.string_of (Context.get_task_id __context) in - let device_config = Db.PBD.get_device_config ~__context ~self in - Storage_access.transform_storage_exn - (fun () -> C.SR.attach dbg (Db.SR.get_uuid ~__context ~self:sr) device_config); - Db.PBD.set_currently_attached ~__context ~self ~value:true; - - Xapi_sr_operations.sr_health_check ~__context ~self:sr; - - (* When the plugin is registered it is possible to query the capabilities etc *) - Xapi_sm.register_plugin ~__context query_result; - - (* The allowed-operations depend on the capabilities *) - Xapi_sr_operations.update_allowed_operations ~__context ~self:sr; - end + (* It's possible to end up with a PBD being plugged after "unbind" is + called if SR.create races with a PBD.plug (see Storage_access.create_sr) + Since "bind" is idempotent it is safe to always call it. 
*) + let query_result = Storage_access.bind ~__context ~pbd:self in + let currently_attached = Db.PBD.get_currently_attached ~__context ~self in + if not currently_attached then + begin + let sr = Db.PBD.get_SR ~__context ~self in + check_sharing_constraint ~__context ~sr; + let dbg = Ref.string_of (Context.get_task_id __context) in + let device_config = Db.PBD.get_device_config ~__context ~self in + Storage_access.transform_storage_exn + (fun () -> C.SR.attach dbg (Db.SR.get_uuid ~__context ~self:sr) device_config); + Db.PBD.set_currently_attached ~__context ~self ~value:true; + + Xapi_sr_operations.sr_health_check ~__context ~self:sr; + + (* When the plugin is registered it is possible to query the capabilities etc *) + Xapi_sm.register_plugin ~__context query_result; + + (* The allowed-operations depend on the capabilities *) + Xapi_sr_operations.update_allowed_operations ~__context ~self:sr; + end let unplug ~__context ~self = - let currently_attached = Db.PBD.get_currently_attached ~__context ~self in - if currently_attached then - begin - let host = Db.PBD.get_host ~__context ~self in - let sr = Db.PBD.get_SR ~__context ~self in - if Db.Host.get_enabled ~__context ~self:host - then abort_if_storage_attached_to_protected_vms ~__context ~self; - - (* If HA is enabled, prevent a PBD whose SR contains a statefile being unplugged *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then begin - let statefiles = Db.Pool.get_ha_statefiles ~__context ~self:pool in - let statefile_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self:(Ref.of_string self)) statefiles in - if List.mem sr statefile_srs - then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])) - end; - - let vdis = get_active_vdis_by_pbd ~__context ~self in - let non_metadata_vdis = List.filter (fun vdi -> Db.VDI.get_type ~__context ~self:vdi <> `metadata) vdis in - if List.length non_metadata_vdis > 0 - then raise 
(Api_errors.Server_error(Api_errors.vdi_in_use,List.map Ref.string_of non_metadata_vdis)); - - if Helpers.i_am_srmaster ~__context ~sr then begin - let (metadata_vdis_of_this_pool, metadata_vdis_of_foreign_pool) = - partition_metadata_vdis_by_pool ~__context ~sr - in - (* Remove all foreign metadata VDIs from the cache so that the metadata_latest of remaining VDIs can be updated. *) - Xapi_dr.remove_vdis_from_cache ~__context ~vdis:metadata_vdis_of_foreign_pool; - (* Set all the removed metadata VDIs of foreign pools to have metadata_latest = false. *) - (* This enables the metadata_latest flag to indicate whether we can recover VMs from a VDI. *) - List.iter - (fun vdi -> Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false) - metadata_vdis_of_foreign_pool; - (* Disable metadata replication to VDIs in the SR. *) - List.iter - (fun vdi -> - debug "Automatically disabling database replication to VDI %s" (Ref.string_of vdi); - Xapi_vdi_helpers.disable_database_replication ~__context ~vdi) - metadata_vdis_of_this_pool - end; - let dbg = Ref.string_of (Context.get_task_id __context) in - let uuid = Db.SR.get_uuid ~__context ~self:sr in - Storage_access.transform_storage_exn - (fun () -> C.SR.detach dbg uuid); - - Storage_access.unbind ~__context ~pbd:self; - Db.PBD.set_currently_attached ~__context ~self ~value:false; - - Xapi_sr_operations.stop_health_check_thread ~__context ~self:sr; - - Xapi_sr_operations.update_allowed_operations ~__context ~self:sr; - end + let currently_attached = Db.PBD.get_currently_attached ~__context ~self in + if currently_attached then + begin + let host = Db.PBD.get_host ~__context ~self in + let sr = Db.PBD.get_SR ~__context ~self in + if Db.Host.get_enabled ~__context ~self:host + then abort_if_storage_attached_to_protected_vms ~__context ~self; + + (* If HA is enabled, prevent a PBD whose SR contains a statefile being unplugged *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then 
begin + let statefiles = Db.Pool.get_ha_statefiles ~__context ~self:pool in + let statefile_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self:(Ref.of_string self)) statefiles in + if List.mem sr statefile_srs + then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])) + end; + + let vdis = get_active_vdis_by_pbd ~__context ~self in + let non_metadata_vdis = List.filter (fun vdi -> Db.VDI.get_type ~__context ~self:vdi <> `metadata) vdis in + if List.length non_metadata_vdis > 0 + then raise (Api_errors.Server_error(Api_errors.vdi_in_use,List.map Ref.string_of non_metadata_vdis)); + + if Helpers.i_am_srmaster ~__context ~sr then begin + let (metadata_vdis_of_this_pool, metadata_vdis_of_foreign_pool) = + partition_metadata_vdis_by_pool ~__context ~sr + in + (* Remove all foreign metadata VDIs from the cache so that the metadata_latest of remaining VDIs can be updated. *) + Xapi_dr.remove_vdis_from_cache ~__context ~vdis:metadata_vdis_of_foreign_pool; + (* Set all the removed metadata VDIs of foreign pools to have metadata_latest = false. *) + (* This enables the metadata_latest flag to indicate whether we can recover VMs from a VDI. *) + List.iter + (fun vdi -> Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false) + metadata_vdis_of_foreign_pool; + (* Disable metadata replication to VDIs in the SR. 
*) + List.iter + (fun vdi -> + debug "Automatically disabling database replication to VDI %s" (Ref.string_of vdi); + Xapi_vdi_helpers.disable_database_replication ~__context ~vdi) + metadata_vdis_of_this_pool + end; + let dbg = Ref.string_of (Context.get_task_id __context) in + let uuid = Db.SR.get_uuid ~__context ~self:sr in + Storage_access.transform_storage_exn + (fun () -> C.SR.detach dbg uuid); + + Storage_access.unbind ~__context ~pbd:self; + Db.PBD.set_currently_attached ~__context ~self ~value:false; + + Xapi_sr_operations.stop_health_check_thread ~__context ~self:sr; + + Xapi_sr_operations.update_allowed_operations ~__context ~self:sr; + end let destroy ~__context ~self = - if Db.PBD.get_currently_attached ~__context ~self - then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["PBD is currently attached"])); - let device_cfg = Db.PBD.get_device_config ~__context ~self in - Db.PBD.destroy ~__context ~self; - Xapi_secret.clean_out_passwds ~__context device_cfg + if Db.PBD.get_currently_attached ~__context ~self + then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["PBD is currently attached"])); + let device_cfg = Db.PBD.get_device_config ~__context ~self in + Db.PBD.destroy ~__context ~self; + Xapi_secret.clean_out_passwds ~__context device_cfg -let set_device_config ~__context ~self ~value = +let set_device_config ~__context ~self ~value = (* Only allowed from the SM plugin *) assert_no_srmaster_key value; Db.PBD.set_device_config ~__context ~self ~value diff --git a/ocaml/xapi/xapi_pci.ml b/ocaml/xapi/xapi_pci.ml index a506d597d99..886af836c05 100644 --- a/ocaml/xapi/xapi_pci.ml +++ b/ocaml/xapi/xapi_pci.ml @@ -19,179 +19,179 @@ open Listext open Xstringext type base_class = - | Storage_controller - | Network_controller - | Display_controller + | Storage_controller + | Network_controller + | Display_controller let is_class_of_kind kind id = - let base_class_id_of_kind = function - | Storage_controller -> 0x0100 - | 
Network_controller -> 0x0200 - | Display_controller -> 0x0300 - in - (* The base_class is the most-significant byte of the class ID *) - id land 0xff00 = base_class_id_of_kind kind + let base_class_id_of_kind = function + | Storage_controller -> 0x0100 + | Network_controller -> 0x0200 + | Display_controller -> 0x0300 + in + (* The base_class is the most-significant byte of the class ID *) + id land 0xff00 = base_class_id_of_kind kind let managed_classes = [ - Storage_controller; - Network_controller; - Display_controller; + Storage_controller; + Network_controller; + Display_controller; ] let string_of_pci ~__context ~self = - let pci = Db.PCI.get_record_internal ~__context ~self in - String.concat "/" [pci.Db_actions.pCI_vendor_id; pci.Db_actions.pCI_device_id] + let pci = Db.PCI.get_record_internal ~__context ~self in + String.concat "/" [pci.Db_actions.pCI_vendor_id; pci.Db_actions.pCI_device_id] (* We use ints within code but schema uses hex strings _without_ leading '0x' *) let int_of_id string_id = - let int_of_hex_str = fun s -> Scanf.sscanf s "%x" (fun x -> x) in - int_of_hex_str string_id + let int_of_hex_str = fun s -> Scanf.sscanf s "%x" (fun x -> x) in + int_of_hex_str string_id let id_of_int hex_id = - Printf.sprintf "%04x" hex_id + Printf.sprintf "%04x" hex_id let create ~__context ~class_id ~class_name ~vendor_id ~vendor_name ~device_id - ~device_name ~host ~pci_id ~functions ~dependencies ~other_config - ~subsystem_vendor_id ~subsystem_vendor_name - ~subsystem_device_id ~subsystem_device_name = - let p = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.PCI.create ~__context ~ref:p ~uuid ~class_id ~class_name ~vendor_id ~vendor_name ~device_id - ~device_name ~host ~pci_id ~functions ~dependencies:[] ~other_config:[] - ~subsystem_vendor_id ~subsystem_vendor_name - ~subsystem_device_id ~subsystem_device_name; - debug "PCI %s, %s, %s created" pci_id vendor_name device_name; - p + ~device_name ~host ~pci_id ~functions ~dependencies 
~other_config + ~subsystem_vendor_id ~subsystem_vendor_name + ~subsystem_device_id ~subsystem_device_name = + let p = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.PCI.create ~__context ~ref:p ~uuid ~class_id ~class_name ~vendor_id ~vendor_name ~device_id + ~device_name ~host ~pci_id ~functions ~dependencies:[] ~other_config:[] + ~subsystem_vendor_id ~subsystem_vendor_name + ~subsystem_device_id ~subsystem_device_name; + debug "PCI %s, %s, %s created" pci_id vendor_name device_name; + p let update_pcis ~__context ~host = - let existing = List.filter_map - (fun pref -> - let prec = Db.PCI.get_record_internal ~__context ~self:pref in - if prec.Db_actions.pCI_host = host then - Some (pref, prec) - else - None) - (Db.PCI.get_all ~__context) - in + let existing = List.filter_map + (fun pref -> + let prec = Db.PCI.get_record_internal ~__context ~self:pref in + if prec.Db_actions.pCI_host = host then + Some (pref, prec) + else + None) + (Db.PCI.get_all ~__context) + in - let open Xapi_pci_helpers in - let strings_of_pci_property = function - | None -> "", "" - | Some property -> id_of_int property.id, property.name - in - let rec update_or_create cur = function - | [] -> cur - | pci :: remaining_pcis -> - let obj = - try - let (subsystem_vendor_id, subsystem_vendor_name) = - strings_of_pci_property pci.subsystem_vendor in - let (subsystem_device_id, subsystem_device_name) = - strings_of_pci_property pci.subsystem_device in - let (rf, rc) = List.find (fun (rf, rc) -> - rc.Db_actions.pCI_pci_id = pci.address && - rc.Db_actions.pCI_vendor_id = id_of_int pci.vendor.id && - rc.Db_actions.pCI_device_id = id_of_int pci.device.id && - rc.Db_actions.pCI_subsystem_vendor_id = subsystem_vendor_id && - rc.Db_actions.pCI_subsystem_device_id = subsystem_device_id) - existing in - (* sync the vendor name. *) - if rc.Db_actions.pCI_vendor_name <> pci.vendor.name - then Db.PCI.set_vendor_name ~__context ~self:rf ~value:pci.vendor.name; - (* sync the device name. 
*) - if rc.Db_actions.pCI_device_name <> pci.device.name - then Db.PCI.set_device_name ~__context ~self:rf ~value:pci.device.name; - (* sync the subsystem vendor name. *) - if rc.Db_actions.pCI_subsystem_vendor_name <> subsystem_vendor_name - then Db.PCI.set_subsystem_vendor_name ~__context ~self:rf ~value:subsystem_vendor_name; - (* sync the subsystem device name. *) - if rc.Db_actions.pCI_subsystem_device_name <> subsystem_device_name - then Db.PCI.set_subsystem_device_name ~__context ~self:rf ~value:subsystem_device_name; - (* sync the class information. *) - if rc.Db_actions.pCI_class_id <> id_of_int pci.pci_class.id - then Db.PCI.set_class_id ~__context ~self:rf ~value:(id_of_int pci.pci_class.id); - if rc.Db_actions.pCI_class_name <> pci.pci_class.name - then Db.PCI.set_class_name ~__context ~self:rf ~value:pci.pci_class.name; - (* sync the attached VMs. *) - let attached_VMs = List.filter (Db.is_valid_ref __context) rc.Db_actions.pCI_attached_VMs in - if attached_VMs <> rc.Db_actions.pCI_attached_VMs then - Db.PCI.set_attached_VMs ~__context ~self:rf ~value:attached_VMs; - rf, rc - with Not_found -> - let subsystem_vendor_id, subsystem_vendor_name = - strings_of_pci_property pci.subsystem_vendor in - let subsystem_device_id, subsystem_device_name = - strings_of_pci_property pci.subsystem_device in - let self = create ~__context - ~class_id:(id_of_int pci.pci_class.id) - ~class_name:pci.pci_class.name - ~vendor_id:(id_of_int pci.vendor.id) - ~vendor_name:pci.vendor.name - ~device_id:(id_of_int pci.device.id) - ~device_name:pci.device.name ~host ~pci_id:pci.address - ~functions:1L ~dependencies:[] ~other_config:[] - ~subsystem_vendor_id ~subsystem_vendor_name - ~subsystem_device_id ~subsystem_device_name in - self, Db.PCI.get_record_internal ~__context ~self - in - update_or_create ((obj, pci) :: cur) remaining_pcis - in - let host_pcis = Xapi_pci_helpers.get_host_pcis () in - let class_pcis = - List.filter (fun pci -> - List.exists (fun k -> is_class_of_kind 
k pci.pci_class.id) managed_classes - ) host_pcis in - let deps = List.flatten (List.map (fun pci -> pci.related) class_pcis) in - let deps = List.map (fun dep -> List.find (fun pci -> pci.address = dep) host_pcis) deps in - let managed_pcis = List.setify (class_pcis @ deps) in - let current = update_or_create [] managed_pcis in + let open Xapi_pci_helpers in + let strings_of_pci_property = function + | None -> "", "" + | Some property -> id_of_int property.id, property.name + in + let rec update_or_create cur = function + | [] -> cur + | pci :: remaining_pcis -> + let obj = + try + let (subsystem_vendor_id, subsystem_vendor_name) = + strings_of_pci_property pci.subsystem_vendor in + let (subsystem_device_id, subsystem_device_name) = + strings_of_pci_property pci.subsystem_device in + let (rf, rc) = List.find (fun (rf, rc) -> + rc.Db_actions.pCI_pci_id = pci.address && + rc.Db_actions.pCI_vendor_id = id_of_int pci.vendor.id && + rc.Db_actions.pCI_device_id = id_of_int pci.device.id && + rc.Db_actions.pCI_subsystem_vendor_id = subsystem_vendor_id && + rc.Db_actions.pCI_subsystem_device_id = subsystem_device_id) + existing in + (* sync the vendor name. *) + if rc.Db_actions.pCI_vendor_name <> pci.vendor.name + then Db.PCI.set_vendor_name ~__context ~self:rf ~value:pci.vendor.name; + (* sync the device name. *) + if rc.Db_actions.pCI_device_name <> pci.device.name + then Db.PCI.set_device_name ~__context ~self:rf ~value:pci.device.name; + (* sync the subsystem vendor name. *) + if rc.Db_actions.pCI_subsystem_vendor_name <> subsystem_vendor_name + then Db.PCI.set_subsystem_vendor_name ~__context ~self:rf ~value:subsystem_vendor_name; + (* sync the subsystem device name. *) + if rc.Db_actions.pCI_subsystem_device_name <> subsystem_device_name + then Db.PCI.set_subsystem_device_name ~__context ~self:rf ~value:subsystem_device_name; + (* sync the class information. 
*) + if rc.Db_actions.pCI_class_id <> id_of_int pci.pci_class.id + then Db.PCI.set_class_id ~__context ~self:rf ~value:(id_of_int pci.pci_class.id); + if rc.Db_actions.pCI_class_name <> pci.pci_class.name + then Db.PCI.set_class_name ~__context ~self:rf ~value:pci.pci_class.name; + (* sync the attached VMs. *) + let attached_VMs = List.filter (Db.is_valid_ref __context) rc.Db_actions.pCI_attached_VMs in + if attached_VMs <> rc.Db_actions.pCI_attached_VMs then + Db.PCI.set_attached_VMs ~__context ~self:rf ~value:attached_VMs; + rf, rc + with Not_found -> + let subsystem_vendor_id, subsystem_vendor_name = + strings_of_pci_property pci.subsystem_vendor in + let subsystem_device_id, subsystem_device_name = + strings_of_pci_property pci.subsystem_device in + let self = create ~__context + ~class_id:(id_of_int pci.pci_class.id) + ~class_name:pci.pci_class.name + ~vendor_id:(id_of_int pci.vendor.id) + ~vendor_name:pci.vendor.name + ~device_id:(id_of_int pci.device.id) + ~device_name:pci.device.name ~host ~pci_id:pci.address + ~functions:1L ~dependencies:[] ~other_config:[] + ~subsystem_vendor_id ~subsystem_vendor_name + ~subsystem_device_id ~subsystem_device_name in + self, Db.PCI.get_record_internal ~__context ~self + in + update_or_create ((obj, pci) :: cur) remaining_pcis + in + let host_pcis = Xapi_pci_helpers.get_host_pcis () in + let class_pcis = + List.filter (fun pci -> + List.exists (fun k -> is_class_of_kind k pci.pci_class.id) managed_classes + ) host_pcis in + let deps = List.flatten (List.map (fun pci -> pci.related) class_pcis) in + let deps = List.map (fun dep -> List.find (fun pci -> pci.address = dep) host_pcis) deps in + let managed_pcis = List.setify (class_pcis @ deps) in + let current = update_or_create [] managed_pcis in - let update_dependencies current = - let rec update = function - | [] -> () - | ((pref, prec), pci) :: remaining -> - let dependencies = List.map - (fun address -> - let (r, _), _ = List.find (fun ((_, rc), _) -> 
rc.Db_actions.pCI_pci_id = address) current - in r) - pci.related - in - Db.PCI.set_dependencies ~__context ~self:pref ~value:dependencies; - update remaining - in - update current - in - update_dependencies current; + let update_dependencies current = + let rec update = function + | [] -> () + | ((pref, prec), pci) :: remaining -> + let dependencies = List.map + (fun address -> + let (r, _), _ = List.find (fun ((_, rc), _) -> rc.Db_actions.pCI_pci_id = address) current + in r) + pci.related + in + Db.PCI.set_dependencies ~__context ~self:pref ~value:dependencies; + update remaining + in + update current + in + update_dependencies current; - let current = List.map (fun ((pref, prec), _) -> pref, prec) current in - let obsolete = List.set_difference existing current in - List.iter (fun (self, _) -> Db.PCI.destroy ~__context ~self) obsolete + let current = List.map (fun ((pref, prec), _) -> pref, prec) current in + let obsolete = List.set_difference existing current in + List.iter (fun (self, _) -> Db.PCI.destroy ~__context ~self) obsolete let with_vga_arbiter ~readonly f = - Unixext.with_file - "/dev/vga_arbiter" - (if readonly then [Unix.O_RDONLY] else [Unix.O_RDWR]) - 0o000 - f + Unixext.with_file + "/dev/vga_arbiter" + (if readonly then [Unix.O_RDONLY] else [Unix.O_RDWR]) + 0o000 + f let disable_system_display_device () = - with_vga_arbiter ~readonly:false - (fun fd -> Unixext.really_write_string fd "decodes none") + with_vga_arbiter ~readonly:false + (fun fd -> Unixext.really_write_string fd "decodes none") let get_system_display_device () = - try - let line = - with_vga_arbiter ~readonly:true (fun fd -> - let data = Unixext.try_read_string ~limit:1024 fd in - List.hd (String.split ~limit:2 '\n' data) - ) - in - (* Example contents of line: - * count:7,PCI:0000:10:00.0,decodes=io+mem,owns=io+mem,locks=none(0:0) *) - let items = String.split ',' line in - List.fold_left - (fun acc item -> - if String.startswith "PCI" item - then Some (Scanf.sscanf item "PCI:%s" 
(fun id -> id)) - else acc) - None items - with _ -> None + try + let line = + with_vga_arbiter ~readonly:true (fun fd -> + let data = Unixext.try_read_string ~limit:1024 fd in + List.hd (String.split ~limit:2 '\n' data) + ) + in + (* Example contents of line: + * count:7,PCI:0000:10:00.0,decodes=io+mem,owns=io+mem,locks=none(0:0) *) + let items = String.split ',' line in + List.fold_left + (fun acc item -> + if String.startswith "PCI" item + then Some (Scanf.sscanf item "PCI:%s" (fun id -> id)) + else acc) + None items + with _ -> None diff --git a/ocaml/xapi/xapi_pci.mli b/ocaml/xapi/xapi_pci.mli index cc4ad5031d8..b9ed1402fdd 100644 --- a/ocaml/xapi/xapi_pci.mli +++ b/ocaml/xapi/xapi_pci.mli @@ -12,13 +12,13 @@ * GNU Lesser General Public License for more details. *) (** Module that defines API functions for PCI objects - *) +*) (** Types of PCI devices. *) type base_class = - | Storage_controller - | Network_controller - | Display_controller + | Storage_controller + | Network_controller + | Display_controller (* Check if an class ID is of a given base class *) val is_class_of_kind : base_class -> int -> bool diff --git a/ocaml/xapi/xapi_pci_helpers.ml b/ocaml/xapi/xapi_pci_helpers.ml index 19b39bd2ee2..26572c5a851 100644 --- a/ocaml/xapi/xapi_pci_helpers.ml +++ b/ocaml/xapi/xapi_pci_helpers.ml @@ -13,50 +13,50 @@ *) type pci_property = { - id: int; - name: string; + id: int; + name: string; } type pci = { - address: string; - vendor: pci_property; - device: pci_property; - pci_class: pci_property; - subsystem_vendor: pci_property option; - subsystem_device: pci_property option; - related: string list; + address: string; + vendor: pci_property; + device: pci_property; + pci_class: pci_property; + subsystem_vendor: pci_property option; + subsystem_device: pci_property option; + related: string list; } let get_host_pcis () = - let open Pci in - with_access (fun access -> - let devs = get_devices access in - List.map (fun d -> - let open Pci_dev in - let 
address_of_dev x = Printf.sprintf "%04x:%02x:%02x.%d" x.domain x.bus x.dev x.func in - let vendor = { id = d.vendor_id; name = lookup_vendor_name access d.vendor_id } in - let device = { id = d.device_id; name = lookup_device_name access d.vendor_id d.device_id } in - let (subsystem_vendor, subsystem_device) = match d.subsystem_id with - | None -> None, None - | Some (sv_id, sd_id) -> - let sv_name = lookup_subsystem_vendor_name access sv_id in - let sd_name = lookup_subsystem_device_name access d.vendor_id d.device_id sv_id sd_id in - Some { id = sv_id; name = sv_name }, Some { id = sd_id; name = sd_name } - in - let pci_class = { id = d.device_class; name = lookup_class_name access d.device_class } in - let related_devs = - List.filter (fun d' -> - let slot x = (x.domain, x.bus, x.dev) in - slot d' = slot d && d' <> d - ) devs in - { address = address_of_dev d; - vendor; device; subsystem_vendor; subsystem_device; pci_class; - related = List.map address_of_dev related_devs; - } - ) devs - ) + let open Pci in + with_access (fun access -> + let devs = get_devices access in + List.map (fun d -> + let open Pci_dev in + let address_of_dev x = Printf.sprintf "%04x:%02x:%02x.%d" x.domain x.bus x.dev x.func in + let vendor = { id = d.vendor_id; name = lookup_vendor_name access d.vendor_id } in + let device = { id = d.device_id; name = lookup_device_name access d.vendor_id d.device_id } in + let (subsystem_vendor, subsystem_device) = match d.subsystem_id with + | None -> None, None + | Some (sv_id, sd_id) -> + let sv_name = lookup_subsystem_vendor_name access sv_id in + let sd_name = lookup_subsystem_device_name access d.vendor_id d.device_id sv_id sd_id in + Some { id = sv_id; name = sv_name }, Some { id = sd_id; name = sd_name } + in + let pci_class = { id = d.device_class; name = lookup_class_name access d.device_class } in + let related_devs = + List.filter (fun d' -> + let slot x = (x.domain, x.bus, x.dev) in + slot d' = slot d && d' <> d + ) devs in + { address = 
address_of_dev d; + vendor; device; subsystem_vendor; subsystem_device; pci_class; + related = List.map address_of_dev related_devs; + } + ) devs + ) let igd_is_whitelisted ~__context pci = - let vendor_id = Db.PCI.get_vendor_id ~__context ~self:pci in - List.mem vendor_id !Xapi_globs.igd_passthru_vendor_whitelist + let vendor_id = Db.PCI.get_vendor_id ~__context ~self:pci in + List.mem vendor_id !Xapi_globs.igd_passthru_vendor_whitelist diff --git a/ocaml/xapi/xapi_periodic_scheduler.ml b/ocaml/xapi/xapi_periodic_scheduler.ml index f85541988c4..9a351b3ff81 100644 --- a/ocaml/xapi/xapi_periodic_scheduler.ml +++ b/ocaml/xapi/xapi_periodic_scheduler.ml @@ -15,7 +15,7 @@ module D = Debug.Make(struct let name="backgroundscheduler" end) open D -open Stdext.Threadext +open Stdext.Threadext type func_ty = OneShot | Periodic of float @@ -32,50 +32,50 @@ let lock = Mutex.create () let add_to_queue ?(signal=true) name ty start newfunc = Mutex.execute lock (fun () -> - Ipq.add queue { Ipq.ev={ func=newfunc; ty=ty; name=name}; Ipq.time=((Unix.gettimeofday ()) +. start) }); + Ipq.add queue { Ipq.ev={ func=newfunc; ty=ty; name=name}; Ipq.time=((Unix.gettimeofday ()) +. 
start) }); if signal then Delay.signal delay let remove_from_queue name = - let index = Ipq.find_p queue (fun {name=n} -> name = n) in - if index > -1 then begin - Ipq.remove queue index - end + let index = Ipq.find_p queue (fun {name=n} -> name = n) in + if index > -1 then begin + Ipq.remove queue index + end let loop () = - debug "Periodic scheduler started"; - try - while true do - let empty = Mutex.execute lock (fun () -> Ipq.is_empty queue) in - if empty - then - (Thread.delay 10.0) (* Doesn't happen often - the queue isn't usually empty *) - else - begin - let next = Mutex.execute lock (fun () -> Ipq.maximum queue) in - let now = Unix.gettimeofday () in - if next.Ipq.time < now then begin - let todo = (Mutex.execute lock (fun () -> Ipq.pop_maximum queue)).Ipq.ev in - (try todo.func () with _ -> ()); - match todo.ty with - | OneShot -> () - | Periodic timer -> add_to_queue ~signal:false todo.name todo.ty timer todo.func - end else begin - (* Sleep until next event. *) - let sleep = next.Ipq.time -. now +. 0.001 in - try - ignore(Delay.wait delay sleep) - with e -> - let detailed_msg = - match e with - | Unix.Unix_error (code, _, _) -> Unix.error_message code - | _ -> "unknown error" - in - error "Could not schedule interruptable delay (%s). Falling back to normal delay. New events may be missed." detailed_msg; - Thread.delay sleep - end - end - done - with _ -> - error "Periodic scheduler died! Xapi will no longer function well and should be restarted." 
+ debug "Periodic scheduler started"; + try + while true do + let empty = Mutex.execute lock (fun () -> Ipq.is_empty queue) in + if empty + then + (Thread.delay 10.0) (* Doesn't happen often - the queue isn't usually empty *) + else + begin + let next = Mutex.execute lock (fun () -> Ipq.maximum queue) in + let now = Unix.gettimeofday () in + if next.Ipq.time < now then begin + let todo = (Mutex.execute lock (fun () -> Ipq.pop_maximum queue)).Ipq.ev in + (try todo.func () with _ -> ()); + match todo.ty with + | OneShot -> () + | Periodic timer -> add_to_queue ~signal:false todo.name todo.ty timer todo.func + end else begin + (* Sleep until next event. *) + let sleep = next.Ipq.time -. now +. 0.001 in + try + ignore(Delay.wait delay sleep) + with e -> + let detailed_msg = + match e with + | Unix.Unix_error (code, _, _) -> Unix.error_message code + | _ -> "unknown error" + in + error "Could not schedule interruptable delay (%s). Falling back to normal delay. New events may be missed." detailed_msg; + Thread.delay sleep + end + end + done + with _ -> + error "Periodic scheduler died! Xapi will no longer function well and should be restarted." diff --git a/ocaml/xapi/xapi_periodic_scheduler.mli b/ocaml/xapi/xapi_periodic_scheduler.mli index b07a13b05c1..d4cb270d4eb 100644 --- a/ocaml/xapi/xapi_periodic_scheduler.mli +++ b/ocaml/xapi/xapi_periodic_scheduler.mli @@ -15,8 +15,8 @@ (** Timer type. *) type func_ty = -| OneShot (** Fire just once *) -| Periodic of float (** Fire periodically with a given period in seconds *) + | OneShot (** Fire just once *) + | Periodic of float (** Fire periodically with a given period in seconds *) (** Start a new timer. 
*) val add_to_queue : diff --git a/ocaml/xapi/xapi_periodic_scheduler_init.ml b/ocaml/xapi/xapi_periodic_scheduler_init.ml index d201a552926..2efc25da8b4 100644 --- a/ocaml/xapi/xapi_periodic_scheduler_init.ml +++ b/ocaml/xapi/xapi_periodic_scheduler_init.ml @@ -22,12 +22,12 @@ let register () = let master = Pool_role.is_master () in (* blob/message/rrd file syncing - sync once a day *) - let sync_timer = - if Xapi_fist.reduce_blob_sync_interval then 60.0 *. 5.0 else !Xapi_globs.pool_data_sync_interval in + let sync_timer = + if Xapi_fist.reduce_blob_sync_interval then 60.0 *. 5.0 else !Xapi_globs.pool_data_sync_interval in let sync_func () = Xapi_sync.do_sync () in let sync_delay = - (* 10 mins if fist point there - to ensure rrd sync happens first *) + (* 10 mins if fist point there - to ensure rrd sync happens first *) if Xapi_fist.reduce_blob_sync_interval then 60.0 *. 10.0 else 7200. in (* Heartbeat to show the queue is still running - will be more useful when there's less logging! *) @@ -35,18 +35,18 @@ let register () = let hb_func () = debug "Periodic scheduler heartbeat" in (* Periodic backup of RRDs *) - let rrdbackup_timer = + let rrdbackup_timer = if Xapi_fist.reduce_rrd_backup_interval then 60.0 *. 5.0 else !Xapi_globs.rrd_backup_interval in - let rrdbackup_func () = - Server_helpers.exec_with_new_task "rrdbackup_func" - (fun __context -> - let hosts = Db.Host.get_all ~__context in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - ignore(List.fold_left (fun delay host -> Client.Client.Host.backup_rrds rpc session_id host delay; (delay +. 60.0)) 0.0 hosts)) - ) - in - let rrdbackup_delay = + let rrdbackup_func () = + Server_helpers.exec_with_new_task "rrdbackup_func" + (fun __context -> + let hosts = Db.Host.get_all ~__context in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + ignore(List.fold_left (fun delay host -> Client.Client.Host.backup_rrds rpc session_id host delay; (delay +. 
60.0)) 0.0 hosts)) + ) + in + let rrdbackup_delay = if Xapi_fist.reduce_rrd_backup_interval then 60.0 *. 6.0 else 3600.0 in let session_revalidation_func () = @@ -61,10 +61,10 @@ let register () = if master then Xapi_periodic_scheduler.add_to_queue "Synchronising RRDs/messages" (Xapi_periodic_scheduler.Periodic sync_timer) sync_delay sync_func; if master then Xapi_periodic_scheduler.add_to_queue "Backing up RRDs" (Xapi_periodic_scheduler.Periodic rrdbackup_timer) rrdbackup_delay rrdbackup_func; - if master then Xapi_periodic_scheduler.add_to_queue "Revalidating externally-authenticated sessions" - (Xapi_periodic_scheduler.Periodic !Xapi_globs.session_revalidation_interval) session_revalidation_delay session_revalidation_func; - if master then Xapi_periodic_scheduler.add_to_queue "Trying to update subjects' info using external directory service (if any)" - (Xapi_periodic_scheduler.Periodic !Xapi_globs.update_all_subjects_interval) update_all_subjects_delay update_all_subjects_func; + if master then Xapi_periodic_scheduler.add_to_queue "Revalidating externally-authenticated sessions" + (Xapi_periodic_scheduler.Periodic !Xapi_globs.session_revalidation_interval) session_revalidation_delay session_revalidation_func; + if master then Xapi_periodic_scheduler.add_to_queue "Trying to update subjects' info using external directory service (if any)" + (Xapi_periodic_scheduler.Periodic !Xapi_globs.update_all_subjects_interval) update_all_subjects_delay update_all_subjects_func; Xapi_periodic_scheduler.add_to_queue "Periodic scheduler heartbeat" (Xapi_periodic_scheduler.Periodic hb_timer) 240.0 hb_func; Xapi_periodic_scheduler.add_to_queue "Update monitor configuration" (Xapi_periodic_scheduler.Periodic 3600.0) 3600.0 Monitor_master.update_configuration_from_master diff --git a/ocaml/xapi/xapi_pgpu.ml b/ocaml/xapi/xapi_pgpu.ml index 03b79250d5f..4e1445d9483 100644 --- a/ocaml/xapi/xapi_pgpu.ml +++ b/ocaml/xapi/xapi_pgpu.ml @@ -19,291 +19,291 @@ open Listext open Threadext let 
calculate_max_capacities ~__context ~pCI ~size ~supported_VGPU_types = - List.map - (fun vgpu_type -> - let max_capacity = - if Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type - then Db.PCI.get_functions ~__context ~self:pCI - else Int64.div size (Db.VGPU_type.get_size ~__context ~self:vgpu_type) - in - vgpu_type, max_capacity) - supported_VGPU_types + List.map + (fun vgpu_type -> + let max_capacity = + if Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type + then Db.PCI.get_functions ~__context ~self:pCI + else Int64.div size (Db.VGPU_type.get_size ~__context ~self:vgpu_type) + in + vgpu_type, max_capacity) + supported_VGPU_types let create ~__context ~pCI ~gPU_group ~host ~other_config - ~supported_VGPU_types ~size ~dom0_access - ~is_system_display_device = - let pgpu = Ref.make () in - let uuid = Uuidm.to_string (Uuidm.create `V4) in - let supported_VGPU_max_capacities = - calculate_max_capacities ~__context ~pCI ~size ~supported_VGPU_types - in - Db.PGPU.create ~__context ~ref:pgpu ~uuid ~pCI - ~gPU_group ~host ~other_config ~size - ~supported_VGPU_max_capacities ~dom0_access - ~is_system_display_device; - Db.PGPU.set_supported_VGPU_types ~__context - ~self:pgpu ~value:supported_VGPU_types; - Db.PGPU.set_enabled_VGPU_types ~__context - ~self:pgpu ~value:supported_VGPU_types; - debug "PGPU ref='%s' created (host = '%s')" (Ref.string_of pgpu) (Ref.string_of host); - pgpu + ~supported_VGPU_types ~size ~dom0_access + ~is_system_display_device = + let pgpu = Ref.make () in + let uuid = Uuidm.to_string (Uuidm.create `V4) in + let supported_VGPU_max_capacities = + calculate_max_capacities ~__context ~pCI ~size ~supported_VGPU_types + in + Db.PGPU.create ~__context ~ref:pgpu ~uuid ~pCI + ~gPU_group ~host ~other_config ~size + ~supported_VGPU_max_capacities ~dom0_access + ~is_system_display_device; + Db.PGPU.set_supported_VGPU_types ~__context + ~self:pgpu ~value:supported_VGPU_types; + Db.PGPU.set_enabled_VGPU_types ~__context + ~self:pgpu 
~value:supported_VGPU_types; + debug "PGPU ref='%s' created (host = '%s')" (Ref.string_of pgpu) (Ref.string_of host); + pgpu let sync_pci_hidden ~__context ~pgpu ~pci = - (* Determine whether dom0 can access the GPU. On boot, we determine - * this from the boot config and put the result in the database. - * Otherwise, we determine this from the database. *) - if !Xapi_globs.on_system_boot - then begin - let is_pci_hidden = Pciops.is_pci_hidden ~__context pci in - let dom0_access = - if is_pci_hidden - then `disabled - else `enabled - in - Db.PGPU.set_dom0_access ~__context ~self:pgpu ~value:dom0_access; - is_pci_hidden - end else begin - match Db.PGPU.get_dom0_access ~__context ~self:pgpu with - | `disabled | `enable_on_reboot -> true - | `enabled | `disable_on_reboot -> false - end + (* Determine whether dom0 can access the GPU. On boot, we determine + * this from the boot config and put the result in the database. + * Otherwise, we determine this from the database. *) + if !Xapi_globs.on_system_boot + then begin + let is_pci_hidden = Pciops.is_pci_hidden ~__context pci in + let dom0_access = + if is_pci_hidden + then `disabled + else `enabled + in + Db.PGPU.set_dom0_access ~__context ~self:pgpu ~value:dom0_access; + is_pci_hidden + end else begin + match Db.PGPU.get_dom0_access ~__context ~self:pgpu with + | `disabled | `enable_on_reboot -> true + | `enabled | `disable_on_reboot -> false + end let update_gpus ~__context ~host = - let system_display_device = Xapi_pci.get_system_display_device () in - let existing_pgpus = List.filter (fun (rf, rc) -> rc.API.pGPU_host = host) (Db.PGPU.get_all_records ~__context) in - let pcis = - List.filter (fun self -> - let class_id = Db.PCI.get_class_id ~__context ~self in - Db.PCI.get_host ~__context ~self = host - && Xapi_pci.(is_class_of_kind Display_controller (int_of_id class_id)) - ) (Db.PCI.get_all ~__context) in - let is_host_display_enabled = - match Db.Host.get_display ~__context ~self:host with - | `enabled | 
`disable_on_reboot -> true - | `disabled | `enable_on_reboot -> false - in - let rec find_or_create cur = function - | [] -> cur - | pci :: remaining_pcis -> - let pci_addr = Some (Db.PCI.get_pci_id ~__context ~self:pci) in - let is_system_display_device = (system_display_device = pci_addr) in - let pgpu = - try - let (rf, rc) = List.find (fun (_, rc) -> rc.API.pGPU_PCI = pci) existing_pgpus in - let is_pci_hidden = sync_pci_hidden ~__context ~pgpu:rf ~pci in - (* Now we've determined whether the PCI is hidden, we can work out the - * list of supported VGPU types. *) - let supported_VGPU_types = - Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden - in - let old_supported_VGPU_types = - Db.PGPU.get_supported_VGPU_types ~__context ~self:rf in - let old_enabled_VGPU_types = - Db.PGPU.get_enabled_VGPU_types ~__context ~self:rf in - (* Pick up any new supported vGPU configs on the host *) - Db.PGPU.set_supported_VGPU_types ~__context ~self:rf ~value:supported_VGPU_types; - (* Calculate the maximum capacities of the supported types. *) - let max_capacities = - calculate_max_capacities - ~__context - ~pCI:pci - ~size:(Db.PGPU.get_size ~__context ~self:rf) - ~supported_VGPU_types - in - Db.PGPU.set_supported_VGPU_max_capacities ~__context - ~self:rf ~value:max_capacities; - (* Enable any new supported types. *) - let new_types_to_enable = - List.filter - (fun t -> not (List.mem t old_supported_VGPU_types)) - supported_VGPU_types - in - (* Disable any types which are no longer supported. 
*) - let pruned_enabled_types = - List.filter - (fun t -> List.mem t supported_VGPU_types) - old_enabled_VGPU_types - in - Db.PGPU.set_enabled_VGPU_types ~__context - ~self:rf - ~value:(pruned_enabled_types @ new_types_to_enable); - Db.PGPU.set_is_system_display_device ~__context - ~self:rf - ~value:is_system_display_device; - (rf, rc) - with Not_found -> - (* If a new PCI has appeared then we know this is a system boot. - * We determine whether dom0 can access the device by looking in the - * boot config. *) - let is_pci_hidden = Pciops.is_pci_hidden ~__context pci in - let supported_VGPU_types = - Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden - in - let dom0_access = - if is_pci_hidden - then `disabled - else `enabled - in - let self = create ~__context ~pCI:pci - ~gPU_group:(Ref.null) ~host ~other_config:[] - ~supported_VGPU_types - ~size:Constants.pgpu_default_size ~dom0_access - ~is_system_display_device - in - let group = Xapi_gpu_group.find_or_create ~__context self in - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Client.PGPU.set_GPU_group rpc session_id self group); - self, Db.PGPU.get_record ~__context ~self - in - find_or_create (pgpu :: cur) remaining_pcis - in - let current_pgpus = find_or_create [] pcis in - let obsolete_pgpus = List.set_difference existing_pgpus current_pgpus in - List.iter (fun (self, _) -> Db.PGPU.destroy ~__context ~self) obsolete_pgpus; - (* Update the supported/enabled VGPU types on any affected GPU groups. 
*) - let groups_to_update = List.setify - (List.map - (fun (_, pgpu_rec) -> pgpu_rec.API.pGPU_GPU_group) - (current_pgpus @ obsolete_pgpus)) - in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter - (fun gpu_group -> - let open Client in - Client.GPU_group.update_enabled_VGPU_types - ~rpc ~session_id ~self:gpu_group; - Client.GPU_group.update_supported_VGPU_types - ~rpc ~session_id ~self:gpu_group) - groups_to_update) + let system_display_device = Xapi_pci.get_system_display_device () in + let existing_pgpus = List.filter (fun (rf, rc) -> rc.API.pGPU_host = host) (Db.PGPU.get_all_records ~__context) in + let pcis = + List.filter (fun self -> + let class_id = Db.PCI.get_class_id ~__context ~self in + Db.PCI.get_host ~__context ~self = host + && Xapi_pci.(is_class_of_kind Display_controller (int_of_id class_id)) + ) (Db.PCI.get_all ~__context) in + let is_host_display_enabled = + match Db.Host.get_display ~__context ~self:host with + | `enabled | `disable_on_reboot -> true + | `disabled | `enable_on_reboot -> false + in + let rec find_or_create cur = function + | [] -> cur + | pci :: remaining_pcis -> + let pci_addr = Some (Db.PCI.get_pci_id ~__context ~self:pci) in + let is_system_display_device = (system_display_device = pci_addr) in + let pgpu = + try + let (rf, rc) = List.find (fun (_, rc) -> rc.API.pGPU_PCI = pci) existing_pgpus in + let is_pci_hidden = sync_pci_hidden ~__context ~pgpu:rf ~pci in + (* Now we've determined whether the PCI is hidden, we can work out the + * list of supported VGPU types. 
*) + let supported_VGPU_types = + Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci + ~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden + in + let old_supported_VGPU_types = + Db.PGPU.get_supported_VGPU_types ~__context ~self:rf in + let old_enabled_VGPU_types = + Db.PGPU.get_enabled_VGPU_types ~__context ~self:rf in + (* Pick up any new supported vGPU configs on the host *) + Db.PGPU.set_supported_VGPU_types ~__context ~self:rf ~value:supported_VGPU_types; + (* Calculate the maximum capacities of the supported types. *) + let max_capacities = + calculate_max_capacities + ~__context + ~pCI:pci + ~size:(Db.PGPU.get_size ~__context ~self:rf) + ~supported_VGPU_types + in + Db.PGPU.set_supported_VGPU_max_capacities ~__context + ~self:rf ~value:max_capacities; + (* Enable any new supported types. *) + let new_types_to_enable = + List.filter + (fun t -> not (List.mem t old_supported_VGPU_types)) + supported_VGPU_types + in + (* Disable any types which are no longer supported. *) + let pruned_enabled_types = + List.filter + (fun t -> List.mem t supported_VGPU_types) + old_enabled_VGPU_types + in + Db.PGPU.set_enabled_VGPU_types ~__context + ~self:rf + ~value:(pruned_enabled_types @ new_types_to_enable); + Db.PGPU.set_is_system_display_device ~__context + ~self:rf + ~value:is_system_display_device; + (rf, rc) + with Not_found -> + (* If a new PCI has appeared then we know this is a system boot. + * We determine whether dom0 can access the device by looking in the + * boot config. 
*) + let is_pci_hidden = Pciops.is_pci_hidden ~__context pci in + let supported_VGPU_types = + Xapi_vgpu_type.find_or_create_supported_types ~__context ~pci + ~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden + in + let dom0_access = + if is_pci_hidden + then `disabled + else `enabled + in + let self = create ~__context ~pCI:pci + ~gPU_group:(Ref.null) ~host ~other_config:[] + ~supported_VGPU_types + ~size:Constants.pgpu_default_size ~dom0_access + ~is_system_display_device + in + let group = Xapi_gpu_group.find_or_create ~__context self in + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Client.PGPU.set_GPU_group rpc session_id self group); + self, Db.PGPU.get_record ~__context ~self + in + find_or_create (pgpu :: cur) remaining_pcis + in + let current_pgpus = find_or_create [] pcis in + let obsolete_pgpus = List.set_difference existing_pgpus current_pgpus in + List.iter (fun (self, _) -> Db.PGPU.destroy ~__context ~self) obsolete_pgpus; + (* Update the supported/enabled VGPU types on any affected GPU groups. 
*) + let groups_to_update = List.setify + (List.map + (fun (_, pgpu_rec) -> pgpu_rec.API.pGPU_GPU_group) + (current_pgpus @ obsolete_pgpus)) + in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter + (fun gpu_group -> + let open Client in + Client.GPU_group.update_enabled_VGPU_types + ~rpc ~session_id ~self:gpu_group; + Client.GPU_group.update_supported_VGPU_types + ~rpc ~session_id ~self:gpu_group) + groups_to_update) let update_group_enabled_VGPU_types ~__context ~self = - let group = Db.PGPU.get_GPU_group ~__context ~self in - if Db.is_valid_ref __context group - then Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group + let group = Db.PGPU.get_GPU_group ~__context ~self in + if Db.is_valid_ref __context group + then Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group let pgpu_m = Mutex.create () let add_enabled_VGPU_types ~__context ~self ~value = - Mutex.execute pgpu_m (fun () -> - Xapi_pgpu_helpers.assert_VGPU_type_supported ~__context - ~self ~vgpu_type:value; - Db.PGPU.add_enabled_VGPU_types ~__context ~self ~value; - update_group_enabled_VGPU_types ~__context ~self - ) + Mutex.execute pgpu_m (fun () -> + Xapi_pgpu_helpers.assert_VGPU_type_supported ~__context + ~self ~vgpu_type:value; + Db.PGPU.add_enabled_VGPU_types ~__context ~self ~value; + update_group_enabled_VGPU_types ~__context ~self + ) let remove_enabled_VGPU_types ~__context ~self ~value = - Mutex.execute pgpu_m (fun () -> - Xapi_pgpu_helpers.assert_no_resident_VGPUs_of_type ~__context - ~self ~vgpu_type:value; - Db.PGPU.remove_enabled_VGPU_types ~__context ~self ~value; - update_group_enabled_VGPU_types ~__context ~self - ) + Mutex.execute pgpu_m (fun () -> + Xapi_pgpu_helpers.assert_no_resident_VGPUs_of_type ~__context + ~self ~vgpu_type:value; + Db.PGPU.remove_enabled_VGPU_types ~__context ~self ~value; + update_group_enabled_VGPU_types ~__context ~self + ) let set_enabled_VGPU_types ~__context ~self ~value = - Mutex.execute pgpu_m (fun () -> - 
let current_types = Db.PGPU.get_enabled_VGPU_types ~__context ~self in - let to_enable = List.set_difference value current_types - and to_disable = List.set_difference current_types value in - List.iter (fun vgpu_type -> - Xapi_pgpu_helpers.assert_VGPU_type_supported ~__context ~self ~vgpu_type) - to_enable; - List.iter (fun vgpu_type -> - Xapi_pgpu_helpers.assert_no_resident_VGPUs_of_type ~__context ~self ~vgpu_type) - to_disable; - Db.PGPU.set_enabled_VGPU_types ~__context ~self ~value; - update_group_enabled_VGPU_types ~__context ~self - ) + Mutex.execute pgpu_m (fun () -> + let current_types = Db.PGPU.get_enabled_VGPU_types ~__context ~self in + let to_enable = List.set_difference value current_types + and to_disable = List.set_difference current_types value in + List.iter (fun vgpu_type -> + Xapi_pgpu_helpers.assert_VGPU_type_supported ~__context ~self ~vgpu_type) + to_enable; + List.iter (fun vgpu_type -> + Xapi_pgpu_helpers.assert_no_resident_VGPUs_of_type ~__context ~self ~vgpu_type) + to_disable; + Db.PGPU.set_enabled_VGPU_types ~__context ~self ~value; + update_group_enabled_VGPU_types ~__context ~self + ) let set_GPU_group ~__context ~self ~value = - debug "Move PGPU %s -> GPU group %s" (Db.PGPU.get_uuid ~__context ~self) - (Db.GPU_group.get_uuid ~__context ~self:value); - Mutex.execute pgpu_m (fun () -> - (* Precondition: PGPU has no resident VGPUs *) - let resident_vgpus = Db.PGPU.get_resident_VGPUs ~__context ~self in - if resident_vgpus <> [] then begin - let resident_vms = List.map - (fun self -> Db.VGPU.get_VM ~__context ~self) resident_vgpus in - raise (Api_errors.Server_error (Api_errors.pgpu_in_use_by_vm, - List.map Ref.string_of resident_vms)) - end; + debug "Move PGPU %s -> GPU group %s" (Db.PGPU.get_uuid ~__context ~self) + (Db.GPU_group.get_uuid ~__context ~self:value); + Mutex.execute pgpu_m (fun () -> + (* Precondition: PGPU has no resident VGPUs *) + let resident_vgpus = Db.PGPU.get_resident_VGPUs ~__context ~self in + if resident_vgpus 
<> [] then begin + let resident_vms = List.map + (fun self -> Db.VGPU.get_VM ~__context ~self) resident_vgpus in + raise (Api_errors.Server_error (Api_errors.pgpu_in_use_by_vm, + List.map Ref.string_of resident_vms)) + end; - let check_compatibility gpu_type group_types = - match group_types with - | [] -> true, [gpu_type] - | _ -> List.mem gpu_type group_types, group_types in + let check_compatibility gpu_type group_types = + match group_types with + | [] -> true, [gpu_type] + | _ -> List.mem gpu_type group_types, group_types in - let pci = Db.PGPU.get_PCI ~__context ~self in - let gpu_type = Xapi_pci.string_of_pci ~__context ~self:pci - and group_types = Db.GPU_group.get_GPU_types ~__context ~self:value in - match check_compatibility gpu_type group_types with - | true, new_types -> - let old_group = Db.PGPU.get_GPU_group ~__context ~self in - Db.PGPU.set_GPU_group ~__context ~self ~value; - (* Group inherits the device type *) - Db.GPU_group.set_GPU_types ~__context ~self:value ~value:new_types; - debug "PGPU %s moved to GPU group %s. Group GPU types = [ %s ]." - (Db.PGPU.get_uuid ~__context ~self) - (Db.GPU_group.get_uuid ~__context ~self:value) - (String.concat "; " new_types); - (* Update the old and new groups' cached lists of VGPU_types. 
*) - if Db.is_valid_ref __context old_group - then begin - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:old_group; - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:old_group; - end; - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:value; - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:value - | false, _ -> - raise (Api_errors.Server_error - (Api_errors.pgpu_not_compatible_with_gpu_group, - [gpu_type; "[" ^ String.concat ", " group_types ^ "]"])) - ) + let pci = Db.PGPU.get_PCI ~__context ~self in + let gpu_type = Xapi_pci.string_of_pci ~__context ~self:pci + and group_types = Db.GPU_group.get_GPU_types ~__context ~self:value in + match check_compatibility gpu_type group_types with + | true, new_types -> + let old_group = Db.PGPU.get_GPU_group ~__context ~self in + Db.PGPU.set_GPU_group ~__context ~self ~value; + (* Group inherits the device type *) + Db.GPU_group.set_GPU_types ~__context ~self:value ~value:new_types; + debug "PGPU %s moved to GPU group %s. Group GPU types = [ %s ]." + (Db.PGPU.get_uuid ~__context ~self) + (Db.GPU_group.get_uuid ~__context ~self:value) + (String.concat "; " new_types); + (* Update the old and new groups' cached lists of VGPU_types. 
*) + if Db.is_valid_ref __context old_group + then begin + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:old_group; + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:old_group; + end; + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:value; + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:value + | false, _ -> + raise (Api_errors.Server_error + (Api_errors.pgpu_not_compatible_with_gpu_group, + [gpu_type; "[" ^ String.concat ", " group_types ^ "]"])) + ) let get_remaining_capacity ~__context ~self ~vgpu_type = - match Xapi_pgpu_helpers.get_remaining_capacity_internal ~__context ~self ~vgpu_type with - | Either.Left _ -> 0L - | Either.Right capacity -> capacity + match Xapi_pgpu_helpers.get_remaining_capacity_internal ~__context ~self ~vgpu_type with + | Either.Left _ -> 0L + | Either.Right capacity -> capacity let assert_can_run_VGPU ~__context ~self ~vgpu = - let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in - Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type ~__context ~self ~vgpu_type + let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in + Xapi_pgpu_helpers.assert_capacity_exists_for_VGPU_type ~__context ~self ~vgpu_type let update_dom0_access ~__context ~self ~action = - let db_current = Db.PGPU.get_dom0_access ~__context ~self in - let db_new = match db_current, action with - | `enabled, `enable - | `disable_on_reboot, `enable -> `enabled - | `disabled, `enable - | `enable_on_reboot, `enable -> `enable_on_reboot - | `enabled, `disable - | `disable_on_reboot, `disable -> `disable_on_reboot - | `disabled, `disable - | `enable_on_reboot, `disable -> `disabled - in + let db_current = Db.PGPU.get_dom0_access ~__context ~self in + let db_new = match db_current, action with + | `enabled, `enable + | `disable_on_reboot, `enable -> `enabled + | `disabled, `enable + | `enable_on_reboot, `enable -> `enable_on_reboot + | `enabled, `disable + | `disable_on_reboot, `disable -> `disable_on_reboot + | `disabled, 
`disable + | `enable_on_reboot, `disable -> `disabled + in - let pci = Db.PGPU.get_PCI ~__context ~self in - begin - match db_new with - | `enabled - | `enable_on_reboot -> Pciops.unhide_pci ~__context pci - | `disabled - | `disable_on_reboot -> Pciops.hide_pci ~__context pci - end; + let pci = Db.PGPU.get_PCI ~__context ~self in + begin + match db_new with + | `enabled + | `enable_on_reboot -> Pciops.unhide_pci ~__context pci + | `disabled + | `disable_on_reboot -> Pciops.hide_pci ~__context pci + end; - Db.PGPU.set_dom0_access ~__context ~self ~value:db_new; - db_new + Db.PGPU.set_dom0_access ~__context ~self ~value:db_new; + db_new let enable_dom0_access ~__context ~self = - update_dom0_access ~__context ~self ~action:`enable + update_dom0_access ~__context ~self ~action:`enable let disable_dom0_access ~__context ~self = - if not (Pool_features.is_enabled ~__context Features.Integrated_GPU) - then raise Api_errors.(Server_error (feature_restricted, [])); - update_dom0_access ~__context ~self ~action:`disable + if not (Pool_features.is_enabled ~__context Features.Integrated_GPU) + then raise Api_errors.(Server_error (feature_restricted, [])); + update_dom0_access ~__context ~self ~action:`disable diff --git a/ocaml/xapi/xapi_pgpu.mli b/ocaml/xapi/xapi_pgpu.mli index 09c2bbbeb73..eabcc088108 100644 --- a/ocaml/xapi/xapi_pgpu.mli +++ b/ocaml/xapi/xapi_pgpu.mli @@ -13,38 +13,38 @@ *) (** Module that defines API functions for PGPU objects * @group Graphics - *) +*) (** Synchronise the PGPU objects in the database with the actual devices in the host. *) val update_gpus : __context:Context.t -> host:API.ref_host -> unit (** Enable one of the VGPU types supported by the PGPU. *) val add_enabled_VGPU_types : __context:Context.t -> - self:API.ref_PGPU -> value:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> value:API.ref_VGPU_type -> unit (** Disable one of the VGPU types supported by the PGPU. 
*) val remove_enabled_VGPU_types : __context:Context.t -> - self:API.ref_PGPU -> value:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> value:API.ref_VGPU_type -> unit (** Enable a set of VGPU types supported by the PGPU. *) val set_enabled_VGPU_types : __context:Context.t -> - self:API.ref_PGPU -> value:API.ref_VGPU_type list -> unit + self:API.ref_PGPU -> value:API.ref_VGPU_type list -> unit (** Move the PGPU to a new GPU group. *) val set_GPU_group : __context:Context.t -> self:API.ref_PGPU -> - value: API.ref_GPU_group -> unit + value: API.ref_GPU_group -> unit (* Return the number of VGPUs of the specified type for which capacity * remains on the PGPU. *) val get_remaining_capacity : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> int64 + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> int64 (** Check whether a VGPU can run on a particular PGPU. *) val assert_can_run_VGPU : __context:Context.t -> self:API.ref_PGPU -> - vgpu:API.ref_VGPU -> unit + vgpu:API.ref_VGPU -> unit val enable_dom0_access : __context:Context.t -> self:API.ref_PGPU -> - API.pgpu_dom0_access + API.pgpu_dom0_access val disable_dom0_access : __context:Context.t -> self:API.ref_PGPU -> - API.pgpu_dom0_access + API.pgpu_dom0_access diff --git a/ocaml/xapi/xapi_pgpu_helpers.ml b/ocaml/xapi/xapi_pgpu_helpers.ml index 49cf7b003e8..38993fcb3ab 100644 --- a/ocaml/xapi/xapi_pgpu_helpers.ml +++ b/ocaml/xapi/xapi_pgpu_helpers.ml @@ -17,123 +17,123 @@ open D open Stdext let assert_VGPU_type_supported ~__context ~self ~vgpu_type = - let supported_VGPU_types = - Db.PGPU.get_supported_VGPU_types ~__context ~self - in - if not (List.mem vgpu_type supported_VGPU_types) - then raise (Api_errors.Server_error - (Api_errors.vgpu_type_not_supported, - List.map Ref.string_of (vgpu_type :: supported_VGPU_types))) + let supported_VGPU_types = + Db.PGPU.get_supported_VGPU_types ~__context ~self + in + if not (List.mem vgpu_type supported_VGPU_types) + then raise 
(Api_errors.Server_error + (Api_errors.vgpu_type_not_supported, + List.map Ref.string_of (vgpu_type :: supported_VGPU_types))) let assert_VGPU_type_enabled ~__context ~self ~vgpu_type = - assert_VGPU_type_supported ~__context ~self ~vgpu_type; - let enabled_VGPU_types = - Db.PGPU.get_enabled_VGPU_types ~__context ~self - in - if not (List.mem vgpu_type enabled_VGPU_types) - then raise (Api_errors.Server_error - (Api_errors.vgpu_type_not_enabled, - List.map Ref.string_of (vgpu_type :: enabled_VGPU_types))) + assert_VGPU_type_supported ~__context ~self ~vgpu_type; + let enabled_VGPU_types = + Db.PGPU.get_enabled_VGPU_types ~__context ~self + in + if not (List.mem vgpu_type enabled_VGPU_types) + then raise (Api_errors.Server_error + (Api_errors.vgpu_type_not_enabled, + List.map Ref.string_of (vgpu_type :: enabled_VGPU_types))) let get_scheduled_VGPUs ~__context ~self = - let open Db_filter_types in - Db.VGPU.get_refs_where ~__context ~expr:(Eq - (Field "scheduled_to_be_resident_on", Literal (Ref.string_of self))) + let open Db_filter_types in + Db.VGPU.get_refs_where ~__context ~expr:(Eq + (Field "scheduled_to_be_resident_on", Literal (Ref.string_of self))) (* Get this list of VGPUs which are either resident on, or scheduled to be * resident on, this PGPU. 
*) let get_allocated_VGPUs ~__context ~self = - let resident_VGPUs = Db.PGPU.get_resident_VGPUs ~__context ~self in - let scheduled_VGPUs = get_scheduled_VGPUs ~__context ~self in - resident_VGPUs @ scheduled_VGPUs + let resident_VGPUs = Db.PGPU.get_resident_VGPUs ~__context ~self in + let scheduled_VGPUs = get_scheduled_VGPUs ~__context ~self in + resident_VGPUs @ scheduled_VGPUs let assert_VGPU_type_allowed ~__context ~self ~vgpu_type = - assert_VGPU_type_enabled ~__context ~self ~vgpu_type; - (match get_allocated_VGPUs ~__context ~self with - | [] -> () - | resident_VGPU :: _ -> - let running_type = - Db.VGPU.get_type ~__context ~self:resident_VGPU - in - if running_type <> vgpu_type - then raise (Api_errors.Server_error ( - Api_errors.vgpu_type_not_compatible_with_running_type, - [ - Ref.string_of self; - Ref.string_of vgpu_type; - Ref.string_of running_type; - ]))) + assert_VGPU_type_enabled ~__context ~self ~vgpu_type; + (match get_allocated_VGPUs ~__context ~self with + | [] -> () + | resident_VGPU :: _ -> + let running_type = + Db.VGPU.get_type ~__context ~self:resident_VGPU + in + if running_type <> vgpu_type + then raise (Api_errors.Server_error ( + Api_errors.vgpu_type_not_compatible_with_running_type, + [ + Ref.string_of self; + Ref.string_of vgpu_type; + Ref.string_of running_type; + ]))) let assert_no_resident_VGPUs_of_type ~__context ~self ~vgpu_type = - let open Db_filter_types in - match Db.VGPU.get_records_where ~__context - ~expr:(And - (Eq (Field "resident_on", Literal (Ref.string_of self)), - Eq (Field "type", Literal (Ref.string_of vgpu_type)))) - with - | [] -> () - | vgpus_and_records -> - let vms = - List.map - (fun (vgpu, _) -> Db.VGPU.get_VM ~__context ~self:vgpu) - vgpus_and_records - in - raise (Api_errors.Server_error - (Api_errors.pgpu_in_use_by_vm, List.map Ref.string_of vms)) + let open Db_filter_types in + match Db.VGPU.get_records_where ~__context + ~expr:(And + (Eq (Field "resident_on", Literal (Ref.string_of self)), + Eq (Field 
"type", Literal (Ref.string_of vgpu_type)))) + with + | [] -> () + | vgpus_and_records -> + let vms = + List.map + (fun (vgpu, _) -> Db.VGPU.get_VM ~__context ~self:vgpu) + vgpus_and_records + in + raise (Api_errors.Server_error + (Api_errors.pgpu_in_use_by_vm, List.map Ref.string_of vms)) let get_remaining_capacity_internal ~__context ~self ~vgpu_type = - try - assert_VGPU_type_allowed ~__context ~self ~vgpu_type; - let convert_capacity capacity = - if capacity > 0L - then Either.Right capacity - else Either.Left - (Api_errors.Server_error - (Api_errors.pgpu_insufficient_capacity_for_vgpu, [ - Ref.string_of self; - Ref.string_of vgpu_type - ])) - in - if Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type - then begin - (* For passthrough VGPUs, we check that there are functions available, - * and subtract from this list the number of VGPUs scheduled to run on - * this PGPU. *) - let pci = Db.PGPU.get_PCI ~__context ~self in - let scheduled_VGPUs = get_scheduled_VGPUs ~__context ~self in - convert_capacity - (Int64.of_int ( - (Pciops.get_free_functions ~__context pci) - - (List.length scheduled_VGPUs))) - end else begin - (* For virtual VGPUs, we calculate the number of times the VGPU_type's - * size fits into the PGPU's (size - utilisation). 
*) - let pgpu_size = Db.PGPU.get_size ~__context ~self in - let utilisation = - List.fold_left - (fun acc vgpu -> - let _type = Db.VGPU.get_type ~__context ~self:vgpu in - let vgpu_size = - Db.VGPU_type.get_size ~__context ~self:_type - in - Int64.add acc vgpu_size) - 0L (get_allocated_VGPUs ~__context ~self) - in - let new_vgpu_size = - Db.VGPU_type.get_size ~__context ~self:vgpu_type - in - convert_capacity - (Int64.div (Int64.sub pgpu_size utilisation) new_vgpu_size) - end - with e -> - Either.Left e + try + assert_VGPU_type_allowed ~__context ~self ~vgpu_type; + let convert_capacity capacity = + if capacity > 0L + then Either.Right capacity + else Either.Left + (Api_errors.Server_error + (Api_errors.pgpu_insufficient_capacity_for_vgpu, [ + Ref.string_of self; + Ref.string_of vgpu_type + ])) + in + if Xapi_vgpu_type.requires_passthrough ~__context ~self:vgpu_type + then begin + (* For passthrough VGPUs, we check that there are functions available, + * and subtract from this list the number of VGPUs scheduled to run on + * this PGPU. *) + let pci = Db.PGPU.get_PCI ~__context ~self in + let scheduled_VGPUs = get_scheduled_VGPUs ~__context ~self in + convert_capacity + (Int64.of_int ( + (Pciops.get_free_functions ~__context pci) - + (List.length scheduled_VGPUs))) + end else begin + (* For virtual VGPUs, we calculate the number of times the VGPU_type's + * size fits into the PGPU's (size - utilisation). 
*) + let pgpu_size = Db.PGPU.get_size ~__context ~self in + let utilisation = + List.fold_left + (fun acc vgpu -> + let _type = Db.VGPU.get_type ~__context ~self:vgpu in + let vgpu_size = + Db.VGPU_type.get_size ~__context ~self:_type + in + Int64.add acc vgpu_size) + 0L (get_allocated_VGPUs ~__context ~self) + in + let new_vgpu_size = + Db.VGPU_type.get_size ~__context ~self:vgpu_type + in + convert_capacity + (Int64.div (Int64.sub pgpu_size utilisation) new_vgpu_size) + end + with e -> + Either.Left e let get_remaining_capacity ~__context ~self ~vgpu_type = - match get_remaining_capacity_internal ~__context ~self ~vgpu_type with - | Either.Left _ -> 0L - | Either.Right capacity -> capacity + match get_remaining_capacity_internal ~__context ~self ~vgpu_type with + | Either.Left _ -> 0L + | Either.Right capacity -> capacity let assert_capacity_exists_for_VGPU_type ~__context ~self ~vgpu_type = - match get_remaining_capacity_internal ~__context ~self ~vgpu_type with - | Either.Left e -> raise e - | Either.Right capacity -> () + match get_remaining_capacity_internal ~__context ~self ~vgpu_type with + | Either.Left e -> raise e + | Either.Right capacity -> () diff --git a/ocaml/xapi/xapi_pgpu_helpers.mli b/ocaml/xapi/xapi_pgpu_helpers.mli index 24ccc543998..a2e28ccea02 100644 --- a/ocaml/xapi/xapi_pgpu_helpers.mli +++ b/ocaml/xapi/xapi_pgpu_helpers.mli @@ -14,32 +14,32 @@ (** Check that the specified type of VGPU is enabled on this PGPU. *) val assert_VGPU_type_enabled : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit (** Check that the specified type of VGPU is supported on this PGPU. 
*) val assert_VGPU_type_supported : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit (** Any VGPUs already resident on this PGPU must be compatible with the type of * the VGPUs already running on the PGPU. For now, we only allow one VGPU_type * to run on a PGPU at any one time. *) val assert_VGPU_type_allowed : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit (** Check that no VMs resident on this PGPU have the specified type. *) val assert_no_resident_VGPUs_of_type : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit (* Return the number of VGPUs of the specified type for which capacity * remains on the PGPU, or an exception if the remaining capacity is zero. *) val get_remaining_capacity_internal : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> (exn, int64) Stdext.Either.t + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> (exn, int64) Stdext.Either.t (* Return the number of VGPUs of the specified type for which capacity * remains on the PGPU. *) val get_remaining_capacity : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> int64 + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> int64 (** Check that the PGPU has capacity to run the specified VGPU. 
*) val assert_capacity_exists_for_VGPU_type : __context:Context.t -> - self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit + self:API.ref_PGPU -> vgpu_type:API.ref_VGPU_type -> unit diff --git a/ocaml/xapi/xapi_pif.ml b/ocaml/xapi/xapi_pif.ml index d6b0362c316..4359024107b 100644 --- a/ocaml/xapi/xapi_pif.ml +++ b/ocaml/xapi/xapi_pif.ml @@ -27,305 +27,305 @@ open Threadext open Network let refresh_internal ~__context ~self = - let device = Db.PIF.get_device ~__context ~self in - let network = Db.PIF.get_network ~__context ~self in - let bridge = Db.Network.get_bridge ~__context ~self:network in - let dbg = Context.string_of_task __context in - - (* Update the specified PIF field in the database, if - * and only if a corresponding value can be read from - * the underlying network device and if that value is - * different from the current field value. - *) - let maybe_update_database - field_name get_field set_field get_value print_value = - Opt.iter - (fun value -> - if value <> (get_field ~__context ~self) - then begin - debug "PIF %s %s <- %s" - (Ref.string_of self) - (field_name) - (print_value value); - set_field ~__context ~self ~value - end) - (Opt.of_exception (fun () -> get_value ())) in - - if Db.PIF.get_physical ~__context ~self then - maybe_update_database "MAC" - (Db.PIF.get_MAC) - (Db.PIF.set_MAC) - (fun () -> Net.Interface.get_mac dbg ~name:device) - (id); - - maybe_update_database "MTU" - (Db.PIF.get_MTU) - (Db.PIF.set_MTU) - (Int64.of_int ++ (fun () -> Net.Interface.get_mtu dbg ~name:bridge)) - (Int64.to_string); - - maybe_update_database "capabilities" - (Db.PIF.get_capabilities) - (Db.PIF.set_capabilities) - (fun () -> Net.Interface.get_capabilities dbg ~name:device) - (String.concat "; ") + let device = Db.PIF.get_device ~__context ~self in + let network = Db.PIF.get_network ~__context ~self in + let bridge = Db.Network.get_bridge ~__context ~self:network in + let dbg = Context.string_of_task __context in + + (* Update the specified PIF field 
in the database, if + * and only if a corresponding value can be read from + * the underlying network device and if that value is + * different from the current field value. + *) + let maybe_update_database + field_name get_field set_field get_value print_value = + Opt.iter + (fun value -> + if value <> (get_field ~__context ~self) + then begin + debug "PIF %s %s <- %s" + (Ref.string_of self) + (field_name) + (print_value value); + set_field ~__context ~self ~value + end) + (Opt.of_exception (fun () -> get_value ())) in + + if Db.PIF.get_physical ~__context ~self then + maybe_update_database "MAC" + (Db.PIF.get_MAC) + (Db.PIF.set_MAC) + (fun () -> Net.Interface.get_mac dbg ~name:device) + (id); + + maybe_update_database "MTU" + (Db.PIF.get_MTU) + (Db.PIF.set_MTU) + (Int64.of_int ++ (fun () -> Net.Interface.get_mtu dbg ~name:bridge)) + (Int64.to_string); + + maybe_update_database "capabilities" + (Db.PIF.get_capabilities) + (Db.PIF.set_capabilities) + (fun () -> Net.Interface.get_capabilities dbg ~name:device) + (String.concat "; ") let refresh ~__context ~host ~self = - assert (host = Helpers.get_localhost ~__context); - refresh_internal ~__context ~self + assert (host = Helpers.get_localhost ~__context); + refresh_internal ~__context ~self let refresh_all ~__context ~host = - assert (host = Helpers.get_localhost ~__context); - (* Only refresh physical or attached PIFs *) - let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Or (Eq (Field "physical", Literal "true"), - Eq (Field "currently_attached", Literal "true")) - )) in - List.iter (fun self -> refresh_internal ~__context ~self) pifs + assert (host = Helpers.get_localhost ~__context); + (* Only refresh physical or attached PIFs *) + let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Or (Eq (Field "physical", Literal "true"), + Eq (Field "currently_attached", Literal "true")) + )) in + List.iter 
(fun self -> refresh_internal ~__context ~self) pifs let bridge_naming_convention (device: string) = - if String.startswith "eth" device - then ("xenbr" ^ (String.sub device 3 (String.length device - 3))) - else ("br" ^ device) + if String.startswith "eth" device + then ("xenbr" ^ (String.sub device 3 (String.length device - 3))) + else ("br" ^ device) let read_bridges_from_inventory () = - try - String.split - (' ') - (Xapi_inventory.lookup Xapi_inventory._current_interfaces) - with _ -> - [] + try + String.split + (' ') + (Xapi_inventory.lookup Xapi_inventory._current_interfaces) + with _ -> + [] let assert_not_in_bond ~__context ~self = - (* Prevent bond slaves interfaces *) - let bond = Db.PIF.get_bond_slave_of ~__context ~self in - if - try ignore (Db.Bond.get_uuid ~__context ~self:bond); true - with _ -> false - then raise (Api_errors.Server_error - (Api_errors.pif_already_bonded, - [ Ref.string_of self ])); - (* Disallow for bond masters *) - if Db.PIF.get_bond_master_of ~__context ~self <> [] - then raise (Api_errors.Server_error - (Api_errors.pif_already_bonded, - [ Ref.string_of self ])) + (* Prevent bond slaves interfaces *) + let bond = Db.PIF.get_bond_slave_of ~__context ~self in + if + try ignore (Db.Bond.get_uuid ~__context ~self:bond); true + with _ -> false + then raise (Api_errors.Server_error + (Api_errors.pif_already_bonded, + [ Ref.string_of self ])); + (* Disallow for bond masters *) + if Db.PIF.get_bond_master_of ~__context ~self <> [] + then raise (Api_errors.Server_error + (Api_errors.pif_already_bonded, + [ Ref.string_of self ])) let assert_no_vlans ~__context ~self = - (* Disallow if this is a base interface of any existing VLAN *) - let vlans = Db.PIF.get_VLAN_slave_of ~__context ~self in - debug "PIF %s assert_no_vlans = [ %s ]" - (Db.PIF.get_uuid ~__context ~self) - (String.concat "; " (List.map Ref.string_of vlans)); - if vlans <> [] - then begin - debug "PIF has associated VLANs: [ %s ]" - (String.concat - ("; ") - (List.map - (fun 
self -> Db.VLAN.get_uuid ~__context ~self) - (vlans))); - raise (Api_errors.Server_error - (Api_errors.pif_vlan_still_exists, - [ Ref.string_of self ])) - end; - (* Disallow if this is a derived interface of a VLAN *) - if - Db.PIF.get_VLAN ~__context ~self <> (-1L) - && not (Xapi_fist.allow_forget_of_vlan_pif ()) - then raise (Api_errors.Server_error - (Api_errors.pif_vlan_still_exists, - [ Ref.string_of self ])) + (* Disallow if this is a base interface of any existing VLAN *) + let vlans = Db.PIF.get_VLAN_slave_of ~__context ~self in + debug "PIF %s assert_no_vlans = [ %s ]" + (Db.PIF.get_uuid ~__context ~self) + (String.concat "; " (List.map Ref.string_of vlans)); + if vlans <> [] + then begin + debug "PIF has associated VLANs: [ %s ]" + (String.concat + ("; ") + (List.map + (fun self -> Db.VLAN.get_uuid ~__context ~self) + (vlans))); + raise (Api_errors.Server_error + (Api_errors.pif_vlan_still_exists, + [ Ref.string_of self ])) + end; + (* Disallow if this is a derived interface of a VLAN *) + if + Db.PIF.get_VLAN ~__context ~self <> (-1L) + && not (Xapi_fist.allow_forget_of_vlan_pif ()) + then raise (Api_errors.Server_error + (Api_errors.pif_vlan_still_exists, + [ Ref.string_of self ])) let assert_no_tunnels ~__context ~self = - (* Disallow if this is a transport interface of any existing tunnel *) - let tunnels = - Db.PIF.get_tunnel_transport_PIF_of ~__context ~self in - debug "PIF %s assert_no_tunnels = [ %s ]" - (Db.PIF.get_uuid ~__context ~self) - (String.concat "; " (List.map Ref.string_of tunnels)); - if tunnels <> [] - then begin - debug "PIF has associated tunnels: [ %s ]" - (String.concat - ("; ") - (List.map - (fun self -> Db.Tunnel.get_uuid ~__context ~self) - (tunnels))); - raise (Api_errors.Server_error - (Api_errors.pif_tunnel_still_exists, - [ Ref.string_of self ])) - end; - (* Disallow if this is an access interface of a tunnel *) - if Db.PIF.get_tunnel_access_PIF_of ~__context ~self <> [] - then raise (Api_errors.Server_error - 
(Api_errors.pif_tunnel_still_exists, - [ Ref.string_of self ])) + (* Disallow if this is a transport interface of any existing tunnel *) + let tunnels = + Db.PIF.get_tunnel_transport_PIF_of ~__context ~self in + debug "PIF %s assert_no_tunnels = [ %s ]" + (Db.PIF.get_uuid ~__context ~self) + (String.concat "; " (List.map Ref.string_of tunnels)); + if tunnels <> [] + then begin + debug "PIF has associated tunnels: [ %s ]" + (String.concat + ("; ") + (List.map + (fun self -> Db.Tunnel.get_uuid ~__context ~self) + (tunnels))); + raise (Api_errors.Server_error + (Api_errors.pif_tunnel_still_exists, + [ Ref.string_of self ])) + end; + (* Disallow if this is an access interface of a tunnel *) + if Db.PIF.get_tunnel_access_PIF_of ~__context ~self <> [] + then raise (Api_errors.Server_error + (Api_errors.pif_tunnel_still_exists, + [ Ref.string_of self ])) let assert_not_management_pif ~__context ~self = - if Db.PIF.get_management ~__context ~self then - raise (Api_errors.Server_error (Api_errors.pif_is_management_iface, [ Ref.string_of self ])) + if Db.PIF.get_management ~__context ~self then + raise (Api_errors.Server_error (Api_errors.pif_is_management_iface, [ Ref.string_of self ])) let assert_pif_is_managed ~__context ~self = - if Db.PIF.get_managed ~__context ~self <> true then - raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of self])) + if Db.PIF.get_managed ~__context ~self <> true then + raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of self])) let assert_not_slave_management_pif ~__context ~self = - if true - && Pool_role.is_slave () - && Db.PIF.get_currently_attached ~__context ~self - && Db.PIF.get_management ~__context ~self - then raise (Api_errors.Server_error - (Api_errors.pif_is_management_iface, - [ Ref.string_of self ])) + if true + && Pool_role.is_slave () + && Db.PIF.get_currently_attached ~__context ~self + && Db.PIF.get_management ~__context ~self + then raise (Api_errors.Server_error + 
(Api_errors.pif_is_management_iface, + [ Ref.string_of self ])) let assert_no_protection_enabled ~__context ~self = - (* If HA or redo-log is enabled and PIF is attached - * then refuse to reconfigure the interface at all *) - if Db.PIF.get_currently_attached ~__context ~self - then begin - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool - then raise (Api_errors.Server_error - (Api_errors.ha_is_enabled, [])) - else if Db.Pool.get_redo_log_enabled ~__context ~self:pool - then raise (Api_errors.Server_error - (Api_errors.redo_log_is_enabled, [])) - end + (* If HA or redo-log is enabled and PIF is attached + * then refuse to reconfigure the interface at all *) + if Db.PIF.get_currently_attached ~__context ~self + then begin + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool + then raise (Api_errors.Server_error + (Api_errors.ha_is_enabled, [])) + else if Db.Pool.get_redo_log_enabled ~__context ~self:pool + then raise (Api_errors.Server_error + (Api_errors.redo_log_is_enabled, [])) + end let abort_if_network_attached_to_protected_vms ~__context ~self = - (* Abort a PIF.unplug if the Network - * has VIFs connected to protected VMs *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool - && not (Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) - then begin - let net = Db.PIF.get_network ~__context ~self in - let vifs = Db.Network.get_VIFs ~__context ~self:net in - let vms = List.map - (fun vif -> Db.VIF.get_VM ~__context ~self:vif) - (vifs) in - List.iter - (fun vm -> - if Helpers.is_xha_protected ~__context ~self:vm - then begin - warn - "PIF.unplug will make protected VM %s not agile since it has a VIF attached to network %s" - (Ref.string_of vm) - (Ref.string_of net); - raise (Api_errors.Server_error - (Api_errors.ha_operation_would_break_failover_plan, - [])) - end) - (vms) - end + (* Abort a PIF.unplug if the Network + * has VIFs 
connected to protected VMs *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool + && not (Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) + then begin + let net = Db.PIF.get_network ~__context ~self in + let vifs = Db.Network.get_VIFs ~__context ~self:net in + let vms = List.map + (fun vif -> Db.VIF.get_VM ~__context ~self:vif) + (vifs) in + List.iter + (fun vm -> + if Helpers.is_xha_protected ~__context ~self:vm + then begin + warn + "PIF.unplug will make protected VM %s not agile since it has a VIF attached to network %s" + (Ref.string_of vm) + (Ref.string_of net); + raise (Api_errors.Server_error + (Api_errors.ha_operation_would_break_failover_plan, + [])) + end) + (vms) + end let assert_no_other_local_pifs ~__context ~host ~network = - let other_pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "network", Literal (Ref.string_of network)), - Eq (Field "host", Literal (Ref.string_of host)) - )) in - if other_pifs <> [] - then raise (Api_errors.Server_error - (Api_errors.network_already_connected, - [Ref.string_of host; Ref.string_of (List.hd other_pifs)])) + let other_pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "network", Literal (Ref.string_of network)), + Eq (Field "host", Literal (Ref.string_of host)) + )) in + if other_pifs <> [] + then raise (Api_errors.Server_error + (Api_errors.network_already_connected, + [Ref.string_of host; Ref.string_of (List.hd other_pifs)])) let find_or_create_network (bridge: string) (device: string) ~__context = - let nets = Db.Network.get_refs_where ~__context ~expr:(Eq (Field "bridge", Literal bridge)) in - match nets with - | [net] -> net - | _ -> - let net_ref = Ref.make () - and net_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let () = Db.Network.create - ~__context ~ref:net_ref ~uuid:net_uuid - ~current_operations:[] ~allowed_operations:[] - ~name_label:(Helpers.choose_network_name_for_pif device) - ~name_description:"" ~mTU:1500L - 
~bridge ~other_config:[] ~blobs:[] - ~tags:[] ~default_locking_mode:`unlocked ~assigned_ips:[] - in - net_ref + let nets = Db.Network.get_refs_where ~__context ~expr:(Eq (Field "bridge", Literal bridge)) in + match nets with + | [net] -> net + | _ -> + let net_ref = Ref.make () + and net_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let () = Db.Network.create + ~__context ~ref:net_ref ~uuid:net_uuid + ~current_operations:[] ~allowed_operations:[] + ~name_label:(Helpers.choose_network_name_for_pif device) + ~name_description:"" ~mTU:1500L + ~bridge ~other_config:[] ~blobs:[] + ~tags:[] ~default_locking_mode:`unlocked ~assigned_ips:[] + in + net_ref type tables = { - device_to_mac_table: (string * string) list; - pif_to_device_table: (API.ref_PIF * string) list; + device_to_mac_table: (string * string) list; + pif_to_device_table: (API.ref_PIF * string) list; } let make_tables ~__context ~host = - let dbg = Context.string_of_task __context in - let devices = - List.filter - (fun name -> Net.Interface.is_physical dbg ~name) - (Net.Interface.get_all dbg ()) in - let pifs = Db.PIF.get_records_where ~__context - ~expr:(And (Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "physical", Literal "true"))) in - { - device_to_mac_table = - List.combine - (devices) - (List.map (fun name -> Net.Interface.get_mac dbg ~name) devices); - pif_to_device_table = - List.map (fun (pref, prec) -> pref, prec.API.pIF_device) pifs; - } + let dbg = Context.string_of_task __context in + let devices = + List.filter + (fun name -> Net.Interface.is_physical dbg ~name) + (Net.Interface.get_all dbg ()) in + let pifs = Db.PIF.get_records_where ~__context + ~expr:(And (Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "physical", Literal "true"))) in + { + device_to_mac_table = + List.combine + (devices) + (List.map (fun name -> Net.Interface.get_mac dbg ~name) devices); + pif_to_device_table = + List.map (fun (pref, prec) -> pref, prec.API.pIF_device) pifs; + } let 
is_my_management_pif ~__context ~self = - let net = Db.PIF.get_network ~__context ~self in - let management_if = - Xapi_inventory.lookup Xapi_inventory._management_interface in - Db.Network.get_bridge ~__context ~self:net = management_if + let net = Db.PIF.get_network ~__context ~self in + let management_if = + Xapi_inventory.lookup Xapi_inventory._management_interface in + Db.Network.get_bridge ~__context ~self:net = management_if let make_pif_metrics ~__context = - let metrics = Ref.make () - and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let () = Db.PIF_metrics.create - ~__context ~ref:metrics ~uuid:metrics_uuid ~carrier:false - ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" - ~speed:0L ~duplex:false ~pci_bus_path:"" - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) - ~other_config:[] in - metrics + let metrics = Ref.make () + and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let () = Db.PIF_metrics.create + ~__context ~ref:metrics ~uuid:metrics_uuid ~carrier:false + ~device_name:"" ~vendor_name:"" ~device_id:"" ~vendor_id:"" + ~speed:0L ~duplex:false ~pci_bus_path:"" + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) 
+ ~other_config:[] in + metrics let property_names_and_values = [ - "gro", ["on"; "off"] + "gro", ["on"; "off"] ] let default_properties = [ - "gro", "on" + "gro", "on" ] let pif_has_properties ~__context ~self = - (* Only bond masters and physical PIFs *) - Db.PIF.get_bond_master_of ~__context ~self <> [] || Db.PIF.get_physical ~__context ~self + (* Only bond masters and physical PIFs *) + Db.PIF.get_bond_master_of ~__context ~self <> [] || Db.PIF.get_physical ~__context ~self let set_default_properties ~__context ~self = - if pif_has_properties ~__context ~self then - Db.PIF.set_properties ~__context ~self ~value:default_properties - else - Db.PIF.set_properties ~__context ~self ~value:[] + if pif_has_properties ~__context ~self then + Db.PIF.set_properties ~__context ~self ~value:default_properties + else + Db.PIF.set_properties ~__context ~self ~value:[] let pool_introduce - ~__context ~device ~network ~host - ~mAC ~mTU ~vLAN ~physical - ~ip_configuration_mode ~iP ~netmask ~gateway - ~dNS ~bond_slave_of ~vLAN_master_of ~management - ~other_config ~disallow_unplug ~ipv6_configuration_mode - ~iPv6 ~ipv6_gateway ~primary_address_type ~managed ~properties = - let pif_ref = Ref.make () in - let metrics = make_pif_metrics ~__context in - let () = - Db.PIF.create - ~__context ~ref:pif_ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~device ~device_name:device ~network ~host - ~mAC ~mTU ~vLAN ~metrics - ~physical ~currently_attached:false - ~ip_configuration_mode ~iP ~netmask ~gateway ~dNS - ~bond_slave_of:Ref.null ~vLAN_master_of ~management - ~other_config ~disallow_unplug ~ipv6_configuration_mode - ~iPv6 ~ipv6_gateway ~primary_address_type ~managed ~properties ~capabilities:[] in - pif_ref + ~__context ~device ~network ~host + ~mAC ~mTU ~vLAN ~physical + ~ip_configuration_mode ~iP ~netmask ~gateway + ~dNS ~bond_slave_of ~vLAN_master_of ~management + ~other_config ~disallow_unplug ~ipv6_configuration_mode + ~iPv6 ~ipv6_gateway ~primary_address_type ~managed 
~properties = + let pif_ref = Ref.make () in + let metrics = make_pif_metrics ~__context in + let () = + Db.PIF.create + ~__context ~ref:pif_ref ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~device ~device_name:device ~network ~host + ~mAC ~mTU ~vLAN ~metrics + ~physical ~currently_attached:false + ~ip_configuration_mode ~iP ~netmask ~gateway ~dNS + ~bond_slave_of:Ref.null ~vLAN_master_of ~management + ~other_config ~disallow_unplug ~ipv6_configuration_mode + ~iPv6 ~ipv6_gateway ~primary_address_type ~managed ~properties ~capabilities:[] in + pif_ref let db_introduce = pool_introduce @@ -333,480 +333,480 @@ let db_forget ~__context ~self = Db.PIF.destroy ~__context ~self (* Internal [introduce] is passed a pre-built table [t] *) let introduce_internal - ?network ?(physical=true) ~t ~__context ~host - ~mAC ~mTU ~device ~vLAN ~vLAN_master_of ?metrics - ~managed ?(disallow_unplug=false) () = - let bridge = bridge_naming_convention device in - - (* If we are not told which network to use, - * apply the default convention *) - let net_ref = - match network with - | None -> find_or_create_network bridge device ~__context - | Some x -> x in - let metrics = match metrics with - | None -> make_pif_metrics ~__context - | Some m -> m - in - let dbg = Context.string_of_task __context in - let capabilities = Net.Interface.get_capabilities dbg device in - - let pif = Ref.make () in - debug - "Creating a new record for NIC: %s: %s" - (device) - (Ref.string_of pif); - let () = Db.PIF.create - ~__context ~ref:pif ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~device ~device_name:device ~network:net_ref ~host ~mAC - ~mTU ~vLAN ~metrics ~physical ~currently_attached:false - ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" - ~dNS:"" ~bond_slave_of:Ref.null ~vLAN_master_of ~management:false - ~other_config:[] ~disallow_unplug ~ipv6_configuration_mode:`None - ~iPv6:[] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed - ~properties:default_properties 
~capabilities:capabilities in - - (* If I'm a pool slave and this pif represents my management - * interface then leave it alone: if the interface goes down - * (through a call to "up") then I lose my connection to the - * master's database and the call to "up" (which uses the API - * and requires the database) blocks until the slave restarts - * in emergency mode. - *) - (* Rob: nothing seems to be done with the pool slave case - * mentioned in this comment...? - *) - if is_my_management_pif ~__context ~self:pif - then begin - debug "NIC is the management interface"; - Db.PIF.set_management ~__context ~self:pif ~value:true; - Db.PIF.set_currently_attached ~__context ~self:pif ~value:true; - end; - - (* When a new PIF is introduced then we clear it from the cache w.r.t - * the monitor thread; this ensures that the PIF metrics (including - * carrier and vendor etc.) will eventually get updated [and that - * subsequent changes to this PIFs' device's dom0 configuration - * will be reflected accordingly]. 
*) - Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:device; - - (* return ref of newly created pif record *) - pif + ?network ?(physical=true) ~t ~__context ~host + ~mAC ~mTU ~device ~vLAN ~vLAN_master_of ?metrics + ~managed ?(disallow_unplug=false) () = + let bridge = bridge_naming_convention device in + + (* If we are not told which network to use, + * apply the default convention *) + let net_ref = + match network with + | None -> find_or_create_network bridge device ~__context + | Some x -> x in + let metrics = match metrics with + | None -> make_pif_metrics ~__context + | Some m -> m + in + let dbg = Context.string_of_task __context in + let capabilities = Net.Interface.get_capabilities dbg device in + + let pif = Ref.make () in + debug + "Creating a new record for NIC: %s: %s" + (device) + (Ref.string_of pif); + let () = Db.PIF.create + ~__context ~ref:pif ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~device ~device_name:device ~network:net_ref ~host ~mAC + ~mTU ~vLAN ~metrics ~physical ~currently_attached:false + ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" + ~dNS:"" ~bond_slave_of:Ref.null ~vLAN_master_of ~management:false + ~other_config:[] ~disallow_unplug ~ipv6_configuration_mode:`None + ~iPv6:[] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed + ~properties:default_properties ~capabilities:capabilities in + + (* If I'm a pool slave and this pif represents my management + * interface then leave it alone: if the interface goes down + * (through a call to "up") then I lose my connection to the + * master's database and the call to "up" (which uses the API + * and requires the database) blocks until the slave restarts + * in emergency mode. + *) + (* Rob: nothing seems to be done with the pool slave case + * mentioned in this comment...? 
+ *) + if is_my_management_pif ~__context ~self:pif + then begin + debug "NIC is the management interface"; + Db.PIF.set_management ~__context ~self:pif ~value:true; + Db.PIF.set_currently_attached ~__context ~self:pif ~value:true; + end; + + (* When a new PIF is introduced then we clear it from the cache w.r.t + * the monitor thread; this ensures that the PIF metrics (including + * carrier and vendor etc.) will eventually get updated [and that + * subsequent changes to this PIFs' device's dom0 configuration + * will be reflected accordingly]. *) + Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:device; + + (* return ref of newly created pif record *) + pif (* Internal [forget] is passed a pre-built table [t] *) let forget_internal ~t ~__context ~self = - if Db.PIF.get_managed ~__context ~self = true then - Nm.bring_pif_down ~__context self; - (* NB we are allowed to forget an interface which still exists *) - let device = Db.PIF.get_device ~__context ~self in - if List.mem_assoc device t.device_to_mac_table - then warn "Forgetting PIF record even though device %s still exists" device; - (try - let metrics = Db.PIF.get_metrics ~__context ~self in - Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.PIF.destroy ~__context ~self + if Db.PIF.get_managed ~__context ~self = true then + Nm.bring_pif_down ~__context self; + (* NB we are allowed to forget an interface which still exists *) + let device = Db.PIF.get_device ~__context ~self in + if List.mem_assoc device t.device_to_mac_table + then warn "Forgetting PIF record even though device %s still exists" device; + (try + let metrics = Db.PIF.get_metrics ~__context ~self in + Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.PIF.destroy ~__context ~self let update_management_flags ~__context ~host = - try - let management_bridge = Xapi_inventory.lookup Xapi_inventory._management_interface in - let management_networks = Db.Network.get_refs_where ~__context ~expr:( - Eq (Field 
"bridge", Literal management_bridge) - ) in - let current_management_pifs = - match management_networks with - | [] -> [] - | net :: _ -> - Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "network", Literal (Ref.string_of net)) - )) - in - let management_pifs_in_db = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Eq (Field "management", Literal "true") - )) in - let set_management value self = - debug "PIF %s management <- %b" (Ref.string_of self) value; - Db.PIF.set_management ~__context ~self ~value - in - (* Set management flag of PIFs that are now management PIFs, and do not have this flag set *) - List.iter (set_management true) (List.set_difference current_management_pifs management_pifs_in_db); - (* Clear management flag of PIFs that are no longer management PIFs *) - List.iter (set_management false) (List.set_difference management_pifs_in_db current_management_pifs) - with Xapi_inventory.Missing_inventory_key _ -> - error "Missing field MANAGEMENT_INTERFACE in inventory file" + try + let management_bridge = Xapi_inventory.lookup Xapi_inventory._management_interface in + let management_networks = Db.Network.get_refs_where ~__context ~expr:( + Eq (Field "bridge", Literal management_bridge) + ) in + let current_management_pifs = + match management_networks with + | [] -> [] + | net :: _ -> + Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "network", Literal (Ref.string_of net)) + )) + in + let management_pifs_in_db = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Eq (Field "management", Literal "true") + )) in + let set_management value self = + debug "PIF %s management <- %b" (Ref.string_of self) value; + Db.PIF.set_management ~__context ~self ~value + in + (* Set management flag of PIFs that are now management PIFs, and do not have this 
flag set *) + List.iter (set_management true) (List.set_difference current_management_pifs management_pifs_in_db); + (* Clear management flag of PIFs that are no longer management PIFs *) + List.iter (set_management false) (List.set_difference management_pifs_in_db current_management_pifs) + with Xapi_inventory.Missing_inventory_key _ -> + error "Missing field MANAGEMENT_INTERFACE in inventory file" let introduce ~__context ~host ~mAC ~device ~managed = - let mAC = String.lowercase mAC in (* just a convention *) - let t = make_tables ~__context ~host in - let dbg = Context.string_of_task __context in - - (* Allow callers to omit the MAC address. Ideally, we should - * use an option type (instead of treating the empty string - * as a special value). However we must preserve the existing - * function signature as it appears in the published API. - *) - let mAC = - if mAC = "" - then List.assoc_default device t.device_to_mac_table "" - else mAC in - - if not (Helpers.is_valid_MAC mAC) - then raise (Api_errors.Server_error - (Api_errors.mac_invalid, [mAC])); - - (* Assert that a local PIF with the given device name does not already exist *) - if List.mem device (List.map snd t.pif_to_device_table) - then raise (Api_errors.Server_error - (Api_errors.duplicate_pif_device_name, [device])); - - (* Assert that a network interface exists with * - * the specified device name and MAC address. 
*) - if not (List.mem (device, mAC) t.device_to_mac_table) - then raise (Api_errors.Server_error (Api_errors - .could_not_find_network_interface_with_specified_device_name_and_mac_address, - [device; mAC])); - - info - "Introducing PIF: device = %s; MAC = %s" - device mAC; - let mTU = Int64.of_int (Net.Interface.get_mtu dbg ~name:device) in - introduce_internal - ~t ~__context ~host ~mAC ~device ~mTU - ~vLAN:(-1L) ~vLAN_master_of:Ref.null ~managed () + let mAC = String.lowercase mAC in (* just a convention *) + let t = make_tables ~__context ~host in + let dbg = Context.string_of_task __context in + + (* Allow callers to omit the MAC address. Ideally, we should + * use an option type (instead of treating the empty string + * as a special value). However we must preserve the existing + * function signature as it appears in the published API. + *) + let mAC = + if mAC = "" + then List.assoc_default device t.device_to_mac_table "" + else mAC in + + if not (Helpers.is_valid_MAC mAC) + then raise (Api_errors.Server_error + (Api_errors.mac_invalid, [mAC])); + + (* Assert that a local PIF with the given device name does not already exist *) + if List.mem device (List.map snd t.pif_to_device_table) + then raise (Api_errors.Server_error + (Api_errors.duplicate_pif_device_name, [device])); + + (* Assert that a network interface exists with * + * the specified device name and MAC address. 
*) + if not (List.mem (device, mAC) t.device_to_mac_table) + then raise (Api_errors.Server_error (Api_errors + .could_not_find_network_interface_with_specified_device_name_and_mac_address, + [device; mAC])); + + info + "Introducing PIF: device = %s; MAC = %s" + device mAC; + let mTU = Int64.of_int (Net.Interface.get_mtu dbg ~name:device) in + introduce_internal + ~t ~__context ~host ~mAC ~device ~mTU + ~vLAN:(-1L) ~vLAN_master_of:Ref.null ~managed () let forget ~__context ~self = - assert_not_management_pif ~__context ~self; - assert_not_in_bond ~__context ~self; - assert_no_vlans ~__context ~self; - assert_no_tunnels ~__context ~self; - assert_not_slave_management_pif ~__context ~self; - assert_no_protection_enabled ~__context ~self; + assert_not_management_pif ~__context ~self; + assert_not_in_bond ~__context ~self; + assert_no_vlans ~__context ~self; + assert_no_tunnels ~__context ~self; + assert_not_slave_management_pif ~__context ~self; + assert_no_protection_enabled ~__context ~self; - let host = Db.PIF.get_host ~__context ~self in - let t = make_tables ~__context ~host in - forget_internal ~t ~__context ~self + let host = Db.PIF.get_host ~__context ~self in + let t = make_tables ~__context ~host in + forget_internal ~t ~__context ~self let scan_m = Mutex.create () let scan ~__context ~host = - let dbg = Context.string_of_task __context in - refresh_all ~__context ~host; - - let non_managed_devices, disallow_unplug_devices = - if Sys.file_exists !Xapi_globs.non_managed_pifs then - try - let output, _ = Forkhelpers.execute_command_get_output !Xapi_globs.non_managed_pifs [] in - let dsplit = String.split '\n' output in - match dsplit with - | [] | [""] | "" :: "" :: _ -> - debug "No boot from SAN interface found"; - [], [] - | m :: u :: _ -> - String.split_f String.isspace m, String.split_f String.isspace u - | m :: _ -> - String.split_f String.isspace m, [] - with e -> - warn "Error when executing script %s: %s; ignoring" !Xapi_globs.non_managed_pifs 
(Printexc.to_string e); - [], [] - else begin - debug "Script %s not found; ignoring" !Xapi_globs.non_managed_pifs; - [], [] - end - in - - Mutex.execute scan_m (fun () -> - let t = make_tables ~__context ~host in - let devices_not_yet_represented_by_pifs = - List.set_difference - (List.map fst t.device_to_mac_table) - (List.map snd t.pif_to_device_table) in - - (* Create PIF records for the new interfaces *) - List.iter - (fun device -> - let mAC = List.assoc device t.device_to_mac_table in - let mTU = Int64.of_int (Net.Interface.get_mtu dbg ~name:device) in - let managed = not (List.mem device non_managed_devices) in - let disallow_unplug = (List.mem device disallow_unplug_devices) in - let (_: API.ref_PIF) = - introduce_internal - ~t ~__context ~host ~mAC ~mTU ~vLAN:(-1L) - ~vLAN_master_of:Ref.null ~device ~managed ~disallow_unplug () in - ()) - (devices_not_yet_represented_by_pifs) - ); - - (* Make sure the right PIF(s) are marked as management PIFs *) - update_management_flags ~__context ~host + let dbg = Context.string_of_task __context in + refresh_all ~__context ~host; + + let non_managed_devices, disallow_unplug_devices = + if Sys.file_exists !Xapi_globs.non_managed_pifs then + try + let output, _ = Forkhelpers.execute_command_get_output !Xapi_globs.non_managed_pifs [] in + let dsplit = String.split '\n' output in + match dsplit with + | [] | [""] | "" :: "" :: _ -> + debug "No boot from SAN interface found"; + [], [] + | m :: u :: _ -> + String.split_f String.isspace m, String.split_f String.isspace u + | m :: _ -> + String.split_f String.isspace m, [] + with e -> + warn "Error when executing script %s: %s; ignoring" !Xapi_globs.non_managed_pifs (Printexc.to_string e); + [], [] + else begin + debug "Script %s not found; ignoring" !Xapi_globs.non_managed_pifs; + [], [] + end + in + + Mutex.execute scan_m (fun () -> + let t = make_tables ~__context ~host in + let devices_not_yet_represented_by_pifs = + List.set_difference + (List.map fst 
t.device_to_mac_table) + (List.map snd t.pif_to_device_table) in + + (* Create PIF records for the new interfaces *) + List.iter + (fun device -> + let mAC = List.assoc device t.device_to_mac_table in + let mTU = Int64.of_int (Net.Interface.get_mtu dbg ~name:device) in + let managed = not (List.mem device non_managed_devices) in + let disallow_unplug = (List.mem device disallow_unplug_devices) in + let (_: API.ref_PIF) = + introduce_internal + ~t ~__context ~host ~mAC ~mTU ~vLAN:(-1L) + ~vLAN_master_of:Ref.null ~device ~managed ~disallow_unplug () in + ()) + (devices_not_yet_represented_by_pifs) + ); + + (* Make sure the right PIF(s) are marked as management PIFs *) + update_management_flags ~__context ~host (* DEPRECATED! Rewritten to use VLAN.create. *) let create_VLAN ~__context ~device ~network ~host ~vLAN = - (* Find the "tagged PIF" (same device, no VLAN tag) *) - let other_pifs = Db.Host.get_PIFs ~__context ~self:host in - let base_pifs = - List.filter - (fun self -> - (Db.PIF.get_device ~__context ~self = device) - && - (Db.PIF.get_VLAN ~__context ~self = (-1L))) - (other_pifs) in - if List.length base_pifs = 0 - then raise (Api_errors.Server_error - (Api_errors.invalid_value, [ "device"; device ])); - let tagged_PIF = List.hd base_pifs in - let vlan = Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.VLAN.create - rpc session_id tagged_PIF vLAN network) in - Db.VLAN.get_untagged_PIF ~__context ~self:vlan + (* Find the "tagged PIF" (same device, no VLAN tag) *) + let other_pifs = Db.Host.get_PIFs ~__context ~self:host in + let base_pifs = + List.filter + (fun self -> + (Db.PIF.get_device ~__context ~self = device) + && + (Db.PIF.get_VLAN ~__context ~self = (-1L))) + (other_pifs) in + if List.length base_pifs = 0 + then raise (Api_errors.Server_error + (Api_errors.invalid_value, [ "device"; device ])); + let tagged_PIF = List.hd base_pifs in + let vlan = Helpers.call_api_functions ~__context + (fun rpc session_id -> + 
Client.Client.VLAN.create + rpc session_id tagged_PIF vLAN network) in + Db.VLAN.get_untagged_PIF ~__context ~self:vlan (* DEPRECATED! Rewritten to use VLAN.destroy. *) let destroy ~__context ~self = - if Db.PIF.get_VLAN ~__context ~self < 0L - then raise (Api_errors.Server_error (Api_errors.pif_is_physical, [])); - let vlan = Db.PIF.get_VLAN_master_of ~__context ~self in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.VLAN.destroy rpc session_id vlan) + if Db.PIF.get_VLAN ~__context ~self < 0L + then raise (Api_errors.Server_error (Api_errors.pif_is_physical, [])); + let vlan = Db.PIF.get_VLAN_master_of ~__context ~self in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Client.VLAN.destroy rpc session_id vlan) let reconfigure_ipv6 ~__context ~self ~mode ~iPv6 ~gateway ~dNS = - assert_pif_is_managed ~__context ~self; - assert_no_protection_enabled ~__context ~self; - - if gateway <> "" then - Helpers.assert_is_valid_ip `ipv6 "gateway" gateway; - - (* If we have an IPv6 address, check that it is valid and a prefix length is specified *) - if iPv6 <> "" then - Helpers.assert_is_valid_cidr `ipv6 "IPv6" iPv6; - - if dNS <> "" then - List.iter - (fun address -> Helpers.assert_is_valid_ip `ipv6 "DNS" address) - (String.split ',' dNS); - - (* Management iface must have an address for the primary address type *) - let management = Db.PIF.get_management ~__context ~self in - let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self in - if management && mode = `None && primary_address_type = `IPv6 then - raise (Api_errors.Server_error - (Api_errors.pif_is_management_iface, [ Ref.string_of self ])); - - let old_mode = Db.PIF.get_ipv6_configuration_mode ~__context ~self in - - (* Set the values in the DB *) - Db.PIF.set_ipv6_configuration_mode ~__context ~self ~value:mode; - Db.PIF.set_ipv6_gateway ~__context ~self ~value:gateway; - Db.PIF.set_IPv6 ~__context ~self ~value:[iPv6]; - if dNS <> "" then 
Db.PIF.set_DNS ~__context ~self ~value:dNS; - - if Db.PIF.get_currently_attached ~__context ~self then begin - debug - "PIF %s is currently_attached and the configuration has changed; calling out to reconfigure" - (Db.PIF.get_uuid ~__context ~self); - Db.PIF.set_currently_attached ~__context ~self ~value:false; - Nm.bring_pif_up ~__context ~management_interface:management self; - if mode = `DHCP || mode = `Autoconf then - (* Refresh IP address fields in case dhclient was already running, and - * we are not getting a host-signal-networking-change callback. *) - Helpers.update_pif_address ~__context ~self - end; - Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self); - if ((old_mode == `None && mode <> `None) || (old_mode <> `None && mode == `None)) then - begin - debug "IPv6 mode has changed - updating management interface"; - Xapi_mgmt_iface.rebind ~__context; - end + assert_pif_is_managed ~__context ~self; + assert_no_protection_enabled ~__context ~self; + + if gateway <> "" then + Helpers.assert_is_valid_ip `ipv6 "gateway" gateway; + + (* If we have an IPv6 address, check that it is valid and a prefix length is specified *) + if iPv6 <> "" then + Helpers.assert_is_valid_cidr `ipv6 "IPv6" iPv6; + + if dNS <> "" then + List.iter + (fun address -> Helpers.assert_is_valid_ip `ipv6 "DNS" address) + (String.split ',' dNS); + + (* Management iface must have an address for the primary address type *) + let management = Db.PIF.get_management ~__context ~self in + let primary_address_type = Db.PIF.get_primary_address_type ~__context ~self in + if management && mode = `None && primary_address_type = `IPv6 then + raise (Api_errors.Server_error + (Api_errors.pif_is_management_iface, [ Ref.string_of self ])); + + let old_mode = Db.PIF.get_ipv6_configuration_mode ~__context ~self in + + (* Set the values in the DB *) + Db.PIF.set_ipv6_configuration_mode ~__context ~self ~value:mode; + Db.PIF.set_ipv6_gateway ~__context ~self ~value:gateway; + 
Db.PIF.set_IPv6 ~__context ~self ~value:[iPv6]; + if dNS <> "" then Db.PIF.set_DNS ~__context ~self ~value:dNS; + + if Db.PIF.get_currently_attached ~__context ~self then begin + debug + "PIF %s is currently_attached and the configuration has changed; calling out to reconfigure" + (Db.PIF.get_uuid ~__context ~self); + Db.PIF.set_currently_attached ~__context ~self ~value:false; + Nm.bring_pif_up ~__context ~management_interface:management self; + if mode = `DHCP || mode = `Autoconf then + (* Refresh IP address fields in case dhclient was already running, and + * we are not getting a host-signal-networking-change callback. *) + Helpers.update_pif_address ~__context ~self + end; + Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self); + if ((old_mode == `None && mode <> `None) || (old_mode <> `None && mode == `None)) then + begin + debug "IPv6 mode has changed - updating management interface"; + Xapi_mgmt_iface.rebind ~__context; + end let reconfigure_ip ~__context ~self ~mode ~iP ~netmask ~gateway ~dNS = - assert_pif_is_managed ~__context ~self; - assert_no_protection_enabled ~__context ~self; - - if mode = `Static then begin - (* require these parameters if mode is static *) - Helpers.assert_is_valid_ip `ipv4 "IP" iP; - Helpers.assert_is_valid_ip `ipv4 "netmask" netmask - end; - - (* for all IP parameters, if they're not empty - * then check they contain valid IP address *) - List.iter - (fun (param, value) -> if value <> "" then Helpers.assert_is_valid_ip `ipv4 param value) - ["IP",iP; "netmask",netmask; "gateway",gateway]; - if dNS <> "" then - List.iter - (fun address -> Helpers.assert_is_valid_ip `ipv4 "DNS" address) - (String.split ',' dNS); - - (* If this is a management PIF, make sure the IP config mode isn't None *) - let management=Db.PIF.get_management ~__context ~self in - let primary_address_type=Db.PIF.get_primary_address_type ~__context ~self in - - if management && mode = `None && primary_address_type=`IPv4 - then 
raise (Api_errors.Server_error - (Api_errors.pif_is_management_iface, [ Ref.string_of self ])); - - Db.PIF.set_ip_configuration_mode ~__context ~self ~value:mode; - Db.PIF.set_IP ~__context ~self ~value:iP; - Db.PIF.set_netmask ~__context ~self ~value:netmask; - Db.PIF.set_gateway ~__context ~self ~value:gateway; - Db.PIF.set_DNS ~__context ~self ~value:dNS; - if Db.PIF.get_currently_attached ~__context ~self - then begin - debug - "PIF %s is currently_attached and the configuration has changed; calling out to reconfigure" - (Db.PIF.get_uuid ~__context ~self); - Db.PIF.set_currently_attached ~__context ~self ~value:false; - Nm.bring_pif_up ~__context ~management_interface:management self; - if mode = `DHCP then - (* Refresh IP address fields in case dhclient was already running, and - * we are not getting a host-signal-networking-change callback. *) - Helpers.update_pif_address ~__context ~self - end; - (* We clear the monitor thread's cache for the PIF to resync the dom0 device - * state with the PIF db record; this fixes a race where the you do a - * PIF.reconfigure_ip to set mode=dhcp, but you have already got an IP on - * the dom0 device (e.g. 
because it's a management i/f that was brought up - * independently by init scripts) *) - Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self) + assert_pif_is_managed ~__context ~self; + assert_no_protection_enabled ~__context ~self; + + if mode = `Static then begin + (* require these parameters if mode is static *) + Helpers.assert_is_valid_ip `ipv4 "IP" iP; + Helpers.assert_is_valid_ip `ipv4 "netmask" netmask + end; + + (* for all IP parameters, if they're not empty + * then check they contain valid IP address *) + List.iter + (fun (param, value) -> if value <> "" then Helpers.assert_is_valid_ip `ipv4 param value) + ["IP",iP; "netmask",netmask; "gateway",gateway]; + if dNS <> "" then + List.iter + (fun address -> Helpers.assert_is_valid_ip `ipv4 "DNS" address) + (String.split ',' dNS); + + (* If this is a management PIF, make sure the IP config mode isn't None *) + let management=Db.PIF.get_management ~__context ~self in + let primary_address_type=Db.PIF.get_primary_address_type ~__context ~self in + + if management && mode = `None && primary_address_type=`IPv4 + then raise (Api_errors.Server_error + (Api_errors.pif_is_management_iface, [ Ref.string_of self ])); + + Db.PIF.set_ip_configuration_mode ~__context ~self ~value:mode; + Db.PIF.set_IP ~__context ~self ~value:iP; + Db.PIF.set_netmask ~__context ~self ~value:netmask; + Db.PIF.set_gateway ~__context ~self ~value:gateway; + Db.PIF.set_DNS ~__context ~self ~value:dNS; + if Db.PIF.get_currently_attached ~__context ~self + then begin + debug + "PIF %s is currently_attached and the configuration has changed; calling out to reconfigure" + (Db.PIF.get_uuid ~__context ~self); + Db.PIF.set_currently_attached ~__context ~self ~value:false; + Nm.bring_pif_up ~__context ~management_interface:management self; + if mode = `DHCP then + (* Refresh IP address fields in case dhclient was already running, and + * we are not getting a host-signal-networking-change callback. 
*) + Helpers.update_pif_address ~__context ~self + end; + (* We clear the monitor thread's cache for the PIF to resync the dom0 device + * state with the PIF db record; this fixes a race where the you do a + * PIF.reconfigure_ip to set mode=dhcp, but you have already got an IP on + * the dom0 device (e.g. because it's a management i/f that was brought up + * independently by init scripts) *) + Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self) let set_primary_address_type ~__context ~self ~primary_address_type = - assert_no_protection_enabled ~__context ~self; + assert_no_protection_enabled ~__context ~self; - let management=Db.PIF.get_management ~__context ~self in - if management then raise (Api_errors.Server_error(Api_errors.pif_is_management_iface, [ Ref.string_of self ])); + let management=Db.PIF.get_management ~__context ~self in + if management then raise (Api_errors.Server_error(Api_errors.pif_is_management_iface, [ Ref.string_of self ])); - Db.PIF.set_primary_address_type ~__context ~self ~value:primary_address_type; - Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self) + Db.PIF.set_primary_address_type ~__context ~self ~value:primary_address_type; + Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name:(Db.PIF.get_device ~__context ~self) let set_property ~__context ~self ~name ~value = - let fail () = raise (Api_errors.Server_error - (Api_errors.invalid_value, ["properties"; Printf.sprintf "%s = %s" name value])) - in - if not (List.mem_assoc name property_names_and_values) then - fail () - else if not (List.mem value (List.assoc name property_names_and_values)) then - fail (); - - (* Only bond masters and unbonded physical PIFs can be configured *) - if not (pif_has_properties ~__context ~self) || Db.PIF.get_bond_slave_of ~__context ~self <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_change_pif_properties, [Ref.string_of self])); - - (* Remove the existing 
property with this name, then add the new value. *) - let properties = List.filter - (fun (property_name, _) -> property_name <> name) - (Db.PIF.get_properties ~__context ~self) - in - let properties = (name, value) :: properties in - Db.PIF.set_properties ~__context ~self ~value:properties; - - (* For a bond, also set the properties on the slaves *) - let bond = Db.PIF.get_bond_master_of ~__context ~self in - List.iter (fun bond -> - List.iter (fun self -> - Db.PIF.set_properties ~__context ~self ~value:properties - ) (Db.Bond.get_slaves ~__context ~self:bond) - ) bond; - - (* Make it happen, also for VLANs that may be on top of the PIF *) - let vlans = Db.PIF.get_VLAN_slave_of ~__context ~self in - let vlan_pifs = List.map (fun self -> Db.VLAN.get_untagged_PIF ~__context ~self) vlans in - List.iter (fun pif -> - if Db.PIF.get_currently_attached ~__context ~self then - Nm.bring_pif_up ~__context pif - ) (self :: vlan_pifs) + let fail () = raise (Api_errors.Server_error + (Api_errors.invalid_value, ["properties"; Printf.sprintf "%s = %s" name value])) + in + if not (List.mem_assoc name property_names_and_values) then + fail () + else if not (List.mem value (List.assoc name property_names_and_values)) then + fail (); + + (* Only bond masters and unbonded physical PIFs can be configured *) + if not (pif_has_properties ~__context ~self) || Db.PIF.get_bond_slave_of ~__context ~self <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_change_pif_properties, [Ref.string_of self])); + + (* Remove the existing property with this name, then add the new value. 
*) + let properties = List.filter + (fun (property_name, _) -> property_name <> name) + (Db.PIF.get_properties ~__context ~self) + in + let properties = (name, value) :: properties in + Db.PIF.set_properties ~__context ~self ~value:properties; + + (* For a bond, also set the properties on the slaves *) + let bond = Db.PIF.get_bond_master_of ~__context ~self in + List.iter (fun bond -> + List.iter (fun self -> + Db.PIF.set_properties ~__context ~self ~value:properties + ) (Db.Bond.get_slaves ~__context ~self:bond) + ) bond; + + (* Make it happen, also for VLANs that may be on top of the PIF *) + let vlans = Db.PIF.get_VLAN_slave_of ~__context ~self in + let vlan_pifs = List.map (fun self -> Db.VLAN.get_untagged_PIF ~__context ~self) vlans in + List.iter (fun pif -> + if Db.PIF.get_currently_attached ~__context ~self then + Nm.bring_pif_up ~__context pif + ) (self :: vlan_pifs) let rec unplug ~__context ~self = - assert_pif_is_managed ~__context ~self; - assert_no_protection_enabled ~__context ~self; - assert_not_management_pif ~__context ~self; - let host = Db.PIF.get_host ~__context ~self in - if Db.Host.get_enabled ~__context ~self:host - then abort_if_network_attached_to_protected_vms ~__context ~self; - - let network = Db.PIF.get_network ~__context ~self in - Xapi_network_attach_helpers.assert_network_has_no_vifs_in_use_on_me ~__context ~host:(Helpers.get_localhost ~__context) ~network; - Xapi_network_attach_helpers.assert_pif_disallow_unplug_not_set ~__context self; - - let tunnel = Db.PIF.get_tunnel_transport_PIF_of ~__context ~self in - if tunnel <> [] - then begin - debug "PIF is tunnel transport PIF... 
also bringing down access PIF"; - let tunnel = List.hd tunnel in - let access_PIF = Db.Tunnel.get_access_PIF ~__context ~self:tunnel in - unplug ~__context ~self:access_PIF - end; - Nm.bring_pif_down ~__context self + assert_pif_is_managed ~__context ~self; + assert_no_protection_enabled ~__context ~self; + assert_not_management_pif ~__context ~self; + let host = Db.PIF.get_host ~__context ~self in + if Db.Host.get_enabled ~__context ~self:host + then abort_if_network_attached_to_protected_vms ~__context ~self; + + let network = Db.PIF.get_network ~__context ~self in + Xapi_network_attach_helpers.assert_network_has_no_vifs_in_use_on_me ~__context ~host:(Helpers.get_localhost ~__context) ~network; + Xapi_network_attach_helpers.assert_pif_disallow_unplug_not_set ~__context self; + + let tunnel = Db.PIF.get_tunnel_transport_PIF_of ~__context ~self in + if tunnel <> [] + then begin + debug "PIF is tunnel transport PIF... also bringing down access PIF"; + let tunnel = List.hd tunnel in + let access_PIF = Db.Tunnel.get_access_PIF ~__context ~self:tunnel in + unplug ~__context ~self:access_PIF + end; + Nm.bring_pif_down ~__context self let rec plug ~__context ~self = - assert_pif_is_managed ~__context ~self; - let tunnel = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in - if tunnel <> [] - then begin - let tunnel = List.hd tunnel in - let transport_PIF = - Db.Tunnel.get_transport_PIF ~__context ~self:tunnel in - if Db.PIF.get_ip_configuration_mode - ~__context ~self:transport_PIF = `None - then raise (Api_errors.Server_error - (Api_errors.transport_pif_not_configured, - [Ref.string_of transport_PIF])) - else begin - debug "PIF is tunnel access PIF... 
also bringing up transport PIF"; - plug ~__context ~self:transport_PIF - end - end; - if Db.PIF.get_bond_slave_of ~__context ~self <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of self])); - Nm.bring_pif_up ~__context ~management_interface:false self + assert_pif_is_managed ~__context ~self; + let tunnel = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in + if tunnel <> [] + then begin + let tunnel = List.hd tunnel in + let transport_PIF = + Db.Tunnel.get_transport_PIF ~__context ~self:tunnel in + if Db.PIF.get_ip_configuration_mode + ~__context ~self:transport_PIF = `None + then raise (Api_errors.Server_error + (Api_errors.transport_pif_not_configured, + [Ref.string_of transport_PIF])) + else begin + debug "PIF is tunnel access PIF... also bringing up transport PIF"; + plug ~__context ~self:transport_PIF + end + end; + if Db.PIF.get_bond_slave_of ~__context ~self <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_plug_bond_slave, [Ref.string_of self])); + Nm.bring_pif_up ~__context ~management_interface:false self let calculate_pifs_required_at_start_of_day ~__context = - let localhost = Helpers.get_localhost ~__context in - (* Select all PIFs on the host that are not bond slaves, and are physical, or bond master, or - * have IP configuration. The latter means that any VLAN or tunnel PIFs without IP address - * are excluded. 
*) - Db.PIF.get_records_where ~__context - ~expr:( - And ( - Eq (Field "managed", Literal "true"), - And ( - And ( - Eq (Field "host", Literal (Ref.string_of localhost)), - Eq (Field "bond_slave_of", Literal (Ref.string_of Ref.null)) - ), - Or (Or ( - Not (Eq (Field "bond_master_of", Literal "()")), - Eq (Field "physical", Literal "true")), - Not (Eq (Field "ip_configuration_mode", Literal "None")) - ) - ) - ) - ) + let localhost = Helpers.get_localhost ~__context in + (* Select all PIFs on the host that are not bond slaves, and are physical, or bond master, or + * have IP configuration. The latter means that any VLAN or tunnel PIFs without IP address + * are excluded. *) + Db.PIF.get_records_where ~__context + ~expr:( + And ( + Eq (Field "managed", Literal "true"), + And ( + And ( + Eq (Field "host", Literal (Ref.string_of localhost)), + Eq (Field "bond_slave_of", Literal (Ref.string_of Ref.null)) + ), + Or (Or ( + Not (Eq (Field "bond_master_of", Literal "()")), + Eq (Field "physical", Literal "true")), + Not (Eq (Field "ip_configuration_mode", Literal "None")) + ) + ) + ) + ) let start_of_day_best_effort_bring_up () = - begin - Server_helpers.exec_with_new_task - "Bringing up managed physical PIFs" - (fun __context -> - let dbg = Context.string_of_task __context in - debug - "Configured network backend: %s" - (Network_interface.string_of_kind (Net.Bridge.get_kind dbg ())); - (* Clear the state of the network daemon, before refreshing it by plugging - * the most important PIFs (see above). 
*) - Net.clear_state (); - List.iter - (fun (pif, pifr) -> - Helpers.log_exn_continue - (Printf.sprintf - "error trying to bring up pif: %s" - pifr.API.pIF_uuid) - (fun pif -> - debug - "Best effort attempt to bring up PIF: %s" - pifr.API.pIF_uuid; - plug ~__context ~self:pif) - (pif)) - (calculate_pifs_required_at_start_of_day ~__context)) - end + begin + Server_helpers.exec_with_new_task + "Bringing up managed physical PIFs" + (fun __context -> + let dbg = Context.string_of_task __context in + debug + "Configured network backend: %s" + (Network_interface.string_of_kind (Net.Bridge.get_kind dbg ())); + (* Clear the state of the network daemon, before refreshing it by plugging + * the most important PIFs (see above). *) + Net.clear_state (); + List.iter + (fun (pif, pifr) -> + Helpers.log_exn_continue + (Printf.sprintf + "error trying to bring up pif: %s" + pifr.API.pIF_uuid) + (fun pif -> + debug + "Best effort attempt to bring up PIF: %s" + pifr.API.pIF_uuid; + plug ~__context ~self:pif) + (pif)) + (calculate_pifs_required_at_start_of_day ~__context)) + end diff --git a/ocaml/xapi/xapi_pif.mli b/ocaml/xapi/xapi_pif.mli index e0ff1ea46f3..1669898ed2b 100644 --- a/ocaml/xapi/xapi_pif.mli +++ b/ocaml/xapi/xapi_pif.mli @@ -13,32 +13,32 @@ *) (** Module that defines API functions for PIF objects * @group Networking - *) +*) (** -A {i PIF} object in the datamodel represents a network interface and contains relevant information about it. -{ul -{- There are three types of PIFs. A PIF can represent... - {ol - {- A network-interface card. For each physical interface there should be a PIF. Such a PIF has [PIF.physical = true].} - {- A bond-master: a higher-level PIF representing the combination of multiple PIFs. 
Such a PIF has [PIF.bond_master_of] set to the Bond object.} - {- A VLAN interface: a higher-level PIF (called the {i untagged} PIF, or {i VLAN master} that tags its outgoing traffic before sending it out to the underlying physical interface (the {i tagged} PIF, or {i VLAN slave}).} - }} -{- PIF objects are typically created automatically on first boot. There is also a [PIF.scan] API call to automatically discover any new network interfaces and create the necessary objects in the database.} -{- A PIF is always accompanied by a Network object (see below) that associates the interface with a bridge.} -{- A PIF can be {i plugged} or {i unplugged}, also known as {i attached} or {i unattached} respectively. - {ul - {- Plugging a PIF is also referred to as {i bringing up} the PIF, while unplugging is {i bringing down} a PIF.} - {- After plugging a PIF, any underlying network devices (bridges, bonds, VLANs, physical interfaces) are configured, such that the interface can be used. Unplugging will clean up any underlying network devices {i that are not used anymore}.} - {- No PIFs are harmed during unplugging, nor does unplugging have anything to do with pulling out cables.} - {- A PIF that is plugged has [PIF.currently_attached] set to [true], a PIF that is unplugged has this field set to [false].} - }} -{- A PIF can be specialised to be... 
- {ul - {- the {i management interface}, which is the interface used by xapi for communication between hosts in a pool and XenAPI clients; this PIF has [PIF.management = true]; the inventory file stores the name of the bridge the the management interface is on (this is where the management interface is ultimately defined);} - {- dedicated to a specific function, especially for storage traffic (in this case, the [disallow-unplug] field on the PIF is set to [true], and an other-config flag is set); this does not seem to be enforced, but only used by XC.} - }} -} + A {i PIF} object in the datamodel represents a network interface and contains relevant information about it. + {ul + {- There are three types of PIFs. A PIF can represent... + {ol + {- A network-interface card. For each physical interface there should be a PIF. Such a PIF has [PIF.physical = true].} + {- A bond-master: a higher-level PIF representing the combination of multiple PIFs. Such a PIF has [PIF.bond_master_of] set to the Bond object.} + {- A VLAN interface: a higher-level PIF (called the {i untagged} PIF, or {i VLAN master} that tags its outgoing traffic before sending it out to the underlying physical interface (the {i tagged} PIF, or {i VLAN slave}).} + }} + {- PIF objects are typically created automatically on first boot. There is also a [PIF.scan] API call to automatically discover any new network interfaces and create the necessary objects in the database.} + {- A PIF is always accompanied by a Network object (see below) that associates the interface with a bridge.} + {- A PIF can be {i plugged} or {i unplugged}, also known as {i attached} or {i unattached} respectively. + {ul + {- Plugging a PIF is also referred to as {i bringing up} the PIF, while unplugging is {i bringing down} a PIF.} + {- After plugging a PIF, any underlying network devices (bridges, bonds, VLANs, physical interfaces) are configured, such that the interface can be used. 
Unplugging will clean up any underlying network devices {i that are not used anymore}.} + {- No PIFs are harmed during unplugging, nor does unplugging have anything to do with pulling out cables.} + {- A PIF that is plugged has [PIF.currently_attached] set to [true], a PIF that is unplugged has this field set to [false].} + }} + {- A PIF can be specialised to be... + {ul + {- the {i management interface}, which is the interface used by xapi for communication between hosts in a pool and XenAPI clients; this PIF has [PIF.management = true]; the inventory file stores the name of the bridge the the management interface is on (this is where the management interface is ultimately defined);} + {- dedicated to a specific function, especially for storage traffic (in this case, the [disallow-unplug] field on the PIF is set to [true], and an other-config flag is set); this does not seem to be enforced, but only used by XC.} + }} + } *) (** {2 API functions} *) @@ -76,7 +76,7 @@ val db_introduce : managed:bool -> properties:(string * string) list -> [ `PIF ] Ref.t - + (** Perform a database delete of the PIF record on the pool master. *) val db_forget : __context:Context.t -> self:[ `PIF ] Ref.t -> unit @@ -114,14 +114,14 @@ val reconfigure_ip : self:API.ref_PIF -> mode:[`DHCP | `None | `Static] -> iP:string -> netmask:string -> gateway:string -> dNS:string -> unit - + (** Change the IPv6 configuration of a PIF *) val reconfigure_ipv6 : __context:Context.t -> self:API.ref_PIF -> mode:[ `DHCP | `None | `Static | `Autoconf ] -> iPv6:string -> gateway:string -> dNS:string -> unit - + (** Change the primary address type between IPv4 and IPv6 *) val set_primary_address_type : __context:Context.t -> @@ -155,7 +155,7 @@ val plug : __context:Context.t -> self:[ `PIF ] Ref.t -> unit (** Constructs a bridge name from a device (network interface) name by replacing * [eth] by [xenbr], or prepending [br] if the device name does not start with [eth]. 
- *) +*) val bridge_naming_convention : string -> string (** Return the list of bridges in the CURRENT_INTERFACES field in the inventory file. *) @@ -163,14 +163,14 @@ val read_bridges_from_inventory : unit -> string list (** If a network for the given bridge already exists, then return a reference to this network, * otherwise create a new network and return its reference. - *) +*) val find_or_create_network : string -> string -> __context:Context.t -> [ `network ] Ref.t (** Convenient lookup tables for scanning etc *) type tables = { - device_to_mac_table : (string * string) list; - pif_to_device_table : (API.ref_PIF * string) list; + device_to_mac_table : (string * string) list; + pif_to_device_table : (API.ref_PIF * string) list; } (** Construct and return lookup {!tables} with information about the network interfaces *) @@ -201,7 +201,7 @@ val pool_introduce : vLAN_master_of:[ `VLAN ] Ref.t -> management:bool -> other_config:(string * string) list -> - disallow_unplug:bool -> + disallow_unplug:bool -> ipv6_configuration_mode:[< `DHCP | `None | `Static | `Autoconf ] -> iPv6:string list -> ipv6_gateway:string -> @@ -231,11 +231,11 @@ val introduce_internal : ?disallow_unplug:bool -> unit -> [ `PIF ] Ref.t - + (** Brings down the network interface and removes the PIF object. *) val forget_internal : t:tables -> __context:Context.t -> self:API.ref_PIF -> unit - + (** Look over all this host's PIFs and reset the management flag. * The management interface is ultimately defined by the inventory file, * which holds the bridge of the management interface in the MANAGEMENT_INTERFACE field. *) @@ -246,12 +246,12 @@ val update_management_flags : start of day code. These are the PIFs on the localhost that are not bond slaves. For PIFs that have [disallow_unplug] set to true, and the management interface, will actually be brought up ahead of time by the init scripts, so we don't have to plug them in. 
- These are written to the xensource-inventory file when HA is enabled so that HA can bring up + These are written to the xensource-inventory file when HA is enabled so that HA can bring up interfaces required by storage NICs etc. (these interface are not filtered out at the moment). - *) +*) val calculate_pifs_required_at_start_of_day : __context:Context.t -> ('b Ref.t * API.pIF_t) list - + (** Attempt to bring up (plug) the required PIFs when the host starts up. * Uses {!calculate_pifs_required_at_start_of_day}. *) val start_of_day_best_effort_bring_up : unit -> unit @@ -268,15 +268,15 @@ val assert_no_vlans : __context:Context.t -> self:[ `PIF ] Ref.t -> unit (** Ensure the PIF is not the management interface. *) val assert_not_management_pif : __context:Context.t -> self:[ `PIF ] Ref.t -> unit - + (** Ensure the PIF is not the management interface if the host is a pool slave. *) val assert_not_slave_management_pif : __context:Context.t -> self:[ `PIF ] Ref.t -> unit - + (** Ensure neither HA nor the general redo-log are enabled. *) val assert_no_protection_enabled : __context:Context.t -> self:[ `PIF ] Ref.t -> unit - + (** Ensure the Network attached to the given PIF has not VIFs on it * belonging to VMs that are protected by HA. *) val abort_if_network_attached_to_protected_vms : diff --git a/ocaml/xapi/xapi_plugins.ml b/ocaml/xapi/xapi_plugins.ml index 53bd204b386..97dcd087746 100644 --- a/ocaml/xapi/xapi_plugins.ml +++ b/ocaml/xapi/xapi_plugins.ml @@ -19,35 +19,35 @@ open D (* Only scripts in the Xapi_globs.xapi_plugins_root can be called *) let find_plugin name = - let all = try Array.to_list (Sys.readdir !Xapi_globs.xapi_plugins_root) with _ -> [] in - (* Sys.readdir output doesn't include "." or ".." 
*) - if List.mem name all - then Filename.concat !Xapi_globs.xapi_plugins_root name - else raise (Api_errors.Server_error(Api_errors.xenapi_missing_plugin, [ name ])) + let all = try Array.to_list (Sys.readdir !Xapi_globs.xapi_plugins_root) with _ -> [] in + (* Sys.readdir output doesn't include "." or ".." *) + if List.mem name all + then Filename.concat !Xapi_globs.xapi_plugins_root name + else raise (Api_errors.Server_error(Api_errors.xenapi_missing_plugin, [ name ])) (* Execute the plugin with XMLRPC-over-cmdline/stdout convention, like the SM plugins. The args provided are a Map(String, String) and these will be passed as an XMLRPC struct *) let call_plugin session_id plugin_name fn_name args = - let plugin_name = find_plugin plugin_name in + let plugin_name = find_plugin plugin_name in - (* Marshal the args as XMLRPC *) - let args = List.map (fun (k, v) -> k, XMLRPC.To.string v) args in - let call = XMLRPC.To.methodCall fn_name [ XMLRPC.To.string (Ref.string_of session_id); XMLRPC.To.structure args ] in - let output, _ = - try - Forkhelpers.execute_command_get_output plugin_name [ Xml.to_string call ] - with - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> - raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, ["task stopped"; output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> - raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, [Printf.sprintf "signal: %s" (Stdext.Unixext.string_of_signal i); output; log ])) - | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> - raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, ["non-zero exit"; output; log ])) in - try - match XMLRPC.From.methodResponse (Xml.parse_string output) with - | XMLRPC.Fault(code, reason) -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "fault"; Int32.to_string code; reason ])) - | XMLRPC.Success [ result ] -> XMLRPC.From.string result - | XMLRPC.Failure(code, 
params) -> raise (Api_errors.Server_error(code, params)) - | _ -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; output ])) - with Xml.Error e -> - raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to parse plugin output"; output; Xml.error e ])) + (* Marshal the args as XMLRPC *) + let args = List.map (fun (k, v) -> k, XMLRPC.To.string v) args in + let call = XMLRPC.To.methodCall fn_name [ XMLRPC.To.string (Ref.string_of session_id); XMLRPC.To.structure args ] in + let output, _ = + try + Forkhelpers.execute_command_get_output plugin_name [ Xml.to_string call ] + with + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSTOPPED i) -> + raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, ["task stopped"; output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WSIGNALED i) -> + raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, [Printf.sprintf "signal: %s" (Stdext.Unixext.string_of_signal i); output; log ])) + | Forkhelpers.Spawn_internal_error(log, output, Unix.WEXITED i) -> + raise (Api_errors.Server_error (Api_errors.xenapi_plugin_failure, ["non-zero exit"; output; log ])) in + try + match XMLRPC.From.methodResponse (Xml.parse_string output) with + | XMLRPC.Fault(code, reason) -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "fault"; Int32.to_string code; reason ])) + | XMLRPC.Success [ result ] -> XMLRPC.From.string result + | XMLRPC.Failure(code, params) -> raise (Api_errors.Server_error(code, params)) + | _ -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "unexpected XMLRPC result"; output ])) + with Xml.Error e -> + raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to parse plugin output"; output; Xml.error e ])) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index bd56425db92..4070e611070 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ 
-27,699 +27,699 @@ open D open Workload_balancing (* Surpress exceptions *) -let no_exn f x = - try ignore (f x) +let no_exn f x = + try ignore (f x) with exn -> debug "Ignoring exception: %s" (ExnHelper.string_of_exn exn) let rpc host_address xml = - try - Helpers.make_remote_rpc host_address xml - with Xmlrpc_client.Connection_reset -> - raise (Api_errors.Server_error(Api_errors.pool_joining_host_connection_failed, [])) + try + Helpers.make_remote_rpc host_address xml + with Xmlrpc_client.Connection_reset -> + raise (Api_errors.Server_error(Api_errors.pool_joining_host_connection_failed, [])) let get_master ~rpc ~session_id = - let pool = List.hd (Client.Pool.get_all rpc session_id) in - Client.Pool.get_master rpc session_id pool - + let pool = List.hd (Client.Pool.get_all rpc session_id) in + Client.Pool.get_master rpc session_id pool + (* Pre-join asserts *) let pre_join_checks ~__context ~rpc ~session_id ~force = - (* I cannot join a Pool unless my management interface exists in the db, otherwise - Pool.eject will fail to rewrite network interface files. *) - let assert_management_interface_exists () = - try - let (_: API.ref_PIF) = Xapi_host.get_management_interface ~__context ~host:(Helpers.get_localhost ~__context) in - () - with _ -> - error "Pool.join/Pool.eject requires a properly configured management interface. 
Wait for xapi/firstboot initialisation to complete and then retry."; - raise (Api_errors.Server_error(Api_errors.host_still_booting, [])) in - - (* I cannot join a Pool if I have HA already enabled on me *) - let ha_is_not_enable_on_me () = - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then begin - error "Cannot join pool as HA is enabled"; - raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])) - end in - - (* I Cannot join a Pool if it has HA enabled on it *) - let ha_is_not_enable_on_the_distant_pool () = - let pool = List.hd (Client.Pool.get_all rpc session_id) in - if Client.Pool.get_ha_enabled rpc session_id pool then begin - error "Cannot join pool which already has HA enabled"; - raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); - end in - - (* CA-26975: Pool edition MUST match *) - let assert_restrictions_match () = - let my_edition = Db.Host.get_edition ~__context ~self:(Helpers.get_localhost ~__context) in - let host_records = List.map snd (Client.Host.get_all_records ~rpc ~session_id) in - let pool_editions = List.map (fun host_r -> host_r.API.host_edition) host_records in - (* If all hosts have the same edition string, we need do no more. *) - if List.fold_left (fun b edn -> edn = my_edition && b) true pool_editions then () - else try - (* We have different edition strings so must consult v6d for their significance. - * This will fail with v6d_failure if v6d is not running. 
*) - let editions = V6client.get_editions "assert_restrictions_match" in - let edition_to_int e = - try - match List.find (fun (name, _, _, _) -> name = e) editions with _, _, _, a -> a - with Not_found -> - (* Happens if pool has edition "free/libre" (no v6d) *) - error "Pool.join failed: pool has a host with edition unknown to v6d: %s" e; - raise (Api_errors.Server_error(Api_errors.license_host_pool_mismatch, - ["Edition \"" ^ e ^ "\" from pool is not known to v6d."])) - in - let min_edition l = - List.fold_left (fun m e -> if edition_to_int e < edition_to_int m then e else m) (List.hd l) l - in - (* get pool edition: the "minimum" edition among all hosts *) - let pool_edition = min_edition pool_editions in - (* compare my edition to pool edition *) - if (edition_to_int pool_edition) <> (edition_to_int my_edition) then begin - error "Pool.join failed due to edition mismatch"; - error "Remote has %s" pool_edition; - error "Local has %s" my_edition; - raise (Api_errors.Server_error(Api_errors.license_host_pool_mismatch, - ["host edition = \""^my_edition^"\""; "pool edition = \""^pool_edition^"\""])) - end - with Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> - error "Pool.join failed because edition strings differ and local has no license daemon running."; - let pool_edn_list_str = "[" ^ (String.concat "; " pool_editions) ^ "]" in - error "Remote editions: %s" pool_edn_list_str; - error "Local edition: %s" my_edition; - raise (Api_errors.Server_error (code, ["The pool uses v6d. 
Pool edition list = " ^ pool_edn_list_str])) - in - - (* CA-73264 Applied patches must match *) - let assert_applied_patches_match () = - let get_patches patches get_pool_patch get_uuid = - let patch_refs = List.map (fun x -> get_pool_patch ~self:x) patches in - let patch_uuids = List.map (fun x -> get_uuid ~self:x) patch_refs in - patch_uuids in - let pool_patches = get_patches - (Client.Host.get_patches ~rpc ~session_id ~self:(get_master ~rpc ~session_id)) - (Client.Host_patch.get_pool_patch ~rpc ~session_id) - (Client.Pool_patch.get_uuid ~rpc ~session_id) in - let host_patches = get_patches - (Db.Host.get_patches ~__context ~self:(Helpers.get_localhost ~__context)) - (Db.Host_patch.get_pool_patch ~__context) (Db.Pool_patch.get_uuid ~__context) in - let string_of_patches ps = (String.concat " " (List.map (fun patch -> patch) ps)) in - let diff = (List.set_difference host_patches pool_patches) @ - (List.set_difference pool_patches host_patches)in - if (List.length diff > 0) then begin - error "Pool.join failed because of patches mismatch"; - error "Remote has %s" (string_of_patches pool_patches); - error "Local has %s" (string_of_patches host_patches); - raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous, - [(Printf.sprintf "Patches applied differ: Remote has %s -- Local has %s" - (string_of_patches pool_patches) (string_of_patches host_patches))])) - end - in - - (* CP-700: Restrict pool.join if AD configuration of slave-to-be does not match *) - (* that of master of pool-to-join *) - let assert_external_auth_matches () = - let master = get_master rpc session_id in - let slavetobe = Helpers.get_localhost ~__context in - let slavetobe_auth_type = Db.Host.get_external_auth_type ~__context ~self:slavetobe in - let slavetobe_auth_service_name = Db.Host.get_external_auth_service_name ~__context ~self:slavetobe in - let master_auth_type = Client.Host.get_external_auth_type ~rpc ~session_id ~self:master in - let master_auth_service_name = 
Client.Host.get_external_auth_service_name ~rpc ~session_id ~self:master in - debug "Verifying if external auth configuration of master %s (auth_type=%s service_name=%s) matches that of slave-to-be %s (auth-type=%s service_name=%s)" - (Client.Host.get_name_label ~rpc ~session_id ~self:master) master_auth_type master_auth_service_name - (Db.Host.get_name_label ~__context ~self:slavetobe) slavetobe_auth_type slavetobe_auth_service_name; - if (slavetobe_auth_type <> master_auth_type) - || (slavetobe_auth_service_name <> master_auth_service_name) then begin - error "Cannot join pool whose external authentication configuration is different"; - raise (Api_errors.Server_error(Api_errors.pool_joining_external_auth_mismatch, [])) - end in - - let assert_i_know_of_no_other_hosts () = - let hosts = Db.Host.get_all ~__context in - if List.length hosts > 1 then begin - error "The current host is already the master of other hosts: it cannot join a new pool"; - raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_be_master_of_other_hosts, [])) - end in - - let assert_no_running_vms_on_me () = - let my_vms = Db.VM.get_all_records ~__context in - let my_running_vms = - List.filter - (fun (_,vmrec) -> (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:vmrec.API.vM_uuid))) - && vmrec.API.vM_power_state = `Running) my_vms in - if List.length my_running_vms > 0 then begin - error "The current host has running or suspended VMs: it cannot join a new pool"; - raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_have_running_VMs, [])) - end in - - let assert_no_vms_with_current_ops () = - let my_vms = Db.VM.get_all_records ~__context in - let vms_with_current_ops = - List.filter (fun (_,vmr) -> (List.length vmr.API.vM_current_operations)>0 ) my_vms in - if List.length vms_with_current_ops > 0 then begin - error "The current host has VMs with current operations: it cannot join a new pool"; - raise 
(Api_errors.Server_error(Api_errors.pool_joining_host_cannot_have_vms_with_current_operations, [])) - end in - - let assert_no_shared_srs_on_me () = - let my_srs = Db.SR.get_all_records ~__context in - let my_shared_srs = List.filter (fun (sr,srec)-> srec.API.sR_shared && not srec.API.sR_is_tools_sr) my_srs in - if not (my_shared_srs = []) then begin - error "The current host has shared SRs: it cannot join a new pool"; - raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_contain_shared_SRs, [])) - end in - - let assert_only_physical_pifs () = - let non_physical_pifs = Db.PIF.get_refs_where ~__context ~expr:( - Eq (Field "physical", Literal "false") - ) in - if non_physical_pifs <> [] then begin - error "The current host has network bonds, VLANs or tunnels: it cannot join a new pool"; - raise (Api_errors.Server_error(Api_errors.pool_joining_host_must_only_have_physical_pifs, [])) - end in - - (* Used to tell XCP and XenServer apart - use PRODUCT_BRAND if present, else use PLATFORM_NAME. 
*) - let get_compatibility_name software_version = - if List.mem_assoc Xapi_globs._product_brand software_version then - Some (List.assoc Xapi_globs._product_brand software_version) - else if List.mem_assoc Xapi_globs._platform_name software_version then - Some (List.assoc Xapi_globs._platform_name software_version) - else - None - in - - let assert_hosts_compatible () = - let me = Db.Host.get_record ~__context ~self:(Helpers.get_localhost ~__context) in - let master_ref = get_master rpc session_id in - let master = Client.Host.get_record ~rpc ~session_id ~self:master_ref in - let my_software_version = me.API.host_software_version in - let master_software_version = master.API.host_software_version in - let compatibility_info x = - let open Xapi_globs in - let platform_version = if List.mem_assoc _platform_version x - then Some (List.assoc _platform_version x) - else None in - let compatibility_name = get_compatibility_name x in - (platform_version, compatibility_name) - in - let master_compatibility_info = compatibility_info master_software_version in - let my_compatibility_info = compatibility_info my_software_version in - if master_compatibility_info <> my_compatibility_info then begin - debug "master PLATFORM_VERSION = %s, master compatibility name = %s; my PLATFORM_VERSION = %s, my compatibility name = %s; " - (Opt.default "Unknown" (fst master_compatibility_info)) - (Opt.default "Unknown" (snd master_compatibility_info)) - (Opt.default "Unknown" (fst my_compatibility_info)) - (Opt.default "Unknown" (snd my_compatibility_info)); - raise (Api_errors.Server_error(Api_errors.pool_hosts_not_compatible, [])) - end in - - let assert_hosts_homogeneous () = - let me = Helpers.get_localhost ~__context in - let master_ref = get_master rpc session_id in - let master = Client.Host.get_record ~rpc ~session_id ~self:master_ref in - - (* Check software version *) - - let get_software_version_fields fields = - let open Xapi_globs in - begin try List.assoc _platform_version 
fields with _ -> "" end, - begin match get_compatibility_name fields with Some x -> x | None -> "" end, - begin try List.assoc _build_number fields with _ -> "" end, - begin try List.assoc _git_id fields with _ -> "" end, - begin try - if List.mem_assoc linux_pack_vsn_key fields then "installed" - else "not present" - with _ -> "not present" end - in - let print_software_version (version,name,number,id,linux_pack) = - debug "version:%s, name:%s, build:%s, id:%s, linux_pack:%s" version name number id linux_pack in - - let master_software_version = master.API.host_software_version in - let my_software_version = Db.Host.get_software_version ~__context ~self:me in - - let my_software_compare = get_software_version_fields my_software_version in - let master_software_compare = get_software_version_fields master_software_version in - - debug "Pool pre-join Software homogeneity check:"; - debug "Slave software:"; - print_software_version my_software_compare; - debug "Master software:"; - print_software_version master_software_compare; - - if my_software_compare <> master_software_compare then - raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous,["Software version differs"])); - - (* Check CPUs *) - - let my_cpu_vendor = Db.Host.get_cpu_info ~__context ~self:me |> List.assoc "vendor" in - let pool_cpu_vendor = - let pool = List.hd (Client.Pool.get_all rpc session_id) in - Client.Pool.get_cpu_info rpc session_id pool |> List.assoc "vendor" - in - debug "Pool pre-join CPU homogeneity check:"; - debug "Slave CPUs: %s" my_cpu_vendor; - debug "Pool CPUs: %s" pool_cpu_vendor; - - if my_cpu_vendor <> pool_cpu_vendor then - raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous,["CPUs differ"])) in - - let assert_not_joining_myself () = - let master = get_master rpc session_id in - let master_uuid = Client.Host.get_uuid rpc session_id master in - let my_uuid = Db.Host.get_uuid ~__context ~self:(Helpers.get_localhost ~__context) in - if master_uuid = 
my_uuid then - let error_msg = - if 1 < List.length (Db.Host.get_all ~__context) - then "Host is already part of a pool" - else "Host cannot become slave of itself" in - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, [error_msg])) in - - let assert_homogeneous_vswitch_configuration () = - (* The network backend must be the same as the remote master's backend *) - let my_pool = Helpers.get_pool __context in - let dbg = Context.string_of_task __context in - let my_backend' = Net.Bridge.get_kind dbg () in - let my_backend = Network_interface.string_of_kind my_backend' in - let pool = List.hd (Client.Pool.get_all rpc session_id) in - let remote_master = Client.Pool.get_master ~rpc ~session_id ~self:pool in - let remote_masters_backend = - let v = Client.Host.get_software_version ~rpc ~session_id ~self:remote_master in - if not (List.mem_assoc "network_backend" v) then - Network_interface.string_of_kind Network_interface.Bridge - else - List.assoc "network_backend" v - in - if my_backend <> remote_masters_backend then - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Network backends differ"])); - - match my_backend' with - | Network_interface.Openvswitch -> - let my_controller = Db.Pool.get_vswitch_controller ~__context ~self:my_pool in - let controller = Client.Pool.get_vswitch_controller ~rpc ~session_id ~self:pool in - if my_controller <> controller && my_controller <> "" then - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["vswitch controller address differs"])) - | _ -> () - in - - let assert_homogeneous_primary_address_type () = - let mgmt_iface = Xapi_host.get_management_interface ~__context ~host:(Helpers.get_localhost ~__context) in - let mgmt_addr_type = Db.PIF.get_primary_address_type ~__context ~self:mgmt_iface in - let master = get_master rpc session_id in - let master_mgmt_iface = Client.Host.get_management_interface rpc session_id master in - let master_addr_type = 
Client.PIF.get_primary_address_type rpc session_id master_mgmt_iface in - if (mgmt_addr_type <> master_addr_type) then - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Primary address type differs"])); - in - - (* call pre-join asserts *) - assert_management_interface_exists (); - ha_is_not_enable_on_me (); - ha_is_not_enable_on_the_distant_pool (); - assert_not_joining_myself(); - assert_i_know_of_no_other_hosts(); - assert_no_running_vms_on_me (); - assert_no_vms_with_current_ops (); - assert_hosts_compatible (); - if (not force) then assert_hosts_homogeneous (); - assert_no_shared_srs_on_me (); - assert_only_physical_pifs (); - assert_external_auth_matches (); - assert_restrictions_match (); - assert_homogeneous_vswitch_configuration (); - assert_applied_patches_match (); - assert_homogeneous_primary_address_type () + (* I cannot join a Pool unless my management interface exists in the db, otherwise + Pool.eject will fail to rewrite network interface files. *) + let assert_management_interface_exists () = + try + let (_: API.ref_PIF) = Xapi_host.get_management_interface ~__context ~host:(Helpers.get_localhost ~__context) in + () + with _ -> + error "Pool.join/Pool.eject requires a properly configured management interface. 
Wait for xapi/firstboot initialisation to complete and then retry."; + raise (Api_errors.Server_error(Api_errors.host_still_booting, [])) in + + (* I cannot join a Pool if I have HA already enabled on me *) + let ha_is_not_enable_on_me () = + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then begin + error "Cannot join pool as HA is enabled"; + raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])) + end in + + (* I Cannot join a Pool if it has HA enabled on it *) + let ha_is_not_enable_on_the_distant_pool () = + let pool = List.hd (Client.Pool.get_all rpc session_id) in + if Client.Pool.get_ha_enabled rpc session_id pool then begin + error "Cannot join pool which already has HA enabled"; + raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + end in + + (* CA-26975: Pool edition MUST match *) + let assert_restrictions_match () = + let my_edition = Db.Host.get_edition ~__context ~self:(Helpers.get_localhost ~__context) in + let host_records = List.map snd (Client.Host.get_all_records ~rpc ~session_id) in + let pool_editions = List.map (fun host_r -> host_r.API.host_edition) host_records in + (* If all hosts have the same edition string, we need do no more. *) + if List.fold_left (fun b edn -> edn = my_edition && b) true pool_editions then () + else try + (* We have different edition strings so must consult v6d for their significance. + * This will fail with v6d_failure if v6d is not running. 
*) + let editions = V6client.get_editions "assert_restrictions_match" in + let edition_to_int e = + try + match List.find (fun (name, _, _, _) -> name = e) editions with _, _, _, a -> a + with Not_found -> + (* Happens if pool has edition "free/libre" (no v6d) *) + error "Pool.join failed: pool has a host with edition unknown to v6d: %s" e; + raise (Api_errors.Server_error(Api_errors.license_host_pool_mismatch, + ["Edition \"" ^ e ^ "\" from pool is not known to v6d."])) + in + let min_edition l = + List.fold_left (fun m e -> if edition_to_int e < edition_to_int m then e else m) (List.hd l) l + in + (* get pool edition: the "minimum" edition among all hosts *) + let pool_edition = min_edition pool_editions in + (* compare my edition to pool edition *) + if (edition_to_int pool_edition) <> (edition_to_int my_edition) then begin + error "Pool.join failed due to edition mismatch"; + error "Remote has %s" pool_edition; + error "Local has %s" my_edition; + raise (Api_errors.Server_error(Api_errors.license_host_pool_mismatch, + ["host edition = \""^my_edition^"\""; "pool edition = \""^pool_edition^"\""])) + end + with Api_errors.Server_error (code, []) when code = Api_errors.v6d_failure -> + error "Pool.join failed because edition strings differ and local has no license daemon running."; + let pool_edn_list_str = "[" ^ (String.concat "; " pool_editions) ^ "]" in + error "Remote editions: %s" pool_edn_list_str; + error "Local edition: %s" my_edition; + raise (Api_errors.Server_error (code, ["The pool uses v6d. 
Pool edition list = " ^ pool_edn_list_str])) + in + + (* CA-73264 Applied patches must match *) + let assert_applied_patches_match () = + let get_patches patches get_pool_patch get_uuid = + let patch_refs = List.map (fun x -> get_pool_patch ~self:x) patches in + let patch_uuids = List.map (fun x -> get_uuid ~self:x) patch_refs in + patch_uuids in + let pool_patches = get_patches + (Client.Host.get_patches ~rpc ~session_id ~self:(get_master ~rpc ~session_id)) + (Client.Host_patch.get_pool_patch ~rpc ~session_id) + (Client.Pool_patch.get_uuid ~rpc ~session_id) in + let host_patches = get_patches + (Db.Host.get_patches ~__context ~self:(Helpers.get_localhost ~__context)) + (Db.Host_patch.get_pool_patch ~__context) (Db.Pool_patch.get_uuid ~__context) in + let string_of_patches ps = (String.concat " " (List.map (fun patch -> patch) ps)) in + let diff = (List.set_difference host_patches pool_patches) @ + (List.set_difference pool_patches host_patches)in + if (List.length diff > 0) then begin + error "Pool.join failed because of patches mismatch"; + error "Remote has %s" (string_of_patches pool_patches); + error "Local has %s" (string_of_patches host_patches); + raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous, + [(Printf.sprintf "Patches applied differ: Remote has %s -- Local has %s" + (string_of_patches pool_patches) (string_of_patches host_patches))])) + end + in + + (* CP-700: Restrict pool.join if AD configuration of slave-to-be does not match *) + (* that of master of pool-to-join *) + let assert_external_auth_matches () = + let master = get_master rpc session_id in + let slavetobe = Helpers.get_localhost ~__context in + let slavetobe_auth_type = Db.Host.get_external_auth_type ~__context ~self:slavetobe in + let slavetobe_auth_service_name = Db.Host.get_external_auth_service_name ~__context ~self:slavetobe in + let master_auth_type = Client.Host.get_external_auth_type ~rpc ~session_id ~self:master in + let master_auth_service_name = 
Client.Host.get_external_auth_service_name ~rpc ~session_id ~self:master in + debug "Verifying if external auth configuration of master %s (auth_type=%s service_name=%s) matches that of slave-to-be %s (auth-type=%s service_name=%s)" + (Client.Host.get_name_label ~rpc ~session_id ~self:master) master_auth_type master_auth_service_name + (Db.Host.get_name_label ~__context ~self:slavetobe) slavetobe_auth_type slavetobe_auth_service_name; + if (slavetobe_auth_type <> master_auth_type) + || (slavetobe_auth_service_name <> master_auth_service_name) then begin + error "Cannot join pool whose external authentication configuration is different"; + raise (Api_errors.Server_error(Api_errors.pool_joining_external_auth_mismatch, [])) + end in + + let assert_i_know_of_no_other_hosts () = + let hosts = Db.Host.get_all ~__context in + if List.length hosts > 1 then begin + error "The current host is already the master of other hosts: it cannot join a new pool"; + raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_be_master_of_other_hosts, [])) + end in + + let assert_no_running_vms_on_me () = + let my_vms = Db.VM.get_all_records ~__context in + let my_running_vms = + List.filter + (fun (_,vmrec) -> (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:vmrec.API.vM_uuid))) + && vmrec.API.vM_power_state = `Running) my_vms in + if List.length my_running_vms > 0 then begin + error "The current host has running or suspended VMs: it cannot join a new pool"; + raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_have_running_VMs, [])) + end in + + let assert_no_vms_with_current_ops () = + let my_vms = Db.VM.get_all_records ~__context in + let vms_with_current_ops = + List.filter (fun (_,vmr) -> (List.length vmr.API.vM_current_operations)>0 ) my_vms in + if List.length vms_with_current_ops > 0 then begin + error "The current host has VMs with current operations: it cannot join a new pool"; + raise 
(Api_errors.Server_error(Api_errors.pool_joining_host_cannot_have_vms_with_current_operations, [])) + end in + + let assert_no_shared_srs_on_me () = + let my_srs = Db.SR.get_all_records ~__context in + let my_shared_srs = List.filter (fun (sr,srec)-> srec.API.sR_shared && not srec.API.sR_is_tools_sr) my_srs in + if not (my_shared_srs = []) then begin + error "The current host has shared SRs: it cannot join a new pool"; + raise (Api_errors.Server_error(Api_errors.pool_joining_host_cannot_contain_shared_SRs, [])) + end in + + let assert_only_physical_pifs () = + let non_physical_pifs = Db.PIF.get_refs_where ~__context ~expr:( + Eq (Field "physical", Literal "false") + ) in + if non_physical_pifs <> [] then begin + error "The current host has network bonds, VLANs or tunnels: it cannot join a new pool"; + raise (Api_errors.Server_error(Api_errors.pool_joining_host_must_only_have_physical_pifs, [])) + end in + + (* Used to tell XCP and XenServer apart - use PRODUCT_BRAND if present, else use PLATFORM_NAME. 
*) + let get_compatibility_name software_version = + if List.mem_assoc Xapi_globs._product_brand software_version then + Some (List.assoc Xapi_globs._product_brand software_version) + else if List.mem_assoc Xapi_globs._platform_name software_version then + Some (List.assoc Xapi_globs._platform_name software_version) + else + None + in + + let assert_hosts_compatible () = + let me = Db.Host.get_record ~__context ~self:(Helpers.get_localhost ~__context) in + let master_ref = get_master rpc session_id in + let master = Client.Host.get_record ~rpc ~session_id ~self:master_ref in + let my_software_version = me.API.host_software_version in + let master_software_version = master.API.host_software_version in + let compatibility_info x = + let open Xapi_globs in + let platform_version = if List.mem_assoc _platform_version x + then Some (List.assoc _platform_version x) + else None in + let compatibility_name = get_compatibility_name x in + (platform_version, compatibility_name) + in + let master_compatibility_info = compatibility_info master_software_version in + let my_compatibility_info = compatibility_info my_software_version in + if master_compatibility_info <> my_compatibility_info then begin + debug "master PLATFORM_VERSION = %s, master compatibility name = %s; my PLATFORM_VERSION = %s, my compatibility name = %s; " + (Opt.default "Unknown" (fst master_compatibility_info)) + (Opt.default "Unknown" (snd master_compatibility_info)) + (Opt.default "Unknown" (fst my_compatibility_info)) + (Opt.default "Unknown" (snd my_compatibility_info)); + raise (Api_errors.Server_error(Api_errors.pool_hosts_not_compatible, [])) + end in + + let assert_hosts_homogeneous () = + let me = Helpers.get_localhost ~__context in + let master_ref = get_master rpc session_id in + let master = Client.Host.get_record ~rpc ~session_id ~self:master_ref in + + (* Check software version *) + + let get_software_version_fields fields = + let open Xapi_globs in + begin try List.assoc _platform_version 
fields with _ -> "" end, + begin match get_compatibility_name fields with Some x -> x | None -> "" end, + begin try List.assoc _build_number fields with _ -> "" end, + begin try List.assoc _git_id fields with _ -> "" end, + begin try + if List.mem_assoc linux_pack_vsn_key fields then "installed" + else "not present" + with _ -> "not present" end + in + let print_software_version (version,name,number,id,linux_pack) = + debug "version:%s, name:%s, build:%s, id:%s, linux_pack:%s" version name number id linux_pack in + + let master_software_version = master.API.host_software_version in + let my_software_version = Db.Host.get_software_version ~__context ~self:me in + + let my_software_compare = get_software_version_fields my_software_version in + let master_software_compare = get_software_version_fields master_software_version in + + debug "Pool pre-join Software homogeneity check:"; + debug "Slave software:"; + print_software_version my_software_compare; + debug "Master software:"; + print_software_version master_software_compare; + + if my_software_compare <> master_software_compare then + raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous,["Software version differs"])); + + (* Check CPUs *) + + let my_cpu_vendor = Db.Host.get_cpu_info ~__context ~self:me |> List.assoc "vendor" in + let pool_cpu_vendor = + let pool = List.hd (Client.Pool.get_all rpc session_id) in + Client.Pool.get_cpu_info rpc session_id pool |> List.assoc "vendor" + in + debug "Pool pre-join CPU homogeneity check:"; + debug "Slave CPUs: %s" my_cpu_vendor; + debug "Pool CPUs: %s" pool_cpu_vendor; + + if my_cpu_vendor <> pool_cpu_vendor then + raise (Api_errors.Server_error(Api_errors.pool_hosts_not_homogeneous,["CPUs differ"])) in + + let assert_not_joining_myself () = + let master = get_master rpc session_id in + let master_uuid = Client.Host.get_uuid rpc session_id master in + let my_uuid = Db.Host.get_uuid ~__context ~self:(Helpers.get_localhost ~__context) in + if master_uuid = 
my_uuid then + let error_msg = + if 1 < List.length (Db.Host.get_all ~__context) + then "Host is already part of a pool" + else "Host cannot become slave of itself" in + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, [error_msg])) in + + let assert_homogeneous_vswitch_configuration () = + (* The network backend must be the same as the remote master's backend *) + let my_pool = Helpers.get_pool __context in + let dbg = Context.string_of_task __context in + let my_backend' = Net.Bridge.get_kind dbg () in + let my_backend = Network_interface.string_of_kind my_backend' in + let pool = List.hd (Client.Pool.get_all rpc session_id) in + let remote_master = Client.Pool.get_master ~rpc ~session_id ~self:pool in + let remote_masters_backend = + let v = Client.Host.get_software_version ~rpc ~session_id ~self:remote_master in + if not (List.mem_assoc "network_backend" v) then + Network_interface.string_of_kind Network_interface.Bridge + else + List.assoc "network_backend" v + in + if my_backend <> remote_masters_backend then + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Network backends differ"])); + + match my_backend' with + | Network_interface.Openvswitch -> + let my_controller = Db.Pool.get_vswitch_controller ~__context ~self:my_pool in + let controller = Client.Pool.get_vswitch_controller ~rpc ~session_id ~self:pool in + if my_controller <> controller && my_controller <> "" then + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["vswitch controller address differs"])) + | _ -> () + in + + let assert_homogeneous_primary_address_type () = + let mgmt_iface = Xapi_host.get_management_interface ~__context ~host:(Helpers.get_localhost ~__context) in + let mgmt_addr_type = Db.PIF.get_primary_address_type ~__context ~self:mgmt_iface in + let master = get_master rpc session_id in + let master_mgmt_iface = Client.Host.get_management_interface rpc session_id master in + let master_addr_type = 
Client.PIF.get_primary_address_type rpc session_id master_mgmt_iface in + if (mgmt_addr_type <> master_addr_type) then + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Primary address type differs"])); + in + + (* call pre-join asserts *) + assert_management_interface_exists (); + ha_is_not_enable_on_me (); + ha_is_not_enable_on_the_distant_pool (); + assert_not_joining_myself(); + assert_i_know_of_no_other_hosts(); + assert_no_running_vms_on_me (); + assert_no_vms_with_current_ops (); + assert_hosts_compatible (); + if (not force) then assert_hosts_homogeneous (); + assert_no_shared_srs_on_me (); + assert_only_physical_pifs (); + assert_external_auth_matches (); + assert_restrictions_match (); + assert_homogeneous_vswitch_configuration (); + assert_applied_patches_match (); + assert_homogeneous_primary_address_type () let rec create_or_get_host_on_master __context rpc session_id (host_ref, host) : API.ref_host = - let my_uuid = host.API.host_uuid in - - let new_host_ref = - try Client.Host.get_by_uuid rpc session_id my_uuid - with _ -> - debug "Found no host with uuid = '%s' on the master, so creating one." 
my_uuid; - - (* CA-51925: Copy the local cache SR *) - let my_local_cache_sr = Db.Host.get_local_cache_sr ~__context ~self:host_ref in - let local_cache_sr = if my_local_cache_sr = Ref.null then Ref.null else - begin - let my_local_cache_sr_rec = Db.SR.get_record ~__context ~self:my_local_cache_sr in - debug "Copying the local cache SR (uuid=%s)" my_local_cache_sr_rec.API.sR_uuid; - create_or_get_sr_on_master __context rpc session_id (my_local_cache_sr, my_local_cache_sr_rec) - end in - - (* Look up the value on the master of the pool we are about to join *) - let master_ssl = Client.Host.get_ssl_legacy ~rpc ~session_id ~self:(get_master rpc session_id) in - (* Set value in inventory (to control initial behaviour on next xapi start) - * but not in the database of the current pool (the one we're about to leave) *) - Xapi_inventory.update Xapi_inventory._stunnel_legacy (string_of_bool master_ssl); - - debug "Creating host object on master"; - let ref = Client.Host.create ~rpc ~session_id - ~uuid:my_uuid - ~name_label:host.API.host_name_label - ~name_description:host.API.host_name_description - ~hostname:host.API.host_hostname - ~address:host.API.host_address - ~external_auth_type:host.API.host_external_auth_type - ~external_auth_service_name:host.API.host_external_auth_service_name - ~external_auth_configuration:host.API.host_external_auth_configuration - ~license_params:host.API.host_license_params - ~edition:host.API.host_edition - ~license_server:host.API.host_license_server - (* CA-51925: local_cache_sr can only be written by Host.enable_local_caching_sr but this API - * call is forwarded to the host in question. Since, during a pool-join, the host is offline, - * we need an alternative way of preserving the value of the local_cache_sr field, so it's - * been added to the constructor. 
*) - ~local_cache_sr - ~chipset_info:host.API.host_chipset_info - ~ssl_legacy:master_ssl - in - - (* Copy other-config into newly created host record: *) - no_exn (fun () -> Client.Host.set_other_config ~rpc ~session_id ~self:ref ~value:host.API.host_other_config) (); - - (* Copy the crashdump SR *) - let my_crashdump_sr = Db.Host.get_crash_dump_sr ~__context ~self:host_ref in - if my_crashdump_sr <> Ref.null then begin - let my_crashdump_sr_rec = Db.SR.get_record ~__context ~self:my_crashdump_sr in - debug "Copying the crashdump SR (uuid=%s)" my_crashdump_sr_rec.API.sR_uuid; - let crashdump_sr = create_or_get_sr_on_master __context rpc session_id (my_crashdump_sr, my_crashdump_sr_rec) in - no_exn (fun () -> Client.Host.set_crash_dump_sr ~rpc ~session_id ~self:ref ~value:crashdump_sr) () - end; - - (* Copy the suspend image SR *) - let my_suspend_image_sr = Db.Host.get_crash_dump_sr ~__context ~self:host_ref in - if my_suspend_image_sr <> Ref.null then begin - let my_suspend_image_sr_rec = Db.SR.get_record ~__context ~self:my_suspend_image_sr in - debug "Copying the suspend-image SR (uuid=%s)" my_suspend_image_sr_rec.API.sR_uuid; - let suspend_image_sr = create_or_get_sr_on_master __context rpc session_id (my_suspend_image_sr, my_suspend_image_sr_rec) in - no_exn (fun () -> Client.Host.set_suspend_image_sr ~rpc ~session_id ~self:ref ~value:suspend_image_sr) () - end; - - ref in - - new_host_ref + let my_uuid = host.API.host_uuid in + + let new_host_ref = + try Client.Host.get_by_uuid rpc session_id my_uuid + with _ -> + debug "Found no host with uuid = '%s' on the master, so creating one." 
my_uuid; + + (* CA-51925: Copy the local cache SR *) + let my_local_cache_sr = Db.Host.get_local_cache_sr ~__context ~self:host_ref in + let local_cache_sr = if my_local_cache_sr = Ref.null then Ref.null else + begin + let my_local_cache_sr_rec = Db.SR.get_record ~__context ~self:my_local_cache_sr in + debug "Copying the local cache SR (uuid=%s)" my_local_cache_sr_rec.API.sR_uuid; + create_or_get_sr_on_master __context rpc session_id (my_local_cache_sr, my_local_cache_sr_rec) + end in + + (* Look up the value on the master of the pool we are about to join *) + let master_ssl = Client.Host.get_ssl_legacy ~rpc ~session_id ~self:(get_master rpc session_id) in + (* Set value in inventory (to control initial behaviour on next xapi start) + * but not in the database of the current pool (the one we're about to leave) *) + Xapi_inventory.update Xapi_inventory._stunnel_legacy (string_of_bool master_ssl); + + debug "Creating host object on master"; + let ref = Client.Host.create ~rpc ~session_id + ~uuid:my_uuid + ~name_label:host.API.host_name_label + ~name_description:host.API.host_name_description + ~hostname:host.API.host_hostname + ~address:host.API.host_address + ~external_auth_type:host.API.host_external_auth_type + ~external_auth_service_name:host.API.host_external_auth_service_name + ~external_auth_configuration:host.API.host_external_auth_configuration + ~license_params:host.API.host_license_params + ~edition:host.API.host_edition + ~license_server:host.API.host_license_server + (* CA-51925: local_cache_sr can only be written by Host.enable_local_caching_sr but this API + * call is forwarded to the host in question. Since, during a pool-join, the host is offline, + * we need an alternative way of preserving the value of the local_cache_sr field, so it's + * been added to the constructor. 
*) + ~local_cache_sr + ~chipset_info:host.API.host_chipset_info + ~ssl_legacy:master_ssl + in + + (* Copy other-config into newly created host record: *) + no_exn (fun () -> Client.Host.set_other_config ~rpc ~session_id ~self:ref ~value:host.API.host_other_config) (); + + (* Copy the crashdump SR *) + let my_crashdump_sr = Db.Host.get_crash_dump_sr ~__context ~self:host_ref in + if my_crashdump_sr <> Ref.null then begin + let my_crashdump_sr_rec = Db.SR.get_record ~__context ~self:my_crashdump_sr in + debug "Copying the crashdump SR (uuid=%s)" my_crashdump_sr_rec.API.sR_uuid; + let crashdump_sr = create_or_get_sr_on_master __context rpc session_id (my_crashdump_sr, my_crashdump_sr_rec) in + no_exn (fun () -> Client.Host.set_crash_dump_sr ~rpc ~session_id ~self:ref ~value:crashdump_sr) () + end; + + (* Copy the suspend image SR *) + let my_suspend_image_sr = Db.Host.get_crash_dump_sr ~__context ~self:host_ref in + if my_suspend_image_sr <> Ref.null then begin + let my_suspend_image_sr_rec = Db.SR.get_record ~__context ~self:my_suspend_image_sr in + debug "Copying the suspend-image SR (uuid=%s)" my_suspend_image_sr_rec.API.sR_uuid; + let suspend_image_sr = create_or_get_sr_on_master __context rpc session_id (my_suspend_image_sr, my_suspend_image_sr_rec) in + no_exn (fun () -> Client.Host.set_suspend_image_sr ~rpc ~session_id ~self:ref ~value:suspend_image_sr) () + end; + + ref in + + new_host_ref and create_or_get_sr_on_master __context rpc session_id (sr_ref, sr) : API.ref_SR = - let my_uuid = sr.API.sR_uuid in - - let new_sr_ref = - try Client.SR.get_by_uuid ~rpc ~session_id ~uuid:my_uuid - with _ -> - let my_pbd_ref = List.hd (Db.SR.get_PBDs ~__context ~self:sr_ref) in - let my_pbd = Db.PBD.get_record ~__context ~self:my_pbd_ref in - let pbds_on_master = Client.PBD.get_all_records ~rpc ~session_id in - - (* The only possible shared SRs are ISO, as other SRs cannot be shared properly accross pools. 
*) - (* In this case, if we find a SR with a PBD having the same device_config field, we pick this SR instead of building a new one *) - let iso_already_exists_on_master () = List.exists (fun (_,x) -> Listext.List.set_equiv x.API.pBD_device_config my_pbd.API.pBD_device_config) pbds_on_master in - if sr.API.sR_shared && sr.API.sR_content_type = "iso" && iso_already_exists_on_master () then begin - let similar_pbd_ref, similar_pbd = List.find (fun (_,x) -> Listext.List.set_equiv x.API.pBD_device_config my_pbd.API.pBD_device_config) pbds_on_master in - similar_pbd.API.pBD_SR - - end else begin - debug "Found no SR with uuid = '%s' on the master, so creating one." my_uuid; - let ref = Client.SR.introduce ~rpc ~session_id - ~uuid:my_uuid - ~name_label:sr.API.sR_name_label - ~name_description:sr.API.sR_name_description - ~_type:sr.API.sR_type - ~content_type:sr.API.sR_content_type - ~shared:false - ~sm_config:sr.API.sR_sm_config in - (* copy other-config into newly created sr record: *) - no_exn (fun () -> Client.SR.set_other_config ~rpc ~session_id ~self:ref ~value:sr.API.sR_other_config) (); - ref - end in - - new_sr_ref + let my_uuid = sr.API.sR_uuid in + + let new_sr_ref = + try Client.SR.get_by_uuid ~rpc ~session_id ~uuid:my_uuid + with _ -> + let my_pbd_ref = List.hd (Db.SR.get_PBDs ~__context ~self:sr_ref) in + let my_pbd = Db.PBD.get_record ~__context ~self:my_pbd_ref in + let pbds_on_master = Client.PBD.get_all_records ~rpc ~session_id in + + (* The only possible shared SRs are ISO, as other SRs cannot be shared properly accross pools. 
*) + (* In this case, if we find a SR with a PBD having the same device_config field, we pick this SR instead of building a new one *) + let iso_already_exists_on_master () = List.exists (fun (_,x) -> Listext.List.set_equiv x.API.pBD_device_config my_pbd.API.pBD_device_config) pbds_on_master in + if sr.API.sR_shared && sr.API.sR_content_type = "iso" && iso_already_exists_on_master () then begin + let similar_pbd_ref, similar_pbd = List.find (fun (_,x) -> Listext.List.set_equiv x.API.pBD_device_config my_pbd.API.pBD_device_config) pbds_on_master in + similar_pbd.API.pBD_SR + + end else begin + debug "Found no SR with uuid = '%s' on the master, so creating one." my_uuid; + let ref = Client.SR.introduce ~rpc ~session_id + ~uuid:my_uuid + ~name_label:sr.API.sR_name_label + ~name_description:sr.API.sR_name_description + ~_type:sr.API.sR_type + ~content_type:sr.API.sR_content_type + ~shared:false + ~sm_config:sr.API.sR_sm_config in + (* copy other-config into newly created sr record: *) + no_exn (fun () -> Client.SR.set_other_config ~rpc ~session_id ~self:ref ~value:sr.API.sR_other_config) (); + ref + end in + + new_sr_ref let create_or_get_pbd_on_master __context rpc session_id (pbd_ref, pbd) : API.ref_PBD = - let my_uuid = pbd.API.pBD_uuid in + let my_uuid = pbd.API.pBD_uuid in - let new_pbd_ref = - try Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:my_uuid - with _ -> - let my_host_ref = pbd.API.pBD_host in - let my_host = Db.Host.get_record ~__context ~self:my_host_ref in - let new_host_ref = create_or_get_host_on_master __context rpc session_id (my_host_ref, my_host) in + let new_pbd_ref = + try Client.PBD.get_by_uuid ~rpc ~session_id ~uuid:my_uuid + with _ -> + let my_host_ref = pbd.API.pBD_host in + let my_host = Db.Host.get_record ~__context ~self:my_host_ref in + let new_host_ref = create_or_get_host_on_master __context rpc session_id (my_host_ref, my_host) in - let my_sr_ref = pbd.API.pBD_SR in - let my_sr = Db.SR.get_record ~__context ~self:my_sr_ref in - let 
new_sr_ref = create_or_get_sr_on_master __context rpc session_id (my_sr_ref, my_sr) in + let my_sr_ref = pbd.API.pBD_SR in + let my_sr = Db.SR.get_record ~__context ~self:my_sr_ref in + let new_sr_ref = create_or_get_sr_on_master __context rpc session_id (my_sr_ref, my_sr) in - debug "Found no PBD with uuid = '%s' on the master, so creating one." my_uuid; - Client.PBD.create ~rpc ~session_id - ~host:new_host_ref - ~sR:new_sr_ref - ~other_config:pbd.API.pBD_other_config - ~device_config:pbd.API.pBD_device_config in + debug "Found no PBD with uuid = '%s' on the master, so creating one." my_uuid; + Client.PBD.create ~rpc ~session_id + ~host:new_host_ref + ~sR:new_sr_ref + ~other_config:pbd.API.pBD_other_config + ~device_config:pbd.API.pBD_device_config in - new_pbd_ref + new_pbd_ref let create_or_get_vdi_on_master __context rpc session_id (vdi_ref, vdi) : API.ref_VDI = - let my_uuid = vdi.API.vDI_uuid in - let my_sr_ref = vdi.API.vDI_SR in - let my_sr = Db.SR.get_record ~__context ~self:my_sr_ref in - - let new_sr_ref = create_or_get_sr_on_master __context rpc session_id (my_sr_ref, my_sr) in - - let new_vdi_ref = - try Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:my_uuid - with _ -> - debug "Found no VDI with uuid = '%s' on the master, so creating one." 
my_uuid; - Client.VDI.pool_introduce ~rpc ~session_id - ~uuid:my_uuid - ~name_label:vdi.API.vDI_name_label - ~name_description:vdi.API.vDI_name_description - ~sR:new_sr_ref - ~_type:vdi.API.vDI_type - ~sharable:vdi.API.vDI_sharable - ~read_only:vdi.API.vDI_read_only - ~other_config:vdi.API.vDI_other_config - ~location:(Db.VDI.get_location ~__context ~self:vdi_ref) - ~xenstore_data:vdi.API.vDI_xenstore_data - ~sm_config:vdi.API.vDI_sm_config - ~managed:vdi.API.vDI_managed - ~virtual_size:vdi.API.vDI_virtual_size - ~physical_utilisation:vdi.API.vDI_physical_utilisation - ~metadata_of_pool:vdi.API.vDI_metadata_of_pool - ~is_a_snapshot:vdi.API.vDI_is_a_snapshot - ~snapshot_time:vdi.API.vDI_snapshot_time - ~snapshot_of:vdi.API.vDI_snapshot_of in - new_vdi_ref + let my_uuid = vdi.API.vDI_uuid in + let my_sr_ref = vdi.API.vDI_SR in + let my_sr = Db.SR.get_record ~__context ~self:my_sr_ref in + + let new_sr_ref = create_or_get_sr_on_master __context rpc session_id (my_sr_ref, my_sr) in + + let new_vdi_ref = + try Client.VDI.get_by_uuid ~rpc ~session_id ~uuid:my_uuid + with _ -> + debug "Found no VDI with uuid = '%s' on the master, so creating one." 
my_uuid; + Client.VDI.pool_introduce ~rpc ~session_id + ~uuid:my_uuid + ~name_label:vdi.API.vDI_name_label + ~name_description:vdi.API.vDI_name_description + ~sR:new_sr_ref + ~_type:vdi.API.vDI_type + ~sharable:vdi.API.vDI_sharable + ~read_only:vdi.API.vDI_read_only + ~other_config:vdi.API.vDI_other_config + ~location:(Db.VDI.get_location ~__context ~self:vdi_ref) + ~xenstore_data:vdi.API.vDI_xenstore_data + ~sm_config:vdi.API.vDI_sm_config + ~managed:vdi.API.vDI_managed + ~virtual_size:vdi.API.vDI_virtual_size + ~physical_utilisation:vdi.API.vDI_physical_utilisation + ~metadata_of_pool:vdi.API.vDI_metadata_of_pool + ~is_a_snapshot:vdi.API.vDI_is_a_snapshot + ~snapshot_time:vdi.API.vDI_snapshot_time + ~snapshot_of:vdi.API.vDI_snapshot_of in + new_vdi_ref let create_or_get_network_on_master __context rpc session_id (network_ref, network) : API.ref_network = - let my_bridge = network.API.network_bridge in - let is_physical = match network.API.network_PIFs with - | [] -> false - | hd :: _ -> Db.PIF.get_physical ~__context ~self:hd - in - let is_himn = - (List.mem_assoc Xapi_globs.is_host_internal_management_network network.API.network_other_config) && - (List.assoc Xapi_globs.is_host_internal_management_network network.API.network_other_config = "true") - in - - let new_network_ref = - if is_physical || is_himn then - (* Physical network or Host Internal Management Network: - * try to join an existing network with the same bridge name, or create one. - * This relies on the convention that physical PIFs with the same device name need to be connected. - * Furthermore, there should be only one Host Internal Management Network in a pool. *) - try - let pool_networks = Client.Network.get_all_records ~rpc ~session_id in - let net_ref, _ = List.find (fun (_, net) -> net.API.network_bridge = my_bridge) pool_networks in - net_ref - with _ -> - debug "Found no network with bridge = '%s' on the master, so creating one." 
my_bridge; - Client.Network.pool_introduce ~rpc ~session_id - ~name_label:network.API.network_name_label - ~name_description:network.API.network_name_description - ~mTU:network.API.network_MTU - ~other_config:network.API.network_other_config - ~bridge:network.API.network_bridge - else begin - debug "Recreating network '%s' as internal network." network.API.network_name_label; - (* This call will generate a new 'xapi#' bridge name rather than keeping the - * current, possibly colliding one. *) - Client.Network.create ~rpc ~session_id - ~name_label:network.API.network_name_label - ~name_description:network.API.network_name_description - ~mTU:network.API.network_MTU - ~other_config:network.API.network_other_config - ~tags:network.API.network_tags - end - in - - new_network_ref + let my_bridge = network.API.network_bridge in + let is_physical = match network.API.network_PIFs with + | [] -> false + | hd :: _ -> Db.PIF.get_physical ~__context ~self:hd + in + let is_himn = + (List.mem_assoc Xapi_globs.is_host_internal_management_network network.API.network_other_config) && + (List.assoc Xapi_globs.is_host_internal_management_network network.API.network_other_config = "true") + in + + let new_network_ref = + if is_physical || is_himn then + (* Physical network or Host Internal Management Network: + * try to join an existing network with the same bridge name, or create one. + * This relies on the convention that physical PIFs with the same device name need to be connected. + * Furthermore, there should be only one Host Internal Management Network in a pool. *) + try + let pool_networks = Client.Network.get_all_records ~rpc ~session_id in + let net_ref, _ = List.find (fun (_, net) -> net.API.network_bridge = my_bridge) pool_networks in + net_ref + with _ -> + debug "Found no network with bridge = '%s' on the master, so creating one." 
my_bridge; + Client.Network.pool_introduce ~rpc ~session_id + ~name_label:network.API.network_name_label + ~name_description:network.API.network_name_description + ~mTU:network.API.network_MTU + ~other_config:network.API.network_other_config + ~bridge:network.API.network_bridge + else begin + debug "Recreating network '%s' as internal network." network.API.network_name_label; + (* This call will generate a new 'xapi#' bridge name rather than keeping the + * current, possibly colliding one. *) + Client.Network.create ~rpc ~session_id + ~name_label:network.API.network_name_label + ~name_description:network.API.network_name_description + ~mTU:network.API.network_MTU + ~other_config:network.API.network_other_config + ~tags:network.API.network_tags + end + in + + new_network_ref let create_or_get_pif_on_master __context rpc session_id (pif_ref, pif) : API.ref_PIF = - let my_uuid = pif.API.pIF_uuid in - - let my_host_ref = pif.API.pIF_host in - let my_host = Db.Host.get_record ~__context ~self:my_host_ref in - let new_host_ref = create_or_get_host_on_master __context rpc session_id (my_host_ref, my_host) in - - let my_network_ref = pif.API.pIF_network in - let my_network = Db.Network.get_record ~__context ~self:my_network_ref in - let new_network_ref = create_or_get_network_on_master __context rpc session_id (my_network_ref, my_network) in - - let new_pif_ref = - try Client.PIF.get_by_uuid ~rpc ~session_id ~uuid:my_uuid - with _ -> - debug "Found no PIF with uuid = '%s' on the master, so creating one." 
my_uuid; - Client.PIF.pool_introduce ~rpc ~session_id - ~device:pif.API.pIF_device - ~network:new_network_ref - ~host:new_host_ref - ~mAC:pif.API.pIF_MAC - ~mTU:pif.API.pIF_MTU - ~vLAN:pif.API.pIF_VLAN - ~physical:pif.API.pIF_physical - ~ip_configuration_mode:pif.API.pIF_ip_configuration_mode - ~iP:pif.API.pIF_IP - ~netmask:pif.API.pIF_netmask - ~gateway:pif.API.pIF_gateway - ~dNS:pif.API.pIF_DNS - ~bond_slave_of:pif.API.pIF_bond_slave_of - ~vLAN_master_of:pif.API.pIF_VLAN_master_of - ~management:pif.API.pIF_management - ~other_config:pif.API.pIF_other_config - ~disallow_unplug:pif.API.pIF_disallow_unplug - ~ipv6_configuration_mode:pif.API.pIF_ipv6_configuration_mode - ~iPv6:pif.API.pIF_IPv6 - ~ipv6_gateway:pif.API.pIF_ipv6_gateway - ~primary_address_type:pif.API.pIF_primary_address_type - ~managed:pif.API.pIF_managed - ~properties:pif.API.pIF_properties in - - new_pif_ref + let my_uuid = pif.API.pIF_uuid in + + let my_host_ref = pif.API.pIF_host in + let my_host = Db.Host.get_record ~__context ~self:my_host_ref in + let new_host_ref = create_or_get_host_on_master __context rpc session_id (my_host_ref, my_host) in + + let my_network_ref = pif.API.pIF_network in + let my_network = Db.Network.get_record ~__context ~self:my_network_ref in + let new_network_ref = create_or_get_network_on_master __context rpc session_id (my_network_ref, my_network) in + + let new_pif_ref = + try Client.PIF.get_by_uuid ~rpc ~session_id ~uuid:my_uuid + with _ -> + debug "Found no PIF with uuid = '%s' on the master, so creating one." 
my_uuid; + Client.PIF.pool_introduce ~rpc ~session_id + ~device:pif.API.pIF_device + ~network:new_network_ref + ~host:new_host_ref + ~mAC:pif.API.pIF_MAC + ~mTU:pif.API.pIF_MTU + ~vLAN:pif.API.pIF_VLAN + ~physical:pif.API.pIF_physical + ~ip_configuration_mode:pif.API.pIF_ip_configuration_mode + ~iP:pif.API.pIF_IP + ~netmask:pif.API.pIF_netmask + ~gateway:pif.API.pIF_gateway + ~dNS:pif.API.pIF_DNS + ~bond_slave_of:pif.API.pIF_bond_slave_of + ~vLAN_master_of:pif.API.pIF_VLAN_master_of + ~management:pif.API.pIF_management + ~other_config:pif.API.pIF_other_config + ~disallow_unplug:pif.API.pIF_disallow_unplug + ~ipv6_configuration_mode:pif.API.pIF_ipv6_configuration_mode + ~iPv6:pif.API.pIF_IPv6 + ~ipv6_gateway:pif.API.pIF_ipv6_gateway + ~primary_address_type:pif.API.pIF_primary_address_type + ~managed:pif.API.pIF_managed + ~properties:pif.API.pIF_properties in + + new_pif_ref let create_or_get_secret_on_master __context rpc session_id (secret_ref, secret) : API.ref_secret = - let my_uuid = secret.API.secret_uuid in - let my_value = secret.API.secret_value in - let new_secret_ref = - try Client.Secret.get_by_uuid ~rpc ~session_id ~uuid:my_uuid - with _ -> - debug "Found no secret with uuid = '%s' on master, so creating one." my_uuid; - Client.Secret.introduce ~rpc ~session_id ~uuid:my_uuid ~value:my_value ~other_config:[] - in - new_secret_ref + let my_uuid = secret.API.secret_uuid in + let my_value = secret.API.secret_value in + let new_secret_ref = + try Client.Secret.get_by_uuid ~rpc ~session_id ~uuid:my_uuid + with _ -> + debug "Found no secret with uuid = '%s' on master, so creating one." 
my_uuid; + Client.Secret.introduce ~rpc ~session_id ~uuid:my_uuid ~value:my_value ~other_config:[] + in + new_secret_ref let protect_exn f x = - try Some (f x) - with e -> - debug "Ignoring exception: %s" (Printexc.to_string e); - Debug.log_backtrace e (Backtrace.get e); - None + try Some (f x) + with e -> + debug "Ignoring exception: %s" (Printexc.to_string e); + Debug.log_backtrace e (Backtrace.get e); + None (* Remark: the order in which we create the object in the distant database is not very important, as we have *) (* an unique way to identify each object and thus we know if we need to create them or if it is already done *) let update_non_vm_metadata ~__context ~rpc ~session_id = - (* Update hosts *) - let my_hosts = Db.Host.get_all_records ~__context in - let (_ : API.ref_host option list) = - List.map (protect_exn (create_or_get_host_on_master __context rpc session_id)) my_hosts in - - (* Update SRs *) - let my_srs = Db.SR.get_all_records ~__context in - let (_ : API.ref_SR option list) = - List.map (protect_exn (create_or_get_sr_on_master __context rpc session_id)) my_srs in - - (* Update PBDs *) - let my_pbds = Db.PBD.get_all_records ~__context in - let (_ : API.ref_PBD option list) = - List.map (protect_exn (create_or_get_pbd_on_master __context rpc session_id)) my_pbds in - - (* Update VDIs *) - let my_vdis = Db.VDI.get_all_records ~__context in - let (_ : API.ref_VDI option list) = - List.map (protect_exn (create_or_get_vdi_on_master __context rpc session_id)) my_vdis in - - (* Update networks *) - let my_networks = Db.Network.get_all_records ~__context in - let (_ : API.ref_network option list) = - List.map (protect_exn (create_or_get_network_on_master __context rpc session_id)) my_networks in - - (* update PIFs *) - let my_pifs = Db.PIF.get_records_where ~__context ~expr:( - Eq (Field "physical", Literal "true") - ) in - let (_ : API.ref_PIF option list) = - List.map (protect_exn (create_or_get_pif_on_master __context rpc session_id)) my_pifs in - - 
(* update Secrets *) - let my_secrets = Db.Secret.get_all_records ~__context in - let (_ : API.ref_secret option list) = - List.map (protect_exn (create_or_get_secret_on_master __context rpc session_id)) my_secrets - in - - () + (* Update hosts *) + let my_hosts = Db.Host.get_all_records ~__context in + let (_ : API.ref_host option list) = + List.map (protect_exn (create_or_get_host_on_master __context rpc session_id)) my_hosts in + + (* Update SRs *) + let my_srs = Db.SR.get_all_records ~__context in + let (_ : API.ref_SR option list) = + List.map (protect_exn (create_or_get_sr_on_master __context rpc session_id)) my_srs in + + (* Update PBDs *) + let my_pbds = Db.PBD.get_all_records ~__context in + let (_ : API.ref_PBD option list) = + List.map (protect_exn (create_or_get_pbd_on_master __context rpc session_id)) my_pbds in + + (* Update VDIs *) + let my_vdis = Db.VDI.get_all_records ~__context in + let (_ : API.ref_VDI option list) = + List.map (protect_exn (create_or_get_vdi_on_master __context rpc session_id)) my_vdis in + + (* Update networks *) + let my_networks = Db.Network.get_all_records ~__context in + let (_ : API.ref_network option list) = + List.map (protect_exn (create_or_get_network_on_master __context rpc session_id)) my_networks in + + (* update PIFs *) + let my_pifs = Db.PIF.get_records_where ~__context ~expr:( + Eq (Field "physical", Literal "true") + ) in + let (_ : API.ref_PIF option list) = + List.map (protect_exn (create_or_get_pif_on_master __context rpc session_id)) my_pifs in + + (* update Secrets *) + let my_secrets = Db.Secret.get_all_records ~__context in + let (_ : API.ref_secret option list) = + List.map (protect_exn (create_or_get_secret_on_master __context rpc session_id)) my_secrets + in + + () let assert_pooling_licensed ~__context = - if (not (Pool_features.is_enabled ~__context Features.Pooling)) - then raise (Api_errors.Server_error(Api_errors.license_restriction, [])) + if (not (Pool_features.is_enabled ~__context 
Features.Pooling)) + then raise (Api_errors.Server_error(Api_errors.license_restriction, [])) let join_common ~__context ~master_address ~master_username ~master_password ~force = - assert_pooling_licensed ~__context; - (* get hold of cluster secret - this is critical; if this fails whole pool join fails *) - (* Note: this is where the license restrictions are checked on the other side.. if we're trying to join - a host that does not support pooling then an error will be thrown at this stage *) - let rpc = rpc master_address in - let session_id = - try Client.Session.login_with_password rpc master_username master_password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent - with Http_client.Http_request_rejected _ | Http_client.Http_error _ -> - raise (Api_errors.Server_error(Api_errors.pool_joining_host_service_failed, [])) in - - let cluster_secret = ref "" in - - finally (fun () -> - pre_join_checks ~__context ~rpc ~session_id ~force; - cluster_secret := Client.Pool.initial_auth rpc session_id; - - (* get pool db from new master so I have a backup ready if we failover to me *) - begin try - Pool_db_backup.fetch_database_backup ~master_address ~pool_secret:!cluster_secret ~force:None - with e -> - error "Failed fetching a database backup from the master: %s" (ExnHelper.string_of_exn e) - end; - - (* this is where we try and sync up as much state as we can - with the master. This is "best effort" rather than - critical; if we fail part way through this then we carry - on with the join *) - try - update_non_vm_metadata ~__context ~rpc ~session_id; - ignore(Importexport.remote_metadata_export_import ~__context ~rpc ~session_id ~remote_address:master_address ~restore:true `All) - with e -> - debug "Error whilst importing db objects into master; aborted: %s" (Printexc.to_string e); - warn "Error whilst importing db objects to master. 
The pool-join operation will continue, but some of the slave's VMs may not be available on the master.") - (fun () -> - Client.Session.logout rpc session_id); - - (* Attempt to unplug all our local storage. This is needed because - when we restart as a slave, all the references will be wrong - and these may have been cached by the storage layer. *) - Helpers.call_api_functions ~__context (fun rpc session_id -> - let me = Helpers.get_localhost ~__context in - List.iter - (fun self -> - Helpers.log_exn_continue (Printf.sprintf "Unplugging PBD %s" (Ref.string_of self)) - (fun () -> - Client.PBD.unplug rpc session_id self - ) () - ) (Db.Host.get_PBDs ~__context ~self:me) - ); - - (* Rewrite the pool secret on every host of the current pool, and restart all the agent as slave of the distant pool master. *) - Helpers.call_api_functions ~__context (fun my_rpc my_session_id -> - List.iter - (fun (host, _) -> - Client.Host.update_pool_secret my_rpc my_session_id host !cluster_secret; - Client.Host.update_master my_rpc my_session_id host master_address) - (Db.Host.get_all_records ~__context)); - Xapi_hooks.pool_join_hook ~__context + assert_pooling_licensed ~__context; + (* get hold of cluster secret - this is critical; if this fails whole pool join fails *) + (* Note: this is where the license restrictions are checked on the other side.. 
if we're trying to join + a host that does not support pooling then an error will be thrown at this stage *) + let rpc = rpc master_address in + let session_id = + try Client.Session.login_with_password rpc master_username master_password Xapi_globs.api_version_string Xapi_globs.xapi_user_agent + with Http_client.Http_request_rejected _ | Http_client.Http_error _ -> + raise (Api_errors.Server_error(Api_errors.pool_joining_host_service_failed, [])) in + + let cluster_secret = ref "" in + + finally (fun () -> + pre_join_checks ~__context ~rpc ~session_id ~force; + cluster_secret := Client.Pool.initial_auth rpc session_id; + + (* get pool db from new master so I have a backup ready if we failover to me *) + begin try + Pool_db_backup.fetch_database_backup ~master_address ~pool_secret:!cluster_secret ~force:None + with e -> + error "Failed fetching a database backup from the master: %s" (ExnHelper.string_of_exn e) + end; + + (* this is where we try and sync up as much state as we can + with the master. This is "best effort" rather than + critical; if we fail part way through this then we carry + on with the join *) + try + update_non_vm_metadata ~__context ~rpc ~session_id; + ignore(Importexport.remote_metadata_export_import ~__context ~rpc ~session_id ~remote_address:master_address ~restore:true `All) + with e -> + debug "Error whilst importing db objects into master; aborted: %s" (Printexc.to_string e); + warn "Error whilst importing db objects to master. The pool-join operation will continue, but some of the slave's VMs may not be available on the master.") + (fun () -> + Client.Session.logout rpc session_id); + + (* Attempt to unplug all our local storage. This is needed because + when we restart as a slave, all the references will be wrong + and these may have been cached by the storage layer. 
*) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let me = Helpers.get_localhost ~__context in + List.iter + (fun self -> + Helpers.log_exn_continue (Printf.sprintf "Unplugging PBD %s" (Ref.string_of self)) + (fun () -> + Client.PBD.unplug rpc session_id self + ) () + ) (Db.Host.get_PBDs ~__context ~self:me) + ); + + (* Rewrite the pool secret on every host of the current pool, and restart all the agent as slave of the distant pool master. *) + Helpers.call_api_functions ~__context (fun my_rpc my_session_id -> + List.iter + (fun (host, _) -> + Client.Host.update_pool_secret my_rpc my_session_id host !cluster_secret; + Client.Host.update_master my_rpc my_session_id host master_address) + (Db.Host.get_all_records ~__context)); + Xapi_hooks.pool_join_hook ~__context let join ~__context ~master_address ~master_username ~master_password = join_common ~__context ~master_address ~master_username ~master_password ~force:false @@ -728,12 +728,12 @@ let join_force ~__context ~master_address ~master_username ~master_password = (* Assume that db backed up from master will be there and ready to go... 
*) let emergency_transition_to_master ~__context = - if Localdb.get Constants.ha_armed = "true" - then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + if Localdb.get Constants.ha_armed = "true" + then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); Xapi_pool_transition.become_master () let emergency_reset_master ~__context ~master_address = - if Localdb.get Constants.ha_armed = "true" + if Localdb.get Constants.ha_armed = "true" then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); let master_address = Helpers.gethostbyname master_address in Xapi_pool_transition.become_another_masters_slave master_address @@ -745,21 +745,21 @@ let recover_slaves ~__context = let recover_slave hostref = if not (hostref = !Xapi_globs.localhost_ref) then begin - try - let local_fn = emergency_reset_master ~master_address:my_address in - - (* We have to use a new context here because the slave is currently doing a - Task.get_name_label on real tasks, which will block on slaves that we're - trying to recover. Get around this by creating a dummy task, for which - the name-label bit is bypassed *) - let newcontext = Context.make "emergency_reset_master" in - Message_forwarding.do_op_on_localsession_nolivecheck ~local_fn ~__context:newcontext ~host:hostref - (fun session_id rpc -> Client.Pool.emergency_reset_master rpc session_id my_address); - recovered_hosts := hostref::!recovered_hosts - with _ -> () + try + let local_fn = emergency_reset_master ~master_address:my_address in + + (* We have to use a new context here because the slave is currently doing a + Task.get_name_label on real tasks, which will block on slaves that we're + trying to recover. 
Get around this by creating a dummy task, for which + the name-label bit is bypassed *) + let newcontext = Context.make "emergency_reset_master" in + Message_forwarding.do_op_on_localsession_nolivecheck ~local_fn ~__context:newcontext ~host:hostref + (fun session_id rpc -> Client.Pool.emergency_reset_master rpc session_id my_address); + recovered_hosts := hostref::!recovered_hosts + with _ -> () end in - List.iter recover_slave hosts; - !recovered_hosts + List.iter recover_slave hosts; + !recovered_hosts exception Cannot_eject_master let no_exn f = try f() with _ -> () @@ -767,180 +767,180 @@ let unplug_pbds ~__context host = let pbds = Db.Host.get_PBDs ~__context ~self:host in let srs = List.map (fun self -> Db.PBD.get_SR ~__context ~self) pbds in let srs_to_delete = List.filter (fun self -> List.length (Db.SR.get_PBDs ~__context ~self) = 1) srs in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) pbds; - List.iter (fun sr -> Client.SR.forget ~rpc ~session_id ~sr) srs_to_delete) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter (fun pbd -> Client.PBD.unplug ~rpc ~session_id ~self:pbd) pbds; + List.iter (fun sr -> Client.SR.forget ~rpc ~session_id ~sr) srs_to_delete) (* This means eject me, since will have been forwarded from master *) let eject ~__context ~host = - (* If HA is enabled then refuse *) - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool - then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); - - if Pool_role.is_master () then raise Cannot_eject_master - else begin - (* Fail the operation if any VMs are running here (except dom0) *) - let my_vms_with_records = Db.VM.get_records_where ~__context ~expr:(Eq(Field "resident_on", Literal (Ref.string_of host))) in - List.iter (fun (_, x) -> - if (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:x.API.vM_uuid))) && 
x.API.vM_power_state <>`Halted - then begin - error "VM uuid %s not in Halted state and resident_on this host" (x.API.vM_uuid); - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["VM resident on host"])) - end) my_vms_with_records; - - (* all control domains resident on me should be destroyed once I leave the - pool, therefore pick them out as follows: if they have a valid resident_on, - the latter should be me; if they don't (e.g. they are halted), they should have - disks on my local storage *) - let vm_is_resident_on_host vm_rec host = - (Db.is_valid_ref __context vm_rec.API.vM_resident_on) && (vm_rec.API.vM_resident_on = host) - in - let vm_has_disks_on_local_sr_of_host vm_ref host = - let is_sr_local x = not (Helpers.is_sr_shared ~__context ~self:x) in - let host_has_sr x = (Helpers.check_sr_exists_for_host ~__context ~self:x ~host:host) <> None in - Db.VM.get_VBDs ~__context ~self:vm_ref - |> List.map (fun x -> Db.VBD.get_VDI ~__context ~self:x) - |> List.filter (fun x -> x <> Ref.null) (* filter out null ref VDIs (can happen e.g. 
for CDs) *) - |> List.map (fun x -> Db.VDI.get_SR ~__context ~self:x) - |> List.exists (fun x -> (is_sr_local x) && (host_has_sr x)) - in - let is_obsolete_control_domain (vm_ref, vm_rec) = - vm_rec.API.vM_is_control_domain - && ((vm_is_resident_on_host vm_rec host) || (vm_has_disks_on_local_sr_of_host vm_ref host)) - in - let control_domains_to_destroy = List.filter is_obsolete_control_domain (Db.VM.get_all_records ~__context) in - - debug "Pool.eject: unplugging PBDs"; - (* unplug all my PBDs; will deliberately fail if any unplugs fail *) - unplug_pbds ~__context host; - - debug "Pool.eject: disabling external authentication in slave-to-be-ejected"; - (* disable the external authentication of this slave being ejected *) - (* this call will return an exception if something goes wrong *) - Xapi_host.disable_external_auth_common ~during_pool_eject:true ~__context ~host - ~config:[]; - (* FIXME: in the future, we should send the windows AD admin/pass here *) - (* in order to remove the slave from the AD database during pool-eject *) - - debug "Pool.eject: rewrite networking first-boot files"; - let management_pif = Xapi_host.get_management_interface ~__context ~host in - let pif = Db.PIF.get_record ~__context ~self:management_pif in - let management_device = - (* assumes that the management interface is either physical or a bond *) - if pif.API.pIF_bond_master_of <> [] then - let bond = List.hd pif.API.pIF_bond_master_of in - let primary_slave = Db.Bond.get_primary_slave ~__context ~self:bond in - Db.PIF.get_device ~__context ~self:primary_slave - else - pif.API.pIF_device - in - let mode = match pif.API.pIF_ip_configuration_mode with - | `None -> "none" - | `DHCP -> "dhcp" - | `Static -> "static" - in - - let write_first_boot_management_interface_configuration_file () = - let bridge = Xapi_pif.bridge_naming_convention management_device in - Xapi_inventory.update Xapi_inventory._management_interface bridge; - let primary_address_type = Db.PIF.get_primary_address_type 
~__context ~self:management_pif in - Xapi_inventory.update Xapi_inventory._management_address_type - (Record_util.primary_address_type_to_string primary_address_type); - let configuration_file_contents = begin - "LABEL='" ^ management_device ^ "'\nMODE=" ^ mode ^ - if mode = "static" then - "\nIP=" ^ pif.API.pIF_IP ^ - "\nNETMASK=" ^ pif.API.pIF_netmask ^ - "\nGATEWAY=" ^ pif.API.pIF_gateway ^ - "\nDNS=" ^ pif.API.pIF_DNS ^ "\n" - else - "\n" - end in - Unixext.write_string_to_file - (Xapi_globs.first_boot_dir ^ "data/management.conf") - (configuration_file_contents) in - - write_first_boot_management_interface_configuration_file (); - - Net.reset_state (); - Xapi_inventory.update Xapi_inventory._current_interfaces ""; - - debug "Pool.eject: deleting Host record (the point of no return)"; - (* delete me from the database - this will in turn cause PBDs and PIFs to be GCed *) - Db.Host.destroy ~__context ~self:host; - Create_misc.create_pool_cpuinfo ~__context; - - (* Update pool features, in case this host had a different license to the - * rest of the pool. 
*) - Pool_features.update_pool_features ~__context; - - (* and destroy my control domains, since you can't do this from the API [operation not allowed] *) - begin try - List.iter (fun x -> Db.VM.destroy ~__context ~self:(fst x)) control_domains_to_destroy; - with _ -> () end; - debug "Pool.eject: setting our role to be master"; - Pool_role.set_role Pool_role.Master; - debug "Pool.eject: forgetting pool secret"; - Unixext.unlink_safe !Xapi_globs.pool_secret_path; (* forget current pool secret *) - (* delete backup databases and any temporary restore databases *) - Unixext.unlink_safe Xapi_globs.backup_db_xml; - Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; - Unixext.unlink_safe Xapi_globs.ha_metadata_db; - Unixext.unlink_safe Xapi_globs.gen_metadata_db; - - (* If we've got local storage, remove it *) - if (Helpers.local_storage_exists ()) then begin - ignore(Forkhelpers.execute_command_get_output "/bin/rm" ["-rf"; Xapi_globs.xapi_blob_location]); - Unixext.mkdir_safe Xapi_globs.xapi_blob_location 0o700; - end; - - (* delete /local/ databases specified in the db.conf, so they get recreated on restart. - * We must leave any remote database alone because these are owned by the pool and - * not by this node. 
*) - (* get the slave backup lock so we know no more backups are going to be taken -- - * we keep this lock till the bitter end, where we restart below ;) - *) - Mutex.lock Pool_db_backup.slave_backup_m; - finally - (fun () -> - let dbs = Parse_db_conf.parse_db_conf !Xapi_globs.db_conf_path in - (* We need to delete all local dbs but leave remote ones alone *) - let local = List.filter (fun db -> not db.Parse_db_conf.is_on_remote_storage) dbs in - List.iter Unixext.unlink_safe (List.map (fun db->db.Parse_db_conf.path) local); - List.iter Unixext.unlink_safe (List.map Parse_db_conf.generation_filename local); - (* remove any shared databases from my db.conf *) - (* XXX: on OEM edition the db.conf is rebuilt on every boot *) - Parse_db_conf.write_db_conf local; - (* Forget anything we know about configured remote databases: this prevents - any initscript reminding us about them after reboot *) - Helpers.log_exn_continue - (Printf.sprintf "Moving remote database file to backup: %s" - !Xapi_globs.remote_db_conf_fragment_path) - (fun () -> - Unix.rename - !Xapi_globs.remote_db_conf_fragment_path - (!Xapi_globs.remote_db_conf_fragment_path ^ ".bak")) (); - (* Reset the domain 0 network interface naming configuration - * back to a fresh-install state for the currently-installed - * hardware. 
- *) - ignore - (Forkhelpers.execute_command_get_output - "/etc/sysconfig/network-scripts/interface-rename.py" - ["--reset-to-install"]); - ) - (fun () -> Xapi_fuse.light_fuse_and_reboot_after_eject()); - Xapi_hooks.pool_eject_hook ~__context - end + (* If HA is enabled then refuse *) + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool + then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + + if Pool_role.is_master () then raise Cannot_eject_master + else begin + (* Fail the operation if any VMs are running here (except dom0) *) + let my_vms_with_records = Db.VM.get_records_where ~__context ~expr:(Eq(Field "resident_on", Literal (Ref.string_of host))) in + List.iter (fun (_, x) -> + if (not (Helpers.is_domain_zero ~__context (Db.VM.get_by_uuid ~__context ~uuid:x.API.vM_uuid))) && x.API.vM_power_state <>`Halted + then begin + error "VM uuid %s not in Halted state and resident_on this host" (x.API.vM_uuid); + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["VM resident on host"])) + end) my_vms_with_records; + + (* all control domains resident on me should be destroyed once I leave the + pool, therefore pick them out as follows: if they have a valid resident_on, + the latter should be me; if they don't (e.g. they are halted), they should have + disks on my local storage *) + let vm_is_resident_on_host vm_rec host = + (Db.is_valid_ref __context vm_rec.API.vM_resident_on) && (vm_rec.API.vM_resident_on = host) + in + let vm_has_disks_on_local_sr_of_host vm_ref host = + let is_sr_local x = not (Helpers.is_sr_shared ~__context ~self:x) in + let host_has_sr x = (Helpers.check_sr_exists_for_host ~__context ~self:x ~host:host) <> None in + Db.VM.get_VBDs ~__context ~self:vm_ref + |> List.map (fun x -> Db.VBD.get_VDI ~__context ~self:x) + |> List.filter (fun x -> x <> Ref.null) (* filter out null ref VDIs (can happen e.g. 
for CDs) *) + |> List.map (fun x -> Db.VDI.get_SR ~__context ~self:x) + |> List.exists (fun x -> (is_sr_local x) && (host_has_sr x)) + in + let is_obsolete_control_domain (vm_ref, vm_rec) = + vm_rec.API.vM_is_control_domain + && ((vm_is_resident_on_host vm_rec host) || (vm_has_disks_on_local_sr_of_host vm_ref host)) + in + let control_domains_to_destroy = List.filter is_obsolete_control_domain (Db.VM.get_all_records ~__context) in + + debug "Pool.eject: unplugging PBDs"; + (* unplug all my PBDs; will deliberately fail if any unplugs fail *) + unplug_pbds ~__context host; + + debug "Pool.eject: disabling external authentication in slave-to-be-ejected"; + (* disable the external authentication of this slave being ejected *) + (* this call will return an exception if something goes wrong *) + Xapi_host.disable_external_auth_common ~during_pool_eject:true ~__context ~host + ~config:[]; + (* FIXME: in the future, we should send the windows AD admin/pass here *) + (* in order to remove the slave from the AD database during pool-eject *) + + debug "Pool.eject: rewrite networking first-boot files"; + let management_pif = Xapi_host.get_management_interface ~__context ~host in + let pif = Db.PIF.get_record ~__context ~self:management_pif in + let management_device = + (* assumes that the management interface is either physical or a bond *) + if pif.API.pIF_bond_master_of <> [] then + let bond = List.hd pif.API.pIF_bond_master_of in + let primary_slave = Db.Bond.get_primary_slave ~__context ~self:bond in + Db.PIF.get_device ~__context ~self:primary_slave + else + pif.API.pIF_device + in + let mode = match pif.API.pIF_ip_configuration_mode with + | `None -> "none" + | `DHCP -> "dhcp" + | `Static -> "static" + in + + let write_first_boot_management_interface_configuration_file () = + let bridge = Xapi_pif.bridge_naming_convention management_device in + Xapi_inventory.update Xapi_inventory._management_interface bridge; + let primary_address_type = Db.PIF.get_primary_address_type 
~__context ~self:management_pif in + Xapi_inventory.update Xapi_inventory._management_address_type + (Record_util.primary_address_type_to_string primary_address_type); + let configuration_file_contents = begin + "LABEL='" ^ management_device ^ "'\nMODE=" ^ mode ^ + if mode = "static" then + "\nIP=" ^ pif.API.pIF_IP ^ + "\nNETMASK=" ^ pif.API.pIF_netmask ^ + "\nGATEWAY=" ^ pif.API.pIF_gateway ^ + "\nDNS=" ^ pif.API.pIF_DNS ^ "\n" + else + "\n" + end in + Unixext.write_string_to_file + (Xapi_globs.first_boot_dir ^ "data/management.conf") + (configuration_file_contents) in + + write_first_boot_management_interface_configuration_file (); + + Net.reset_state (); + Xapi_inventory.update Xapi_inventory._current_interfaces ""; + + debug "Pool.eject: deleting Host record (the point of no return)"; + (* delete me from the database - this will in turn cause PBDs and PIFs to be GCed *) + Db.Host.destroy ~__context ~self:host; + Create_misc.create_pool_cpuinfo ~__context; + + (* Update pool features, in case this host had a different license to the + * rest of the pool. 
*) + Pool_features.update_pool_features ~__context; + + (* and destroy my control domains, since you can't do this from the API [operation not allowed] *) + begin try + List.iter (fun x -> Db.VM.destroy ~__context ~self:(fst x)) control_domains_to_destroy; + with _ -> () end; + debug "Pool.eject: setting our role to be master"; + Pool_role.set_role Pool_role.Master; + debug "Pool.eject: forgetting pool secret"; + Unixext.unlink_safe !Xapi_globs.pool_secret_path; (* forget current pool secret *) + (* delete backup databases and any temporary restore databases *) + Unixext.unlink_safe Xapi_globs.backup_db_xml; + Unixext.unlink_safe Xapi_globs.db_temporary_restore_path; + Unixext.unlink_safe Xapi_globs.ha_metadata_db; + Unixext.unlink_safe Xapi_globs.gen_metadata_db; + + (* If we've got local storage, remove it *) + if (Helpers.local_storage_exists ()) then begin + ignore(Forkhelpers.execute_command_get_output "/bin/rm" ["-rf"; Xapi_globs.xapi_blob_location]); + Unixext.mkdir_safe Xapi_globs.xapi_blob_location 0o700; + end; + + (* delete /local/ databases specified in the db.conf, so they get recreated on restart. + * We must leave any remote database alone because these are owned by the pool and + * not by this node. 
*) + (* get the slave backup lock so we know no more backups are going to be taken -- + * we keep this lock till the bitter end, where we restart below ;) + *) + Mutex.lock Pool_db_backup.slave_backup_m; + finally + (fun () -> + let dbs = Parse_db_conf.parse_db_conf !Xapi_globs.db_conf_path in + (* We need to delete all local dbs but leave remote ones alone *) + let local = List.filter (fun db -> not db.Parse_db_conf.is_on_remote_storage) dbs in + List.iter Unixext.unlink_safe (List.map (fun db->db.Parse_db_conf.path) local); + List.iter Unixext.unlink_safe (List.map Parse_db_conf.generation_filename local); + (* remove any shared databases from my db.conf *) + (* XXX: on OEM edition the db.conf is rebuilt on every boot *) + Parse_db_conf.write_db_conf local; + (* Forget anything we know about configured remote databases: this prevents + any initscript reminding us about them after reboot *) + Helpers.log_exn_continue + (Printf.sprintf "Moving remote database file to backup: %s" + !Xapi_globs.remote_db_conf_fragment_path) + (fun () -> + Unix.rename + !Xapi_globs.remote_db_conf_fragment_path + (!Xapi_globs.remote_db_conf_fragment_path ^ ".bak")) (); + (* Reset the domain 0 network interface naming configuration + * back to a fresh-install state for the currently-installed + * hardware. + *) + ignore + (Forkhelpers.execute_command_get_output + "/etc/sysconfig/network-scripts/interface-rename.py" + ["--reset-to-install"]); + ) + (fun () -> Xapi_fuse.light_fuse_and_reboot_after_eject()); + Xapi_hooks.pool_eject_hook ~__context + end (* Prohibit parallel flushes since they're so expensive *) let sync_m = Mutex.create () open Db_cache_types -let sync_database ~__context = +let sync_database ~__context = Mutex.execute sync_m (fun () -> (* If HA is enabled I'll first try to flush to the LUN *) @@ -949,43 +949,43 @@ let sync_database ~__context = if flushed_to_vdi then debug "flushed database to metadata VDI: assuming this is sufficient." 
else begin - debug "flushing database to all online nodes"; - let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Context.database_of __context)))) in - Threadext.thread_iter - (fun host -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Host.request_backup rpc session_id host generation true)) - (Db.Host.get_all ~__context) + debug "flushing database to all online nodes"; + let generation = Db_lock.with_lock (fun () -> Manifest.generation (Database.manifest (Db_ref.get_database (Context.database_of __context)))) in + Threadext.thread_iter + (fun host -> + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Host.request_backup rpc session_id host generation true)) + (Db.Host.get_all ~__context) end - ) + ) (* This also means me, since call will have been forwarded from the current master *) let designate_new_master ~__context ~host = - if not (Pool_role.is_master ()) then begin - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool - then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); - - (* Only the master can sync the *current* database; only the master - knows the current generation count etc. *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Pool.sync_database rpc session_id); - - let all_hosts = Db.Host.get_all ~__context in - (* We make no attempt to demand a quorum or anything. 
*) - let addresses = List.map (fun self -> Db.Host.get_address ~__context ~self) all_hosts in - let my_address = Db.Host.get_address ~__context ~self:(Helpers.get_localhost ~__context) in - let peers = List.filter (fun x -> x <> my_address) addresses in - Xapi_pool_transition.attempt_two_phase_commit_of_new_master ~__context true peers my_address - end + if not (Pool_role.is_master ()) then begin + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool + then raise (Api_errors.Server_error(Api_errors.ha_is_enabled, [])); + + (* Only the master can sync the *current* database; only the master + knows the current generation count etc. *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Pool.sync_database rpc session_id); + + let all_hosts = Db.Host.get_all ~__context in + (* We make no attempt to demand a quorum or anything. *) + let addresses = List.map (fun self -> Db.Host.get_address ~__context ~self) all_hosts in + let my_address = Db.Host.get_address ~__context ~self:(Helpers.get_localhost ~__context) in + let peers = List.filter (fun x -> x <> my_address) addresses in + Xapi_pool_transition.attempt_two_phase_commit_of_new_master ~__context true peers my_address + end let initial_auth ~__context = !Xapi_globs.pool_secret (** This call is used during master startup so we should check to see whether we need to re-establish our database connection and resynchronise lost database state i.e. 
state which is non-persistent or reverted over a master crash *) -let is_slave ~__context ~host = +let is_slave ~__context ~host = let is_slave = not (Pool_role.is_master ()) in info "Pool.is_slave call received (I'm a %s)" (if is_slave then "slave" else "master"); debug "About to kick the database connection to make sure it's still working..."; @@ -998,54 +998,54 @@ let hello ~__context ~host_uuid ~host_address = None -> `unknown_host | Some host_ref -> - try - let slave_current_address = Db.Host.get_address ~__context ~self:host_ref in - if host_address<>slave_current_address then - begin - (* update slave address in master db because we know its changed *) - Db.Host.set_address ~__context ~self:host_ref ~value:host_address; - (* and refresh console URLs to reflect this change of address *) - Dbsync_master.refresh_console_urls ~__context - end; - let local_fn = is_slave ~host:host_ref in - (* Nb. next call is purely there to establish that we can talk back to the host that initiated this call *) - (* We don't care about the return type, only that no exception is raised while talking to it *) - (try - ignore(Message_forwarding.do_op_on_nolivecheck_no_retry ~local_fn ~__context ~host:host_ref - (fun session_id rpc -> Client.Pool.is_slave rpc session_id host_ref)) - with Api_errors.Server_error(code, [ "pool.is_slave"; "1"; "2" ]) as e when code = Api_errors.message_parameter_count_mismatch -> - debug "Caught %s: this host is a Rio box" (ExnHelper.string_of_exn e) - | Api_errors.Server_error(code, _) as e when code = Api_errors.host_still_booting -> - debug "Caught %s: this host is a Miami box" (ExnHelper.string_of_exn e) - ); - - (* Set the host to disabled initially: when it has finished initialising and is ready to - host VMs it will mark itself as enabled again. 
*) - info "Host.enabled: setting host %s (%s) to disabled" (Ref.string_of host_ref) (Db.Host.get_hostname ~__context ~self:host_ref); - Db.Host.set_enabled ~__context ~self:host_ref ~value:false; - let pool = Helpers.get_pool ~__context in - if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin - debug "Host_metrics.live: setting host %s (%s) to alive" (Ref.string_of host_ref) (Db.Host.get_hostname ~__context ~self:host_ref); - let metrics = Db.Host.get_metrics ~__context ~self:host_ref in - Db.Host_metrics.set_live ~__context ~self:metrics ~value:true; - end; - (* Cancel tasks on behalf of slave *) - debug "Hello message from slave OK: cancelling tasks on behalf of slave"; - Cancel_tasks.cancel_tasks_on_host ~__context ~host_opt:(Some host_ref); - - (* Make sure we mark this host as live again *) - Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m - (fun () -> Xapi_globs.hosts_which_are_shutting_down := List.filter (fun x -> x <> host_ref) !Xapi_globs.hosts_which_are_shutting_down); - - (* Update the heartbeat timestamp for this host so we don't mark it as - offline in the next db_gc *) - let (_: (string * string) list) = Db_gc.tickle_heartbeat ~__context host_ref [] in - `ok - with e -> - debug "Caught exception: %s" (ExnHelper.string_of_exn e); - `cannot_talk_back - -(** Create PIF on each pool host for specified VLAN/device pair. + try + let slave_current_address = Db.Host.get_address ~__context ~self:host_ref in + if host_address<>slave_current_address then + begin + (* update slave address in master db because we know its changed *) + Db.Host.set_address ~__context ~self:host_ref ~value:host_address; + (* and refresh console URLs to reflect this change of address *) + Dbsync_master.refresh_console_urls ~__context + end; + let local_fn = is_slave ~host:host_ref in + (* Nb. 
next call is purely there to establish that we can talk back to the host that initiated this call *) + (* We don't care about the return type, only that no exception is raised while talking to it *) + (try + ignore(Message_forwarding.do_op_on_nolivecheck_no_retry ~local_fn ~__context ~host:host_ref + (fun session_id rpc -> Client.Pool.is_slave rpc session_id host_ref)) + with Api_errors.Server_error(code, [ "pool.is_slave"; "1"; "2" ]) as e when code = Api_errors.message_parameter_count_mismatch -> + debug "Caught %s: this host is a Rio box" (ExnHelper.string_of_exn e) + | Api_errors.Server_error(code, _) as e when code = Api_errors.host_still_booting -> + debug "Caught %s: this host is a Miami box" (ExnHelper.string_of_exn e) + ); + + (* Set the host to disabled initially: when it has finished initialising and is ready to + host VMs it will mark itself as enabled again. *) + info "Host.enabled: setting host %s (%s) to disabled" (Ref.string_of host_ref) (Db.Host.get_hostname ~__context ~self:host_ref); + Db.Host.set_enabled ~__context ~self:host_ref ~value:false; + let pool = Helpers.get_pool ~__context in + if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin + debug "Host_metrics.live: setting host %s (%s) to alive" (Ref.string_of host_ref) (Db.Host.get_hostname ~__context ~self:host_ref); + let metrics = Db.Host.get_metrics ~__context ~self:host_ref in + Db.Host_metrics.set_live ~__context ~self:metrics ~value:true; + end; + (* Cancel tasks on behalf of slave *) + debug "Hello message from slave OK: cancelling tasks on behalf of slave"; + Cancel_tasks.cancel_tasks_on_host ~__context ~host_opt:(Some host_ref); + + (* Make sure we mark this host as live again *) + Mutex.execute Xapi_globs.hosts_which_are_shutting_down_m + (fun () -> Xapi_globs.hosts_which_are_shutting_down := List.filter (fun x -> x <> host_ref) !Xapi_globs.hosts_which_are_shutting_down); + + (* Update the heartbeat timestamp for this host so we don't mark it as + offline in the next 
db_gc *) + let (_: (string * string) list) = Db_gc.tickle_heartbeat ~__context host_ref [] in + `ok + with e -> + debug "Caught exception: %s" (ExnHelper.string_of_exn e); + `cannot_talk_back + +(** Create PIF on each pool host for specified VLAN/device pair. On error, destroy all of the PIFs that have already been created. *) (* !!! THIS CALL IS FUNDAMENTALLY BROKEN wrt bonds -- see CA-22613; it should no longer be used. I have pulled together the function definitions specific to create_VLAN and moved them into create_VLAN definition @@ -1053,47 +1053,47 @@ let hello ~__context ~host_uuid ~host_address = so we don't break existing API clients) there is no need to factor the commonality between these 2 fns. *) let create_VLAN ~__context ~device ~network ~vLAN = - (* Destroy the list of PIFs - try destroying them with the client API, and if - the host is offline, just destroy the record *) - let safe_destroy_PIFs ~__context pifs = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter - (fun pif -> - try - (* This call destroys the metrics too *) - Client.PIF.destroy rpc session_id pif - with - | Api_errors.Server_error (a,b) -> - if a=Api_errors.host_offline - then - Db.PIF.destroy ~__context ~self:pif - else - (* If theres any other error, leave the PIF to be destroyed - manually. We certainly don't want the Db to be out of - sync with reality *) - () - | _ -> () - ) pifs) in + (* Destroy the list of PIFs - try destroying them with the client API, and if + the host is offline, just destroy the record *) + let safe_destroy_PIFs ~__context pifs = + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter + (fun pif -> + try + (* This call destroys the metrics too *) + Client.PIF.destroy rpc session_id pif + with + | Api_errors.Server_error (a,b) -> + if a=Api_errors.host_offline + then + Db.PIF.destroy ~__context ~self:pif + else + (* If theres any other error, leave the PIF to be destroyed + manually. 
We certainly don't want the Db to be out of + sync with reality *) + () + | _ -> () + ) pifs) in let created = ref [] in let hosts = Db.Host.get_all ~__context in Helpers.call_api_functions ~__context (fun rpc session_id -> let pifs = List.map ( - fun host -> - try - let pif = Client.PIF.create_VLAN rpc session_id device network host vLAN in - created := pif :: (!created); - pif - with - | e -> - (* Any error and we'll clean up and exit *) - safe_destroy_PIFs ~__context !created; - raise e - ) hosts in + fun host -> + try + let pif = Client.PIF.create_VLAN rpc session_id device network host vLAN in + created := pif :: (!created); + pif + with + | e -> + (* Any error and we'll clean up and exit *) + safe_destroy_PIFs ~__context !created; + raise e + ) hosts in (* CA-22381: best-effort plug of the newly-created VLAN PIFs. Note if any of these calls fail - then nothing is rolled-back and the system will be left with some unplugged VLAN PIFs, which may - confuse the HA agility calculation (but nothing else since everything else can plug on demand) *) + then nothing is rolled-back and the system will be left with some unplugged VLAN PIFs, which may + confuse the HA agility calculation (but nothing else since everything else can plug on demand) *) List.iter (fun pif -> Helpers.log_exn_continue (Printf.sprintf "Plugging VLAN PIF %s" (Ref.string_of pif)) (fun () -> Client.PIF.plug rpc session_id pif) ()) pifs; pifs ) @@ -1102,57 +1102,57 @@ let create_VLAN ~__context ~device ~network ~vLAN = explicitly instead of a device name we ensure that this call works for creating VLANs on bonds across pools.. 
*) let create_VLAN_from_PIF ~__context ~pif ~network ~vLAN = - (* Destroy the list of VLANs, best-effort *) - let safe_destroy_VLANs ~__context vlans = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter - (fun vlan -> - try Client.VLAN.destroy rpc session_id vlan - with _ -> () - ) vlans - ) in - (* Read the network that the pif is attached to; get the list of all pifs on that network - -- that'll be the pif for each host that we want to make the vlan on. Then go and make - the vlan on all these pifs. Then attempt to do a best-effort plug of the newly created pifs - in order to satisfy ca-22381 *) - let network_to_get_pifs_from = Db.PIF.get_network ~__context ~self:pif in - let pifs_on_network = Db.Network.get_PIFs ~__context ~self:network_to_get_pifs_from in - let is_host_live pif = - let h = Db.PIF.get_host ~__context ~self:pif in - let host_metric = Db.Host.get_metrics ~__context ~self:h in - Db.Host_metrics.get_live ~__context ~self:host_metric in - let pifs_on_live_hosts = List.filter is_host_live pifs_on_network in - - (* Keep track of what we've created *) - let created = ref [] in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let vlans = - List.map - (fun pif -> - try - let vlan = Client.VLAN.create rpc session_id pif vLAN network in - created := vlan :: !created; - vlan - with - | e -> - (* Any error and we'll clean up and exit *) - safe_destroy_VLANs ~__context !created; - raise e - ) pifs_on_live_hosts in - let vlan_pifs = List.map (fun vlan -> Db.VLAN.get_untagged_PIF ~__context ~self:vlan) vlans in - (* CA-22381: best-effort plug of the newly-created VLAN PIFs. 
Note if any of these calls fail - then nothing is rolled-back and the system will be left with some unplugged VLAN PIFs, which may - confuse the HA agility calculation (but nothing else since everything else can plug on demand) *) - List.iter - (fun pif -> - Helpers.log_exn_continue - (Printf.sprintf "Plugging VLAN PIF %s" (Ref.string_of pif)) - (fun () -> Client.PIF.plug rpc session_id pif) () - ) vlan_pifs; - vlan_pifs - ) + (* Destroy the list of VLANs, best-effort *) + let safe_destroy_VLANs ~__context vlans = + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter + (fun vlan -> + try Client.VLAN.destroy rpc session_id vlan + with _ -> () + ) vlans + ) in + (* Read the network that the pif is attached to; get the list of all pifs on that network + -- that'll be the pif for each host that we want to make the vlan on. Then go and make + the vlan on all these pifs. Then attempt to do a best-effort plug of the newly created pifs + in order to satisfy ca-22381 *) + let network_to_get_pifs_from = Db.PIF.get_network ~__context ~self:pif in + let pifs_on_network = Db.Network.get_PIFs ~__context ~self:network_to_get_pifs_from in + let is_host_live pif = + let h = Db.PIF.get_host ~__context ~self:pif in + let host_metric = Db.Host.get_metrics ~__context ~self:h in + Db.Host_metrics.get_live ~__context ~self:host_metric in + let pifs_on_live_hosts = List.filter is_host_live pifs_on_network in + + (* Keep track of what we've created *) + let created = ref [] in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let vlans = + List.map + (fun pif -> + try + let vlan = Client.VLAN.create rpc session_id pif vLAN network in + created := vlan :: !created; + vlan + with + | e -> + (* Any error and we'll clean up and exit *) + safe_destroy_VLANs ~__context !created; + raise e + ) pifs_on_live_hosts in + let vlan_pifs = List.map (fun vlan -> Db.VLAN.get_untagged_PIF ~__context ~self:vlan) vlans in + (* CA-22381: best-effort plug of the 
newly-created VLAN PIFs. Note if any of these calls fail + then nothing is rolled-back and the system will be left with some unplugged VLAN PIFs, which may + confuse the HA agility calculation (but nothing else since everything else can plug on demand) *) + List.iter + (fun pif -> + Helpers.log_exn_continue + (Printf.sprintf "Plugging VLAN PIF %s" (Ref.string_of pif)) + (fun () -> Client.PIF.plug rpc session_id pif) () + ) vlan_pifs; + vlan_pifs + ) let slave_network_report ~__context ~phydevs ~dev_to_mac ~dev_to_mtu ~slave_host = [] @@ -1162,31 +1162,31 @@ let slave_network_report ~__context ~phydevs ~dev_to_mac ~dev_to_mtu ~slave_host (* Let's only process one enable/disable at a time. I would have used an allowed_operation for this but it would involve a datamodel change and it's too late for Orlando. *) let enable_disable_m = Mutex.create () -let enable_ha ~__context ~heartbeat_srs ~configuration = - if not (Helpers.pool_has_different_host_platform_versions ~__context) - then Mutex.execute enable_disable_m (fun () -> Xapi_ha.enable __context heartbeat_srs configuration) - else - raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) +let enable_ha ~__context ~heartbeat_srs ~configuration = + if not (Helpers.pool_has_different_host_platform_versions ~__context) + then Mutex.execute enable_disable_m (fun () -> Xapi_ha.enable __context heartbeat_srs configuration) + else + raise (Api_errors.Server_error (Api_errors.not_supported_during_upgrade, [])) let disable_ha ~__context = Mutex.execute enable_disable_m (fun () -> Xapi_ha.disable __context) let ha_prevent_restarts_for ~__context ~seconds = Xapi_ha.ha_prevent_restarts_for __context seconds -let ha_failover_plan_exists ~__context ~n = +let ha_failover_plan_exists ~__context ~n = let n = Int64.to_int n in let all_protected_vms = Xapi_ha_vm_failover.all_protected_vms ~__context in match Xapi_ha_vm_failover.plan_for_n_failures ~__context ~all_protected_vms n with | 
Xapi_ha_vm_failover.Plan_exists_for_all_VMs -> - info "HA failover plan exists for all protected VMs for up to %d host failures" n; - true + info "HA failover plan exists for all protected VMs for up to %d host failures" n; + true | Xapi_ha_vm_failover.Plan_exists_excluding_non_agile_VMs -> - info "HA failover plan exists for all protected VMs, excluding some non-agile VMs, for up to %d host failures" n; - false (* might define this as true later *) + info "HA failover plan exists for all protected VMs, excluding some non-agile VMs, for up to %d host failures" n; + false (* might define this as true later *) | Xapi_ha_vm_failover.No_plan_exists -> - info "No HA failover plan exists for %d host failures" n; - false + info "No HA failover plan exists for %d host failures" n; + false -let ha_compute_max_host_failures_to_tolerate ~__context = +let ha_compute_max_host_failures_to_tolerate ~__context = let n = Xapi_ha_vm_failover.compute_max_host_failures_to_tolerate ~__context () in (* Update the Pool with this information if HA is currently enabled *) let pool = Helpers.get_pool ~__context in @@ -1204,30 +1204,30 @@ let ha_compute_max_host_failures_to_tolerate ~__context = end; n -let ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration = +let ha_compute_hypothetical_max_host_failures_to_tolerate ~__context ~configuration = (* Check the restart priorities all look valid *) - List.iter (fun (_, pri) -> - if not(List.mem pri Constants.ha_valid_restart_priorities) - then raise (Api_errors.Server_error(Api_errors.invalid_value, [ "ha_restart_priority"; pri ]))) configuration; + List.iter (fun (_, pri) -> + if not(List.mem pri Constants.ha_valid_restart_priorities) + then raise (Api_errors.Server_error(Api_errors.invalid_value, [ "ha_restart_priority"; pri ]))) configuration; let protected_vms = List.map fst (List.filter (fun (vm, priority) -> Helpers.vm_should_always_run true priority) configuration) in let protected_vms = List.map (fun vm -> vm, 
Db.VM.get_record ~__context ~self:vm) protected_vms in Xapi_ha_vm_failover.compute_max_host_failures_to_tolerate ~__context ~protected_vms () -let ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms = +let ha_compute_vm_failover_plan ~__context ~failed_hosts ~failed_vms = let vms = List.map (fun vm -> vm, Db.VM.get_record ~__context ~self:vm) failed_vms in let all_hosts = Db.Host.get_all ~__context in let currently_live_hosts = List.filter (fun h -> try Db.Host_metrics.get_live ~__context ~self:(Db.Host.get_metrics ~__context ~self:h) with _ -> false) all_hosts in let live_hosts = List.filter (fun host -> not(List.mem host failed_hosts)) currently_live_hosts in debug "using live_hosts = [ %s ]" (String.concat "; " (List.map Ref.string_of live_hosts)); (* All failed_vms must be agile *) - let errors = List.concat - (List.map - (fun self -> - try Agility.vm_assert_agile ~__context ~self; [ self, [ "error_code", Api_errors.host_not_enough_free_memory ] ] (* default *) - with Api_errors.Server_error(code, params) -> [ self, [ "error_code", code ]]) failed_vms) in - let plan = List.map (fun (vm, host) -> vm, [ "host", Ref.string_of host ]) - (Xapi_ha_vm_failover.compute_evacuation_plan ~__context (List.length all_hosts) live_hosts vms) in + let errors = List.concat + (List.map + (fun self -> + try Agility.vm_assert_agile ~__context ~self; [ self, [ "error_code", Api_errors.host_not_enough_free_memory ] ] (* default *) + with Api_errors.Server_error(code, params) -> [ self, [ "error_code", code ]]) failed_vms) in + let plan = List.map (fun (vm, host) -> vm, [ "host", Ref.string_of host ]) + (Xapi_ha_vm_failover.compute_evacuation_plan ~__context (List.length all_hosts) live_hosts vms) in (List.filter (fun (vm, _) -> not(List.mem_assoc vm plan)) errors) @ plan let create_new_blob ~__context ~pool ~name ~mime_type ~public = @@ -1235,7 +1235,7 @@ let create_new_blob ~__context ~pool ~name ~mime_type ~public = Db.Pool.add_to_blobs ~__context ~self:pool ~key:name 
~value:blob; blob -let set_ha_host_failures_to_tolerate ~__context ~self ~value = +let set_ha_host_failures_to_tolerate ~__context ~self ~value = if value < 0L then raise (Api_errors.Server_error(Api_errors.invalid_value, [ "ha_host_failures_to_tolerate"; Int64.to_string value ])); (* Don't block changes if we have no plan at all *) @@ -1243,21 +1243,21 @@ let set_ha_host_failures_to_tolerate ~__context ~self ~value = if Db.Pool.get_ha_plan_exists_for ~__context ~self:pool > 0L then Xapi_ha_vm_failover.assert_nfailures_change_preserves_ha_plan ~__context (Int64.to_int value); Db.Pool.set_ha_host_failures_to_tolerate ~__context ~self ~value; - let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in () + let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in () -let ha_schedule_plan_recomputation ~__context = +let ha_schedule_plan_recomputation ~__context = Xapi_ha.Monitor.plan_out_of_date := true let call_fn_on_host ~__context f host = - Helpers.call_api_functions ~__context (fun rpc session_id -> - try - f ~rpc ~session_id ~host - with e -> begin - warn "Exception raised while performing operation on host %s error: %s" - (Ref.string_of host) (ExnHelper.string_of_exn e); - raise e - end - ) + Helpers.call_api_functions ~__context (fun rpc session_id -> + try + f ~rpc ~session_id ~host + with e -> begin + warn "Exception raised while performing operation on host %s error: %s" + (Ref.string_of host) (ExnHelper.string_of_exn e); + raise e + end + ) let enable_binary_storage ~__context = Xapi_pool_helpers.call_fn_on_slaves_then_master ~__context Client.Host.enable_binary_storage @@ -1294,17 +1294,17 @@ let certificate_sync = Certificates.pool_sync (* destroy all subject not validated in external authentication *) let revalidate_subjects ~__context = - let revalidate_subject ~__context ~self = - let subj_id = Db.Subject.get_subject_identifier ~__context ~self in - debug "Revalidating subject %s" subj_id; - try - let open Auth_signature in 
- ignore((Extauth.Ext_auth.d()).query_subject_information subj_id) - with Not_found -> - debug "Destroying subject %s" subj_id; - Xapi_subject.destroy ~__context ~self in - let subjects_in_db = Db.Subject.get_all ~__context in - List.iter (fun subj -> revalidate_subject ~__context ~self:subj) subjects_in_db + let revalidate_subject ~__context ~self = + let subj_id = Db.Subject.get_subject_identifier ~__context ~self in + debug "Revalidating subject %s" subj_id; + try + let open Auth_signature in + ignore((Extauth.Ext_auth.d()).query_subject_information subj_id) + with Not_found -> + debug "Destroying subject %s" subj_id; + Xapi_subject.destroy ~__context ~self in + let subjects_in_db = Db.Subject.get_all ~__context in + List.iter (fun subj -> revalidate_subject ~__context ~self:subj) subjects_in_db (* CP-719: Enables external auth/directory service across a whole pool; *) @@ -1313,486 +1313,486 @@ let revalidate_subjects ~__context = * If a call to a single host to enable external auth fails, then Pool.enable_external_auth fails, and there is a best-effort attempt to disable any hosts who had their external auth successfully enabled before the failure occured *) -let enable_external_auth ~__context ~pool ~config ~service_name ~auth_type = - - (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) - (* enabling/disabling the pool's extauth at the same time could produce inconsistent states for extauth in each host of the pool *) - Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - - (* the first element in the hosts list needs to be the pool's master, because we *) - (* always want to update first the master's record due to homogeneity checks in CA-24856 *) - let hosts = Xapi_pool_helpers.get_master_slaves_list ~__context in - - (* 1. 
verifies if any of the pool hosts already have external auth enabled, and fails if so *) - (* this step isn't strictly necessary, since we will anyway fail in (2) if that is the case, but *) - (* it avoids unnecessary network roundtrips in the pool *) - try - let is_external_auth_enabled host = (Db.Host.get_external_auth_type ~__context ~self:host <> "") in - let host = List.find is_external_auth_enabled hosts in - begin - let host_name_label = Db.Host.get_name_label ~__context ~self:host in - let msg = ("external authentication service in host "^host_name_label^" is already enabled") in - debug "Failed to enable external authentication type %s for service name %s in pool: %s" auth_type service_name msg; - raise (Api_errors.Server_error(Api_errors.pool_auth_already_enabled, [(Ref.string_of host)])) - end - with Not_found -> () (* that's expected, no host had external_auth enabled*) - ; - (* 1b. assert that there are no duplicate hostnames in the pool *) - if (List.length hosts) - <> - (List.length - (Listext.List.setify - (List.map (fun h->Db.Host.get_hostname ~__context ~self:h) hosts)) - ) - then begin - let errmsg = "At least two hosts in the pool have the same hostname" in - debug "%s" errmsg; - raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed_duplicate_hostname, - [(Ref.string_of (List.hd hosts));errmsg])) - end - else - (* 2. 
tries to enable the external authentication in each host of the pool *) - let host_error_msg = ref ("","","") in - let rollback_list = - let _rollback_list = ref [] in - (* builds a list of hosts to rollback, if any *) - if List.for_all (*List.for_all goes through the list up to the point when the predicate fails, inclusive *) - (fun h -> - try(* forward the call to the host in the pool *) - begin - debug "trying to enable external authentication on host %s" (Db.Host.get_name_label ~__context ~self:h); - call_fn_on_host ~__context (Client.Host.enable_external_auth ~config ~service_name ~auth_type) h; - _rollback_list := h::!_rollback_list; (* add h to potential rollback list *) - true (* h was successfully enabled. try next in the pool *) - end - with - | Api_errors.Server_error (err,[msg]) as e -> begin - debug "received exception while enabling external authentication for host %s: %s" - (Db.Host.get_name_label ~__context ~self:h) (err^": "^msg); - host_error_msg := (err,msg,ExnHelper.string_of_exn e); - (* error enabling h. we add h here so that we also explicitly disable it during rollback *) - (* [that's because it might be in an inconsistent external_auth state] *) - _rollback_list := h::!_rollback_list; - false - end - | e -> begin - debug "received exception while enabling external authentication for host %s: %s" - (Db.Host.get_name_label ~__context ~self:h) (ExnHelper.string_of_exn e); - host_error_msg := ("","",ExnHelper.string_of_exn e); - (* error enabling h. we add h here so that we also explicitly disable it during rollback *) - (* [that's because it might be in an inconsistent external_auth state] *) - _rollback_list := h::!_rollback_list; - false - end - ) hosts - then (* if List.for_all returned true, then we have successfully enabled all hosts in the pool *) - begin - _rollback_list := [] (* we do not need to rollback any hosts in this case *) - end; - !_rollback_list - in - (* 3. 
if any failed, then do a best-effort rollback, disabling any host that has been just enabled *) - if (List.length rollback_list > 0) - then begin (* FAILED *) - let failed_host = (* the failed host is the first item in the rollback list *) - (List.hd rollback_list) in - let failed_host_name_label = Db.Host.get_name_label ~__context ~self:failed_host in - match !host_error_msg with (err_of_e,msg_of_e,string_of_e) -> - debug "Rolling back any enabled host, because failed to enable external authentication for host %s in the pool: %s" failed_host_name_label string_of_e; - List.iter (fun host -> - (* best-effort attempt to disable all enabled hosts, swallowing any exceptions *) - try (call_fn_on_host ~__context (Client.Host.disable_external_auth ~config) host) - with e-> (debug "During rollback: Failed to disable external authentication for host %s: %s" - (Db.Host.get_name_label ~__context ~self:host) (ExnHelper.string_of_exn e) - ) - ) (List.rev rollback_list); - (* we bubble up the exception returned by the failed host *) - match err_of_e with - | "" -> (* generic unknown exception *) - raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed, [(Ref.string_of failed_host);string_of_e])) - | err_of_e when err_of_e=Api_errors.auth_unknown_type -> - raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg_of_e])) - | err_of_e when Xstringext.String.startswith Api_errors.auth_enable_failed err_of_e -> - raise (Api_errors.Server_error(Api_errors.pool_auth_prefix^err_of_e, [(Ref.string_of failed_host);msg_of_e])) - | _ -> (* Api_errors.Server_error *) - raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed, [(Ref.string_of failed_host);string_of_e])) - end - - else begin (* OK *) - debug "External authentication enabled for all hosts in the pool"; - - (* CA-59647: remove subjects that do not belong to the new domain *) - revalidate_subjects ~__context; - end - ) +let enable_external_auth ~__context ~pool ~config ~service_name ~auth_type = + + 
(* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) + (* enabling/disabling the pool's extauth at the same time could produce inconsistent states for extauth in each host of the pool *) + Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + + (* the first element in the hosts list needs to be the pool's master, because we *) + (* always want to update first the master's record due to homogeneity checks in CA-24856 *) + let hosts = Xapi_pool_helpers.get_master_slaves_list ~__context in + + (* 1. verifies if any of the pool hosts already have external auth enabled, and fails if so *) + (* this step isn't strictly necessary, since we will anyway fail in (2) if that is the case, but *) + (* it avoids unnecessary network roundtrips in the pool *) + try + let is_external_auth_enabled host = (Db.Host.get_external_auth_type ~__context ~self:host <> "") in + let host = List.find is_external_auth_enabled hosts in + begin + let host_name_label = Db.Host.get_name_label ~__context ~self:host in + let msg = ("external authentication service in host "^host_name_label^" is already enabled") in + debug "Failed to enable external authentication type %s for service name %s in pool: %s" auth_type service_name msg; + raise (Api_errors.Server_error(Api_errors.pool_auth_already_enabled, [(Ref.string_of host)])) + end + with Not_found -> () (* that's expected, no host had external_auth enabled*) + ; + (* 1b. assert that there are no duplicate hostnames in the pool *) + if (List.length hosts) + <> + (List.length + (Listext.List.setify + (List.map (fun h->Db.Host.get_hostname ~__context ~self:h) hosts)) + ) + then begin + let errmsg = "At least two hosts in the pool have the same hostname" in + debug "%s" errmsg; + raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed_duplicate_hostname, + [(Ref.string_of (List.hd hosts));errmsg])) + end + else + (* 2. 
tries to enable the external authentication in each host of the pool *) + let host_error_msg = ref ("","","") in + let rollback_list = + let _rollback_list = ref [] in + (* builds a list of hosts to rollback, if any *) + if List.for_all (*List.for_all goes through the list up to the point when the predicate fails, inclusive *) + (fun h -> + try(* forward the call to the host in the pool *) + begin + debug "trying to enable external authentication on host %s" (Db.Host.get_name_label ~__context ~self:h); + call_fn_on_host ~__context (Client.Host.enable_external_auth ~config ~service_name ~auth_type) h; + _rollback_list := h::!_rollback_list; (* add h to potential rollback list *) + true (* h was successfully enabled. try next in the pool *) + end + with + | Api_errors.Server_error (err,[msg]) as e -> begin + debug "received exception while enabling external authentication for host %s: %s" + (Db.Host.get_name_label ~__context ~self:h) (err^": "^msg); + host_error_msg := (err,msg,ExnHelper.string_of_exn e); + (* error enabling h. we add h here so that we also explicitly disable it during rollback *) + (* [that's because it might be in an inconsistent external_auth state] *) + _rollback_list := h::!_rollback_list; + false + end + | e -> begin + debug "received exception while enabling external authentication for host %s: %s" + (Db.Host.get_name_label ~__context ~self:h) (ExnHelper.string_of_exn e); + host_error_msg := ("","",ExnHelper.string_of_exn e); + (* error enabling h. we add h here so that we also explicitly disable it during rollback *) + (* [that's because it might be in an inconsistent external_auth state] *) + _rollback_list := h::!_rollback_list; + false + end + ) hosts + then (* if List.for_all returned true, then we have successfully enabled all hosts in the pool *) + begin + _rollback_list := [] (* we do not need to rollback any hosts in this case *) + end; + !_rollback_list + in + (* 3. 
if any failed, then do a best-effort rollback, disabling any host that has been just enabled *) + if (List.length rollback_list > 0) + then begin (* FAILED *) + let failed_host = (* the failed host is the first item in the rollback list *) + (List.hd rollback_list) in + let failed_host_name_label = Db.Host.get_name_label ~__context ~self:failed_host in + match !host_error_msg with (err_of_e,msg_of_e,string_of_e) -> + debug "Rolling back any enabled host, because failed to enable external authentication for host %s in the pool: %s" failed_host_name_label string_of_e; + List.iter (fun host -> + (* best-effort attempt to disable all enabled hosts, swallowing any exceptions *) + try (call_fn_on_host ~__context (Client.Host.disable_external_auth ~config) host) + with e-> (debug "During rollback: Failed to disable external authentication for host %s: %s" + (Db.Host.get_name_label ~__context ~self:host) (ExnHelper.string_of_exn e) + ) + ) (List.rev rollback_list); + (* we bubble up the exception returned by the failed host *) + match err_of_e with + | "" -> (* generic unknown exception *) + raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed, [(Ref.string_of failed_host);string_of_e])) + | err_of_e when err_of_e=Api_errors.auth_unknown_type -> + raise (Api_errors.Server_error(Api_errors.auth_unknown_type, [msg_of_e])) + | err_of_e when Xstringext.String.startswith Api_errors.auth_enable_failed err_of_e -> + raise (Api_errors.Server_error(Api_errors.pool_auth_prefix^err_of_e, [(Ref.string_of failed_host);msg_of_e])) + | _ -> (* Api_errors.Server_error *) + raise (Api_errors.Server_error(Api_errors.pool_auth_enable_failed, [(Ref.string_of failed_host);string_of_e])) + end + + else begin (* OK *) + debug "External authentication enabled for all hosts in the pool"; + + (* CA-59647: remove subjects that do not belong to the new domain *) + revalidate_subjects ~__context; + end + ) (* CP-719: Calls Host.disable_external_auth() on each of the hosts in the pool * 
Reports failure if any of the individual Host.disable_external_auth calls failed or timed-out * Guarantees to call Host.disable_external_auth() on every pool host, regardless of whether some of these calls fail *) -let disable_external_auth ~__context ~pool ~config = - - (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) - (* enabling/disabling the pool's extauth at the same time could produce inconsistent states for extauth in each host of the pool *) - Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - - (* the first element in the hosts list needs to be the pool's master, because we *) - (* always want to update first the master's record due to homogeneity checks in CA-24856 *) - let hosts = Xapi_pool_helpers.get_master_slaves_list ~__context in - let host_msgs_list = - List.map (fun host -> - try (* forward the call to the host in the pool *) - call_fn_on_host ~__context (Client.Host.disable_external_auth ~config) host; - (* no failed host to add to the filtered list, just visit next host *) - (host,"","") - with - | Api_errors.Server_error (err,[host_msg]) -> begin - let msg = (Printf.sprintf "%s: %s" - (Db.Host.get_name_label ~__context ~self:host) host_msg) in - debug "Failed to disable the external authentication of pool in host %s" msg; - (* no exception should be raised here, we want to visit every host in hosts *) - (host,err,msg) - end - | e-> (* add failed host to the filtered list and visit next host *) - let msg = (Printf.sprintf "%s: %s" - (Db.Host.get_name_label ~__context ~self:host) (ExnHelper.string_of_exn e)) in - debug "Failed to disable the external authentication of pool in host %s" msg; - (* no exception should be raised here, we want to visit every host in hosts *) - (host,"err",msg) - ) - hosts - in - let failedhosts_list = List.filter (fun (host,err,msg) -> err<>"") host_msgs_list in - if (List.length failedhosts_list > 0) - then begin (* FAILED *) - match List.hd failedhosts_list 
with (host,err,msg) -> - debug "Failed to disable the external authentication of at least one host in the pool"; - if Xstringext.String.startswith Api_errors.auth_disable_failed err - then (* tagged exception *) - raise (Api_errors.Server_error(Api_errors.pool_auth_prefix^err, [(Ref.string_of host);msg])) - else (* generic exception *) - raise (Api_errors.Server_error(Api_errors.pool_auth_disable_failed, [(Ref.string_of host);msg])); - end - else begin (* OK *) - debug "The external authentication of all hosts in the pool was disabled successfully"; - end - ) - +let disable_external_auth ~__context ~pool ~config = + + (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) + (* enabling/disabling the pool's extauth at the same time could produce inconsistent states for extauth in each host of the pool *) + Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + + (* the first element in the hosts list needs to be the pool's master, because we *) + (* always want to update first the master's record due to homogeneity checks in CA-24856 *) + let hosts = Xapi_pool_helpers.get_master_slaves_list ~__context in + let host_msgs_list = + List.map (fun host -> + try (* forward the call to the host in the pool *) + call_fn_on_host ~__context (Client.Host.disable_external_auth ~config) host; + (* no failed host to add to the filtered list, just visit next host *) + (host,"","") + with + | Api_errors.Server_error (err,[host_msg]) -> begin + let msg = (Printf.sprintf "%s: %s" + (Db.Host.get_name_label ~__context ~self:host) host_msg) in + debug "Failed to disable the external authentication of pool in host %s" msg; + (* no exception should be raised here, we want to visit every host in hosts *) + (host,err,msg) + end + | e-> (* add failed host to the filtered list and visit next host *) + let msg = (Printf.sprintf "%s: %s" + (Db.Host.get_name_label ~__context ~self:host) (ExnHelper.string_of_exn e)) in + debug "Failed to disable the 
external authentication of pool in host %s" msg; + (* no exception should be raised here, we want to visit every host in hosts *) + (host,"err",msg) + ) + hosts + in + let failedhosts_list = List.filter (fun (host,err,msg) -> err<>"") host_msgs_list in + if (List.length failedhosts_list > 0) + then begin (* FAILED *) + match List.hd failedhosts_list with (host,err,msg) -> + debug "Failed to disable the external authentication of at least one host in the pool"; + if Xstringext.String.startswith Api_errors.auth_disable_failed err + then (* tagged exception *) + raise (Api_errors.Server_error(Api_errors.pool_auth_prefix^err, [(Ref.string_of host);msg])) + else (* generic exception *) + raise (Api_errors.Server_error(Api_errors.pool_auth_disable_failed, [(Ref.string_of host);msg])); + end + else begin (* OK *) + debug "The external authentication of all hosts in the pool was disabled successfully"; + end + ) + (* CA-24856: detect non-homogeneous external-authentication config in pool *) let detect_nonhomogeneous_external_auth_in_pool ~__context = - Helpers.call_api_functions ~__context (fun rpc session_id -> - let slaves = Xapi_pool_helpers.get_slaves_list ~__context in - List.iter (fun slave -> - (* check every *slave* in the pool... (the master is always homogeneous to the pool by definition) *) - (* (also, checking the master inside this function would create an infinite recursion loop) *) - Xapi_host.detect_nonhomogeneous_external_auth_in_host ~__context ~host:slave - ) slaves - ) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let slaves = Xapi_pool_helpers.get_slaves_list ~__context in + List.iter (fun slave -> + (* check every *slave* in the pool... 
(the master is always homogeneous to the pool by definition) *) + (* (also, checking the master inside this function would create an infinite recursion loop) *) + Xapi_host.detect_nonhomogeneous_external_auth_in_host ~__context ~host:slave + ) slaves + ) let run_detect_nonhomogeneous_external_auth_in_pool () = - (* we do not want to run this test while the pool's extauth is being enabled or disabled *) - Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - ignore (Server_helpers.exec_with_new_task "run_detect_nonhomogeneous_external_auth" - (fun __context -> - detect_nonhomogeneous_external_auth_in_pool ~__context - ) - ) - ) + (* we do not want to run this test while the pool's extauth is being enabled or disabled *) + Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + ignore (Server_helpers.exec_with_new_task "run_detect_nonhomogeneous_external_auth" + (fun __context -> + detect_nonhomogeneous_external_auth_in_pool ~__context + ) + ) + ) let asynchronously_run_detect_nonhomogeneous_external_auth_in_pool = - At_least_once_more.make "running detect_nonhomogeneous_external_auth" run_detect_nonhomogeneous_external_auth_in_pool + At_least_once_more.make "running detect_nonhomogeneous_external_auth" run_detect_nonhomogeneous_external_auth_in_pool (* non-blocking asynchronous call to verify if the external authentication configuration of the pool is homogeneous *) let detect_nonhomogeneous_external_auth () = - At_least_once_more.again asynchronously_run_detect_nonhomogeneous_external_auth_in_pool + At_least_once_more.again asynchronously_run_detect_nonhomogeneous_external_auth_in_pool (* CA-24856: API call to detect non-homogeneous external-authentication config in pool *) let detect_nonhomogeneous_external_auth ~__context ~pool = - detect_nonhomogeneous_external_auth () - - + detect_nonhomogeneous_external_auth () + + let create_redo_log_vdi ~__context ~sr = - Helpers.call_api_functions ~__context - 
(fun rpc session_id -> - Client.VDI.create ~rpc ~session_id - ~name_label:"Metadata redo-log" - ~name_description:"Used when HA is disabled, while extra security is still desired" - ~sR:sr - ~virtual_size:Redo_log.minimum_vdi_size - ~_type:`redo_log - ~sharable:true - ~read_only:false - ~other_config:[] - ~xenstore_data:[] - ~sm_config:Redo_log.redo_log_sm_config - ~tags:[] + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.VDI.create ~rpc ~session_id + ~name_label:"Metadata redo-log" + ~name_description:"Used when HA is disabled, while extra security is still desired" + ~sR:sr + ~virtual_size:Redo_log.minimum_vdi_size + ~_type:`redo_log + ~sharable:true + ~read_only:false + ~other_config:[] + ~xenstore_data:[] + ~sm_config:Redo_log.redo_log_sm_config + ~tags:[] ) - -let find_or_create_redo_log_vdi ~__context ~sr = - match - List.filter - (fun self -> true - && (Db.VDI.get_type ~__context ~self = `redo_log) - && (Db.VDI.get_virtual_size ~__context ~self >= Redo_log.minimum_vdi_size)) - (Db.SR.get_VDIs ~__context ~self:sr) with - | x :: _ -> - info "re-using existing redo-log VDI: %s" (Db.VDI.get_uuid ~__context ~self:x); - x - | [] -> - info "no suitable existing redo-log VDI found; creating a fresh one"; - create_redo_log_vdi ~__context ~sr - - + +let find_or_create_redo_log_vdi ~__context ~sr = + match + List.filter + (fun self -> true + && (Db.VDI.get_type ~__context ~self = `redo_log) + && (Db.VDI.get_virtual_size ~__context ~self >= Redo_log.minimum_vdi_size)) + (Db.SR.get_VDIs ~__context ~self:sr) with + | x :: _ -> + info "re-using existing redo-log VDI: %s" (Db.VDI.get_uuid ~__context ~self:x); + x + | [] -> + info "no suitable existing redo-log VDI found; creating a fresh one"; + create_redo_log_vdi ~__context ~sr + + let enable_redo_log ~__context ~sr = - info "Enabling redo log..."; - - (* find or create suitable VDI *) - let vdi = - try - find_or_create_redo_log_vdi ~__context ~sr - with e -> - let msg = "failed to create a VDI 
for the redo log on the SR with the given UUID." in - raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, [msg])) - in - - (* ensure VDI is static, and set a flag in the local DB, such that the redo log can be - * re-enabled after a restart of xapi *) - begin try - debug "Ensuring redo-log VDI is static on all hosts in the pool"; - let hosts = Db.Host.get_all ~__context in - let attach host = - debug "Attaching VDI on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Host.attach_static_vdis rpc session_id host [vdi, Xapi_globs.gen_metadata_vdi_reason]); - debug "Setting redo-log local-DB flag on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Host.set_localdb_key rpc session_id host Constants.redo_log_enabled "true"); - in - List.iter attach hosts; - debug "VDI is static on all hosts" - with e -> - let msg = "failed to make VDI static." in - raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, [msg])) - end; - - (* update state *) - debug "Updating state..."; - let pool = Helpers.get_pool ~__context in - Db.Pool.set_redo_log_vdi ~__context ~self:pool ~value:vdi; - Db.Pool.set_redo_log_enabled ~__context ~self:pool ~value:true; - - (* enable the new redo log, unless HA is enabled (which means a redo log - * is already in use) *) - if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin - Redo_log.enable Xapi_ha.ha_redo_log Xapi_globs.gen_metadata_vdi_reason; - Localdb.put Constants.redo_log_enabled "true" - end; - info "The redo log is now enabled" - + info "Enabling redo log..."; + + (* find or create suitable VDI *) + let vdi = + try + find_or_create_redo_log_vdi ~__context ~sr + with e -> + let msg = "failed to create a VDI for the redo log on the SR with the given UUID." 
in + raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, [msg])) + in + + (* ensure VDI is static, and set a flag in the local DB, such that the redo log can be + * re-enabled after a restart of xapi *) + begin try + debug "Ensuring redo-log VDI is static on all hosts in the pool"; + let hosts = Db.Host.get_all ~__context in + let attach host = + debug "Attaching VDI on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Host.attach_static_vdis rpc session_id host [vdi, Xapi_globs.gen_metadata_vdi_reason]); + debug "Setting redo-log local-DB flag on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Host.set_localdb_key rpc session_id host Constants.redo_log_enabled "true"); + in + List.iter attach hosts; + debug "VDI is static on all hosts" + with e -> + let msg = "failed to make VDI static." 
in + raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, [msg])) + end; + + (* update state *) + debug "Updating state..."; + let pool = Helpers.get_pool ~__context in + Db.Pool.set_redo_log_vdi ~__context ~self:pool ~value:vdi; + Db.Pool.set_redo_log_enabled ~__context ~self:pool ~value:true; + + (* enable the new redo log, unless HA is enabled (which means a redo log + * is already in use) *) + if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin + Redo_log.enable Xapi_ha.ha_redo_log Xapi_globs.gen_metadata_vdi_reason; + Localdb.put Constants.redo_log_enabled "true" + end; + info "The redo log is now enabled" + let disable_redo_log ~__context = - info "Disabling redo log..."; - - (* disable redo-log state flag and switch off redo log if HA is disabled *) - let pool = Helpers.get_pool ~__context in - Db.Pool.set_redo_log_enabled ~__context ~self:pool ~value:false; - if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin - Redo_log_usage.stop_using_redo_log Xapi_ha.ha_redo_log; - Redo_log.disable Xapi_ha.ha_redo_log; - - (* disable static-ness of the VDI and clear local-DB flags *) - let vdi = Db.Pool.get_redo_log_vdi ~__context ~self:pool in - let hosts = Db.Host.get_all ~__context in - begin try - let detach host = - debug "Detaching VDI from host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Host.detach_static_vdis rpc session_id host [vdi]); - debug "Clearing redo-log local-DB flag on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Host.set_localdb_key rpc session_id host Constants.redo_log_enabled "false"); - in - List.iter detach hosts; - with e -> info "Failed to detach static VDIs from all hosts." 
- end; - end; - info "The redo log is now disabled" + info "Disabling redo log..."; + + (* disable redo-log state flag and switch off redo log if HA is disabled *) + let pool = Helpers.get_pool ~__context in + Db.Pool.set_redo_log_enabled ~__context ~self:pool ~value:false; + if not (Db.Pool.get_ha_enabled ~__context ~self:pool) then begin + Redo_log_usage.stop_using_redo_log Xapi_ha.ha_redo_log; + Redo_log.disable Xapi_ha.ha_redo_log; + + (* disable static-ness of the VDI and clear local-DB flags *) + let vdi = Db.Pool.get_redo_log_vdi ~__context ~self:pool in + let hosts = Db.Host.get_all ~__context in + begin try + let detach host = + debug "Detaching VDI from host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Host.detach_static_vdis rpc session_id host [vdi]); + debug "Clearing redo-log local-DB flag on host '%s' ('%s')" (Db.Host.get_name_label ~__context ~self:host) (Ref.string_of host); + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Host.set_localdb_key rpc session_id host Constants.redo_log_enabled "false"); + in + List.iter detach hosts; + with e -> info "Failed to detach static VDIs from all hosts." 
+ end; + end; + info "The redo log is now disabled" let set_vswitch_controller ~__context ~address = - let dbg = Context.string_of_task __context in - match Net.Bridge.get_kind dbg () with - | Network_interface.Openvswitch -> - let pool = Helpers.get_pool ~__context in - let current_address = Db.Pool.get_vswitch_controller ~__context ~self:pool in - if current_address <> address then begin - if address <> "" then - Helpers.assert_is_valid_ip `ipv4 "address" address; - Db.Pool.set_vswitch_controller ~__context ~self:pool ~value:address; - List.iter (fun host -> Helpers.update_vswitch_controller ~__context ~host) (Db.Host.get_all ~__context) - end - | _ -> raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["host not configured for vswitch operation"])) + let dbg = Context.string_of_task __context in + match Net.Bridge.get_kind dbg () with + | Network_interface.Openvswitch -> + let pool = Helpers.get_pool ~__context in + let current_address = Db.Pool.get_vswitch_controller ~__context ~self:pool in + if current_address <> address then begin + if address <> "" then + Helpers.assert_is_valid_ip `ipv4 "address" address; + Db.Pool.set_vswitch_controller ~__context ~self:pool ~value:address; + List.iter (fun host -> Helpers.update_vswitch_controller ~__context ~host) (Db.Host.get_all ~__context) + end + | _ -> raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["host not configured for vswitch operation"])) (* internal intra-pool call to allow slaves to log http actions on the master *) let audit_log_append ~__context ~line = - (* populate friendly names for the references of the call arguments *) - (* this is necessary here because the slave doesn't have access to these names *) - let line = Rbac_audit.populate_audit_record_with_obj_names_of_refs line in - (* copy audit record from slave exactly as it is, without any new prefixes *) - let (_: string) = Rbac_audit.append_line ~raw:true "%s" line in - () + (* populate friendly names for the 
references of the call arguments *) + (* this is necessary here because the slave doesn't have access to these names *) + let line = Rbac_audit.populate_audit_record_with_obj_names_of_refs line in + (* copy audit record from slave exactly as it is, without any new prefixes *) + let (_: string) = Rbac_audit.append_line ~raw:true "%s" line in + () let test_archive_target ~__context ~self ~config = - raise (Api_errors.Server_error (Api_errors.message_removed, [])) + raise (Api_errors.Server_error (Api_errors.message_removed, [])) let enable_local_storage_caching ~__context ~self = - let srs = Db.SR.get_all_records ~__context in - let pbds = Db.PBD.get_all_records ~__context in - let hosts = Db.Host.get_all ~__context in - - (* Exception handler is to cope with transient PBDs with invalid references *) - let hosts_and_srs = List.filter_map (fun (pbdref,pbdrec) -> - try Some (pbdrec.API.pBD_host, pbdrec.API.pBD_SR, List.assoc pbdrec.API.pBD_SR srs) with _ -> None) pbds - in - - let acceptable = List.filter (fun (href,srref,srrec) -> - (not srrec.API.sR_shared) && - (List.length srrec.API.sR_PBDs = 1) && - (List.mem_assoc - Smint.Sr_supports_local_caching - (Sm.features_of_driver srrec.API.sR_type)) - ) hosts_and_srs in - - let failed_hosts = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let failed = List.filter_map (fun host -> - let result = ref (Some host) in - let acceptable_srs = List.filter (fun (href,srref,srrec) -> href=host) acceptable in - List.iter (fun (href,ref,sr) -> - try Client.Host.enable_local_storage_caching rpc session_id host ref; result := None with _ -> ()) acceptable_srs; - !result - ) hosts in - failed) - in - if List.length failed_hosts > 0 then - raise (Api_errors.Server_error (Api_errors.hosts_failed_to_enable_caching, List.map Ref.string_of failed_hosts)) - else () - - + let srs = Db.SR.get_all_records ~__context in + let pbds = Db.PBD.get_all_records ~__context in + let hosts = Db.Host.get_all ~__context in + + (* 
Exception handler is to cope with transient PBDs with invalid references *) + let hosts_and_srs = List.filter_map (fun (pbdref,pbdrec) -> + try Some (pbdrec.API.pBD_host, pbdrec.API.pBD_SR, List.assoc pbdrec.API.pBD_SR srs) with _ -> None) pbds + in + + let acceptable = List.filter (fun (href,srref,srrec) -> + (not srrec.API.sR_shared) && + (List.length srrec.API.sR_PBDs = 1) && + (List.mem_assoc + Smint.Sr_supports_local_caching + (Sm.features_of_driver srrec.API.sR_type)) + ) hosts_and_srs in + + let failed_hosts = + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let failed = List.filter_map (fun host -> + let result = ref (Some host) in + let acceptable_srs = List.filter (fun (href,srref,srrec) -> href=host) acceptable in + List.iter (fun (href,ref,sr) -> + try Client.Host.enable_local_storage_caching rpc session_id host ref; result := None with _ -> ()) acceptable_srs; + !result + ) hosts in + failed) + in + if List.length failed_hosts > 0 then + raise (Api_errors.Server_error (Api_errors.hosts_failed_to_enable_caching, List.map Ref.string_of failed_hosts)) + else () + + let disable_local_storage_caching ~__context ~self = - let hosts = Db.Host.get_all ~__context in - let failed_hosts = Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.filter_map (fun host -> - try - Client.Host.disable_local_storage_caching ~rpc ~session_id ~host; - None - with _ -> - Some host) hosts) - in - if List.length failed_hosts > 0 then - raise (Api_errors.Server_error (Api_errors.hosts_failed_to_disable_caching, List.map Ref.string_of failed_hosts)) - else () + let hosts = Db.Host.get_all ~__context in + let failed_hosts = Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.filter_map (fun host -> + try + Client.Host.disable_local_storage_caching ~rpc ~session_id ~host; + None + with _ -> + Some host) hosts) + in + if List.length failed_hosts > 0 then + raise (Api_errors.Server_error (Api_errors.hosts_failed_to_disable_caching, 
List.map Ref.string_of failed_hosts)) + else () let get_license_state ~__context ~self = - let edition_to_int = List.map (fun (e, _, _, i) -> e, i) (V6client.get_editions "get_license_state") in - let hosts = Db.Host.get_all ~__context in - let pool_edition, expiry = Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int in - let pool_expiry = - match expiry with - | None -> "never" - | Some date -> if date = Date.of_float License_check.never then "never" else Date.to_string date - in - [ - "edition", pool_edition; - "expiry", pool_expiry; - ] + let edition_to_int = List.map (fun (e, _, _, i) -> e, i) (V6client.get_editions "get_license_state") in + let hosts = Db.Host.get_all ~__context in + let pool_edition, expiry = Xapi_pool_license.get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int in + let pool_expiry = + match expiry with + | None -> "never" + | Some date -> if date = Date.of_float License_check.never then "never" else Date.to_string date + in + [ + "edition", pool_edition; + "expiry", pool_expiry; + ] let apply_edition ~__context ~self ~edition = - let hosts = Db.Host.get_all ~__context in - let apply_fn = - (fun ~__context ~host ~edition -> Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Host.apply_edition ~rpc ~session_id ~host ~edition ~force:false)) - in - Xapi_pool_license.apply_edition_with_rollback ~__context ~hosts ~edition ~apply_fn + let hosts = Db.Host.get_all ~__context in + let apply_fn = + (fun ~__context ~host ~edition -> Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Host.apply_edition ~rpc ~session_id ~host ~edition ~force:false)) + in + Xapi_pool_license.apply_edition_with_rollback ~__context ~hosts ~edition ~apply_fn (* This is expensive, so should always be run on the master. 
*) let assert_mac_seeds_available ~__context ~self ~seeds = - let module StringSet = Set.Make(String) in - let all_guests = - Db.VM.get_records_where - ~__context - ~expr:(Eq(Field "is_control_domain", Literal "false")) - in - (* Create a set of all MAC seeds in use by guests in the pool. *) - let mac_seeds_in_use = - List.fold_left - (fun acc (_, vm_rec) -> - try - let mac_seed = - List.assoc - Xapi_globs.mac_seed - vm_rec.API.vM_other_config - in - StringSet.add mac_seed acc - with Not_found -> - acc) - StringSet.empty all_guests - in - (* Create a set of the MAC seeds we want to test. *) - let mac_seeds_to_test = - List.fold_left - (fun acc mac_seed -> StringSet.add mac_seed acc) StringSet.empty seeds - in - (* Check if the intersection of these sets is non-empty. *) - let problem_mac_seeds = StringSet.inter mac_seeds_in_use mac_seeds_to_test in - if not(StringSet.is_empty problem_mac_seeds) then - raise (Api_errors.Server_error - (Api_errors.duplicate_mac_seed, [StringSet.choose problem_mac_seeds])) + let module StringSet = Set.Make(String) in + let all_guests = + Db.VM.get_records_where + ~__context + ~expr:(Eq(Field "is_control_domain", Literal "false")) + in + (* Create a set of all MAC seeds in use by guests in the pool. *) + let mac_seeds_in_use = + List.fold_left + (fun acc (_, vm_rec) -> + try + let mac_seed = + List.assoc + Xapi_globs.mac_seed + vm_rec.API.vM_other_config + in + StringSet.add mac_seed acc + with Not_found -> + acc) + StringSet.empty all_guests + in + (* Create a set of the MAC seeds we want to test. *) + let mac_seeds_to_test = + List.fold_left + (fun acc mac_seed -> StringSet.add mac_seed acc) StringSet.empty seeds + in + (* Check if the intersection of these sets is non-empty. 
*) + let problem_mac_seeds = StringSet.inter mac_seeds_in_use mac_seeds_to_test in + if not(StringSet.is_empty problem_mac_seeds) then + raise (Api_errors.Server_error + (Api_errors.duplicate_mac_seed, [StringSet.choose problem_mac_seeds])) let set_ssl_legacy_on_each_host ~__context ~self ~value = - let f ~rpc ~session_id ~host = - Client.Host.set_ssl_legacy ~rpc ~session_id ~self:host ~value - in - Xapi_pool_helpers.call_fn_on_slaves_then_master ~__context f + let f ~rpc ~session_id ~host = + Client.Host.set_ssl_legacy ~rpc ~session_id ~self:host ~value + in + Xapi_pool_helpers.call_fn_on_slaves_then_master ~__context f let disable_ssl_legacy = set_ssl_legacy_on_each_host ~value:false let enable_ssl_legacy = set_ssl_legacy_on_each_host ~value:true let has_extension ~__context ~self ~name = - let hosts = Db.Host.get_all ~__context in - List.for_all (fun host -> - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.Host.has_extension rpc session_id host name - ) - ) hosts + let hosts = Db.Host.get_all ~__context in + List.for_all (fun host -> + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.Host.has_extension rpc session_id host name + ) + ) hosts let guest_agent_config_requirements = - let open Map_check in - [ - { - key = Xapi_xenops.Guest_agent_features.Xapi.auto_update_enabled; - default_value = None; - is_valid_value = (fun x -> - try let (_:bool) = bool_of_string x in true - with Invalid_argument _ -> false); - }; - { - key = Xapi_xenops.Guest_agent_features.Xapi.auto_update_url; - default_value = None; - is_valid_value = (fun url -> - match Uri.of_string url |> Uri.scheme with - | Some "http" | Some "https" -> true - | _ -> false) - }; - ] + let open Map_check in + [ + { + key = Xapi_xenops.Guest_agent_features.Xapi.auto_update_enabled; + default_value = None; + is_valid_value = (fun x -> + try let (_:bool) = bool_of_string x in true + with Invalid_argument _ -> false); + }; + { + key = 
Xapi_xenops.Guest_agent_features.Xapi.auto_update_url; + default_value = None; + is_valid_value = (fun url -> + match Uri.of_string url |> Uri.scheme with + | Some "http" | Some "https" -> true + | _ -> false) + }; + ] let add_to_guest_agent_config ~__context ~self ~key ~value = - Map_check.validate_kvpair "guest_agent_config" - guest_agent_config_requirements (key, value); - Db.Pool.add_to_guest_agent_config ~__context ~self ~key ~value; - Xapi_pool_helpers.apply_guest_agent_config ~__context + Map_check.validate_kvpair "guest_agent_config" + guest_agent_config_requirements (key, value); + Db.Pool.add_to_guest_agent_config ~__context ~self ~key ~value; + Xapi_pool_helpers.apply_guest_agent_config ~__context let remove_from_guest_agent_config ~__context ~self ~key = - Db.Pool.remove_from_guest_agent_config ~__context ~self ~key; - Xapi_pool_helpers.apply_guest_agent_config ~__context + Db.Pool.remove_from_guest_agent_config ~__context ~self ~key; + Xapi_pool_helpers.apply_guest_agent_config ~__context diff --git a/ocaml/xapi/xapi_pool.mli b/ocaml/xapi/xapi_pool.mli index fb80e1f772c..5231962dc53 100644 --- a/ocaml/xapi/xapi_pool.mli +++ b/ocaml/xapi/xapi_pool.mli @@ -13,7 +13,7 @@ *) (** Module that defines API functions for Pool objects * @group XenAPI functions - *) +*) (** {2 (Fill in Title!)} *) @@ -91,9 +91,9 @@ val create_VLAN_from_PIF : val slave_network_report : __context:'a -> phydevs:'b -> dev_to_mac:'c -> dev_to_mtu:'d -> slave_host:'e -> 'f list - + (** {2 High availability (HA)} *) - + val enable_disable_m : Mutex.t val enable_ha : __context:Context.t -> @@ -195,6 +195,6 @@ val enable_ssl_legacy : __context:Context.t -> self:API.ref_pool -> unit val has_extension : __context:Context.t -> self:API.ref_pool -> name:string -> bool val add_to_guest_agent_config : - __context:Context.t -> self:API.ref_pool -> key:string -> value:string -> unit + __context:Context.t -> self:API.ref_pool -> key:string -> value:string -> unit val remove_from_guest_agent_config 
: - __context:Context.t -> self:API.ref_pool -> key:string -> unit + __context:Context.t -> self:API.ref_pool -> key:string -> unit diff --git a/ocaml/xapi/xapi_pool_helpers.ml b/ocaml/xapi/xapi_pool_helpers.ml index 4bfe46ad124..84759063d95 100644 --- a/ocaml/xapi/xapi_pool_helpers.ml +++ b/ocaml/xapi/xapi_pool_helpers.ml @@ -24,120 +24,120 @@ let all_operations = [ `ha_enable; `ha_disable ] (** Returns a table of operations -> API error options (None if the operation would be ok) *) let valid_operations ~__context record _ref' = - let _ref = Ref.string_of _ref' in - let current_ops = List.map snd record.Db_actions.pool_current_operations in - - let table = Hashtbl.create 10 in - List.iter (fun x -> Hashtbl.replace table x None) all_operations; - let set_errors (code: string) (params: string list) (ops: API.pool_allowed_operations_set) = - List.iter (fun op -> - if Hashtbl.find table op = None - then Hashtbl.replace table op (Some(code, params))) ops in - - (* HA enable or disable cannot run if HA enable is in progress *) - if List.mem `ha_enable current_ops - then begin - set_errors Api_errors.ha_enable_in_progress [] [ `ha_enable ]; - set_errors Api_errors.ha_enable_in_progress [] [ `ha_disable ] - end; - (* HA enable or disable cannot run if HA disable is in progress *) - if List.mem `ha_disable current_ops - then begin - set_errors Api_errors.ha_disable_in_progress [] [ `ha_enable ]; - set_errors Api_errors.ha_disable_in_progress [] [ `ha_disable ] - end; - - (* HA disable cannot run if HA is already disabled on a pool *) - (* HA enable cannot run if HA is already enabled on a pool *) - let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) in - if ha_enabled then - set_errors Api_errors.ha_is_enabled [] [ `ha_enable ] - else - set_errors Api_errors.ha_not_enabled [] [ `ha_disable ]; - - table + let _ref = Ref.string_of _ref' in + let current_ops = List.map snd record.Db_actions.pool_current_operations in + + let table = 
Hashtbl.create 10 in + List.iter (fun x -> Hashtbl.replace table x None) all_operations; + let set_errors (code: string) (params: string list) (ops: API.pool_allowed_operations_set) = + List.iter (fun op -> + if Hashtbl.find table op = None + then Hashtbl.replace table op (Some(code, params))) ops in + + (* HA enable or disable cannot run if HA enable is in progress *) + if List.mem `ha_enable current_ops + then begin + set_errors Api_errors.ha_enable_in_progress [] [ `ha_enable ]; + set_errors Api_errors.ha_enable_in_progress [] [ `ha_disable ] + end; + (* HA enable or disable cannot run if HA disable is in progress *) + if List.mem `ha_disable current_ops + then begin + set_errors Api_errors.ha_disable_in_progress [] [ `ha_enable ]; + set_errors Api_errors.ha_disable_in_progress [] [ `ha_disable ] + end; + + (* HA disable cannot run if HA is already disabled on a pool *) + (* HA enable cannot run if HA is already enabled on a pool *) + let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) in + if ha_enabled then + set_errors Api_errors.ha_is_enabled [] [ `ha_enable ] + else + set_errors Api_errors.ha_not_enabled [] [ `ha_disable ]; + + table let throw_error table op = - if not(Hashtbl.mem table op) - then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_pool_helpers.assert_operation_valid unknown operation: %s" (pool_operation_to_string op) ])); + if not(Hashtbl.mem table op) + then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_pool_helpers.assert_operation_valid unknown operation: %s" (pool_operation_to_string op) ])); - match Hashtbl.find table op with - | Some (code, params) -> raise (Api_errors.Server_error(code, params)) - | None -> () + match Hashtbl.find table op with + | Some (code, params) -> raise (Api_errors.Server_error(code, params)) + | None -> () let assert_operation_valid ~__context ~self ~(op:API.pool_allowed_operations) = - let all = 
Db.Pool.get_record_internal ~__context ~self in - let table = valid_operations ~__context all self in - throw_error table op + let all = Db.Pool.get_record_internal ~__context ~self in + let table = valid_operations ~__context all self in + throw_error table op let update_allowed_operations ~__context ~self : unit = - let all = Db.Pool.get_record_internal ~__context ~self in - let valid = valid_operations ~__context all self in - let keys = Hashtbl.fold (fun k v acc -> if v = None then k :: acc else acc) valid [] in - Db.Pool.set_allowed_operations ~__context ~self ~value:keys + let all = Db.Pool.get_record_internal ~__context ~self in + let valid = valid_operations ~__context all self in + let keys = Hashtbl.fold (fun k v acc -> if v = None then k :: acc else acc) valid [] in + Db.Pool.set_allowed_operations ~__context ~self ~value:keys (* Checks whether HA enable is in progress *) let ha_enable_in_progress ~__context = - let pool = Helpers.get_pool ~__context in - let current_ops = Db.Pool.get_current_operations ~__context ~self:pool in - if List.exists (fun (_, x) -> x = `ha_enable) current_ops then true else false + let pool = Helpers.get_pool ~__context in + let current_ops = Db.Pool.get_current_operations ~__context ~self:pool in + if List.exists (fun (_, x) -> x = `ha_enable) current_ops then true else false (* Checks whether HA disable is in progress *) let ha_disable_in_progress ~__context = - let pool = Helpers.get_pool ~__context in - let current_ops = Db.Pool.get_current_operations ~__context ~self:pool in - if List.exists (fun (_, x) -> x = `ha_disable) current_ops then true else false + let pool = Helpers.get_pool ~__context in + let current_ops = Db.Pool.get_current_operations ~__context ~self:pool in + if List.exists (fun (_, x) -> x = `ha_disable) current_ops then true else false let get_master_slaves_list_with_fn ~__context fn = - let _unsorted_hosts = Db.Host.get_all ~__context in - let master = Helpers.get_master ~__context in - let slaves = 
List.filter (fun h -> h <> master) _unsorted_hosts in (* anything not a master *) - debug "MASTER=%s, SLAVES=%s" (Db.Host.get_name_label ~__context ~self:master) - (List.fold_left (fun str h -> (str^","^(Db.Host.get_name_label ~__context ~self:h))) "" slaves); - fn master slaves + let _unsorted_hosts = Db.Host.get_all ~__context in + let master = Helpers.get_master ~__context in + let slaves = List.filter (fun h -> h <> master) _unsorted_hosts in (* anything not a master *) + debug "MASTER=%s, SLAVES=%s" (Db.Host.get_name_label ~__context ~self:master) + (List.fold_left (fun str h -> (str^","^(Db.Host.get_name_label ~__context ~self:h))) "" slaves); + fn master slaves (* returns the list of hosts in the pool, with the master being the first element of the list *) let get_master_slaves_list ~__context = - get_master_slaves_list_with_fn ~__context (fun master slaves -> master::slaves) + get_master_slaves_list_with_fn ~__context (fun master slaves -> master::slaves) (* returns the list of slaves in the pool *) let get_slaves_list ~__context = - get_master_slaves_list_with_fn ~__context (fun master slaves -> slaves) + get_master_slaves_list_with_fn ~__context (fun master slaves -> slaves) let call_fn_on_hosts ~__context hosts f = - Helpers.call_api_functions ~__context (fun rpc session_id -> - let errs = List.fold_left - (fun acc host -> - try - f ~rpc ~session_id ~host; - acc - with x -> - (host,x)::acc) [] hosts - in - if List.length errs > 0 then begin - warn "Exception raised while performing operation on hosts:"; - List.iter (fun (host,x) -> warn "Host: %s error: %s" (Ref.string_of host) (ExnHelper.string_of_exn x)) errs; - raise (snd (List.hd errs)) - end) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let errs = List.fold_left + (fun acc host -> + try + f ~rpc ~session_id ~host; + acc + with x -> + (host,x)::acc) [] hosts + in + if List.length errs > 0 then begin + warn "Exception raised while performing operation on hosts:"; + List.iter (fun 
(host,x) -> warn "Host: %s error: %s" (Ref.string_of host) (ExnHelper.string_of_exn x)) errs; + raise (snd (List.hd errs)) + end) let call_fn_on_master_then_slaves ~__context f = - let hosts = get_master_slaves_list ~__context in - call_fn_on_hosts ~__context hosts f + let hosts = get_master_slaves_list ~__context in + call_fn_on_hosts ~__context hosts f (* Note: fn exposed in .mli *) (** Call the function on the slaves first. When those calls have all * returned, call the function on the master. *) let call_fn_on_slaves_then_master ~__context f = - (* Get list with master as LAST element: important for ssl_legacy calls *) - let hosts = List.rev (get_master_slaves_list ~__context) in - call_fn_on_hosts ~__context hosts f + (* Get list with master as LAST element: important for ssl_legacy calls *) + let hosts = List.rev (get_master_slaves_list ~__context) in + call_fn_on_hosts ~__context hosts f let apply_guest_agent_config ~__context = - let f ~rpc ~session_id ~host = - try Client.Host.apply_guest_agent_config ~rpc ~session_id ~host - with e -> - error "Failed to apply guest agent config to host %s: %s" - (Db.Host.get_uuid ~__context ~self:host) - (Printexc.to_string e) - in - call_fn_on_slaves_then_master ~__context f + let f ~rpc ~session_id ~host = + try Client.Host.apply_guest_agent_config ~rpc ~session_id ~host + with e -> + error "Failed to apply guest agent config to host %s: %s" + (Db.Host.get_uuid ~__context ~self:host) + (Printexc.to_string e) + in + call_fn_on_slaves_then_master ~__context f diff --git a/ocaml/xapi/xapi_pool_helpers.mli b/ocaml/xapi/xapi_pool_helpers.mli index 9b602213efc..63680b50d2a 100644 --- a/ocaml/xapi/xapi_pool_helpers.mli +++ b/ocaml/xapi/xapi_pool_helpers.mli @@ -13,9 +13,9 @@ *) val assert_operation_valid : __context:Context.t -> - self:API.ref_pool -> - op:API.pool_allowed_operations -> - unit + self:API.ref_pool -> + op:API.pool_allowed_operations -> + unit val update_allowed_operations : __context:Context.t -> 
self:API.ref_pool -> unit @@ -26,10 +26,10 @@ val ha_enable_in_progress : __context:Context.t -> bool (** Call the function on the master, then on each of the slaves in turn. Useful when attaching an SR to all hosts in the pool. *) val call_fn_on_master_then_slaves : - __context:Context.t -> - (rpc:(Rpc.call -> Rpc.response) -> - session_id:API.ref_session -> host:API.ref_host -> 'a) -> - unit + __context:Context.t -> + (rpc:(Rpc.call -> Rpc.response) -> + session_id:API.ref_session -> host:API.ref_host -> 'a) -> + unit (** Call the function on the slaves first. When those calls have all * returned, call the function on the master. *) diff --git a/ocaml/xapi/xapi_pool_license.ml b/ocaml/xapi/xapi_pool_license.ml index 50bb3a92087..c1e0422928b 100644 --- a/ocaml/xapi/xapi_pool_license.ml +++ b/ocaml/xapi/xapi_pool_license.ml @@ -19,67 +19,67 @@ open D (* Compare two date options, where None is always greater than (Some _) *) let compare_dates (a: Stdext.Date.iso8601 option) (b: Stdext.Date.iso8601 option) = - match a, b with - | None, None -> 0 - | None, Some _ -> 1 - | Some _, None -> -1 - | Some a', Some b' -> compare a' b' + match a, b with + | None, None -> 0 + | None, Some _ -> 1 + | Some _, None -> -1 + | Some a', Some b' -> compare a' b' let get_lowest_edition_with_expiry ~__context ~hosts ~edition_to_int = - let all_editions_with_expiry = - List.map - (fun host -> - Db.Host.get_edition ~__context ~self:host, - License_check.get_expiry_date ~__context ~host - ) hosts - in - let pool_edition, _ = - List.filter_map (fun (edition, _) -> - if List.mem_assoc edition edition_to_int then - Some (edition, List.assoc edition edition_to_int) - else - None - ) all_editions_with_expiry - |> List.sort (fun a b -> compare (snd a) (snd b)) - |> List.hd - in + let all_editions_with_expiry = + List.map + (fun host -> + Db.Host.get_edition ~__context ~self:host, + License_check.get_expiry_date ~__context ~host + ) hosts + in + let pool_edition, _ = + List.filter_map (fun 
(edition, _) -> + if List.mem_assoc edition edition_to_int then + Some (edition, List.assoc edition edition_to_int) + else + None + ) all_editions_with_expiry + |> List.sort (fun a b -> compare (snd a) (snd b)) + |> List.hd + in - (* Get the earliest expiry date of a list of hosts, given a pool edition. - * Only the expiry dates of the hosts that match the edition are taken into account. *) - let pool_expiry = - List.filter_map - (fun (edition, expiry) -> if edition = pool_edition then Some expiry else None) - all_editions_with_expiry - |> List.sort compare_dates - |> List.hd - in - pool_edition, pool_expiry + (* Get the earliest expiry date of a list of hosts, given a pool edition. + * Only the expiry dates of the hosts that match the edition are taken into account. *) + let pool_expiry = + List.filter_map + (fun (edition, expiry) -> if edition = pool_edition then Some expiry else None) + all_editions_with_expiry + |> List.sort compare_dates + |> List.hd + in + pool_edition, pool_expiry (* Separate this logic out from Xapi_pool.apply_edition for testing purposes. *) let apply_edition_with_rollback ~__context ~hosts ~edition ~apply_fn = - (* Snapshot the current state of the pool in case we need to roll back; - * list the hosts against the edition we're upgrading them *from*. *) - let pool_license_state = - List.map - (fun host -> (host, Db.Host.get_edition ~__context ~self:host)) - hosts - in - (* This list will be added to as hosts have the new edition applied. *) - let to_rollback = ref [] in - try - List.iter - (fun (host, old_edition) -> - apply_fn ~__context ~host ~edition; - to_rollback := (host, old_edition) :: !to_rollback) - pool_license_state - with e -> - error - "Caught %s while trying to upgrade pool to edition %s - attempting rollback" - (Printexc.to_string e) edition; - (* Best-effort attempt to roll everything back. 
*) - List.iter - (fun (host, old_edition) -> - try apply_fn ~__context ~host ~edition:old_edition with _ -> ()) - !to_rollback; - (* Raise the original exception. *) - raise e + (* Snapshot the current state of the pool in case we need to roll back; + * list the hosts against the edition we're upgrading them *from*. *) + let pool_license_state = + List.map + (fun host -> (host, Db.Host.get_edition ~__context ~self:host)) + hosts + in + (* This list will be added to as hosts have the new edition applied. *) + let to_rollback = ref [] in + try + List.iter + (fun (host, old_edition) -> + apply_fn ~__context ~host ~edition; + to_rollback := (host, old_edition) :: !to_rollback) + pool_license_state + with e -> + error + "Caught %s while trying to upgrade pool to edition %s - attempting rollback" + (Printexc.to_string e) edition; + (* Best-effort attempt to roll everything back. *) + List.iter + (fun (host, old_edition) -> + try apply_fn ~__context ~host ~edition:old_edition with _ -> ()) + !to_rollback; + (* Raise the original exception. 
*) + raise e diff --git a/ocaml/xapi/xapi_pool_patch.ml b/ocaml/xapi/xapi_pool_patch.ml index 0a67cead662..7c6dc68127a 100644 --- a/ocaml/xapi/xapi_pool_patch.ml +++ b/ocaml/xapi/xapi_pool_patch.ml @@ -13,7 +13,7 @@ *) (** * @group Pool Management - *) +*) open Stdext open Pervasiveext @@ -36,8 +36,8 @@ open D after-apply-guidance="restartHVM restartPV restartHost" /> *) -type patch_info = { uuid: string; name_label: string; - name_description: string; version: string; +type patch_info = { uuid: string; name_label: string; + name_description: string; version: string; after_apply_guidance: API.after_apply_guidance list } exception Missing_patch_key of string @@ -50,51 +50,51 @@ let rm = "/bin/rm" let patch_dir = "/var/patch" let check_unsigned_patch_fist path = - match Xapi_fist.allowed_unsigned_patches () with - | None -> false - | Some fist -> - let sha1 = - Sha1sum.sha1sum (fun checksum_fd -> - let (_: int64) = Unixext.with_file path [ Unix.O_RDONLY ] 0 (fun fd -> - Unixext.copy_file fd checksum_fd - ) in - () - ) - in - debug "Patch Sha1sum: %s" sha1; - let fist_sha1s = String.split_f String.isspace fist in - debug "FIST allowed_unsigned_patches: %s" fist; - List.mem sha1 fist_sha1s + match Xapi_fist.allowed_unsigned_patches () with + | None -> false + | Some fist -> + let sha1 = + Sha1sum.sha1sum (fun checksum_fd -> + let (_: int64) = Unixext.with_file path [ Unix.O_RDONLY ] 0 (fun fd -> + Unixext.copy_file fd checksum_fd + ) in + () + ) + in + debug "Patch Sha1sum: %s" sha1; + let fist_sha1s = String.split_f String.isspace fist in + debug "FIST allowed_unsigned_patches: %s" fist; + List.mem sha1 fist_sha1s let extract_patch path = let run_path = path ^ ".run" in try Unixext.with_file run_path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o755 (fun fd -> - Gpg.with_signed_cleartext path - (fun fingerprint fd' -> - (match fingerprint with - | Some f -> - let enc = Base64.encode f in - let acceptable_keys = - if Xapi_fist.allow_test_patches () then - [ 
!Xapi_globs.trusted_patch_key; Xapi_globs.test_patch_key ] else [ !Xapi_globs.trusted_patch_key ] - in - if not (List.mem enc acceptable_keys) - then - ( - debug "Got fingerprint: %s" f; - (*debug "Encoded: %s" (Base64.encode f); -- don't advertise the fact that we've got an encoded string in here! *) - raise Gpg.InvalidSignature - ) - else - debug "Fingerprint verified." - | None -> - debug "No fingerprint!"; - raise Gpg.InvalidSignature); - let (_: int64) = Unixext.copy_file fd' fd in - () - ) + Gpg.with_signed_cleartext path + (fun fingerprint fd' -> + (match fingerprint with + | Some f -> + let enc = Base64.encode f in + let acceptable_keys = + if Xapi_fist.allow_test_patches () then + [ !Xapi_globs.trusted_patch_key; Xapi_globs.test_patch_key ] else [ !Xapi_globs.trusted_patch_key ] + in + if not (List.mem enc acceptable_keys) + then + ( + debug "Got fingerprint: %s" f; + (*debug "Encoded: %s" (Base64.encode f); -- don't advertise the fact that we've got an encoded string in here! *) + raise Gpg.InvalidSignature + ) + else + debug "Fingerprint verified." 
+ | None -> + debug "No fingerprint!"; + raise Gpg.InvalidSignature); + let (_: int64) = Unixext.copy_file fd' fd in + () + ) ); run_path with e -> @@ -102,11 +102,11 @@ let extract_patch path = debug "Patch not signed, but still letting it through"; Unixext.with_file run_path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o755 (fun fd -> - Unixext.with_file path [ Unix.O_RDONLY ] 0 - (fun fd' -> - let (_: int64) = Unixext.copy_file fd' fd in - run_path - ) + Unixext.with_file path [ Unix.O_RDONLY ] 0 + (fun fd' -> + let (_: int64) = Unixext.copy_file fd' fd in + run_path + ) ) end else begin Unixext.unlink_safe run_path; @@ -115,22 +115,22 @@ let extract_patch path = - (* We execute the patch with arguments (name-label|name-description|version|apply) to - query its metadata and apply it *) - let execute_patch path args = - if not (Sys.file_exists path) - then raise (Api_errors.Server_error (Api_errors.cannot_find_patch, [])); +(* We execute the patch with arguments (name-label|name-description|version|apply) to + query its metadata and apply it *) +let execute_patch path args = + if not (Sys.file_exists path) + then raise (Api_errors.Server_error (Api_errors.cannot_find_patch, [])); - debug "Stripping header on patch %s then running with args '%s'" path (String.concat " " args); - let run_path = extract_patch path in - finally - (fun () -> - with_logfile_fd "patch" - (fun log_fd -> + debug "Stripping header on patch %s then running with args '%s'" path (String.concat " " args); + let run_path = extract_patch path in + finally + (fun () -> + with_logfile_fd "patch" + (fun log_fd -> let pid = safe_close_and_exec None (Some log_fd) (Some log_fd) [] run_path args in waitpid_fail_if_bad_exit pid) - ) - (fun () -> Unixext.unlink_safe run_path) + ) + (fun () -> Unixext.unlink_safe run_path) let guidance_from_string = function | "restartHVM" -> `restartHVM @@ -142,100 +142,100 @@ let guidance_from_string = function let precheck_patch_uuid uuid = let uuid = String.lowercase uuid in if 
not (Uuid.is_uuid uuid) - then raise (Invalid_patch_uuid uuid); + then raise (Invalid_patch_uuid uuid); uuid let patch_info_of_xml = function | Element("info", attr, _) -> - let find x = - if List.mem_assoc x attr - then List.assoc x attr - else raise (Missing_patch_key x) in - let label = find "name-label" - and descr = find "name-description" - and version = find "version" - and uuid = precheck_patch_uuid (find "uuid") - and guidance = find "after-apply-guidance" - in - let guidance = - if guidance <> "" then - let guidance = String.split ' ' guidance in - List.map guidance_from_string guidance - else - [] - in - { uuid = uuid; name_label = label; name_description = descr; - version = version; after_apply_guidance = guidance } + let find x = + if List.mem_assoc x attr + then List.assoc x attr + else raise (Missing_patch_key x) in + let label = find "name-label" + and descr = find "name-description" + and version = find "version" + and uuid = precheck_patch_uuid (find "uuid") + and guidance = find "after-apply-guidance" + in + let guidance = + if guidance <> "" then + let guidance = String.split ' ' guidance in + List.map guidance_from_string guidance + else + [] + in + { uuid = uuid; name_label = label; name_description = descr; + version = version; after_apply_guidance = guidance } | _ -> raise Bad_patch_info -let patch_info_of_string s = +let patch_info_of_string s = let xml = Xml.parse_string s in - debug "xml: %s" (Xml.to_string xml); - patch_info_of_xml xml + debug "xml: %s" (Xml.to_string xml); + patch_info_of_xml xml let get_patch_info path = match execute_patch path [ "info" ] with | Success(output, _) -> - begin - try - debug "executing patch successful; parsing XML" ; - patch_info_of_string output - with e -> - raise (Api_errors.Server_error(Api_errors.invalid_patch, [Printexc.to_string e])) - end + begin + try + debug "executing patch successful; parsing XML" ; + patch_info_of_string output + with e -> + raise 
(Api_errors.Server_error(Api_errors.invalid_patch, [Printexc.to_string e])) + end | Failure(log, exn) -> - debug "error from patch application: %s" log; - begin - match exn with - | Subprocess_failed 2 -> - debug "probably bad line endings..."; - raise (Api_errors.Server_error(Api_errors.invalid_patch_with_log, ["Bad line endings?"])) - | _ -> raise exn - end + debug "error from patch application: %s" log; + begin + match exn with + | Subprocess_failed 2 -> + debug "probably bad line endings..."; + raise (Api_errors.Server_error(Api_errors.invalid_patch_with_log, ["Bad line endings?"])) + | _ -> raise exn + end let read_in_and_check_patch length s path = - try + try debug "Will stream patch to file: %s" path; - + (* Stream the contents to path *) begin match length with - | None -> - Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 - (fun fd -> let (_: int64) = Unixext.copy_file s fd in ()) - | Some i -> - Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 - (fun fd -> let (_: int64) = Unixext.copy_file ~limit:i s fd in ()) + | None -> + Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 + (fun fd -> let (_: int64) = Unixext.copy_file s fd in ()) + | Some i -> + Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 + (fun fd -> let (_: int64) = Unixext.copy_file ~limit:i s fd in ()) end; debug "Streaming complete; executing gpg"; - + let run_path = extract_patch path in Unixext.unlink_safe run_path with - | Unix.Unix_error (errno, _, _) when errno = Unix.ENOSPC -> - warn "Not enough space on filesystem to upload patch."; - raise (Api_errors.Server_error (Api_errors.out_of_space, [patch_dir])) - | exn -> + | Unix.Unix_error (errno, _, _) when errno = Unix.ENOSPC -> + warn "Not enough space on filesystem to upload patch."; + raise (Api_errors.Server_error (Api_errors.out_of_space, [patch_dir])) + | exn -> debug "Caught exception while checking signature: %s" (ExnHelper.string_of_exn exn); Unixext.unlink_safe path; raise 
(Api_errors.Server_error(Api_errors.invalid_patch, [])) let create_patch_record ~__context ?path patch_info = let r = Ref.make () in - let path, size = + let path, size = match path with - | None -> "", Int64.zero - | Some path -> - let stat = Unix.stat path in - path, Int64.of_int stat.Unix.st_size - in - Db.Pool_patch.create ~__context ~ref:r + | None -> "", Int64.zero + | Some path -> + let stat = Unix.stat path in + path, Int64.of_int stat.Unix.st_size + in + Db.Pool_patch.create ~__context ~ref:r ~uuid:patch_info.uuid - ~name_label:patch_info.name_label + ~name_label:patch_info.name_label ~name_description:patch_info.name_description - ~version:patch_info.version - ~filename:path + ~version:patch_info.version + ~filename:path ~size ~pool_applied:false ~after_apply_guidance:patch_info.after_apply_guidance @@ -248,74 +248,74 @@ exception CannotUploadPatchToSlave space on the filesystem as the size of the patch, which is where the multiplier comes from. *) let assert_space_available ?(multiplier=3L) patch_size = - let open Unixext in - ignore (Unixext.mkdir_safe patch_dir 0o755); - let stat = statvfs patch_dir in - let free_bytes = - (* block size times free blocks *) - Int64.mul stat.f_frsize stat.f_bfree in - let really_required = Int64.mul multiplier patch_size in - if really_required > free_bytes - then - begin - warn "Not enough space on filesystem to upload patch. Required %Ld, \ - but only %Ld available" really_required free_bytes; - raise (Api_errors.Server_error (Api_errors.out_of_space, [patch_dir])) - end + let open Unixext in + ignore (Unixext.mkdir_safe patch_dir 0o755); + let stat = statvfs patch_dir in + let free_bytes = + (* block size times free blocks *) + Int64.mul stat.f_frsize stat.f_bfree in + let really_required = Int64.mul multiplier patch_size in + if really_required > free_bytes + then + begin + warn "Not enough space on filesystem to upload patch. 
Required %Ld, \ + but only %Ld available" really_required free_bytes; + raise (Api_errors.Server_error (Api_errors.out_of_space, [patch_dir])) + end let pool_patch_upload_handler (req: Request.t) s _ = debug "Patch Upload Handler - Entered..."; if not (Pool_role.is_master ()) then raise CannotUploadPatchToSlave; - + Xapi_http.with_context "Uploading host patch" req s (fun __context -> - if on_oem ~__context - then raise (Api_errors.Server_error (Api_errors.not_allowed_on_oem_edition, ["patch-upload"])); - - debug "Patch Upload Handler - Authenticated..."; - - let _ = Unixext.mkdir_safe patch_dir 0o755 in - let new_path = patch_dir ^ "/" ^ (Uuid.to_string (Uuid.make_uuid ())) in - let task_id = Context.get_task_id __context in - begin - - debug "Patch Upload Handler - Sending headers..."; - - Http_svr.headers s (Http.http_200_ok ()); - - (match req.Request.content_length with - | None -> () - | Some size -> assert_space_available size); - - read_in_and_check_patch req.Request.content_length s new_path; - - try - let r = create_patch_record ~__context ~path:new_path (get_patch_info new_path) in - Db.Task.set_result ~__context ~self:task_id ~value:(Ref.string_of r) - with Db_exn.Uniqueness_constraint_violation (_, _, uuid) -> - (* patch already uploaded. if the patch file has been cleaned, then put this one in its place. - otherwise, error *) - debug "duplicate patch with uuid %s found." uuid; - let patch_ref = Db.Pool_patch.get_by_uuid ~__context ~uuid in - let old_path = Db.Pool_patch.get_filename ~__context ~self:patch_ref in - debug "checking for file %s. If it doesn't exist new patch will replace it." 
old_path; - if Sys.file_exists old_path - then - begin - Unixext.unlink_safe new_path; - raise (Api_errors.Server_error(Api_errors.patch_already_exists, [uuid])) - end - else - begin - let stat = Unix.stat new_path in - let size = Int64.of_int stat.Unix.st_size in - Db.Pool_patch.set_filename ~__context ~self:patch_ref ~value:new_path; - Db.Pool_patch.set_size ~__context ~self:patch_ref ~value:size; - Db.Task.set_result ~__context ~self:task_id ~value:(Ref.string_of patch_ref) - end - end + if on_oem ~__context + then raise (Api_errors.Server_error (Api_errors.not_allowed_on_oem_edition, ["patch-upload"])); + + debug "Patch Upload Handler - Authenticated..."; + + let _ = Unixext.mkdir_safe patch_dir 0o755 in + let new_path = patch_dir ^ "/" ^ (Uuid.to_string (Uuid.make_uuid ())) in + let task_id = Context.get_task_id __context in + begin + + debug "Patch Upload Handler - Sending headers..."; + + Http_svr.headers s (Http.http_200_ok ()); + + (match req.Request.content_length with + | None -> () + | Some size -> assert_space_available size); + + read_in_and_check_patch req.Request.content_length s new_path; + + try + let r = create_patch_record ~__context ~path:new_path (get_patch_info new_path) in + Db.Task.set_result ~__context ~self:task_id ~value:(Ref.string_of r) + with Db_exn.Uniqueness_constraint_violation (_, _, uuid) -> + (* patch already uploaded. if the patch file has been cleaned, then put this one in its place. + otherwise, error *) + debug "duplicate patch with uuid %s found." uuid; + let patch_ref = Db.Pool_patch.get_by_uuid ~__context ~uuid in + let old_path = Db.Pool_patch.get_filename ~__context ~self:patch_ref in + debug "checking for file %s. If it doesn't exist new patch will replace it." 
old_path; + if Sys.file_exists old_path + then + begin + Unixext.unlink_safe new_path; + raise (Api_errors.Server_error(Api_errors.patch_already_exists, [uuid])) + end + else + begin + let stat = Unix.stat new_path in + let size = Int64.of_int stat.Unix.st_size in + Db.Pool_patch.set_filename ~__context ~self:patch_ref ~value:new_path; + Db.Pool_patch.set_size ~__context ~self:patch_ref ~value:size; + Db.Task.set_result ~__context ~self:task_id ~value:(Ref.string_of patch_ref) + end + end ) let bin_sync = "/bin/sync" @@ -325,36 +325,36 @@ let sync () = with_logfile_fd "sync" (fun log_fd -> let pid = safe_close_and_exec None (Some log_fd) (Some log_fd) [] bin_sync [] in - waitpid_fail_if_bad_exit pid) - in - match output with - | Failure(log, exn) -> - debug "error from sync application: %s" log; - raise exn - | Success(output, _) -> () + waitpid_fail_if_bad_exit pid) + in + match output with + | Failure(log, exn) -> + debug "error from sync application: %s" log; + raise exn + | Success(output, _) -> () let patch_header_length = 8 let pool_patch_download_handler (req: Request.t) s _ = Xapi_http.with_context "Downloading pool patch" req s - (fun __context -> - if not(List.mem_assoc "uuid" req.Request.query) then begin - Http_svr.headers s (Http.http_400_badrequest ()); - error "HTTP request for pool patch lacked 'uuid' parameter" - end else begin - let uuid = List.assoc "uuid" req.Request.query in - (* ensure its a valid uuid *) - let r = Db.Pool_patch.get_by_uuid ~__context ~uuid in - let path = Db.Pool_patch.get_filename ~__context ~self:r in - - if not (Sys.file_exists path) - then raise (Api_errors.Server_error (Api_errors.cannot_find_patch, [])); - - Http_svr.response_file s path; - end; - req.Request.close <- true + (fun __context -> + if not(List.mem_assoc "uuid" req.Request.query) then begin + Http_svr.headers s (Http.http_400_badrequest ()); + error "HTTP request for pool patch lacked 'uuid' parameter" + end else begin + let uuid = List.assoc "uuid" 
req.Request.query in + (* ensure its a valid uuid *) + let r = Db.Pool_patch.get_by_uuid ~__context ~uuid in + let path = Db.Pool_patch.get_filename ~__context ~self:r in + + if not (Sys.file_exists path) + then raise (Api_errors.Server_error (Api_errors.cannot_find_patch, [])); + + Http_svr.response_file s path; + end; + req.Request.close <- true ) - + let get_patch_to_local ~__context ~self = if not (Pool_role.is_master ()) then begin @@ -364,139 +364,139 @@ let get_patch_to_local ~__context ~self = let pool_secret = !Xapi_globs.pool_secret in let uuid = Db.Pool_patch.get_uuid ~__context ~self in Server_helpers.exec_with_new_task - ~task_in_database:true ~subtask_of:(Context.get_task_id __context) - ~session_id:(Context.get_session_id __context) - (Printf.sprintf "Get patch %s from master" uuid) - - (fun __context -> - let task = Context.get_task_id __context in - let uri = Printf.sprintf "%s?pool_secret=%s&uuid=%s&task_id=%s" - Constants.pool_patch_download_uri - pool_secret uuid (Ref.string_of task) in - let request = Xapi_http.http_request ~version:"1.1" Http.Get uri in - let master_address = Pool_role.get_master_address () in - let open Xmlrpc_client in - let transport = SSL(SSL.make ~use_stunnel_cache:true - ~task_id:(Ref.string_of task) (), - master_address, !Xapi_globs.https_port) in - - try - with_transport transport - (with_http request - (fun (response, fd) -> - let _ = Unixext.mkdir_safe patch_dir 0o755 in - read_in_and_check_patch (Some length) fd path)) - - with _ -> - begin - let error = Db.Task.get_error_info ~__context ~self:task in - if List.length error > 0 - then - begin - debug "Error %s fetching patch from master." 
(List.hd error); - raise (Api_errors.Server_error (List.hd error, List.tl error)) - end - else raise (Api_errors.Server_error - (Api_errors.cannot_fetch_patch, [uuid])) - end) + ~task_in_database:true ~subtask_of:(Context.get_task_id __context) + ~session_id:(Context.get_session_id __context) + (Printf.sprintf "Get patch %s from master" uuid) + + (fun __context -> + let task = Context.get_task_id __context in + let uri = Printf.sprintf "%s?pool_secret=%s&uuid=%s&task_id=%s" + Constants.pool_patch_download_uri + pool_secret uuid (Ref.string_of task) in + let request = Xapi_http.http_request ~version:"1.1" Http.Get uri in + let master_address = Pool_role.get_master_address () in + let open Xmlrpc_client in + let transport = SSL(SSL.make ~use_stunnel_cache:true + ~task_id:(Ref.string_of task) (), + master_address, !Xapi_globs.https_port) in + + try + with_transport transport + (with_http request + (fun (response, fd) -> + let _ = Unixext.mkdir_safe patch_dir 0o755 in + read_in_and_check_patch (Some length) fd path)) + + with _ -> + begin + let error = Db.Task.get_error_info ~__context ~self:task in + if List.length error > 0 + then + begin + debug "Error %s fetching patch from master." (List.hd error); + raise (Api_errors.Server_error (List.hd error, List.tl error)) + end + else raise (Api_errors.Server_error + (Api_errors.cannot_fetch_patch, [uuid])) + end) end open Db_filter open Db_filter_types let patch_apply_in_progress ~__context ~patch ~host = - let message = Printf.sprintf "Applying the same patch on %s is in progress. If you believe this is not true or the patching process is hung/frozen, try to restart the toolstack or host." (Db.Host.get_name_label ~__context ~self:host) in - raise (Api_errors.Server_error (Api_errors.other_operation_in_progress, ["Pool_patch"; Ref.string_of patch; message])) + let message = Printf.sprintf "Applying the same patch on %s is in progress. 
If you believe this is not true or the patching process is hung/frozen, try to restart the toolstack or host." (Db.Host.get_name_label ~__context ~self:host) in + raise (Api_errors.Server_error (Api_errors.other_operation_in_progress, ["Pool_patch"; Ref.string_of patch; message])) (* The [get_patch_applied_to] gives the patching status of a pool patch on the given host. It returns [None] if the patch is not on the host, i.e. no corresponding host_patch; returns [Some (ref, true)] if it's on the host and fully applied (as host_patch [ref]); returns [Some (ref, false)] if it's on the host but isn't applied yet or the application is in progress. *) let get_patch_applied_to ~__context ~patch ~host = - let expr = - And (Eq (Field "pool_patch", Literal (Ref.string_of patch)), - Eq (Field "host", Literal (Ref.string_of host))) - in - let result = Db.Host_patch.get_records_where ~__context ~expr in - match result with - | [] -> None - | (rf, rc) :: _ -> Some (rf, rc.API.host_patch_applied) + let expr = + And (Eq (Field "pool_patch", Literal (Ref.string_of patch)), + Eq (Field "host", Literal (Ref.string_of host))) + in + let result = Db.Host_patch.get_records_where ~__context ~expr in + match result with + | [] -> None + | (rf, rc) :: _ -> Some (rf, rc.API.host_patch_applied) let patch_applied_dir = "/var/patch/applied" -let write_patch_applied ~__context ~self = +let write_patch_applied ~__context ~self = (* This will write a small file containing xml to /var/patch/applied/ detailing what patches have been applied*) (* This allows the agent to remember what patches have been applied across pool-ejects *) let path = Db.Pool_patch.get_filename ~__context ~self in let _ = Unixext.mkdir_safe patch_applied_dir 0o755 in - match execute_patch path [ "info" ] with - | Success(output, _) -> - begin - let uuid = Db.Pool_patch.get_uuid ~__context ~self in - let path = patch_applied_dir ^ "/" ^ uuid in - Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 - (fun fd -> let 
(_: int) = Unix.write fd output 0 (String.length output) in ()) - end - | Failure(log, exn) -> - debug "error from patch application: %s" log; - raise exn + match execute_patch path [ "info" ] with + | Success(output, _) -> + begin + let uuid = Db.Pool_patch.get_uuid ~__context ~self in + let path = patch_applied_dir ^ "/" ^ uuid in + Unixext.with_file path [ Unix.O_WRONLY; Unix.O_CREAT ] 0o440 + (fun fd -> let (_: int) = Unix.write fd output 0 (String.length output) in ()) + end + | Failure(log, exn) -> + debug "error from patch application: %s" log; + raise exn let write_patch_applied_db ~__context ?date ?(applied=true) ~self ~host () = - let date = Date.of_float (match date with - | Some d -> d - | None -> Unix.gettimeofday ()) - in - match get_patch_applied_to ~__context ~patch:self ~host with - | Some(r, is_applied) -> - if not (is_applied = applied) then begin - Db.Host_patch.set_timestamp_applied ~__context ~self:r ~value:date; - Db.Host_patch.set_applied ~__context ~self:r ~value:applied - end - | None -> - let uuid = Uuid.make_uuid () in - let r = Ref.make () in - Db.Host_patch.create ~__context - ~ref:r - ~uuid:(Uuid.to_string uuid) - ~host - ~pool_patch:self - ~timestamp_applied:date - ~name_label:"" - ~name_description:"" - ~version:"" - ~filename:"" - ~applied - ~size:Int64.zero - ~other_config:[] + let date = Date.of_float (match date with + | Some d -> d + | None -> Unix.gettimeofday ()) + in + match get_patch_applied_to ~__context ~patch:self ~host with + | Some(r, is_applied) -> + if not (is_applied = applied) then begin + Db.Host_patch.set_timestamp_applied ~__context ~self:r ~value:date; + Db.Host_patch.set_applied ~__context ~self:r ~value:applied + end + | None -> + let uuid = Uuid.make_uuid () in + let r = Ref.make () in + Db.Host_patch.create ~__context + ~ref:r + ~uuid:(Uuid.to_string uuid) + ~host + ~pool_patch:self + ~timestamp_applied:date + ~name_label:"" + ~name_description:"" + ~version:"" + ~filename:"" + ~applied + ~size:Int64.zero + 
~other_config:[] let erase_patch_applied_db ~__context ~self ~host () = - match get_patch_applied_to ~__context ~patch:self ~host with - | Some (r, _) -> - debug "Removing Host_patch record for patch %s" (Ref.string_of self); - Db.Host_patch.destroy ~__context ~self:r - | None -> () + match get_patch_applied_to ~__context ~patch:self ~host with + | Some (r, _) -> + debug "Removing Host_patch record for patch %s" (Ref.string_of self); + Db.Host_patch.destroy ~__context ~self:r + | None -> () let update_db ~__context = (* We need to check the patch_applied_dir for applied patches and check they are present in the db *) (* Used from dbsync_slave - DO NOT THROW ANY EXCEPTIONS *) try (* First look in the patch applied dir for the definitive list of locally-applied patches *) - let local_patch_details = + let local_patch_details = (* Full paths of the /var/patch/applied files *) let stampfiles = List.map (Filename.concat patch_applied_dir) (try Array.to_list (Sys.readdir patch_applied_dir) with _ -> []) in - let parse x = - try [ patch_info_of_string (Unixext.string_of_file x), (Unix.stat x).Unix.st_mtime ] - with e -> warn "Error parsing patch stampfile %s: %s" x (ExnHelper.string_of_exn e); [] in + let parse x = + try [ patch_info_of_string (Unixext.string_of_file x), (Unix.stat x).Unix.st_mtime ] + with e -> warn "Error parsing patch stampfile %s: %s" x (ExnHelper.string_of_exn e); [] in List.concat (List.map parse stampfiles) in (* Make sure all the patches in the filesystem have global Pool_patch records *) - let pool_patches_in_fs = List.map - (fun (details , _)-> - try Db.Pool_patch.get_by_uuid ~__context ~uuid:details.uuid - with _ -> - debug "Patch uuid %s does not exist in Pool_patch table; creating" details.uuid; - create_patch_record ~__context details) local_patch_details in + let pool_patches_in_fs = List.map + (fun (details , _)-> + try Db.Pool_patch.get_by_uuid ~__context ~uuid:details.uuid + with _ -> + debug "Patch uuid %s does not exist in Pool_patch 
table; creating" details.uuid; + create_patch_record ~__context details) local_patch_details in (* Construct a table of pool_patch to mtime, necessary if we create Host_patch records *) let pool_patch_to_mtime = List.combine pool_patches_in_fs (List.map snd local_patch_details) in @@ -512,25 +512,25 @@ let update_db ~__context = List.iter (fun pp -> - let msg = Printf.sprintf "Adding new Host_patch record for patch %s" (Ref.string_of pp) in - Helpers.log_exn_continue msg - (fun () -> - debug "%s" msg; - let date = List.assoc pp pool_patch_to_mtime in - let host = Helpers.get_localhost ~__context in - write_patch_applied_db ~__context ~date ~self:pp ~host ()) ()) new_pool_patches; + let msg = Printf.sprintf "Adding new Host_patch record for patch %s" (Ref.string_of pp) in + Helpers.log_exn_continue msg + (fun () -> + debug "%s" msg; + let date = List.assoc pp pool_patch_to_mtime in + let host = Helpers.get_localhost ~__context in + write_patch_applied_db ~__context ~date ~self:pp ~host ()) ()) new_pool_patches; List.iter (fun pp -> - let msg = Printf.sprintf "Removing Host_patch record for patch %s" (Ref.string_of pp) in - Helpers.log_exn_continue msg - (fun () -> - debug "%s" msg; - Db.Host_patch.destroy ~__context ~self:(List.assoc pp pool_patch_to_host_patch)) ()) old_pool_patches - with - | End_of_file -> - () - | e -> - debug "Error updating patch status. %s" (ExnHelper.string_of_exn e) + let msg = Printf.sprintf "Removing Host_patch record for patch %s" (Ref.string_of pp) in + Helpers.log_exn_continue msg + (fun () -> + debug "%s" msg; + Db.Host_patch.destroy ~__context ~self:(List.assoc pp pool_patch_to_host_patch)) ()) old_pool_patches + with + | End_of_file -> + () + | e -> + debug "Error updating patch status. 
%s" (ExnHelper.string_of_exn e) exception Bad_precheck_xml of string @@ -542,207 +542,207 @@ let parse_patch_precheck_xml patch xml = in match xml with | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_UNKNOWN_ERROR")], [Element("info", _, [PCData info])]) -> - (* - Any message in text - for errors that don't fit into another category - *) - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_unknown_error, [Ref.string_of patch; info])) + (* + Any message in text - for errors that don't fit into another category + *) + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_unknown_error, [Ref.string_of patch; info])) | Element("error" , [("errorcode","PATCH_PRECHECK_FAILED_ISO_MOUNTED")], [Element ("info",_, [PCData info])]) -> - raise (Api_errors.Server_error (Api_errors.patch_precheck_tools_iso_mounted, [Ref.string_of patch; info])) + raise (Api_errors.Server_error (Api_errors.patch_precheck_tools_iso_mounted, [Ref.string_of patch; info])) | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_PREREQUISITE_MISSING")], children) -> - (* - - - *) - let rec collectUuids = function - | (Element("prerequisite", [("uuid", uuid)], _))::tail -> uuid::(collectUuids tail) - | [] -> [] - | _ -> raise (Bad_precheck_xml "Malformed prerequisite list") - in - let uuids = collectUuids children in - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_prerequisite_missing, [Ref.string_of patch; String.concat ";" uuids])) + (* + + + *) + let rec collectUuids = function + | (Element("prerequisite", [("uuid", uuid)], _))::tail -> uuid::(collectUuids tail) + | [] -> [] + | _ -> raise (Bad_precheck_xml "Malformed prerequisite list") + in + let uuids = collectUuids children in + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_prerequisite_missing, [Ref.string_of patch; String.concat ";" uuids])) | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_WRONG_SERVER_VERSION")], children) -> - (* - 4.0.91 - 4.0.95 or newer - 
*) - let found = findElement "found" children in - let required = findElement "required" children in - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_wrong_server_version, [Ref.string_of patch; found; required])) + (* + 4.0.91 + 4.0.95 or newer + *) + let found = findElement "found" children in + let required = findElement "required" children in + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_wrong_server_version, [Ref.string_of patch; found; required])) | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_WRONG_SERVER_BUILD")], children) -> - (* Exactly like the previous one but SERVER_BUILD instead of SERVER_VERSION *) - (* - 50845c - ^58332[pc]$ - *) - let found = findElement "found" children in - let required = findElement "required" children in - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_wrong_server_build, [Ref.string_of patch; found; required])) + (* Exactly like the previous one but SERVER_BUILD instead of SERVER_VERSION *) + (* + 50845c + ^58332[pc]$ + *) + let found = findElement "found" children in + let required = findElement "required" children in + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_wrong_server_build, [Ref.string_of patch; found; required])) | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_VM_RUNNING")], _) -> - (* *) - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_vm_running, [Ref.string_of patch])) + (* *) + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_vm_running, [Ref.string_of patch])) | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_OUT_OF_SPACE")], children) -> - (* - * 165396480 - * 1073741824000 - * - *) - let found = findElement "found" children in - let required = findElement "required" children in - raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_out_of_space, [Ref.string_of patch; found; required])) + (* + * 165396480 + * 1073741824000 + * + *) + let found = findElement 
"found" children in + let required = findElement "required" children in + raise (Api_errors.Server_error (Api_errors.patch_precheck_failed_out_of_space, [Ref.string_of patch; found; required])) | _ -> - raise (Bad_precheck_xml "Unknown error code or malformed xml") - + raise (Bad_precheck_xml "Unknown error code or malformed xml") + (* calls the parse function, which throws the correct error based on the XML the patch precheck put on stdout *) let throw_patch_precheck_error patch s = try let xml = Xml.parse_string s in - debug "precheck xml: %s" (Xml.to_string xml); - parse_patch_precheck_xml patch xml - with + debug "precheck xml: %s" (Xml.to_string xml); + parse_patch_precheck_xml patch xml + with | Xml.Error error -> let msg = Printf.sprintf "error parsing patch precheck xml: %s" (Xml.error error) in - debug "%s" msg; - raise (Api_errors.Server_error (Api_errors.invalid_patch_with_log, [msg])) + debug "%s" msg; + raise (Api_errors.Server_error (Api_errors.invalid_patch_with_log, [msg])) | Bad_precheck_xml error -> - raise (Api_errors.Server_error (Api_errors.invalid_patch_with_log, [error])) + raise (Api_errors.Server_error (Api_errors.invalid_patch_with_log, [error])) let run_precheck ~__context ~self ~host = let path = Db.Pool_patch.get_filename ~__context ~self in - match execute_patch path [ "precheck" ] with - | Success(output, _) -> output - | Failure(xml, Subprocess_failed 1) -> - (* if precheck returns 1, the patch should have written error xml to stdout *) - debug "Prechecks on patch %s failed with return code 1. 
XML is %s" (Ref.string_of self) xml; - throw_patch_precheck_error self xml - | Failure(log, _) -> - let msg = Printf.sprintf "Error running prechecks on patch %s: %s" (Ref.string_of self) log in - debug "%s" msg; - raise (Api_errors.Server_error(Api_errors.patch_precheck_failed_unknown_error, [Ref.string_of self; msg])) + match execute_patch path [ "precheck" ] with + | Success(output, _) -> output + | Failure(xml, Subprocess_failed 1) -> + (* if precheck returns 1, the patch should have written error xml to stdout *) + debug "Prechecks on patch %s failed with return code 1. XML is %s" (Ref.string_of self) xml; + throw_patch_precheck_error self xml + | Failure(log, _) -> + let msg = Printf.sprintf "Error running prechecks on patch %s: %s" (Ref.string_of self) log in + debug "%s" msg; + raise (Api_errors.Server_error(Api_errors.patch_precheck_failed_unknown_error, [Ref.string_of self; msg])) (* precheck API call entrypoint *) let precheck ~__context ~self ~host = (* check we're not on oem *) if on_oem ~__context - then raise (Api_errors.Server_error (Api_errors.not_allowed_on_oem_edition, ["patch-precheck"])); + then raise (Api_errors.Server_error (Api_errors.not_allowed_on_oem_edition, ["patch-precheck"])); (* get the patch from the master (no-op if we're the master) *) get_patch_to_local ~__context ~self; - finally - (fun () -> run_precheck ~__context ~self ~host) - (fun () -> - (* This prevents leaking space on the slave if the patch is repeatedly uploaded, prechecked and then destroyed *) - if not (Pool_role.is_master ()) then begin - let path = Db.Pool_patch.get_filename ~__context ~self in - Unixext.unlink_safe path; - end - ) - -let apply ~__context ~self ~host = + finally + (fun () -> run_precheck ~__context ~self ~host) + (fun () -> + (* This prevents leaking space on the slave if the patch is repeatedly uploaded, prechecked and then destroyed *) + if not (Pool_role.is_master ()) then begin + let path = Db.Pool_patch.get_filename ~__context ~self in + 
Unixext.unlink_safe path; + end + ) + +let apply ~__context ~self ~host = (* 0th, check we're not on oem *) if on_oem ~__context then raise (Api_errors.Server_error (Api_errors.not_allowed_on_oem_edition, ["patch-apply"])); (* 1st, check patch isn't already applied *) let () = match get_patch_applied_to ~__context ~patch:self ~host with - | Some (r, applied) -> - if applied then raise (Api_errors.Server_error(Api_errors.patch_already_applied, [ Ref.string_of self ])) - else patch_apply_in_progress ~__context ~patch:self ~host - | None -> () in + | Some (r, applied) -> + if applied then raise (Api_errors.Server_error(Api_errors.patch_already_applied, [ Ref.string_of self ])) + else patch_apply_in_progress ~__context ~patch:self ~host + | None -> () in (* 2nd, get the patch from the master (no-op if we're the master) *) get_patch_to_local ~__context ~self; - + let path = Db.Pool_patch.get_filename ~__context ~self in - (* 3rd, run prechecks *) - let (_: string) = run_precheck ~__context ~self ~host in - - (* 4th, apply the patch *) - begin - write_patch_applied_db ~__context ~applied:false ~self ~host (); - match execute_patch path [ "apply" ] with - | Success(output, _) -> - debug "executing patch successful"; - write_patch_applied_db ~__context ~self ~host (); - (* 5th, write out patch applied file to hd *) - write_patch_applied ~__context ~self; - (* CA-27145: to handle rolled-up patches, rescan the patch applied directory *) - begin - try update_db ~__context - with e -> - (* should never happen but just in case... 
*) - error "Caught exception rescanning patch applied directory: %s" (ExnHelper.string_of_exn e) - end; - Create_misc.create_patches_requiring_reboot_info ~__context ~host; - Create_misc.create_software_version ~__context; - output; - | Failure(log, exn) -> - debug "error from patch application: %s" log; - erase_patch_applied_db ~__context ~self ~host (); - let error_string = "Backup files already present" in - if List.length (Xstringext.String.find_all error_string log) = 0 then - raise (Api_errors.Server_error(Api_errors.patch_apply_failed, [log])) - else begin - let xml = Xml.parse_string log in - match xml with - | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_UNKNOWN_ERROR")], [Element("info", _, [PCData info])]) -> - raise (Api_errors.Server_error(Api_errors.patch_apply_failed_backup_files_exist, [info])) - | _ -> - raise (Bad_precheck_xml "Could not find element info") - end - end + (* 3rd, run prechecks *) + let (_: string) = run_precheck ~__context ~self ~host in + + (* 4th, apply the patch *) + begin + write_patch_applied_db ~__context ~applied:false ~self ~host (); + match execute_patch path [ "apply" ] with + | Success(output, _) -> + debug "executing patch successful"; + write_patch_applied_db ~__context ~self ~host (); + (* 5th, write out patch applied file to hd *) + write_patch_applied ~__context ~self; + (* CA-27145: to handle rolled-up patches, rescan the patch applied directory *) + begin + try update_db ~__context + with e -> + (* should never happen but just in case... 
*) + error "Caught exception rescanning patch applied directory: %s" (ExnHelper.string_of_exn e) + end; + Create_misc.create_patches_requiring_reboot_info ~__context ~host; + Create_misc.create_software_version ~__context; + output; + | Failure(log, exn) -> + debug "error from patch application: %s" log; + erase_patch_applied_db ~__context ~self ~host (); + let error_string = "Backup files already present" in + if List.length (Xstringext.String.find_all error_string log) = 0 then + raise (Api_errors.Server_error(Api_errors.patch_apply_failed, [log])) + else begin + let xml = Xml.parse_string log in + match xml with + | Element ("error", [("errorcode", "PATCH_PRECHECK_FAILED_UNKNOWN_ERROR")], [Element("info", _, [PCData info])]) -> + raise (Api_errors.Server_error(Api_errors.patch_apply_failed_backup_files_exist, [info])) + | _ -> + raise (Bad_precheck_xml "Could not find element info") + end + end let pool_apply ~__context ~self = let hosts = - List.filter + List.filter (fun x -> not (is_oem ~__context ~host:x) && match get_patch_applied_to ~__context ~patch:self ~host:x with - | None -> true - | Some (_, applied) -> not applied && patch_apply_in_progress ~__context ~patch:self ~host:x) - (Db.Host.get_all ~__context) + | None -> true + | Some (_, applied) -> not applied && patch_apply_in_progress ~__context ~patch:self ~host:x) + (Db.Host.get_all ~__context) in - let (_: string list) = - List.map + let (_: string list) = + List.map (fun host -> Helpers.call_api_functions ~__context (fun rpc session_id -> Client.Client.Pool_patch.apply ~rpc ~session_id ~self ~host) ) - hosts + hosts in let _ = Db.Pool_patch.set_pool_applied ~__context ~self ~value:true in - () - -let clean ~__context ~self = + () + +let clean ~__context ~self = let path = Db.Pool_patch.get_filename ~__context ~self in - Unixext.unlink_safe path - -let clean_on_host ~__context ~self ~host = - debug "pool_patch.clean_on_host"; - clean ~__context ~self - -let pool_clean ~__context ~self = - debug 
"pool_patch.pool_clean"; - let hosts = Db.Host.get_all ~__context in - List.iter - (fun host -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Client.Pool_patch.clean_on_host ~rpc ~session_id ~self ~host) - ) - hosts; - Db.Pool_patch.set_filename ~__context ~self ~value:"" - -let destroy ~__context ~self = + Unixext.unlink_safe path + +let clean_on_host ~__context ~self ~host = + debug "pool_patch.clean_on_host"; + clean ~__context ~self + +let pool_clean ~__context ~self = + debug "pool_patch.pool_clean"; + let hosts = Db.Host.get_all ~__context in + List.iter + (fun host -> + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Client.Pool_patch.clean_on_host ~rpc ~session_id ~self ~host) + ) + hosts; + Db.Pool_patch.set_filename ~__context ~self ~value:"" + +let destroy ~__context ~self = let hosts = Db.Host.get_all ~__context in let applied = List.exists (fun host -> get_patch_applied_to ~__context ~patch:self ~host <> None ) hosts in if applied then raise (Api_errors.Server_error(Api_errors.patch_is_applied, [])); - List.iter - (fun host -> - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.Client.Pool_patch.clean_on_host ~rpc ~session_id ~self ~host) - ) - hosts; - Db.Pool_patch.destroy ~__context ~self + List.iter + (fun host -> + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.Client.Pool_patch.clean_on_host ~rpc ~session_id ~self ~host) + ) + hosts; + Db.Pool_patch.destroy ~__context ~self diff --git a/ocaml/xapi/xapi_pool_transition.ml b/ocaml/xapi/xapi_pool_transition.ml index e63fe697013..1f073f609da 100644 --- a/ocaml/xapi/xapi_pool_transition.ml +++ b/ocaml/xapi/xapi_pool_transition.ml @@ -13,7 +13,7 @@ *) (** * @group Pool Management - *) +*) open Stdext open Threadext @@ -25,31 +25,31 @@ open D (** Execute scripts in the "master-scripts" dir when changing role from master to slave or back again. 
Remember whether the scripts have been run using state in the local database. *) -let run_external_scripts becoming_master = - let call_scripts () = - let arg = if becoming_master then "start" else "stop" in +let run_external_scripts becoming_master = + let call_scripts () = + let arg = if becoming_master then "start" else "stop" in debug "Calling scripts in %s with argument %s" !Xapi_globs.master_scripts_dir arg; let all = try Array.to_list (Sys.readdir !Xapi_globs.master_scripts_dir) with _ -> [] in let order = List.sort (fun a b -> if becoming_master then compare a b else -(compare a b)) all in List.iter (fun filename -> - try - let filename = !Xapi_globs.master_scripts_dir ^ "/" ^ filename in - debug "Executing %s %s" filename arg; - ignore(Forkhelpers.execute_command_get_output filename [arg]) - with Forkhelpers.Spawn_internal_error(_, _, Unix.WEXITED n) -> - debug "%s %s exited with code %d" filename arg n + try + let filename = !Xapi_globs.master_scripts_dir ^ "/" ^ filename in + debug "Executing %s %s" filename arg; + ignore(Forkhelpers.execute_command_get_output filename [arg]) + with Forkhelpers.Spawn_internal_error(_, _, Unix.WEXITED n) -> + debug "%s %s exited with code %d" filename arg n ) order in let already_run = try bool_of_string (Localdb.get Constants.master_scripts) with _ -> false in (* Only do anything if we're switching mode *) - if already_run <> becoming_master + if already_run <> becoming_master then (call_scripts (); - Localdb.put Constants.master_scripts (string_of_bool becoming_master)) + Localdb.put Constants.master_scripts (string_of_bool becoming_master)) (** Switch into master mode using the backup database *) -let become_master () = +let become_master () = Pool_role.set_role Pool_role.Master; (* picked up as master on next boot *) (* Since we're becoming the master (and in the HA case the old master is dead) we save ourselves some trouble by saving the stats locally. 
*) @@ -60,7 +60,7 @@ let become_master () = call; must be careful not to rely on the database layer and to use only slave_local logins. This code runs on the new master. *) -let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addresses: string list) (my_address: string) = +let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addresses: string list) (my_address: string) = debug "attempting %s two-phase commit of new master. My address = %s; peer addresses = [ %s ]" (if manual then "manual" else "automatic") my_address (String.concat "; " peer_addresses); @@ -68,26 +68,26 @@ let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addre let all_addresses = peer_addresses @ [ my_address ] in let done_so_far = ref [] in - let abort () = + let abort () = (* Tell as many nodes to abort as possible *) List.iter (fun address -> - Helpers.log_exn_continue (Printf.sprintf "Telling %s to abort" address) - (fun () -> - debug "Issuing abort to host address: %s" address; - Helpers.call_emergency_mode_functions address - (fun rpc session_id -> Client.Host.abort_new_master rpc session_id my_address) - ) () + Helpers.log_exn_continue (Printf.sprintf "Telling %s to abort" address) + (fun () -> + debug "Issuing abort to host address: %s" address; + Helpers.call_emergency_mode_functions address + (fun rpc session_id -> Client.Host.abort_new_master rpc session_id my_address) + ) () ) !done_so_far in debug "Phase 1: proposing myself as new master"; (try - List.iter - (fun address -> - debug "Proposing myself as a new master to host address: %s" address; - Helpers.call_emergency_mode_functions address - (fun rpc session_id -> Client.Host.propose_new_master rpc session_id my_address manual); - done_so_far := address :: !done_so_far) all_addresses + List.iter + (fun address -> + debug "Proposing myself as a new master to host address: %s" address; + Helpers.call_emergency_mode_functions address + (fun rpc session_id -> 
Client.Host.propose_new_master rpc session_id my_address manual); + done_so_far := address :: !done_so_far) all_addresses with e -> debug "Phase 1 aborting, caught exception: %s" (ExnHelper.string_of_exn e); abort (); @@ -95,7 +95,7 @@ let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addre ); (* Uncomment this to check that timeout of phase 1 request works *) - (* abort (); raise (Api_errors.Server_error (Api_errors.ha_abort_new_master, [ "debug" ])); *) + (* abort (); raise (Api_errors.Server_error (Api_errors.ha_abort_new_master, [ "debug" ])); *) let am_master_already = Pool_role.get_role () = Pool_role.Master in @@ -105,11 +105,11 @@ let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addre debug "Phase 2: committing transaction"; (* It's very bad if someone fails now *) let hosts_which_failed = ref [] in - let tell_host_to_commit address = + let tell_host_to_commit address = debug "Signalling commit to host address: %s" address; try Helpers.call_emergency_mode_functions address - (fun rpc session_id -> Client.Host.commit_new_master rpc session_id my_address) + (fun rpc session_id -> Client.Host.commit_new_master rpc session_id my_address) with e -> debug "Caught exception %s while telling host to commit new master" (ExnHelper.string_of_exn e); hosts_which_failed := address :: !hosts_which_failed in @@ -141,35 +141,35 @@ let attempt_two_phase_commit_of_new_master ~__context (manual: bool) (peer_addre then info "Not restarting since we are the master already" else Db_cache_impl.flush_and_exit (Db_connections.preferred_write_db ()) Xapi_globs.restart_return_code; - (* If manual, periodicly access to the database to check whether the old master has restarted. *) - if manual then - let (_ : Thread.t) = Thread.create (fun () -> - try while true do - (* Access to a random value in the database *) - let (_ : API.ref_pool list) = Db.Pool.get_all ~__context in - let n = 3. in - debug "The old master has not restarted yet. 
Sleep for %.0f seconds" n; - Thread.delay n; - done with _ -> - debug "The old master has restarted as slave; I am the only master now.") () - in () + (* If manual, periodicly access to the database to check whether the old master has restarted. *) + if manual then + let (_ : Thread.t) = Thread.create (fun () -> + try while true do + (* Access to a random value in the database *) + let (_ : API.ref_pool list) = Db.Pool.get_all ~__context in + let n = 3. in + debug "The old master has not restarted yet. Sleep for %.0f seconds" n; + Thread.delay n; + done with _ -> + debug "The old master has restarted as slave; I am the only master now.") () + in () (** Point ourselves at another master *) -let become_another_masters_slave master_address = +let become_another_masters_slave master_address = let new_role = Pool_role.Slave master_address in if Pool_role.get_role () = new_role then begin debug "We are already a slave of %s; nothing to do" master_address; end else begin - debug "Setting pool.conf to point to %s" master_address; + debug "Setting pool.conf to point to %s" master_address; Pool_role.set_role new_role; run_external_scripts false; Xapi_fuse.light_fuse_and_run () end (** If we just transitioned slave -> master (as indicated by the localdb flag) then generate a single alert *) -let consider_sending_alert __context () = +let consider_sending_alert __context () = if (try bool_of_string (Localdb.get Constants.this_node_just_became_master) with _ -> false) - then + then let obj_uuid = Helpers.get_localhost_uuid () in let (name, priority) = Api_messages.pool_master_transition in let (_: 'a Ref.t) = Xapi_message.create ~__context ~name ~priority ~cls:`Host ~obj_uuid ~body:"" in diff --git a/ocaml/xapi/xapi_pv_driver_version.ml b/ocaml/xapi/xapi_pv_driver_version.ml index 5325e9e05c8..4193cc45f65 100644 --- a/ocaml/xapi/xapi_pv_driver_version.ml +++ b/ocaml/xapi/xapi_pv_driver_version.ml @@ -22,51 +22,51 @@ open D (** Represents the detected PV driver version *) type t 
= - | Linux of int * int * int * int - | Windows of int * int * int * int - | Unknown + | Linux of int * int * int * int + | Windows of int * int * int * int + | Unknown let string_of = function - | Linux(major, minor, micro, build) -> Printf.sprintf "Linux %d.%d.%d-%d" major minor micro build - | Windows(major, minor, micro, build) -> Printf.sprintf "Windows %d.%d.%d-%d" major minor micro build - | Unknown -> "Unknown" + | Linux(major, minor, micro, build) -> Printf.sprintf "Linux %d.%d.%d-%d" major minor micro build + | Windows(major, minor, micro, build) -> Printf.sprintf "Windows %d.%d.%d-%d" major minor micro build + | Unknown -> "Unknown" let has_pv_drivers x = x <> Unknown let get_drivers_version os_version drivers_version = - try - let is_windows = - try List.assoc "distro" os_version = "windows" - with Not_found -> false - in - let lookup_driver_key_with_default key default = - if not (List.mem_assoc key drivers_version) then default - else int_of_string (List.assoc key drivers_version) in - let major = int_of_string (List.assoc "major" drivers_version) in - let minor = int_of_string (List.assoc "minor" drivers_version) in - (* in rolling upgrade rio slaves will not put micro vsn in database, but we musn't report - "Unknown", since then is_ok_for_migrate check will fail... 
*) - let micro = lookup_driver_key_with_default "micro" (-1) in - (* added in Orlando *) (* XXX: linux guest agent doesn't report build number while all windows ones do *) - let build = lookup_driver_key_with_default "build" (-1) in - if is_windows then Windows(major, minor, micro, build) else Linux(major, minor, micro, build) - with _ -> Unknown + try + let is_windows = + try List.assoc "distro" os_version = "windows" + with Not_found -> false + in + let lookup_driver_key_with_default key default = + if not (List.mem_assoc key drivers_version) then default + else int_of_string (List.assoc key drivers_version) in + let major = int_of_string (List.assoc "major" drivers_version) in + let minor = int_of_string (List.assoc "minor" drivers_version) in + (* in rolling upgrade rio slaves will not put micro vsn in database, but we musn't report + "Unknown", since then is_ok_for_migrate check will fail... *) + let micro = lookup_driver_key_with_default "micro" (-1) in + (* added in Orlando *) (* XXX: linux guest agent doesn't report build number while all windows ones do *) + let build = lookup_driver_key_with_default "build" (-1) in + if is_windows then Windows(major, minor, micro, build) else Linux(major, minor, micro, build) + with _ -> Unknown let of_guest_metrics gmr = - match gmr with - | Some gmr -> - get_drivers_version - gmr.Db_actions.vM_guest_metrics_os_version - gmr.Db_actions.vM_guest_metrics_PV_drivers_version - | None -> Unknown + match gmr with + | Some gmr -> + get_drivers_version + gmr.Db_actions.vM_guest_metrics_os_version + gmr.Db_actions.vM_guest_metrics_PV_drivers_version + | None -> Unknown (** Returns an API error option if the PV drivers are missing *) let make_error_opt version vm = - if has_pv_drivers version then None - else Some(Api_errors.vm_missing_pv_drivers, [ Ref.string_of vm ]) + if has_pv_drivers version then None + else Some(Api_errors.vm_missing_pv_drivers, [ Ref.string_of vm ]) let is_windows_and_orlando_or_newer gmr = - match 
of_guest_metrics (Some gmr) with - | Windows (_, _, _, build) -> (build >= 0) - | Linux _ - | Unknown -> false + match of_guest_metrics (Some gmr) with + | Windows (_, _, _, build) -> (build >= 0) + | Linux _ + | Unknown -> false diff --git a/ocaml/xapi/xapi_pv_driver_version.mli b/ocaml/xapi/xapi_pv_driver_version.mli index ced61536de7..dd74130857e 100644 --- a/ocaml/xapi/xapi_pv_driver_version.mli +++ b/ocaml/xapi/xapi_pv_driver_version.mli @@ -24,8 +24,8 @@ val has_pv_drivers: t -> bool val make_error_opt: t -> API.ref_VM -> (string * string list) option (** [make_error_opt x] returns None if has_pv_drivers, - and Some(code,params) otherwise. *) + and Some(code,params) otherwise. *) val is_windows_and_orlando_or_newer: Db_actions.vM_guest_metrics_t -> bool (** True if the pv driver version info is a Windows version and has a build - number (build number was new in Orlando). *) + number (build number was new in Orlando). *) diff --git a/ocaml/xapi/xapi_remotecmd.ml b/ocaml/xapi/xapi_remotecmd.ml index 0704e47316b..013ac6e9d2c 100644 --- a/ocaml/xapi/xapi_remotecmd.ml +++ b/ocaml/xapi/xapi_remotecmd.ml @@ -23,19 +23,19 @@ open Forkhelpers let do_cmd s cmd args = match with_logfile_fd "execute_command_get_output" - (fun log_fd -> - (* Capture stderr output for logging *) - let pid = safe_close_and_exec (Some s) (Some s) (Some log_fd) [] cmd args in - snd(waitpid pid) - ) with - | Success(log, status) -> - debug "log: %s" log; - begin match status with - | Unix.WEXITED 0 -> ignore(log) - | _ -> raise (Spawn_internal_error(log, "", status)) - end - | Failure(log, exn) -> - raise exn + (fun log_fd -> + (* Capture stderr output for logging *) + let pid = safe_close_and_exec (Some s) (Some s) (Some log_fd) [] cmd args in + snd(waitpid pid) + ) with + | Success(log, status) -> + debug "log: %s" log; + begin match status with + | Unix.WEXITED 0 -> ignore(log) + | _ -> raise (Spawn_internal_error(log, "", status)) + end + | Failure(log, exn) -> + raise exn let allowed_cmds 
= ["rsync","/usr/bin/rsync"] @@ -45,15 +45,15 @@ let handler (req: Http.Request.t) s _ = debug "remotecmd handler running"; Xapi_http.with_context "Remote command" req s (fun __context -> - let session_id = Context.get_session_id __context in - if not (Db.Session.get_pool ~__context ~self:session_id) then - begin - failwith "Not a pool session" - end; - let cmd=List.assoc "cmd" q in - let cmd=List.assoc cmd allowed_cmds in - let args = List.map snd (List.filter (fun (x,y) -> x="arg") q) in - do_cmd s cmd args + let session_id = Context.get_session_id __context in + if not (Db.Session.get_pool ~__context ~self:session_id) then + begin + failwith "Not a pool session" + end; + let cmd=List.assoc "cmd" q in + let cmd=List.assoc cmd allowed_cmds in + let args = List.map snd (List.filter (fun (x,y) -> x="arg") q) in + do_cmd s cmd args ) - + diff --git a/ocaml/xapi/xapi_role.ml b/ocaml/xapi/xapi_role.ml index 5746c28b710..95765107a92 100644 --- a/ocaml/xapi/xapi_role.ml +++ b/ocaml/xapi/xapi_role.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for Role objects * @group XenAPI functions - *) +*) module D = Debug.Make(struct let name="xapi_role" end) open D @@ -23,10 +23,10 @@ open Db_actions (* A note on roles: *) (* Here, roles and permissons are treated as a recursive type, where the *) (* permissions are the leaves and roles are intermediate nodes of the tree *) -(* For each permission there is one and only one XAPI/HTTP call *) +(* For each permission there is one and only one XAPI/HTTP call *) let get_all_static_roles = - Rbac_static.all_static_permissions @ Rbac_static.all_static_roles + Rbac_static.all_static_permissions @ Rbac_static.all_static_roles (* In Db, Ref is a pointer to the hashtable row. 
Here, ref="OpaqueRef:"^uuid *) let ref_of_role ~role = String_to_DM.ref_role (Ref.ref_prefix ^ role.role_uuid) @@ -36,128 +36,128 @@ let static_role_by_ref_tbl = Hashtbl.create (List.length get_all_static_roles) let static_role_by_uuid_tbl = Hashtbl.create (List.length get_all_static_roles) let static_role_by_name_label_tbl = Hashtbl.create (List.length get_all_static_roles) let _ = - List.iter (* initialize static_role_by_ref_tbl *) - (fun r->Hashtbl.add static_role_by_ref_tbl (ref_of_role r) r) - get_all_static_roles; - List.iter (* initialize static_role_by_uuid_tbl *) - (fun r->Hashtbl.add static_role_by_uuid_tbl (r.role_uuid) r) - get_all_static_roles; - List.iter (* initialize static_role_by_name_tbl *) - (fun r->Hashtbl.add static_role_by_name_label_tbl (r.role_name_label) r) - get_all_static_roles + List.iter (* initialize static_role_by_ref_tbl *) + (fun r->Hashtbl.add static_role_by_ref_tbl (ref_of_role r) r) + get_all_static_roles; + List.iter (* initialize static_role_by_uuid_tbl *) + (fun r->Hashtbl.add static_role_by_uuid_tbl (r.role_uuid) r) + get_all_static_roles; + List.iter (* initialize static_role_by_name_tbl *) + (fun r->Hashtbl.add static_role_by_name_label_tbl (r.role_name_label) r) + get_all_static_roles let find_role_by_ref ref = Hashtbl.find static_role_by_ref_tbl ref let find_role_by_uuid uuid = Hashtbl.find static_role_by_uuid_tbl uuid let find_role_by_name_label name_label = Hashtbl.find static_role_by_name_label_tbl name_label (* val get_all : __context:Context.t -> ref_role_set*) -let get_all ~__context = - List.map (fun r -> ref_of_role r) get_all_static_roles - (*@ (* concatenate with Db table *) - Db.Role.get_all ~__context*) +let get_all ~__context = + List.map (fun r -> ref_of_role r) get_all_static_roles +(*@ (* concatenate with Db table *) + Db.Role.get_all ~__context*) let is_valid_role ~__context ~role = - Hashtbl.mem static_role_by_ref_tbl role + Hashtbl.mem static_role_by_ref_tbl role let get_common ~__context ~self 
~static_fn ~db_fn = - try (* first look up across the static roles *) - let static_record = find_role_by_ref self in - static_fn static_record - with Not_found -> (* then look up across the roles in the Db *) - db_fn ~__context ~self + try (* first look up across the static roles *) + let static_record = find_role_by_ref self in + static_fn static_record + with Not_found -> (* then look up across the roles in the Db *) + db_fn ~__context ~self (* val get_record : __context:Context.t -> self:ref_role -> role_t*) let get_api_record ~static_record = - { (* Db_actions.role_t -> API.role_t *) - API.role_uuid=static_record.Db_actions.role_uuid; - API.role_name_label=static_record.Db_actions.role_name_label; - API.role_name_description=static_record.Db_actions.role_name_description; - API.role_subroles=static_record.Db_actions.role_subroles; - (*API.role_is_basic=static_record.Db_actions.role_is_basic;*) - (*API.role_is_complete=static_record.Db_actions.role_is_complete;*) - (*API.role_subjects=static_record.Db_actions.role_subjects;*) - } + { (* Db_actions.role_t -> API.role_t *) + API.role_uuid=static_record.Db_actions.role_uuid; + API.role_name_label=static_record.Db_actions.role_name_label; + API.role_name_description=static_record.Db_actions.role_name_description; + API.role_subroles=static_record.Db_actions.role_subroles; + (*API.role_is_basic=static_record.Db_actions.role_is_basic;*) + (*API.role_is_complete=static_record.Db_actions.role_is_complete;*) + (*API.role_subjects=static_record.Db_actions.role_subjects;*) + } let get_record ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> get_api_record static_record) - ~db_fn:(fun ~__context ~self -> Db.Role.get_record ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> get_api_record static_record) + ~db_fn:(fun ~__context ~self -> Db.Role.get_record ~__context ~self) (* val get_all_records_where : __context:Context.t -> expr:string -> 
ref_role_to_role_t_map*) let expr_no_permissions = "subroles<>[]" -let expr_only_permissions = "subroles=[]" +let expr_only_permissions = "subroles=[]" let get_all_records_where ~__context ~expr = - if expr = expr_no_permissions then (* composite role, ie. not a permission *) - List.map - (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) - Rbac_static.all_static_roles - else if expr = expr_only_permissions then (* composite role, ie. a permission *) - List.map - (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) - Rbac_static.all_static_permissions - else (* anything in this table, ie. roles+permissions *) - List.map - (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) - get_all_static_roles - (*@ (* concatenate with Db table *) - (* TODO: this line is crashing for some unknown reason, but not needed in RBAC 1 *) - Db.Role.get_all_records_where ~__context ~expr*) + if expr = expr_no_permissions then (* composite role, ie. not a permission *) + List.map + (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) + Rbac_static.all_static_roles + else if expr = expr_only_permissions then (* composite role, ie. a permission *) + List.map + (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) + Rbac_static.all_static_permissions + else (* anything in this table, ie. 
roles+permissions *) + List.map + (fun r -> ((ref_of_role r),(get_api_record ~static_record:r))) + get_all_static_roles +(*@ (* concatenate with Db table *) + (* TODO: this line is crashing for some unknown reason, but not needed in RBAC 1 *) + Db.Role.get_all_records_where ~__context ~expr*) (* val get_all_records : __context:Context.t -> ref_role_to_role_t_map*) let get_all_records ~__context = - get_all_records_where ~__context ~expr:"True" + get_all_records_where ~__context ~expr:"True" (* val get_by_uuid : __context:Context.t -> uuid:string -> ref_role*) let get_by_uuid ~__context ~uuid = - try - let static_record = find_role_by_uuid uuid in - ref_of_role static_record - with Not_found -> - (* pass-through to Db *) - Db.Role.get_by_uuid ~__context ~uuid + try + let static_record = find_role_by_uuid uuid in + ref_of_role static_record + with Not_found -> + (* pass-through to Db *) + Db.Role.get_by_uuid ~__context ~uuid let get_by_name_label ~__context ~label = - try - let static_record = find_role_by_name_label label in - [ref_of_role static_record] - with Not_found -> - (* pass-through to Db *) - Db.Role.get_by_name_label ~__context ~label + try + let static_record = find_role_by_name_label label in + [ref_of_role static_record] + with Not_found -> + (* pass-through to Db *) + Db.Role.get_by_name_label ~__context ~label (* val get_uuid : __context:Context.t -> self:ref_role -> string*) let get_uuid ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_uuid) - ~db_fn:(fun ~__context ~self -> Db.Role.get_uuid ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_uuid) + ~db_fn:(fun ~__context ~self -> Db.Role.get_uuid ~__context ~self) (* val get_name : __context:Context.t -> self:ref_role -> string*) let get_name_label ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_name_label) - ~db_fn:(fun ~__context ~self -> 
Db.Role.get_name_label ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_name_label) + ~db_fn:(fun ~__context ~self -> Db.Role.get_name_label ~__context ~self) (* val get_description : __context:Context.t -> self:ref_role -> string*) let get_name_description ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_name_description) - ~db_fn:(fun ~__context ~self -> Db.Role.get_name_description ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_name_description) + ~db_fn:(fun ~__context ~self -> Db.Role.get_name_description ~__context ~self) (* val get_permissions : __context:Context.t -> self:ref_role -> string_set*) let get_subroles ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_subroles) - ~db_fn:(fun ~__context ~self -> Db.Role.get_subroles ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_subroles) + ~db_fn:(fun ~__context ~self -> Db.Role.get_subroles ~__context ~self) (* val get_is_basic : __context:Context.t -> self:ref_role -> bool*) (*let get_is_basic ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_is_basic) - ~db_fn:(fun ~__context ~self -> Db.Role.get_is_basic ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_is_basic) + ~db_fn:(fun ~__context ~self -> Db.Role.get_is_basic ~__context ~self) *) (* val get_is_complete : __context:Context.t -> self:ref_role -> bool*) (*let get_is_complete ~__context ~self = - get_common ~__context ~self - ~static_fn:(fun static_record -> static_record.role_is_complete) - ~db_fn:(fun ~__context ~self -> Db.Role.get_is_complete ~__context ~self) + get_common ~__context ~self + ~static_fn:(fun static_record -> static_record.role_is_complete) + ~db_fn:(fun ~__context 
~self -> Db.Role.get_is_complete ~__context ~self) *) (* XenCenter needs these functions *) @@ -171,56 +171,56 @@ let get_subroles ~__context ~self = (* This function recursively expands a role into its basic permission set. In other words, it *) (* returns the leaves of the tree whose root is the role passed as parameter *) let get_permissions_common ~__context ~role ~ret_value_fn = - let rec rec_get_permissions_of_role ~__context ~role = - let subroles = get_subroles ~__context ~self:role in - if List.length subroles = 0 - then (* base case = leaf node = permission is role itself *) - [ret_value_fn role] - else (* step = go recursively down composite roles *) - (List.fold_left - (fun accu role -> - List.rev_append - (rec_get_permissions_of_role ~__context ~role) - accu - ) - [] - (subroles) - ) - in - Stdext.Listext.List.setify (rec_get_permissions_of_role ~__context ~role) + let rec rec_get_permissions_of_role ~__context ~role = + let subroles = get_subroles ~__context ~self:role in + if List.length subroles = 0 + then (* base case = leaf node = permission is role itself *) + [ret_value_fn role] + else (* step = go recursively down composite roles *) + (List.fold_left + (fun accu role -> + List.rev_append + (rec_get_permissions_of_role ~__context ~role) + accu + ) + [] + (subroles) + ) + in + Stdext.Listext.List.setify (rec_get_permissions_of_role ~__context ~role) let get_permissions ~__context ~self = - get_permissions_common ~__context ~role:self - ~ret_value_fn:(fun role -> role) + get_permissions_common ~__context ~role:self + ~ret_value_fn:(fun role -> role) let get_permissions_name_label ~__context ~self = - get_permissions_common ~__context ~role:self - ~ret_value_fn:(fun role -> get_name_label ~__context ~self:role) + get_permissions_common ~__context ~role:self + ~ret_value_fn:(fun role -> get_name_label ~__context ~self:role) (*3. 
get_all_roles: permission->[roles]*) (* return all roles that contain this permission *) (* including the transitive closure *) let get_by_permission_common ~__context ~permission ~cmp_fn = - List.filter - (fun role -> List.exists (cmp_fn) (get_permissions ~__context ~self:role)) - (List.filter - (fun r -> r <> permission) (* do not include permission itself *) - (get_all ~__context) (* get all roles and permissions *) - ) + List.filter + (fun role -> List.exists (cmp_fn) (get_permissions ~__context ~self:role)) + (List.filter + (fun r -> r <> permission) (* do not include permission itself *) + (get_all ~__context) (* get all roles and permissions *) + ) let get_by_permission ~__context ~permission = - get_by_permission_common ~__context ~permission - ~cmp_fn:(fun perm -> permission = perm) + get_by_permission_common ~__context ~permission + ~cmp_fn:(fun perm -> permission = perm) let get_by_permission_name_label ~__context ~label = - let permission = - let ps = get_by_name_label ~__context ~label in - if List.length ps > 0 - then List.hd ps (* names are unique, there's either 0 or 1*) - else Ref.null (* name not found *) - in - get_by_permission_common ~__context ~permission - ~cmp_fn:(fun perm -> label = (get_name_label ~__context ~self:perm)) + let permission = + let ps = get_by_name_label ~__context ~label in + if List.length ps > 0 + then List.hd ps (* names are unique, there's either 0 or 1*) + else Ref.null (* name not found *) + in + get_by_permission_common ~__context ~permission + ~cmp_fn:(fun perm -> label = (get_name_label ~__context ~self:perm)) (* @@ -233,10 +233,10 @@ let get_by_permission_name_label ~__context ~label = let create ~__context ~name_label ~name_description ~subroles = (* disabled in RBAC 1.0 *) (* - let ref=Ref.make() in + let ref=Ref.make() in let uuid=Uuid.to_string (Uuid.make_uuid()) in (* TODO: verify the uniqueness of id *) - if id = "no" + if id = "no" then raise (Api_errors.Server_error (Api_errors.role_not_found, [])) else 
Db.Role.create ~__context ~ref ~uuid ~id ~name ~description ~permissions ~is_basic ~is_complete; diff --git a/ocaml/xapi/xapi_secret.ml b/ocaml/xapi/xapi_secret.ml index fc1b0b95d34..84194c276f8 100644 --- a/ocaml/xapi/xapi_secret.ml +++ b/ocaml/xapi/xapi_secret.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for Secret objects * @group XenAPI functions - *) +*) open Stdext.Xstringext @@ -21,41 +21,41 @@ module D = Debug.Make(struct let name = "xapi_secret" end) open D let introduce ~__context ~uuid ~value ~other_config = - let ref = Ref.make () in - Db.Secret.create ~__context ~ref ~uuid ~value ~other_config; - ref + let ref = Ref.make () in + Db.Secret.create ~__context ~ref ~uuid ~value ~other_config; + ref let create ~__context ~value ~other_config= - let uuid = Uuid.to_string(Uuid.make_uuid()) in - let ref = introduce ~__context ~uuid ~value ~other_config in - ref + let uuid = Uuid.to_string(Uuid.make_uuid()) in + let ref = introduce ~__context ~uuid ~value ~other_config in + ref let destroy ~__context ~self = - Db.Secret.destroy ~__context ~self + Db.Secret.destroy ~__context ~self (* Delete the passwords references in a string2string map *) let clean_out_passwds ~__context strmap = - let delete_secret uuid = - try - let s = Db.Secret.get_by_uuid ~__context ~uuid in - Db.Secret.destroy ~__context ~self:s - with _ -> () - in - let check_key (k, _) = String.endswith "password_secret" k in - let secrets = List.map snd (List.filter check_key strmap) in - List.iter delete_secret secrets + let delete_secret uuid = + try + let s = Db.Secret.get_by_uuid ~__context ~uuid in + Db.Secret.destroy ~__context ~self:s + with _ -> () + in + let check_key (k, _) = String.endswith "password_secret" k in + let secrets = List.map snd (List.filter check_key strmap) in + List.iter delete_secret secrets (* Modify a ((string * string) list) by duplicating all the passwords found in -* it *) + * it *) let duplicate_passwds ~__context strmap = - let check_key k = 
String.endswith "password_secret" k in - let possibly_duplicate (k, v) = if check_key k - then - let sr = Db.Secret.get_by_uuid ~__context ~uuid:v in - let v = Db.Secret.get_value ~__context ~self:sr in - let new_sr = create ~__context ~value:v ~other_config:[] in - let new_uuid = Db.Secret.get_uuid ~__context ~self:new_sr in - (k, new_uuid) - else (k, v) - in - List.map possibly_duplicate strmap + let check_key k = String.endswith "password_secret" k in + let possibly_duplicate (k, v) = if check_key k + then + let sr = Db.Secret.get_by_uuid ~__context ~uuid:v in + let v = Db.Secret.get_value ~__context ~self:sr in + let new_sr = create ~__context ~value:v ~other_config:[] in + let new_uuid = Db.Secret.get_uuid ~__context ~self:new_sr in + (k, new_uuid) + else (k, v) + in + List.map possibly_duplicate strmap diff --git a/ocaml/xapi/xapi_services.ml b/ocaml/xapi/xapi_services.ml index 96144530c74..d727483c5be 100644 --- a/ocaml/xapi/xapi_services.ml +++ b/ocaml/xapi/xapi_services.ml @@ -12,7 +12,7 @@ * GNU Lesser General Public License for more details. 
*) (** Advertise services - *) +*) module D=Debug.Make(struct let name="xapi" end) open D @@ -28,194 +28,194 @@ open Xstringext type driver_list = Storage_interface.query_result list with rpc let list_sm_drivers ~__context = - let all = List.map (Smint.query_result_of_sr_driver_info ++ Sm.info_of_driver) (Sm.supported_drivers ()) in - rpc_of_driver_list all + let all = List.map (Smint.query_result_of_sr_driver_info ++ Sm.info_of_driver) (Sm.supported_drivers ()) in + rpc_of_driver_list all let respond req rpc s = - let txt = Jsonrpc.to_string rpc in - Http_svr.headers s (Http.http_200_ok ~version:"1.0" ~keep_alive:false ()); - req.Http.Request.close <- true; - Unixext.really_write s txt 0 (String.length txt) + let txt = Jsonrpc.to_string rpc in + Http_svr.headers s (Http.http_200_ok ~version:"1.0" ~keep_alive:false ()); + req.Http.Request.close <- true; + Unixext.really_write s txt 0 (String.length txt) let list_drivers req s = respond req (System_domains.rpc_of_services (System_domains.list_services ())) s let fix_cookie = function - | [] -> [] - | cookie -> begin - let str_cookie = String.concat "; " (List.map (fun (k,v) -> Printf.sprintf "%s=%s" k v) cookie) in - - let bounded_split_delim re s n = - let rec extract_comps_inner start acc m = - let get_fin () = List.rev ((String.sub s start (String.length s - start))::acc) in - if m=1 then get_fin () else - try - let first_end, all_end = Re.get_ofs (Re.exec ~pos:start re s) 0 in - extract_comps_inner all_end ((String.sub s start (first_end - start))::acc) (m-1) - with Not_found -> - get_fin () - in extract_comps_inner 0 [] n - in - - let comps = bounded_split_delim (Re.compile (Re_emacs.re "[;,][ \t]*")) str_cookie 0 in - - (* We don't handle $Path, $Domain, $Port, $Version (or $anything $else) *) - let cookies = List.filter (fun s -> s.[0] != '$') comps in - let split_pair nvp = - match String.split '=' nvp with - | [] -> ("","") - | n :: [] -> (n, "") - | n :: v :: _ -> (n, v) - in - (List.map split_pair 
cookies) - end + | [] -> [] + | cookie -> begin + let str_cookie = String.concat "; " (List.map (fun (k,v) -> Printf.sprintf "%s=%s" k v) cookie) in + + let bounded_split_delim re s n = + let rec extract_comps_inner start acc m = + let get_fin () = List.rev ((String.sub s start (String.length s - start))::acc) in + if m=1 then get_fin () else + try + let first_end, all_end = Re.get_ofs (Re.exec ~pos:start re s) 0 in + extract_comps_inner all_end ((String.sub s start (first_end - start))::acc) (m-1) + with Not_found -> + get_fin () + in extract_comps_inner 0 [] n + in + + let comps = bounded_split_delim (Re.compile (Re_emacs.re "[;,][ \t]*")) str_cookie 0 in + + (* We don't handle $Path, $Domain, $Port, $Version (or $anything $else) *) + let cookies = List.filter (fun s -> s.[0] != '$') comps in + let split_pair nvp = + match String.split '=' nvp with + | [] -> ("","") + | n :: [] -> (n, "") + | n :: v :: _ -> (n, v) + in + (List.map split_pair cookies) + end (* Transmits [req] and [s] to the service listening on [path] *) let hand_over_connection req s path = - try - debug "hand_over_connection %s %s to %s" (Http.string_of_method_t req.Http.Request.m) req.Http.Request.uri path; - let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in - finally - (fun () -> - let req = Http.Request.({ req with cookie=fix_cookie req.cookie}) in - Unix.connect control_fd (Unix.ADDR_UNIX path); - let msg = req |> Http.Request.rpc_of_t |> Jsonrpc.to_string in - let len = String.length msg in - let written = Unixext.send_fd control_fd msg 0 len [] s in - if written <> len then begin - error "Failed to transfer fd to %s" path; - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true; - None - end else begin - let response = Http_client.response_of_fd control_fd in - match response with - | Some res -> res.Http.Response.task - | None -> None - end - ) - (fun () -> Unix.close control_fd) - with e -> - error "Failed to transfer fd to %s: %s" 
path (Printexc.to_string e); - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true; - None + try + debug "hand_over_connection %s %s to %s" (Http.string_of_method_t req.Http.Request.m) req.Http.Request.uri path; + let control_fd = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in + finally + (fun () -> + let req = Http.Request.({ req with cookie=fix_cookie req.cookie}) in + Unix.connect control_fd (Unix.ADDR_UNIX path); + let msg = req |> Http.Request.rpc_of_t |> Jsonrpc.to_string in + let len = String.length msg in + let written = Unixext.send_fd control_fd msg 0 len [] s in + if written <> len then begin + error "Failed to transfer fd to %s" path; + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true; + None + end else begin + let response = Http_client.response_of_fd control_fd in + match response with + | Some res -> res.Http.Response.task + | None -> None + end + ) + (fun () -> Unix.close control_fd) + with e -> + error "Failed to transfer fd to %s: %s" path (Printexc.to_string e); + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true; + None let http_proxy_to req from addr = - let s = Unix.socket (Unix.domain_of_sockaddr addr) Unix.SOCK_STREAM 0 in - finally - (fun () -> - let () = - try - Unix.connect s addr; - with e -> - error "Failed to proxy HTTP request to: %s" (match addr with - | Unix.ADDR_UNIX path -> "UNIX:" ^ path - | Unix.ADDR_INET(ip, port) -> "IP:" ^ (Unix.string_of_inet_addr ip) ^ ":" ^ (string_of_int port) - ); - Http_svr.headers from (Http.http_404_missing ~version:"1.0" ()); - raise e in - Http_proxy.one req from s) - (fun () -> Unix.close s) + let s = Unix.socket (Unix.domain_of_sockaddr addr) Unix.SOCK_STREAM 0 in + finally + (fun () -> + let () = + try + Unix.connect s addr; + with e -> + error "Failed to proxy HTTP request to: %s" (match addr with + | Unix.ADDR_UNIX path -> "UNIX:" ^ path + | Unix.ADDR_INET(ip, 
port) -> "IP:" ^ (Unix.string_of_inet_addr ip) ^ ":" ^ (string_of_int port) + ); + Http_svr.headers from (Http.http_404_missing ~version:"1.0" ()); + raise e in + Http_proxy.one req from s) + (fun () -> Unix.close s) let http_proxy_to_plugin req from name = - let path = Filename.concat "/var/lib/xcp" (Printf.sprintf "plugin/%s" name) in - if not (Sys.file_exists path) then begin - req.Http.Request.close <- true; - error "There is no Unix domain socket %s for plugin %s" path name; - Http_svr.headers from (Http.http_404_missing ~version:"1.0" ()) - end else - http_proxy_to req from (Unix.ADDR_UNIX path) + let path = Filename.concat "/var/lib/xcp" (Printf.sprintf "plugin/%s" name) in + if not (Sys.file_exists path) then begin + req.Http.Request.close <- true; + error "There is no Unix domain socket %s for plugin %s" path name; + Http_svr.headers from (Http.http_404_missing ~version:"1.0" ()) + end else + http_proxy_to req from (Unix.ADDR_UNIX path) let post_handler (req: Http.Request.t) s _ = - Xapi_http.with_context ~dummy:true "Querying services" req s - (fun __context -> - match String.split '/' req.Http.Request.uri with - | "" :: services :: "xenops" :: _ when services = _services -> - (* over the network we still use XMLRPC *) - let request = Http_svr.read_body req (Buf_io.of_fd s) in - let response = - if !Xcp_client.use_switch then begin - let req = Xmlrpc.call_of_string request in - let res = Xcp_client.switch_rpc !Xapi_globs.default_xenopsd Jsonrpc.string_of_call Jsonrpc.response_of_string req in - Xmlrpc.string_of_response res - end else - Xcp_client.http_rpc (fun x -> x) (fun x -> x) ~srcstr:"remote" ~dststr:"xenops" Xenops_interface.default_uri request in - Http_svr.response_str req ~hdrs:[] s response - | "" :: services :: "plugin" :: name :: _ when services = _services -> - http_proxy_to_plugin req s name - | [ ""; services; "SM" ] when services = _services -> - Storage_impl.Local_domain_socket.xmlrpc_handler Storage_mux.Server.process req (Buf_io.of_fd 
s) () - | _ -> - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true - ) + Xapi_http.with_context ~dummy:true "Querying services" req s + (fun __context -> + match String.split '/' req.Http.Request.uri with + | "" :: services :: "xenops" :: _ when services = _services -> + (* over the network we still use XMLRPC *) + let request = Http_svr.read_body req (Buf_io.of_fd s) in + let response = + if !Xcp_client.use_switch then begin + let req = Xmlrpc.call_of_string request in + let res = Xcp_client.switch_rpc !Xapi_globs.default_xenopsd Jsonrpc.string_of_call Jsonrpc.response_of_string req in + Xmlrpc.string_of_response res + end else + Xcp_client.http_rpc (fun x -> x) (fun x -> x) ~srcstr:"remote" ~dststr:"xenops" Xenops_interface.default_uri request in + Http_svr.response_str req ~hdrs:[] s response + | "" :: services :: "plugin" :: name :: _ when services = _services -> + http_proxy_to_plugin req s name + | [ ""; services; "SM" ] when services = _services -> + Storage_impl.Local_domain_socket.xmlrpc_handler Storage_mux.Server.process req (Buf_io.of_fd s) () + | _ -> + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true + ) let rpc ~srcstr ~dststr call = - let url = Http.Url.(File { path = Filename.concat "/var/lib/xcp" "storage" }, { uri = "/"; query_params = [] }) in - let open Xmlrpc_client in - XMLRPC_protocol.rpc ~transport:(transport_of_url url) ~srcstr ~dststr - ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call + let url = Http.Url.(File { path = Filename.concat "/var/lib/xcp" "storage" }, { uri = "/"; query_params = [] }) in + let open Xmlrpc_client in + XMLRPC_protocol.rpc ~transport:(transport_of_url url) ~srcstr ~dststr + ~http:(xmlrpc ~version:"1.0" ?auth:(Http.Url.auth_of url) ~query:(Http.Url.get_query_params url) (Http.Url.get_uri url)) call module Local = Storage_interface.Client(struct let 
rpc = rpc ~srcstr:"xapi" ~dststr:"smapiv2" end) let put_handler (req: Http.Request.t) s _ = - Xapi_http.with_context ~dummy:true "Querying services" req s - (fun __context -> - match String.split '/' req.Http.Request.uri with - | "" :: services :: "xenops" :: _ when services = _services -> - ignore (hand_over_connection req s (Filename.concat "/var/lib/xcp" "xenopsd.forwarded")) - | "" :: services :: "plugin" :: name :: _ when services = _services -> - http_proxy_to_plugin req s name - | [ ""; services; "SM"; "data"; sr; vdi ] when services = _services -> - let vdi, _ = Storage_access.find_vdi ~__context sr vdi in - Import_raw_vdi.import vdi req s () - | [ ""; services; "SM"; "nbd"; sr; vdi; dp ] when services = _services -> - Storage_migrate.nbd_handler req s sr vdi dp - | _ -> - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true - ) + Xapi_http.with_context ~dummy:true "Querying services" req s + (fun __context -> + match String.split '/' req.Http.Request.uri with + | "" :: services :: "xenops" :: _ when services = _services -> + ignore (hand_over_connection req s (Filename.concat "/var/lib/xcp" "xenopsd.forwarded")) + | "" :: services :: "plugin" :: name :: _ when services = _services -> + http_proxy_to_plugin req s name + | [ ""; services; "SM"; "data"; sr; vdi ] when services = _services -> + let vdi, _ = Storage_access.find_vdi ~__context sr vdi in + Import_raw_vdi.import vdi req s () + | [ ""; services; "SM"; "nbd"; sr; vdi; dp ] when services = _services -> + Storage_migrate.nbd_handler req s sr vdi dp + | _ -> + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true + ) let get_handler (req: Http.Request.t) s _ = - Xapi_http.with_context ~dummy:true "Querying services" req s - (fun __context -> - debug "uri = %s" req.Http.Request.uri; - match String.split '/' req.Http.Request.uri with - | "" :: services :: "xenops" :: _ when services = _services -> - ignore 
(hand_over_connection req s (Filename.concat "/var/lib/xcp" "xenopsd.forwarded")) - | "" :: services :: "plugin" :: name :: _ when services = _services -> - http_proxy_to_plugin req s name - | "" :: services :: "driver" :: [] when services = _services -> - list_drivers req s - | [ ""; services; "SM"; driver ] when services = _services -> - begin - try - respond req (Storage_interface.rpc_of_query_result (Smint.query_result_of_sr_driver_info (Sm.info_of_driver driver))) s - with _ -> - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true - end - | [ ""; services; "SM" ] when services = _services -> - let rpc = list_sm_drivers ~__context in - respond req rpc s - | [ ""; services ] when services = _services -> - let q = { - Storage_interface.driver = "mux"; - name = "storage multiplexor"; - description = "forwards calls to other plugins"; - vendor = "XCP"; - copyright = "see the source code"; - version = "2.0"; - required_api_version = "2.0"; - features = List.map (fun x -> (path [_services; x])) [ _SM ]; - configuration = []; - required_cluster_stack = []; - } in - respond req (Storage_interface.rpc_of_query_result q) s - | _ -> - Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); - req.Http.Request.close <- true - ) - - + Xapi_http.with_context ~dummy:true "Querying services" req s + (fun __context -> + debug "uri = %s" req.Http.Request.uri; + match String.split '/' req.Http.Request.uri with + | "" :: services :: "xenops" :: _ when services = _services -> + ignore (hand_over_connection req s (Filename.concat "/var/lib/xcp" "xenopsd.forwarded")) + | "" :: services :: "plugin" :: name :: _ when services = _services -> + http_proxy_to_plugin req s name + | "" :: services :: "driver" :: [] when services = _services -> + list_drivers req s + | [ ""; services; "SM"; driver ] when services = _services -> + begin + try + respond req (Storage_interface.rpc_of_query_result (Smint.query_result_of_sr_driver_info 
(Sm.info_of_driver driver))) s + with _ -> + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true + end + | [ ""; services; "SM" ] when services = _services -> + let rpc = list_sm_drivers ~__context in + respond req rpc s + | [ ""; services ] when services = _services -> + let q = { + Storage_interface.driver = "mux"; + name = "storage multiplexor"; + description = "forwards calls to other plugins"; + vendor = "XCP"; + copyright = "see the source code"; + version = "2.0"; + required_api_version = "2.0"; + features = List.map (fun x -> (path [_services; x])) [ _SM ]; + configuration = []; + required_cluster_stack = []; + } in + respond req (Storage_interface.rpc_of_query_result q) s + | _ -> + Http_svr.headers s (Http.http_404_missing ~version:"1.0" ()); + req.Http.Request.close <- true + ) + + diff --git a/ocaml/xapi/xapi_session.ml b/ocaml/xapi/xapi_session.ml index 1732fd81841..df63cb072e7 100644 --- a/ocaml/xapi/xapi_session.ml +++ b/ocaml/xapi/xapi_session.ml @@ -13,9 +13,9 @@ *) (** Module that defines API functions for Session objects * @group XenAPI functions - *) +*) + - (* include Custom_actions.DebugVersion.Session *) module D = Debug.Make(struct let name="xapi" end) @@ -36,9 +36,9 @@ let wipe_string_contents str = for i = 0 to String.length str - 1 do str.[i] <- let wipe ss = List.iter (fun s -> wipe_string_contents s) ss (* wrapper that erases sensitive string parameters from functions *) let wipe_params_after_fn params fn = - try (let r=fn () in wipe params; r) with e -> (wipe params; raise e) + try (let r=fn () in wipe params; r) with e -> (wipe params; raise e) -let do_external_auth uname pwd = +let do_external_auth uname pwd = Mutex.execute serialize_auth (fun () -> (Ext_auth.d()).authenticate_username_password uname pwd) let do_local_auth uname pwd = @@ -51,86 +51,86 @@ let trackid session_id = (Context.trackid_of_session (Some session_id)) (* finds the intersection between group_membership_closure and 
pool's table of subject_ids *) let get_intersection ~__context subject_ids_in_db subject_identifier group_membership_closure = - let reflexive_membership_closure = subject_identifier::group_membership_closure in - let intersection = Listext.List.intersect reflexive_membership_closure subject_ids_in_db in - intersection + let reflexive_membership_closure = subject_identifier::group_membership_closure in + let intersection = Listext.List.intersect reflexive_membership_closure subject_ids_in_db in + intersection let get_subject_in_intersection ~__context subjects_in_db intersection = - List.find (fun subj -> (* is this the subject ref that returned the non-empty intersection?*) - (List.hd intersection) = (Db.Subject.get_subject_identifier ~__context ~self:subj) - ) subjects_in_db + List.find (fun subj -> (* is this the subject ref that returned the non-empty intersection?*) + (List.hd intersection) = (Db.Subject.get_subject_identifier ~__context ~self:subj) + ) subjects_in_db let get_permissions ~__context ~subject_membership = (* see also rbac.ml *) - let get_union_of_subsets ~get_subset_fn ~set = - Listext.List.setify - (List.fold_left (* efficiently compute unions of subsets in set *) - (fun accu elem -> List.rev_append (get_subset_fn elem) accu) - [] - set - ) - in - let role_membership = - get_union_of_subsets (*automatically removes duplicated roles*) - ~get_subset_fn:(fun subj -> - Db.Subject.get_roles ~__context ~self:subj) - ~set:subject_membership - in - let permission_membership = - get_union_of_subsets (*automatically removes duplicated perms*) - ~get_subset_fn:(fun role -> - try - (Xapi_role.get_name_label ~__context ~self:role):: - (Xapi_role.get_permissions_name_label ~__context ~self:role) - with _ -> [] (* if the role disappeared, ignore it *) - ) - ~set:role_membership - in - permission_membership + let get_union_of_subsets ~get_subset_fn ~set = + Listext.List.setify + (List.fold_left (* efficiently compute unions of subsets in set *) + (fun accu 
elem -> List.rev_append (get_subset_fn elem) accu) + [] + set + ) + in + let role_membership = + get_union_of_subsets (*automatically removes duplicated roles*) + ~get_subset_fn:(fun subj -> + Db.Subject.get_roles ~__context ~self:subj) + ~set:subject_membership + in + let permission_membership = + get_union_of_subsets (*automatically removes duplicated perms*) + ~get_subset_fn:(fun role -> + try + (Xapi_role.get_name_label ~__context ~self:role):: + (Xapi_role.get_permissions_name_label ~__context ~self:role) + with _ -> [] (* if the role disappeared, ignore it *) + ) + ~set:role_membership + in + permission_membership (* CP-827: finds out if the subject was suspended (ie. disabled,expired,locked-out) *) let is_subject_suspended subject_identifier = - (* obtains the subject's info containing suspension information *) - let info = - (try - (Ext_auth.d()).query_subject_information subject_identifier - with - | Auth_signature.Subject_cannot_be_resolved - | Not_found -> (* user was not found in external directory in order to obtain info *) - begin - debug "Subject %s not found in external directory while re-obtaining info" subject_identifier; - [] (* returns no user info, which will result in is_suspended = true *) - end - ) - in - let subject_name = - if List.mem_assoc Auth_signature.subject_information_field_subject_name info - then List.assoc Auth_signature.subject_information_field_subject_name info - else "" - in - let get_suspension_value name info = - if List.mem_assoc name info (* is the required field present? *) - then ((String.lowercase (List.assoc name info))<>"false") (* no suspension only if value is explicitly false *) - else true (* if we didn't find the field, assumes the worse, ie. 
subject is suspended *) - in - (* obtains each field that could suspend an existing subject *) - let is_subject_account_disabled = get_suspension_value "subject-account-disabled" info in - let is_subject_account_expired = get_suspension_value "subject-account-expired" info in - let is_subject_account_locked = get_suspension_value "subject-account-locked" info in - let is_subject_password_expired = get_suspension_value "subject-password-expired" info in - debug "Subject Suspension Status: a.disabled=%B a.expired=%B a.locked=%B p.expired=%B" - is_subject_account_disabled is_subject_account_expired is_subject_account_locked is_subject_password_expired; - (* decides if the subject is suspended *) - let is_suspended = (* either one of those is sufficient for suspension *) - (is_subject_account_disabled || is_subject_account_expired || - is_subject_account_locked || is_subject_password_expired) - in begin - if (is_suspended) then begin - debug "Subject identifier %s is suspended" subject_identifier - end; - (is_suspended,subject_name) - end - -let destroy_db_session ~__context ~self = + (* obtains the subject's info containing suspension information *) + let info = + (try + (Ext_auth.d()).query_subject_information subject_identifier + with + | Auth_signature.Subject_cannot_be_resolved + | Not_found -> (* user was not found in external directory in order to obtain info *) + begin + debug "Subject %s not found in external directory while re-obtaining info" subject_identifier; + [] (* returns no user info, which will result in is_suspended = true *) + end + ) + in + let subject_name = + if List.mem_assoc Auth_signature.subject_information_field_subject_name info + then List.assoc Auth_signature.subject_information_field_subject_name info + else "" + in + let get_suspension_value name info = + if List.mem_assoc name info (* is the required field present? 
*) + then ((String.lowercase (List.assoc name info))<>"false") (* no suspension only if value is explicitly false *) + else true (* if we didn't find the field, assumes the worse, ie. subject is suspended *) + in + (* obtains each field that could suspend an existing subject *) + let is_subject_account_disabled = get_suspension_value "subject-account-disabled" info in + let is_subject_account_expired = get_suspension_value "subject-account-expired" info in + let is_subject_account_locked = get_suspension_value "subject-account-locked" info in + let is_subject_password_expired = get_suspension_value "subject-password-expired" info in + debug "Subject Suspension Status: a.disabled=%B a.expired=%B a.locked=%B p.expired=%B" + is_subject_account_disabled is_subject_account_expired is_subject_account_locked is_subject_password_expired; + (* decides if the subject is suspended *) + let is_suspended = (* either one of those is sufficient for suspension *) + (is_subject_account_disabled || is_subject_account_expired || + is_subject_account_locked || is_subject_password_expired) + in begin + if (is_suspended) then begin + debug "Subject identifier %s is suspended" subject_identifier + end; + (is_suspended,subject_name) + end + +let destroy_db_session ~__context ~self = Xapi_event.on_session_deleted self; (* unregister from the event system *) (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) @@ -138,7 +138,7 @@ let destroy_db_session ~__context ~self = (* see also task creation in context.ml *) (* CP-982: create tracking id in log files to link username to actions *) info "Session.destroy %s" (trackid self); - Rbac_audit.session_destroy ~__context ~session_id:self; + Rbac_audit.session_destroy ~__context ~session_id:self; (try Db.Session.destroy ~__context ~self; with _->()); 
Rbac.destroy_session_permissions_tbl ~session_id:self @@ -146,173 +146,173 @@ let destroy_db_session ~__context ~self = (* in response to external authentication/directory services updates, such as *) (* e.g. group membership changes, or even account disabled *) let revalidate_external_session ~__context ~session = - try - (* guard: we only want to revalidate external sessions, where is_local_superuser is false *) - (* Neither do we want to revalidate the special read-only external database sessions, since they can exist independent of external authentication. *) - if not (Db.Session.get_is_local_superuser ~__context ~self:session || Db_backend.is_session_registered session) then - - (* 1. is the external authentication disabled in the pool? *) - let master = Helpers.get_master ~__context in - let auth_type = Db.Host.get_external_auth_type ~__context ~self:master in - if auth_type = "" - then begin (* if so, we must immediatelly destroy this external session *) - let msg = (Printf.sprintf "External authentication has been disabled, destroying session %s" (trackid session)) in - debug "%s" msg; - destroy_db_session ~__context ~self:session - end - else begin (* otherwise, we try to revalidate it against the external authentication service *) - let session_lifespan = 60.0 *. 30.0 in (* allowed session lifespan = 30 minutes *) - let random_lifespan = Random.float 60.0 *. 10.0 in (* extra random (up to 10min) lifespan to spread access to external directory *) - - (* 2. has the external session expired/does it need revalidation? *) - let session_last_validation_time = Date.to_float (Db.Session.get_validation_time ~__context ~self:session) in - let now = (Unix.time ()) in - let session_needs_revalidation = - now > - (session_last_validation_time +. session_lifespan +. 
random_lifespan) - in - if session_needs_revalidation - then begin (* if so, then:*) - debug "session %s needs revalidation" (trackid session); - let authenticated_user_sid = Db.Session.get_auth_user_sid ~__context ~self:session in - - (* 2a. revalidate external authentication *) - - (* CP-827: if the user was suspended (disabled,expired,locked-out), then we must destroy the session *) - let (suspended,_)=is_subject_suspended authenticated_user_sid in - if suspended - then begin - debug "Subject (identifier %s) has been suspended, destroying session %s" authenticated_user_sid (trackid session); - (* we must destroy the session in this case *) - destroy_db_session ~__context ~self:session - end - else - try - (* if the user is not in the external directory service anymore, this call raises Not_found *) - let group_membership_closure = (Ext_auth.d()).query_group_membership authenticated_user_sid in - debug "obtained group membership for session %s, sid %s " (trackid session) authenticated_user_sid; - - (* 2b. 
revalidate membership intersection *) - (* this verifies if the user still has permission to have a session *) - let subjects_in_db = Db.Subject.get_all ~__context in - let subject_ids_in_db = List.map (fun subj -> Db.Subject.get_subject_identifier ~__context ~self:subj) subjects_in_db in - let intersection = get_intersection ~__context subject_ids_in_db authenticated_user_sid group_membership_closure in - debug "verified intersection for session %s, sid %s " (trackid session) authenticated_user_sid; - let in_intersection = (List.length intersection > 0) in - if not in_intersection then - begin (* empty intersection: externally-authenticated subject no longer has login rights in the pool *) - let msg = (Printf.sprintf "Subject (identifier %s) has no access rights in this pool, destroying session %s" authenticated_user_sid (trackid session)) in - debug "%s" msg; - (* we must destroy the session in this case *) - destroy_db_session ~__context ~self:session - end - else - begin (* non-empty intersection: externally-authenticated subject still has login rights in the pool *) - - (* OK, SESSION REVALIDATED SUCCESSFULLY *) - (* 2c. update session state *) - - (* session passed revalidation, let's update its last revalidation time *) - Db.Session.set_validation_time ~__context ~self:session ~value:(Date.of_float now); - debug "updated validation time for session %s, sid %s " (trackid session) authenticated_user_sid; - - (* let's also update the session's subject ref *) - try( - let subject_in_intersection = get_subject_in_intersection ~__context subjects_in_db intersection in - if (subject_in_intersection <> Db.Session.get_subject ~__context ~self:session) - then begin (* the subject in the intersection has changed!!! 
*) - Db.Session.set_subject ~__context ~self:session ~value:subject_in_intersection; - debug "updated subject for session %s, sid %s " (trackid session) authenticated_user_sid; - end - ) with Not_found -> (* subject ref for intersection's sid does not exist in our metadata!!! *) - begin - (* this should never happen, it's an internal metadata inconsistency between steps 2b and 2c *) - let msg = (Printf.sprintf "Subject (identifier %s) is not present in this pool, destroying session %s" authenticated_user_sid (trackid session)) in - debug "%s" msg; - (* we must destroy the session in this case *) - destroy_db_session ~__context ~self:session - end - end - with - | Auth_signature.Subject_cannot_be_resolved - | Not_found -> (* user was not found in external directory in order to obtain group membership *) - begin - let msg = (Printf.sprintf "Subject %s not found in external directory while re-obtaining its group membership closure, destroying session %s" authenticated_user_sid (trackid session)) in - debug "%s" msg; - (* user is not in the external directory anymore: we must destroy the session in this case *) - destroy_db_session ~__context ~self:session - end - end; - debug "end revalidation of session %s " (trackid session); - end - with e -> (*unexpected exception: we absorb it and print out a debug line *) - begin - debug "Unexpected exception while revalidating session %s: %s" (trackid session) (ExnHelper.string_of_exn e) - end + try + (* guard: we only want to revalidate external sessions, where is_local_superuser is false *) + (* Neither do we want to revalidate the special read-only external database sessions, since they can exist independent of external authentication. *) + if not (Db.Session.get_is_local_superuser ~__context ~self:session || Db_backend.is_session_registered session) then + + (* 1. is the external authentication disabled in the pool? 
*) + let master = Helpers.get_master ~__context in + let auth_type = Db.Host.get_external_auth_type ~__context ~self:master in + if auth_type = "" + then begin (* if so, we must immediatelly destroy this external session *) + let msg = (Printf.sprintf "External authentication has been disabled, destroying session %s" (trackid session)) in + debug "%s" msg; + destroy_db_session ~__context ~self:session + end + else begin (* otherwise, we try to revalidate it against the external authentication service *) + let session_lifespan = 60.0 *. 30.0 in (* allowed session lifespan = 30 minutes *) + let random_lifespan = Random.float 60.0 *. 10.0 in (* extra random (up to 10min) lifespan to spread access to external directory *) + + (* 2. has the external session expired/does it need revalidation? *) + let session_last_validation_time = Date.to_float (Db.Session.get_validation_time ~__context ~self:session) in + let now = (Unix.time ()) in + let session_needs_revalidation = + now > + (session_last_validation_time +. session_lifespan +. random_lifespan) + in + if session_needs_revalidation + then begin (* if so, then:*) + debug "session %s needs revalidation" (trackid session); + let authenticated_user_sid = Db.Session.get_auth_user_sid ~__context ~self:session in + + (* 2a. 
revalidate external authentication *) + + (* CP-827: if the user was suspended (disabled,expired,locked-out), then we must destroy the session *) + let (suspended,_)=is_subject_suspended authenticated_user_sid in + if suspended + then begin + debug "Subject (identifier %s) has been suspended, destroying session %s" authenticated_user_sid (trackid session); + (* we must destroy the session in this case *) + destroy_db_session ~__context ~self:session + end + else + try + (* if the user is not in the external directory service anymore, this call raises Not_found *) + let group_membership_closure = (Ext_auth.d()).query_group_membership authenticated_user_sid in + debug "obtained group membership for session %s, sid %s " (trackid session) authenticated_user_sid; + + (* 2b. revalidate membership intersection *) + (* this verifies if the user still has permission to have a session *) + let subjects_in_db = Db.Subject.get_all ~__context in + let subject_ids_in_db = List.map (fun subj -> Db.Subject.get_subject_identifier ~__context ~self:subj) subjects_in_db in + let intersection = get_intersection ~__context subject_ids_in_db authenticated_user_sid group_membership_closure in + debug "verified intersection for session %s, sid %s " (trackid session) authenticated_user_sid; + let in_intersection = (List.length intersection > 0) in + if not in_intersection then + begin (* empty intersection: externally-authenticated subject no longer has login rights in the pool *) + let msg = (Printf.sprintf "Subject (identifier %s) has no access rights in this pool, destroying session %s" authenticated_user_sid (trackid session)) in + debug "%s" msg; + (* we must destroy the session in this case *) + destroy_db_session ~__context ~self:session + end + else + begin (* non-empty intersection: externally-authenticated subject still has login rights in the pool *) + + (* OK, SESSION REVALIDATED SUCCESSFULLY *) + (* 2c. 
update session state *) + + (* session passed revalidation, let's update its last revalidation time *) + Db.Session.set_validation_time ~__context ~self:session ~value:(Date.of_float now); + debug "updated validation time for session %s, sid %s " (trackid session) authenticated_user_sid; + + (* let's also update the session's subject ref *) + try( + let subject_in_intersection = get_subject_in_intersection ~__context subjects_in_db intersection in + if (subject_in_intersection <> Db.Session.get_subject ~__context ~self:session) + then begin (* the subject in the intersection has changed!!! *) + Db.Session.set_subject ~__context ~self:session ~value:subject_in_intersection; + debug "updated subject for session %s, sid %s " (trackid session) authenticated_user_sid; + end + ) with Not_found -> (* subject ref for intersection's sid does not exist in our metadata!!! *) + begin + (* this should never happen, it's an internal metadata inconsistency between steps 2b and 2c *) + let msg = (Printf.sprintf "Subject (identifier %s) is not present in this pool, destroying session %s" authenticated_user_sid (trackid session)) in + debug "%s" msg; + (* we must destroy the session in this case *) + destroy_db_session ~__context ~self:session + end + end + with + | Auth_signature.Subject_cannot_be_resolved + | Not_found -> (* user was not found in external directory in order to obtain group membership *) + begin + let msg = (Printf.sprintf "Subject %s not found in external directory while re-obtaining its group membership closure, destroying session %s" authenticated_user_sid (trackid session)) in + debug "%s" msg; + (* user is not in the external directory anymore: we must destroy the session in this case *) + destroy_db_session ~__context ~self:session + end + end; + debug "end revalidation of session %s " (trackid session); + end + with e -> (*unexpected exception: we absorb it and print out a debug line *) + begin + debug "Unexpected exception while revalidating session %s: %s" 
(trackid session) (ExnHelper.string_of_exn e) + end (* CP-703: ensure that activate sessions are invalidated in a bounded time *) (* in response to external authentication/directory services updates, such as *) (* e.g. group membership changes, or even account disabled *) let revalidate_all_sessions ~__context = - try( - debug "revalidating all external sessions in the local host"; - (* obtain all sessions in the pool *) - let sessions = Db.Session.get_all ~__context in - (* filter out those sessions where is_local_superuser bit is true *) - (* we only want to revalidate the sessions created using the external authentication service *) - let external_sessions = List.filter (fun session -> - not (Db.Session.get_is_local_superuser ~__context ~self:session) - ) sessions in - (* revalidate each external session *) - List.iter (fun session -> revalidate_external_session ~__context ~session) external_sessions - )with e -> (*unexpected exception: we absorb it and print out a debug line *) - debug "Unexpected exception while revalidating external sessions: %s" (ExnHelper.string_of_exn e) + try( + debug "revalidating all external sessions in the local host"; + (* obtain all sessions in the pool *) + let sessions = Db.Session.get_all ~__context in + (* filter out those sessions where is_local_superuser bit is true *) + (* we only want to revalidate the sessions created using the external authentication service *) + let external_sessions = List.filter (fun session -> + not (Db.Session.get_is_local_superuser ~__context ~self:session) + ) sessions in + (* revalidate each external session *) + List.iter (fun session -> revalidate_external_session ~__context ~session) external_sessions + )with e -> (*unexpected exception: we absorb it and print out a debug line *) + debug "Unexpected exception while revalidating external sessions: %s" (ExnHelper.string_of_exn e) let login_no_password_common ~__context ~uname ~originator ~host ~pool ~is_local_superuser ~subject ~auth_user_sid 
~auth_user_name ~rbac_permissions ~db_ref = - let create_session () = - let session_id = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - let user = Ref.null in (* always return a null reference to the deprecated user object *) - let parent = try Context.get_session_id __context with _ -> Ref.null in - (*match uname with (* the user object is deprecated in favor of subject *) - Some uname -> Helpers.get_user ~__context uname - | None -> Ref.null in*) - (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) - (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) - (* has just logged in. Instead, we print a non-invertible hash as the tracking id for the session id *) - (* see also task creation in context.ml *) - (* CP-982: promote tracking debug line to info status *) - (* CP-982: create tracking id in log files to link username to actions *) - info "Session.create %s pool=%b uname=%s originator=%s is_local_superuser=%b auth_user_sid=%s parent=%s" - (trackid session_id) pool (match uname with None->""|Some u->u) originator is_local_superuser auth_user_sid (trackid parent); - Db.Session.create ~__context ~ref:session_id ~uuid - ~this_user:user ~this_host:host ~pool:pool - ~last_active:(Date.of_float (Unix.time ())) ~other_config:[] - ~subject:subject ~is_local_superuser:is_local_superuser - ~auth_user_sid ~validation_time:(Date.of_float (Unix.time ())) - ~auth_user_name ~rbac_permissions ~parent ~originator; - session_id - in - let session_id = match db_ref with - | Some db_ref -> Db_backend.create_registered_session create_session db_ref - | None -> create_session () - in - Rbac_audit.session_create ~__context ~session_id ~uname; - (* At this point, the session is created, but with an incorrect time *) - (* Force the time to be updated by calling an API function with this session *) - let rpc = Helpers.make_rpc ~__context in - 
ignore(Client.Pool.get_all rpc session_id); - session_id + let create_session () = + let session_id = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + let user = Ref.null in (* always return a null reference to the deprecated user object *) + let parent = try Context.get_session_id __context with _ -> Ref.null in + (*match uname with (* the user object is deprecated in favor of subject *) + Some uname -> Helpers.get_user ~__context uname + | None -> Ref.null in*) + (* This info line is important for tracking, auditability and client accountability purposes on XenServer *) + (* Never print the session id nor uuid: they are secret values that should be known only to the user that *) + (* has just logged in. Instead, we print a non-invertible hash as the tracking id for the session id *) + (* see also task creation in context.ml *) + (* CP-982: promote tracking debug line to info status *) + (* CP-982: create tracking id in log files to link username to actions *) + info "Session.create %s pool=%b uname=%s originator=%s is_local_superuser=%b auth_user_sid=%s parent=%s" + (trackid session_id) pool (match uname with None->""|Some u->u) originator is_local_superuser auth_user_sid (trackid parent); + Db.Session.create ~__context ~ref:session_id ~uuid + ~this_user:user ~this_host:host ~pool:pool + ~last_active:(Date.of_float (Unix.time ())) ~other_config:[] + ~subject:subject ~is_local_superuser:is_local_superuser + ~auth_user_sid ~validation_time:(Date.of_float (Unix.time ())) + ~auth_user_name ~rbac_permissions ~parent ~originator; + session_id + in + let session_id = match db_ref with + | Some db_ref -> Db_backend.create_registered_session create_session db_ref + | None -> create_session () + in + Rbac_audit.session_create ~__context ~session_id ~uname; + (* At this point, the session is created, but with an incorrect time *) + (* Force the time to be updated by calling an API function with this session *) + let rpc = Helpers.make_rpc ~__context in + 
ignore(Client.Pool.get_all rpc session_id); + session_id (* XXX: only used internally by the code which grants the guest access to the API. Needs to be protected by a proper access control system *) let login_no_password ~__context ~uname ~host ~pool ~is_local_superuser ~subject ~auth_user_sid ~auth_user_name ~rbac_permissions = - login_no_password_common ~__context ~uname - ~originator:xapi_internal_originator ~host ~pool ~is_local_superuser - ~subject ~auth_user_sid ~auth_user_name ~rbac_permissions ~db_ref:None + login_no_password_common ~__context ~uname + ~originator:xapi_internal_originator ~host ~pool ~is_local_superuser + ~subject ~auth_user_sid ~auth_user_name ~rbac_permissions ~db_ref:None (** Cause the master to update the session last_active every 30s or so *) -let consider_touching_session rpc session_id = +let consider_touching_session rpc session_id = let time = ref (Unix.gettimeofday ()) in let interval = 30. in (* 30 seconds *) fun () -> @@ -324,7 +324,7 @@ let consider_touching_session rpc session_id = let pool_authenticate ~__context psecret = if psecret = !Xapi_globs.pool_secret then () - else failwith "Pool credentials invalid" + else failwith "Pool credentials invalid" (* Make sure the pool secret matches *) let slave_login_common ~__context ~host_str ~psecret = @@ -335,31 +335,31 @@ let slave_login_common ~__context ~host_str ~psecret = raise (Api_errors.Server_error (Api_errors.session_authentication_failed,[host_str;msg])) (* Normal login, uses the master's database *) -let slave_login ~__context ~host ~psecret = +let slave_login ~__context ~host ~psecret = slave_login_common ~__context ~host_str:(Ref.string_of host) ~psecret; - login_no_password ~__context ~uname:None ~host:host ~pool:true - ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" - ~auth_user_name:(Ref.string_of host) ~rbac_permissions:[] + login_no_password ~__context ~uname:None ~host:host ~pool:true + ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" 
+ ~auth_user_name:(Ref.string_of host) ~rbac_permissions:[] (* Emergency mode login, uses local storage *) -let slave_local_login ~__context ~psecret = +let slave_local_login ~__context ~psecret = slave_login_common ~__context ~host_str:"localhost" ~psecret; debug "Add session to local storage"; Xapi_local_session.create ~__context ~pool:true (* Emergency mode login, uses local storage *) let slave_local_login_with_password ~__context ~uname ~pwd = wipe_params_after_fn [pwd] (fun () -> - if not (Context.preauth ~__context) - then - (try - (* CP696 - only tries to authenticate against LOCAL superuser account *) - do_local_auth uname pwd; - with (Failure msg) -> - debug "Failed to authenticate user %s: %s" uname msg; - raise (Api_errors.Server_error (Api_errors.session_authentication_failed,[uname;msg]))); - debug "Add session to local storage"; - Xapi_local_session.create ~__context ~pool:false -) + if not (Context.preauth ~__context) + then + (try + (* CP696 - only tries to authenticate against LOCAL superuser account *) + do_local_auth uname pwd; + with (Failure msg) -> + debug "Failed to authenticate user %s: %s" uname msg; + raise (Api_errors.Server_error (Api_errors.session_authentication_failed,[uname;msg]))); + debug "Add session to local storage"; + Xapi_local_session.create ~__context ~pool:false + ) (* CP-714: Modify session.login_with_password to first try local super-user login; and then call into external auth plugin if this is enabled *) (* 1. If the pool master's Host.external_auth_type field is not none, then the Session.login_with_password XenAPI method will: @@ -368,241 +368,241 @@ let slave_local_login_with_password ~__context ~uname ~pwd = wipe_params_after_f 2. otherwise, Session.login_with_password will only attempt to authenticate against the local superuser credentials *) let login_with_password ~__context ~uname ~pwd ~version ~originator = wipe_params_after_fn [pwd] (fun () -> - (* !!! 
Do something with the version number *) - if (Context.preauth ~__context) then - begin - (* in this case, the context origin of this login request is a unix socket bound locally to a filename *) - (* we trust requests from local unix filename sockets, so no need to authenticate them before login *) - login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) - ~pool:false ~is_local_superuser:true ~subject:(Ref.null) - ~auth_user_sid:"" ~auth_user_name:uname ~rbac_permissions:[] ~db_ref:None - end - else - let () = - if Pool_role.is_slave() then - raise (Api_errors.Server_error (Api_errors.host_is_slave, [Pool_role.get_master_address()])) in - let login_as_local_superuser auth_type = - if (auth_type <> "") && (uname <> local_superuser) - then (* makes local superuser = root only*) - failwith ("Local superuser must be "^local_superuser) - else begin - do_local_auth uname pwd; - debug "Success: local auth, user %s from %s" uname (Context.get_origin __context); - login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) - ~pool:false ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:uname - ~rbac_permissions:[] ~db_ref:None - end - in - let thread_delay_and_raise_error ?(error=Api_errors.session_authentication_failed) uname msg = - let some_seconds = 5.0 in - Thread.delay some_seconds; (* sleep a bit to avoid someone brute-forcing the password *) - if error = Api_errors.session_authentication_failed (*default*) - then raise (Api_errors.Server_error (error,[uname;msg])) - else raise (Api_errors.Server_error (error,["session.login_with_password";msg])) - in - ( match (Db.Host.get_external_auth_type ~__context ~self:(Helpers.get_localhost ~__context)) with - - | "" as auth_type -> (* no external authentication *) - begin - (*debug "External authentication is disabled";*) - (* only attempts to authenticate against the local superuser credentials 
*) - try - login_as_local_superuser auth_type - with (Failure msg) -> - begin - info "Failed to locally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - end - - | _ as auth_type -> (* external authentication required *) - begin - debug "External authentication %s is enabled" auth_type; - (* 1. first attempts to authenticate against the local superuser *) - try - login_as_local_superuser auth_type - with (Failure msg) -> - begin - try - debug "Failed to locally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; - - (* 2. then against the external auth service *) - (* 2.1. we first check the external auth service status *) - let rec waiting_event_hook_auth_on_xapi_initialize_succeeded seconds = - if not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded then - begin - if seconds <= 0 then - let msg = (Printf.sprintf "External authentication %s service still initializing" auth_type) in - error "%s" msg; - thread_delay_and_raise_error uname msg ~error:Api_errors.session_invalid - else - debug "External authentication %s service initializing..." auth_type; - Thread.delay 1.0; - waiting_event_hook_auth_on_xapi_initialize_succeeded (seconds-1); - end - in - waiting_event_hook_auth_on_xapi_initialize_succeeded 120; - (* 2.2. 
we then authenticate the usee using the external authentication plugin *) - (* so that we know that he/she exists there *) - let subject_identifier = (try - begin - let _subject_identifier = do_external_auth uname pwd in - debug "Successful external authentication user %s (subject_identifier, %s from %s)" uname _subject_identifier (Context.get_origin __context); - _subject_identifier - end - with (Auth_signature.Auth_failure msg) -> - begin - info "Failed to externally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - ) in - - (* as per tests in CP-827, there should be no need to call is_subject_suspended function here, *) - (* because the authentication server in 2.1 will already reflect if account/password expired, *) - (* disabled, locked-out etc, but since likewise doesn't timely reflect this information *) - (* at the same time for both authentication and subject info queries (modification in the AD *) - (* reflects immediately for AD authentication, but can take 1 hour to reflect on subject info), *) - (* we need to call it here in order to be consistent with the session revalidation function. 
*) - (* Otherwise, there might be cases where the initial authentication/login succeeds, but *) - (* then a few minutes later the revalidation finds that the user is 'suspended' (due to *) - (* subject info caching problems in likewise) and closes the user's session *) - let (subject_suspended,subject_name) = (try - is_subject_suspended subject_identifier - with (Auth_signature.Auth_service_error (errtag,msg)) -> - begin - debug "Failed to find if user %s (subject_id %s, from %s) is suspended: %s" uname subject_identifier (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - ) in - if subject_suspended - then begin - let msg = (Printf.sprintf "User %s (subject_id %s, from %s) suspended in external directory" uname subject_identifier (Context.get_origin __context)) in - debug "%s" msg; - thread_delay_and_raise_error uname msg - end - else - - (* 2.2. then, we verify if any elements of the the membership closure of the externally *) - (* authenticated subject_id is inside our local allowed-to-login subjects list *) - (* finds all the groups a user belongs to (non-reflexive closure of member-of relation) *) - let group_membership_closure = - (try - (Ext_auth.d()).query_group_membership subject_identifier; - with - | Not_found | Auth_signature.Subject_cannot_be_resolved -> - begin - let msg = (Printf.sprintf "Failed to obtain the group membership closure for user %s (subject_id %s, from %s): user not found in external directory" uname (Context.get_origin __context) subject_identifier) in - debug "%s" msg; - thread_delay_and_raise_error uname msg - end - | Auth_signature.Auth_service_error (errtag,msg) -> - begin - debug "Failed to obtain the group membership closure for user %s (subject_id %s, from %s): %s" uname subject_identifier (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - ) in - (* finds the intersection between group_membership_closure and pool's table of subject_ids *) - let subjects_in_db = 
Db.Subject.get_all ~__context in - let subject_ids_in_db = List.map (fun subj -> (subj,(Db.Subject.get_subject_identifier ~__context ~self:subj))) subjects_in_db in - let reflexive_membership_closure = subject_identifier::group_membership_closure in - (* returns all elements of reflexive_membership_closure that are inside subject_ids_in_db *) - let intersect ext_sids db_sids = List.filter (fun (subj,db_sid) -> List.mem db_sid ext_sids) db_sids in - let intersection = intersect reflexive_membership_closure subject_ids_in_db in - - (* 2.3. finally, we create the session for the authenticated subject if any membership intersection was found *) - let in_intersection = (List.length intersection > 0) in - if not in_intersection then - begin (* empty intersection: externally-authenticated subject has no login rights in the pool *) - let msg = (Printf.sprintf "Subject %s (identifier %s, from %s) has no access rights in this pool" uname subject_identifier (Context.get_origin __context)) in - info "%s" msg; - thread_delay_and_raise_error uname msg - end - else - - (* compute RBAC structures for the session *) - let subject_membership = (List.map (fun (subj_ref,sid) -> subj_ref) intersection) in - debug "subject membership intersection with subject-list=[%s]" - (List.fold_left - (fun i (subj_ref,sid)-> - let subj_ref= - try (* attempt to resolve subject_ref -> subject_name *) - List.assoc - Auth_signature.subject_information_field_subject_name - (Db.Subject.get_other_config ~__context ~self:subj_ref) - with _ -> Ref.string_of subj_ref - in if i="" then subj_ref^" ("^sid^")" - else i^","^subj_ref^" ("^sid^")" - ) - "" - intersection - ); - let rbac_permissions = get_permissions ~__context ~subject_membership in - (* CP-1260: If a subject has no roles assigned, then authentication will fail with an error such as PERMISSION_DENIED.*) - if List.length rbac_permissions < 1 then - let msg = (Printf.sprintf "Subject %s (identifier %s) has no roles in this pool" uname 
subject_identifier) in - info "%s" msg; - thread_delay_and_raise_error uname msg ~error:Api_errors.rbac_permission_denied - else - - begin (* non-empty intersection: externally-authenticated subject has login rights in the pool *) - let subject = (* return reference for the subject obj in the db *) - (* obs: this obj ref can point to either a user or a group contained in the local subject db list *) - (try - List.find (fun subj -> (* is this the subject ref that returned the non-empty intersection?*) - (List.hd intersection) = (subj,(Db.Subject.get_subject_identifier ~__context ~self:subj)) - ) subjects_in_db (* goes through exactly the same subject list that we went when computing the intersection, *) - (* so that no one is able to undetectably remove/add another subject with the same subject_identifier *) - (* between that time 2.2 and now 2.3 *) - with Not_found -> (* this should never happen, it shows an inconsistency in the db between 2.2 and 2.3 *) - begin - let msg = (Printf.sprintf "Subject %s (identifier %s, from %s) is not present in this pool" uname subject_identifier (Context.get_origin __context)) in - debug "%s" msg; - thread_delay_and_raise_error uname msg - end - ) in - login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) - ~pool:false ~is_local_superuser:false ~subject:subject ~auth_user_sid:subject_identifier ~auth_user_name:subject_name - ~rbac_permissions ~db_ref:None - end - (* we only reach this point if for some reason a function above forgot to catch a possible exception in the Auth_signature module*) - with - | Not_found - | Auth_signature.Subject_cannot_be_resolved -> - begin - let msg = (Printf.sprintf "user %s from %s not found in external directory" uname (Context.get_origin __context)) in - debug "A function failed to catch this exception for user %s during external authentication: %s" uname msg; - thread_delay_and_raise_error uname msg - end - | Auth_signature.Auth_failure msg - 
| Auth_signature.Auth_service_error (_,msg) -> - begin - debug "A function failed to catch this exception for user %s from %s during external authentication: %s" uname (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - | Api_errors.Server_error _ as e -> (* bubble up any api_error already generated *) - begin - raise e - end - | e -> (* generic catch-all for unexpected exceptions during external authentication *) - begin - let msg = (ExnHelper.string_of_exn e) in - debug "(generic) A function failed to catch this exception for user %s from %s during external authentication: %s" uname (Context.get_origin __context) msg; - thread_delay_and_raise_error uname msg - end - end - - end - ) -) + (* !!! Do something with the version number *) + if (Context.preauth ~__context) then + begin + (* in this case, the context origin of this login request is a unix socket bound locally to a filename *) + (* we trust requests from local unix filename sockets, so no need to authenticate them before login *) + login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) + ~pool:false ~is_local_superuser:true ~subject:(Ref.null) + ~auth_user_sid:"" ~auth_user_name:uname ~rbac_permissions:[] ~db_ref:None + end + else + let () = + if Pool_role.is_slave() then + raise (Api_errors.Server_error (Api_errors.host_is_slave, [Pool_role.get_master_address()])) in + let login_as_local_superuser auth_type = + if (auth_type <> "") && (uname <> local_superuser) + then (* makes local superuser = root only*) + failwith ("Local superuser must be "^local_superuser) + else begin + do_local_auth uname pwd; + debug "Success: local auth, user %s from %s" uname (Context.get_origin __context); + login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) + ~pool:false ~is_local_superuser:true ~subject:(Ref.null) ~auth_user_sid:"" ~auth_user_name:uname + ~rbac_permissions:[] 
~db_ref:None + end + in + let thread_delay_and_raise_error ?(error=Api_errors.session_authentication_failed) uname msg = + let some_seconds = 5.0 in + Thread.delay some_seconds; (* sleep a bit to avoid someone brute-forcing the password *) + if error = Api_errors.session_authentication_failed (*default*) + then raise (Api_errors.Server_error (error,[uname;msg])) + else raise (Api_errors.Server_error (error,["session.login_with_password";msg])) + in + ( match (Db.Host.get_external_auth_type ~__context ~self:(Helpers.get_localhost ~__context)) with + + | "" as auth_type -> (* no external authentication *) + begin + (*debug "External authentication is disabled";*) + (* only attempts to authenticate against the local superuser credentials *) + try + login_as_local_superuser auth_type + with (Failure msg) -> + begin + info "Failed to locally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + end + + | _ as auth_type -> (* external authentication required *) + begin + debug "External authentication %s is enabled" auth_type; + (* 1. first attempts to authenticate against the local superuser *) + try + login_as_local_superuser auth_type + with (Failure msg) -> + begin + try + debug "Failed to locally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; + + (* 2. then against the external auth service *) + (* 2.1. we first check the external auth service status *) + let rec waiting_event_hook_auth_on_xapi_initialize_succeeded seconds = + if not !Xapi_globs.event_hook_auth_on_xapi_initialize_succeeded then + begin + if seconds <= 0 then + let msg = (Printf.sprintf "External authentication %s service still initializing" auth_type) in + error "%s" msg; + thread_delay_and_raise_error uname msg ~error:Api_errors.session_invalid + else + debug "External authentication %s service initializing..." 
auth_type; + Thread.delay 1.0; + waiting_event_hook_auth_on_xapi_initialize_succeeded (seconds-1); + end + in + waiting_event_hook_auth_on_xapi_initialize_succeeded 120; + (* 2.2. we then authenticate the usee using the external authentication plugin *) + (* so that we know that he/she exists there *) + let subject_identifier = (try + begin + let _subject_identifier = do_external_auth uname pwd in + debug "Successful external authentication user %s (subject_identifier, %s from %s)" uname _subject_identifier (Context.get_origin __context); + _subject_identifier + end + with (Auth_signature.Auth_failure msg) -> + begin + info "Failed to externally authenticate user %s from %s: %s" uname (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + ) in + + (* as per tests in CP-827, there should be no need to call is_subject_suspended function here, *) + (* because the authentication server in 2.1 will already reflect if account/password expired, *) + (* disabled, locked-out etc, but since likewise doesn't timely reflect this information *) + (* at the same time for both authentication and subject info queries (modification in the AD *) + (* reflects immediately for AD authentication, but can take 1 hour to reflect on subject info), *) + (* we need to call it here in order to be consistent with the session revalidation function. 
*) + (* Otherwise, there might be cases where the initial authentication/login succeeds, but *) + (* then a few minutes later the revalidation finds that the user is 'suspended' (due to *) + (* subject info caching problems in likewise) and closes the user's session *) + let (subject_suspended,subject_name) = (try + is_subject_suspended subject_identifier + with (Auth_signature.Auth_service_error (errtag,msg)) -> + begin + debug "Failed to find if user %s (subject_id %s, from %s) is suspended: %s" uname subject_identifier (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + ) in + if subject_suspended + then begin + let msg = (Printf.sprintf "User %s (subject_id %s, from %s) suspended in external directory" uname subject_identifier (Context.get_origin __context)) in + debug "%s" msg; + thread_delay_and_raise_error uname msg + end + else + + (* 2.2. then, we verify if any elements of the the membership closure of the externally *) + (* authenticated subject_id is inside our local allowed-to-login subjects list *) + (* finds all the groups a user belongs to (non-reflexive closure of member-of relation) *) + let group_membership_closure = + (try + (Ext_auth.d()).query_group_membership subject_identifier; + with + | Not_found | Auth_signature.Subject_cannot_be_resolved -> + begin + let msg = (Printf.sprintf "Failed to obtain the group membership closure for user %s (subject_id %s, from %s): user not found in external directory" uname (Context.get_origin __context) subject_identifier) in + debug "%s" msg; + thread_delay_and_raise_error uname msg + end + | Auth_signature.Auth_service_error (errtag,msg) -> + begin + debug "Failed to obtain the group membership closure for user %s (subject_id %s, from %s): %s" uname subject_identifier (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + ) in + (* finds the intersection between group_membership_closure and pool's table of subject_ids *) + let subjects_in_db = 
Db.Subject.get_all ~__context in + let subject_ids_in_db = List.map (fun subj -> (subj,(Db.Subject.get_subject_identifier ~__context ~self:subj))) subjects_in_db in + let reflexive_membership_closure = subject_identifier::group_membership_closure in + (* returns all elements of reflexive_membership_closure that are inside subject_ids_in_db *) + let intersect ext_sids db_sids = List.filter (fun (subj,db_sid) -> List.mem db_sid ext_sids) db_sids in + let intersection = intersect reflexive_membership_closure subject_ids_in_db in + + (* 2.3. finally, we create the session for the authenticated subject if any membership intersection was found *) + let in_intersection = (List.length intersection > 0) in + if not in_intersection then + begin (* empty intersection: externally-authenticated subject has no login rights in the pool *) + let msg = (Printf.sprintf "Subject %s (identifier %s, from %s) has no access rights in this pool" uname subject_identifier (Context.get_origin __context)) in + info "%s" msg; + thread_delay_and_raise_error uname msg + end + else + + (* compute RBAC structures for the session *) + let subject_membership = (List.map (fun (subj_ref,sid) -> subj_ref) intersection) in + debug "subject membership intersection with subject-list=[%s]" + (List.fold_left + (fun i (subj_ref,sid)-> + let subj_ref= + try (* attempt to resolve subject_ref -> subject_name *) + List.assoc + Auth_signature.subject_information_field_subject_name + (Db.Subject.get_other_config ~__context ~self:subj_ref) + with _ -> Ref.string_of subj_ref + in if i="" then subj_ref^" ("^sid^")" + else i^","^subj_ref^" ("^sid^")" + ) + "" + intersection + ); + let rbac_permissions = get_permissions ~__context ~subject_membership in + (* CP-1260: If a subject has no roles assigned, then authentication will fail with an error such as PERMISSION_DENIED.*) + if List.length rbac_permissions < 1 then + let msg = (Printf.sprintf "Subject %s (identifier %s) has no roles in this pool" uname 
subject_identifier) in + info "%s" msg; + thread_delay_and_raise_error uname msg ~error:Api_errors.rbac_permission_denied + else + + begin (* non-empty intersection: externally-authenticated subject has login rights in the pool *) + let subject = (* return reference for the subject obj in the db *) + (* obs: this obj ref can point to either a user or a group contained in the local subject db list *) + (try + List.find (fun subj -> (* is this the subject ref that returned the non-empty intersection?*) + (List.hd intersection) = (subj,(Db.Subject.get_subject_identifier ~__context ~self:subj)) + ) subjects_in_db (* goes through exactly the same subject list that we went when computing the intersection, *) + (* so that no one is able to undetectably remove/add another subject with the same subject_identifier *) + (* between that time 2.2 and now 2.3 *) + with Not_found -> (* this should never happen, it shows an inconsistency in the db between 2.2 and 2.3 *) + begin + let msg = (Printf.sprintf "Subject %s (identifier %s, from %s) is not present in this pool" uname subject_identifier (Context.get_origin __context)) in + debug "%s" msg; + thread_delay_and_raise_error uname msg + end + ) in + login_no_password_common ~__context ~uname:(Some uname) ~originator ~host:(Helpers.get_localhost ~__context) + ~pool:false ~is_local_superuser:false ~subject:subject ~auth_user_sid:subject_identifier ~auth_user_name:subject_name + ~rbac_permissions ~db_ref:None + end + (* we only reach this point if for some reason a function above forgot to catch a possible exception in the Auth_signature module*) + with + | Not_found + | Auth_signature.Subject_cannot_be_resolved -> + begin + let msg = (Printf.sprintf "user %s from %s not found in external directory" uname (Context.get_origin __context)) in + debug "A function failed to catch this exception for user %s during external authentication: %s" uname msg; + thread_delay_and_raise_error uname msg + end + | Auth_signature.Auth_failure msg + 
| Auth_signature.Auth_service_error (_,msg) -> + begin + debug "A function failed to catch this exception for user %s from %s during external authentication: %s" uname (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + | Api_errors.Server_error _ as e -> (* bubble up any api_error already generated *) + begin + raise e + end + | e -> (* generic catch-all for unexpected exceptions during external authentication *) + begin + let msg = (ExnHelper.string_of_exn e) in + debug "(generic) A function failed to catch this exception for user %s from %s during external authentication: %s" uname (Context.get_origin __context) msg; + thread_delay_and_raise_error uname msg + end + end + + end + ) + ) let change_password ~__context ~old_pwd ~new_pwd = wipe_params_after_fn [old_pwd;new_pwd] (fun () -> - let session_id = Context.get_session_id __context in - (*let user = Db.Session.get_this_user ~__context ~self:session_id in - let uname = Db.User.get_short_name ~__context ~self:user in*) - let uname = local_superuser in (* user class has been deprecated *) + let session_id = Context.get_session_id __context in + (*let user = Db.Session.get_this_user ~__context ~self:session_id in + let uname = Db.User.get_short_name ~__context ~self:user in*) + let uname = local_superuser in (* user class has been deprecated *) - if (Db.Session.get_is_local_superuser ~__context ~self:session_id) then - begin (* CP-696: only change password if session has is_local_superuser bit set *) + if (Db.Session.get_is_local_superuser ~__context ~self:session_id) then + begin (* CP-696: only change password if session has is_local_superuser bit set *) (* CA-13567: If you have root priviledges then we do not authenticate old_pwd; right now, since we only ever have root priviledges we just comment this out. 
@@ -615,109 +615,109 @@ let change_password ~__context ~old_pwd ~new_pwd = wipe_params_after_fn [old_pw raise (Api_errors.Server_error (Api_errors.session_authentication_failed,[uname;msg])) end; *) - try - do_local_change_password uname new_pwd; - info "Password changed successfully for user %s" uname; - info "Syncing password change across hosts in pool"; - (* tell all hosts (except me to sync new passwd file) *) - let hash = Helpers.compute_hash () in - let hosts = Db.Host.get_all ~__context in - let hosts = List.filter (fun hostref -> hostref <> !Xapi_globs.localhost_ref) hosts in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter - (fun host-> - try - Client.Host.request_config_file_sync rpc session_id host hash - with e -> - error "Failed to sync password to host %s: %s" (Db.Host.get_name_label ~__context ~self:host) (Printexc.to_string e); - ) hosts); - info "Finished syncing password across pool"; - with (Failure msg) -> - error "Failed to change password for user %s: %s" uname msg; - raise (Api_errors.Server_error(Api_errors.change_password_rejected, [ msg ])) - end - else (* CP-696: session does not have is_local_superuser bit set, so we must fail *) - begin - let msg = (Printf.sprintf "Failed to change password for user %s" uname) in - debug "User %s is not local superuser: %s" uname msg; - raise (Api_errors.Server_error (Api_errors.user_is_not_local_superuser,[ msg ])) - end -) + try + do_local_change_password uname new_pwd; + info "Password changed successfully for user %s" uname; + info "Syncing password change across hosts in pool"; + (* tell all hosts (except me to sync new passwd file) *) + let hash = Helpers.compute_hash () in + let hosts = Db.Host.get_all ~__context in + let hosts = List.filter (fun hostref -> hostref <> !Xapi_globs.localhost_ref) hosts in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter + (fun host-> + try + Client.Host.request_config_file_sync rpc session_id host hash + with 
e -> + error "Failed to sync password to host %s: %s" (Db.Host.get_name_label ~__context ~self:host) (Printexc.to_string e); + ) hosts); + info "Finished syncing password across pool"; + with (Failure msg) -> + error "Failed to change password for user %s: %s" uname msg; + raise (Api_errors.Server_error(Api_errors.change_password_rejected, [ msg ])) + end + else (* CP-696: session does not have is_local_superuser bit set, so we must fail *) + begin + let msg = (Printf.sprintf "Failed to change password for user %s" uname) in + debug "User %s is not local superuser: %s" uname msg; + raise (Api_errors.Server_error (Api_errors.user_is_not_local_superuser,[ msg ])) + end + ) let logout ~__context = let session_id = Context.get_session_id __context in destroy_db_session ~__context ~self:session_id -let local_logout ~__context = - let session_id = Context.get_session_id __context in - Xapi_local_session.destroy ~__context ~self:session_id +let local_logout ~__context = + let session_id = Context.get_session_id __context in + Xapi_local_session.destroy ~__context ~self:session_id let get_group_subject_identifier_from_session ~__context ~session = - let subj = Db.Session.get_subject ~__context ~self:session in - try - Db.Subject.get_subject_identifier ~__context ~self:subj - with - | Db_exn.DBCache_NotFound ("missing row",_,_) -> - (* expected error: subject was removed from subject list *) - "" - | e -> (* unexpected error *) - debug "error obtaining sid from subject %s from session %s: %s" (Ref.string_of subj) (Ref.string_of session) (ExnHelper.string_of_exn e); - "" - -let get_all_subject_identifiers ~__context = - let all_sessions = Db.Session.get_all ~__context in - let all_extauth_sessions = List.filter (fun session -> - (* an externally-authenticated session is one which is not a local_superuser session *) - not (Db.Session.get_is_local_superuser ~__context ~self:session) - ) all_sessions in - (* we only want to return sids of externally-authenticated sessions *) - 
let all_auth_user_sids_in_sessions = List.map (fun session -> - Db.Session.get_auth_user_sid ~__context ~self:session - ) all_extauth_sessions in - let all_subject_list_sids_in_sessions = (List.filter (fun e->e<>"") - (List.map (fun session -> - (* TODO: better to look up the membership closure *) - get_group_subject_identifier_from_session ~__context ~session - ) all_extauth_sessions) - ) in - (* avoid returning repeated sids *) - Listext.List.setify (all_auth_user_sids_in_sessions@all_subject_list_sids_in_sessions) - + let subj = Db.Session.get_subject ~__context ~self:session in + try + Db.Subject.get_subject_identifier ~__context ~self:subj + with + | Db_exn.DBCache_NotFound ("missing row",_,_) -> + (* expected error: subject was removed from subject list *) + "" + | e -> (* unexpected error *) + debug "error obtaining sid from subject %s from session %s: %s" (Ref.string_of subj) (Ref.string_of session) (ExnHelper.string_of_exn e); + "" + +let get_all_subject_identifiers ~__context = + let all_sessions = Db.Session.get_all ~__context in + let all_extauth_sessions = List.filter (fun session -> + (* an externally-authenticated session is one which is not a local_superuser session *) + not (Db.Session.get_is_local_superuser ~__context ~self:session) + ) all_sessions in + (* we only want to return sids of externally-authenticated sessions *) + let all_auth_user_sids_in_sessions = List.map (fun session -> + Db.Session.get_auth_user_sid ~__context ~self:session + ) all_extauth_sessions in + let all_subject_list_sids_in_sessions = (List.filter (fun e->e<>"") + (List.map (fun session -> + (* TODO: better to look up the membership closure *) + get_group_subject_identifier_from_session ~__context ~session + ) all_extauth_sessions) + ) in + (* avoid returning repeated sids *) + Listext.List.setify (all_auth_user_sids_in_sessions@all_subject_list_sids_in_sessions) + let logout_subject_identifier ~__context ~subject_identifier= - let all_sessions = Db.Session.get_all 
~__context in - let current_session = Context.get_session_id __context in - - (* we filter the sessions to be destroyed *) - let sessions = List.filter (fun s -> - - (* 1. we never allow local_superuser sessions to be forcibly logged out *) - (not (Db.Session.get_is_local_superuser ~__context ~self:s)) - && - (* 2. we remove the session associated with this function call from the list of all sessions to be destroyed *) - (Db.Session.get_uuid ~__context ~self:s) <> (Db.Session.get_uuid ~__context ~self:current_session) - && - (* 3. we only consider those sessions associated with the specific subject_id received as parameter *) - ( - (* 3.1. the sid of the authenticated user *) - (Db.Session.get_auth_user_sid ~__context ~self:s) = subject_identifier - || - (* 3.2. any sids of the group that authenticated the user *) - (* TODO: better to look up the membership closure *) - (get_group_subject_identifier_from_session ~__context ~session:s) = subject_identifier - ) - - ) all_sessions in - debug "This session %s (user=%s subject_identifier=%s) is forcing the logout of these other sessions associated with subject_identifier=%s: trackids=[%s]" - (trackid current_session) - (if Db.Session.get_is_local_superuser ~__context ~self:current_session then local_superuser else "") - (Db.Session.get_auth_user_sid ~__context ~self:current_session) - subject_identifier - (List.fold_right (fun s str -> (trackid s)^","^str) sessions ""); - - (* kill all filtered sessions *) - List.iter (fun s -> destroy_db_session ~__context ~self:s) sessions + let all_sessions = Db.Session.get_all ~__context in + let current_session = Context.get_session_id __context in + + (* we filter the sessions to be destroyed *) + let sessions = List.filter (fun s -> + + (* 1. we never allow local_superuser sessions to be forcibly logged out *) + (not (Db.Session.get_is_local_superuser ~__context ~self:s)) + && + (* 2. 
we remove the session associated with this function call from the list of all sessions to be destroyed *) + (Db.Session.get_uuid ~__context ~self:s) <> (Db.Session.get_uuid ~__context ~self:current_session) + && + (* 3. we only consider those sessions associated with the specific subject_id received as parameter *) + ( + (* 3.1. the sid of the authenticated user *) + (Db.Session.get_auth_user_sid ~__context ~self:s) = subject_identifier + || + (* 3.2. any sids of the group that authenticated the user *) + (* TODO: better to look up the membership closure *) + (get_group_subject_identifier_from_session ~__context ~session:s) = subject_identifier + ) + + ) all_sessions in + debug "This session %s (user=%s subject_identifier=%s) is forcing the logout of these other sessions associated with subject_identifier=%s: trackids=[%s]" + (trackid current_session) + (if Db.Session.get_is_local_superuser ~__context ~self:current_session then local_superuser else "") + (Db.Session.get_auth_user_sid ~__context ~self:current_session) + subject_identifier + (List.fold_right (fun s str -> (trackid s)^","^str) sessions ""); + + (* kill all filtered sessions *) + List.iter (fun s -> destroy_db_session ~__context ~self:s) sessions (* returns the ancestry chain of session s, starting with s *) @@ -732,7 +732,7 @@ let rec get_ancestry ~__context ~self = ; Ref.null in - self::(get_ancestry ~__context ~self:parent) + self::(get_ancestry ~__context ~self:parent) ) (* returns the original session up the ancestry chain that created s *) @@ -744,20 +744,20 @@ let get_top ~__context ~self = (* This function should only be called from inside XAPI. 
*) let create_readonly_session ~__context ~uname ~db_ref = - debug "Creating readonly session."; - let role = List.hd (Xapi_role.get_by_name_label ~__context ~label:Datamodel.role_read_only) in - let rbac_permissions = Xapi_role.get_permissions_name_label ~__context ~self:role in - let master = Helpers.get_master ~__context in - login_no_password_common ~__context ~uname:(Some uname) - ~originator:xapi_internal_originator ~host:master ~pool:false - ~is_local_superuser:false ~subject:Ref.null ~auth_user_sid:"readonly-sid" - ~auth_user_name:uname ~rbac_permissions ~db_ref + debug "Creating readonly session."; + let role = List.hd (Xapi_role.get_by_name_label ~__context ~label:Datamodel.role_read_only) in + let rbac_permissions = Xapi_role.get_permissions_name_label ~__context ~self:role in + let master = Helpers.get_master ~__context in + login_no_password_common ~__context ~uname:(Some uname) + ~originator:xapi_internal_originator ~host:master ~pool:false + ~is_local_superuser:false ~subject:Ref.null ~auth_user_sid:"readonly-sid" + ~auth_user_name:uname ~rbac_permissions ~db_ref (* Create a database reference from a DB dump, and register it with a new readonly session. *) let create_from_db_file ~__context ~filename = - let db = - (Db_xml.From.file (Datamodel_schema.of_datamodel ()) filename) - |> Db_upgrade.generic_database_upgrade - in - let db_ref = Some (Db_ref.in_memory (ref (ref db))) in - create_readonly_session ~__context ~uname:"db-from-file" ~db_ref + let db = + (Db_xml.From.file (Datamodel_schema.of_datamodel ()) filename) + |> Db_upgrade.generic_database_upgrade + in + let db_ref = Some (Db_ref.in_memory (ref (ref db))) in + create_readonly_session ~__context ~uname:"db-from-file" ~db_ref diff --git a/ocaml/xapi/xapi_sm.ml b/ocaml/xapi/xapi_sm.ml index bb6a14e6c9e..caf4f81e45a 100644 --- a/ocaml/xapi/xapi_sm.ml +++ b/ocaml/xapi/xapi_sm.ml @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) (* The SMAPIv1 plugins are a static set in the filesystem. 
The SMAPIv2 plugins are a dynamic set hosted in driver domains. *) @@ -28,108 +28,108 @@ open Fun lexicographic ordering. *) type version = int list let version_of_string = List.map int_of_string ++ (String.split '.') - + module D=Debug.Make(struct let name="xapi" end) open D let create_from_query_result ~__context q = - let r = Ref.make () and u = Uuid.string_of_uuid (Uuid.make_uuid ()) in - let open Storage_interface in - if (String.lowercase q.driver) <> "storage_access" - then begin - let features = Smint.parse_string_int64_features q.features in - let capabilities = List.map fst features in - info "Registering SM plugin %s (version %s)" (String.lowercase q.driver) q.version; - Db.SM.create ~__context ~ref:r ~uuid:u ~_type:(String.lowercase q.driver) - ~name_label:q.name - ~name_description:q.description - ~vendor:q.vendor - ~copyright:q.copyright - ~version:q.version - ~required_api_version:q.required_api_version - ~capabilities - ~features - ~configuration:q.configuration - ~other_config:[] - ~driver_filename:(Sm_exec.cmd_name q.driver) - ~required_cluster_stack:q.required_cluster_stack - end + let r = Ref.make () and u = Uuid.string_of_uuid (Uuid.make_uuid ()) in + let open Storage_interface in + if (String.lowercase q.driver) <> "storage_access" + then begin + let features = Smint.parse_string_int64_features q.features in + let capabilities = List.map fst features in + info "Registering SM plugin %s (version %s)" (String.lowercase q.driver) q.version; + Db.SM.create ~__context ~ref:r ~uuid:u ~_type:(String.lowercase q.driver) + ~name_label:q.name + ~name_description:q.description + ~vendor:q.vendor + ~copyright:q.copyright + ~version:q.version + ~required_api_version:q.required_api_version + ~capabilities + ~features + ~configuration:q.configuration + ~other_config:[] + ~driver_filename:(Sm_exec.cmd_name q.driver) + ~required_cluster_stack:q.required_cluster_stack + end let update_from_query_result ~__context (self, r) query_result = - let open 
Storage_interface in - let _type = String.lowercase query_result.driver in - if _type <> "storage_access" - then begin - let driver_filename = Sm_exec.cmd_name query_result.driver in - let features = Smint.parse_string_int64_features query_result.features in - let capabilities = List.map fst features in - info "Registering SM plugin %s (version %s)" (String.lowercase query_result.driver) query_result.version; - if r.API.sM_type <> _type - then Db.SM.set_type ~__context ~self ~value:_type; - if r.API.sM_name_label <> query_result.name - then Db.SM.set_name_label ~__context ~self ~value:query_result.name; - if r.API.sM_name_description <> query_result.description - then Db.SM.set_name_description ~__context ~self ~value:query_result.description; - if r.API.sM_vendor <> query_result.vendor - then Db.SM.set_vendor ~__context ~self ~value:query_result.vendor; - if r.API.sM_copyright <> query_result.copyright - then Db.SM.set_copyright ~__context ~self ~value:query_result.copyright; - if r.API.sM_required_api_version <> query_result.required_api_version - then Db.SM.set_required_api_version ~__context ~self ~value:query_result.required_api_version; - if (r.API.sM_capabilities <> capabilities || r.API.sM_features <> features) - then begin - Db.SM.set_capabilities ~__context ~self ~value:capabilities; - Db.SM.set_features ~__context ~self ~value:features; - end; - if r.API.sM_configuration <> query_result.configuration - then Db.SM.set_configuration ~__context ~self ~value:query_result.configuration; - if r.API.sM_driver_filename <> driver_filename - then Db.SM.set_driver_filename ~__context ~self ~value:driver_filename - end + let open Storage_interface in + let _type = String.lowercase query_result.driver in + if _type <> "storage_access" + then begin + let driver_filename = Sm_exec.cmd_name query_result.driver in + let features = Smint.parse_string_int64_features query_result.features in + let capabilities = List.map fst features in + info "Registering SM plugin %s 
(version %s)" (String.lowercase query_result.driver) query_result.version; + if r.API.sM_type <> _type + then Db.SM.set_type ~__context ~self ~value:_type; + if r.API.sM_name_label <> query_result.name + then Db.SM.set_name_label ~__context ~self ~value:query_result.name; + if r.API.sM_name_description <> query_result.description + then Db.SM.set_name_description ~__context ~self ~value:query_result.description; + if r.API.sM_vendor <> query_result.vendor + then Db.SM.set_vendor ~__context ~self ~value:query_result.vendor; + if r.API.sM_copyright <> query_result.copyright + then Db.SM.set_copyright ~__context ~self ~value:query_result.copyright; + if r.API.sM_required_api_version <> query_result.required_api_version + then Db.SM.set_required_api_version ~__context ~self ~value:query_result.required_api_version; + if (r.API.sM_capabilities <> capabilities || r.API.sM_features <> features) + then begin + Db.SM.set_capabilities ~__context ~self ~value:capabilities; + Db.SM.set_features ~__context ~self ~value:features; + end; + if r.API.sM_configuration <> query_result.configuration + then Db.SM.set_configuration ~__context ~self ~value:query_result.configuration; + if r.API.sM_driver_filename <> driver_filename + then Db.SM.set_driver_filename ~__context ~self ~value:driver_filename + end let is_v1 x = version_of_string x < [ 2; 0 ] let _serialize_reg = - let lock = Mutex.create () in - let holder = ref None in - begin fun f -> - match !holder with - | Some t when t = Thread.self () -> - (* inside a nested layer where the lock is held by myself *) - f () - | _ -> - Mutex.execute lock begin fun () -> - holder := Some (Thread.self ()); - Pervasiveext.finally f (fun () -> holder := None) - end - end + let lock = Mutex.create () in + let holder = ref None in + begin fun f -> + match !holder with + | Some t when t = Thread.self () -> + (* inside a nested layer where the lock is held by myself *) + f () + | _ -> + Mutex.execute lock begin fun () -> + holder := Some 
(Thread.self ()); + Pervasiveext.finally f (fun () -> holder := None) + end + end let unregister_plugin ~__context query_result = - _serialize_reg begin fun () -> - let open Storage_interface in - let driver = String.lowercase query_result.driver in - if is_v1 query_result.required_api_version then begin - info "Not unregistering SM plugin %s (required_api_version %s < 2.0)" driver query_result.required_api_version; - end else - List.iter - (fun (rf, rc) -> - if rc.API.sM_type = driver then - try - info "Unregistering SM plugin %s (version %s)" driver query_result.version; - Db.SM.destroy ~__context ~self:rf - with e -> - warn "Ignore unregistering SM plugin failure: %s" (Printexc.to_string e)) - (Db.SM.get_all_records ~__context) - end + _serialize_reg begin fun () -> + let open Storage_interface in + let driver = String.lowercase query_result.driver in + if is_v1 query_result.required_api_version then begin + info "Not unregistering SM plugin %s (required_api_version %s < 2.0)" driver query_result.required_api_version; + end else + List.iter + (fun (rf, rc) -> + if rc.API.sM_type = driver then + try + info "Unregistering SM plugin %s (version %s)" driver query_result.version; + Db.SM.destroy ~__context ~self:rf + with e -> + warn "Ignore unregistering SM plugin failure: %s" (Printexc.to_string e)) + (Db.SM.get_all_records ~__context) + end let register_plugin ~__context query_result = - _serialize_reg begin fun () -> - let open Storage_interface in - let driver = String.lowercase query_result.driver in - if is_v1 query_result.required_api_version then begin - info "Not registering SM plugin %s (required_api_version %s < 2.0)" driver query_result.required_api_version; - end else begin - unregister_plugin ~__context query_result; - create_from_query_result ~__context query_result - end - end + _serialize_reg begin fun () -> + let open Storage_interface in + let driver = String.lowercase query_result.driver in + if is_v1 query_result.required_api_version then begin 
+ info "Not registering SM plugin %s (required_api_version %s < 2.0)" driver query_result.required_api_version; + end else begin + unregister_plugin ~__context query_result; + create_from_query_result ~__context query_result + end + end diff --git a/ocaml/xapi/xapi_sr.ml b/ocaml/xapi/xapi_sr.ml index edfdf6b3981..021cbc4d34f 100644 --- a/ocaml/xapi/xapi_sr.ml +++ b/ocaml/xapi/xapi_sr.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for SR objects * @group XenAPI functions - *) +*) module Rrdd = Rrd_client.Client open Printf @@ -39,161 +39,161 @@ let scans_in_progress_m = Mutex.create () let scans_in_progress_c = Condition.create () let i_should_scan_sr sr = - Mutex.execute scans_in_progress_m - (fun () -> - if Hashtbl.mem scans_in_progress sr - then false (* someone else already is *) - else (Hashtbl.replace scans_in_progress sr true; true)) + Mutex.execute scans_in_progress_m + (fun () -> + if Hashtbl.mem scans_in_progress sr + then false (* someone else already is *) + else (Hashtbl.replace scans_in_progress sr true; true)) let scan_finished sr = - Mutex.execute scans_in_progress_m - (fun () -> - Hashtbl.remove scans_in_progress sr; - Condition.broadcast scans_in_progress_c) + Mutex.execute scans_in_progress_m + (fun () -> + Hashtbl.remove scans_in_progress sr; + Condition.broadcast scans_in_progress_c) (* Perform a single scan of an SR in a background thread. Limit to one thread per SR *) (* If a callback is supplied, call it once the scan is complete. 
*) let scan_one ~__context ?callback sr = - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - if i_should_scan_sr sr - then - ignore(Thread.create - (fun () -> - Server_helpers.exec_with_subtask ~__context "scan one" (fun ~__context -> - finally - (fun () -> - try - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Helpers.log_exn_continue (Printf.sprintf "scanning SR %s" (Ref.string_of sr)) - (fun sr -> - Client.SR.scan rpc session_id sr) sr) - with e -> - error "Caught exception attempting an SR.scan: %s" (ExnHelper.string_of_exn e) - ) - (fun () -> - scan_finished sr; - debug "Scan of SR %s complete." sr_uuid; - Opt.iter (fun f -> - debug "Starting callback for SR %s." sr_uuid; - f (); - debug "Callback for SR %s finished." sr_uuid) callback) - )) ()) - else - (* If a callback was supplied but a scan is already in progress, call the callback once the scan is complete. *) - Opt.iter (fun f -> - ignore (Thread.create - (fun () -> - debug "Tried to scan SR %s but scan already in progress - waiting for scan to complete." sr_uuid; - Mutex.execute scans_in_progress_m (fun () -> - while Hashtbl.mem scans_in_progress sr do - Condition.wait scans_in_progress_c scans_in_progress_m; - done); - debug "Got signal that scan of SR %s is complete - starting callback." sr_uuid; - f (); - debug "Callback for SR %s finished." sr_uuid) - () - )) callback + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + if i_should_scan_sr sr + then + ignore(Thread.create + (fun () -> + Server_helpers.exec_with_subtask ~__context "scan one" (fun ~__context -> + finally + (fun () -> + try + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Helpers.log_exn_continue (Printf.sprintf "scanning SR %s" (Ref.string_of sr)) + (fun sr -> + Client.SR.scan rpc session_id sr) sr) + with e -> + error "Caught exception attempting an SR.scan: %s" (ExnHelper.string_of_exn e) + ) + (fun () -> + scan_finished sr; + debug "Scan of SR %s complete." 
sr_uuid; + Opt.iter (fun f -> + debug "Starting callback for SR %s." sr_uuid; + f (); + debug "Callback for SR %s finished." sr_uuid) callback) + )) ()) + else + (* If a callback was supplied but a scan is already in progress, call the callback once the scan is complete. *) + Opt.iter (fun f -> + ignore (Thread.create + (fun () -> + debug "Tried to scan SR %s but scan already in progress - waiting for scan to complete." sr_uuid; + Mutex.execute scans_in_progress_m (fun () -> + while Hashtbl.mem scans_in_progress sr do + Condition.wait scans_in_progress_c scans_in_progress_m; + done); + debug "Got signal that scan of SR %s is complete - starting callback." sr_uuid; + f (); + debug "Callback for SR %s finished." sr_uuid) + () + )) callback let scan_all ~__context = - let srs = Helpers.get_all_plugged_srs ~__context in - (* only scan those with the dirty/auto_scan key set *) - let scannable_srs = - List.filter (fun sr -> - let oc = Db.SR.get_other_config ~__context ~self:sr in - (List.mem_assoc Xapi_globs.auto_scan oc && (List.assoc Xapi_globs.auto_scan oc = "true")) - || (List.mem_assoc "dirty" oc)) srs in - if List.length scannable_srs > 0 then - debug "Automatically scanning SRs = [ %s ]" (String.concat ";" (List.map Ref.string_of scannable_srs)); - List.iter (scan_one ~__context) scannable_srs + let srs = Helpers.get_all_plugged_srs ~__context in + (* only scan those with the dirty/auto_scan key set *) + let scannable_srs = + List.filter (fun sr -> + let oc = Db.SR.get_other_config ~__context ~self:sr in + (List.mem_assoc Xapi_globs.auto_scan oc && (List.assoc Xapi_globs.auto_scan oc = "true")) + || (List.mem_assoc "dirty" oc)) srs in + if List.length scannable_srs > 0 then + debug "Automatically scanning SRs = [ %s ]" (String.concat ";" (List.map Ref.string_of scannable_srs)); + List.iter (scan_one ~__context) scannable_srs let scanning_thread () = Debug.with_thread_named "scanning_thread" (fun () -> - Server_helpers.exec_with_new_task "SR scanner" (fun __context 
-> - let host = Helpers.get_localhost ~__context in - - let get_delay () = - try - let oc = Db.Host.get_other_config ~__context ~self:host in - float_of_string (List.assoc Xapi_globs.auto_scan_interval oc) - with _ -> 30. - in - - while true do - Thread.delay (get_delay ()); - try scan_all ~__context - with e -> debug "Exception in SR scanning thread: %s" (Printexc.to_string e) - done) - ) () + Server_helpers.exec_with_new_task "SR scanner" (fun __context -> + let host = Helpers.get_localhost ~__context in + + let get_delay () = + try + let oc = Db.Host.get_other_config ~__context ~self:host in + float_of_string (List.assoc Xapi_globs.auto_scan_interval oc) + with _ -> 30. + in + + while true do + Thread.delay (get_delay ()); + try scan_all ~__context + with e -> debug "Exception in SR scanning thread: %s" (Printexc.to_string e) + done) + ) () (* introduce, creates a record for the SR in the database. It has no other side effect *) let introduce ~__context ~uuid ~name_label - ~name_description ~_type ~content_type ~shared ~sm_config = - let _type = String.lowercase _type in - let uuid = if uuid="" then Uuid.to_string (Uuid.make_uuid()) else uuid in (* fill in uuid if none specified *) - let sr_ref = Ref.make () in - (* Create SR record in DB *) - try - Db.SR.create ~__context ~ref:sr_ref ~uuid - ~name_label ~name_description - ~allowed_operations:[] ~current_operations:[] - ~virtual_allocation:0L - ~physical_utilisation: (-1L) - ~physical_size: (-1L) - ~content_type - ~_type ~shared ~other_config:[] ~default_vdi_visibility:true - ~sm_config ~blobs:[] ~tags:[] ~local_cache_enabled:false - ~introduced_by:Ref.null - ~clustered:false - ~is_tools_sr:false; - - Xapi_sr_operations.update_allowed_operations ~__context ~self:sr_ref; - (* Return ref of newly created sr *) - sr_ref - with Db_exn.Uniqueness_constraint_violation("SR", "uuid", _) -> - raise (Api_errors.Server_error (Api_errors.sr_uuid_exists, [uuid])) + ~name_description ~_type ~content_type ~shared ~sm_config = 
+ let _type = String.lowercase _type in + let uuid = if uuid="" then Uuid.to_string (Uuid.make_uuid()) else uuid in (* fill in uuid if none specified *) + let sr_ref = Ref.make () in + (* Create SR record in DB *) + try + Db.SR.create ~__context ~ref:sr_ref ~uuid + ~name_label ~name_description + ~allowed_operations:[] ~current_operations:[] + ~virtual_allocation:0L + ~physical_utilisation: (-1L) + ~physical_size: (-1L) + ~content_type + ~_type ~shared ~other_config:[] ~default_vdi_visibility:true + ~sm_config ~blobs:[] ~tags:[] ~local_cache_enabled:false + ~introduced_by:Ref.null + ~clustered:false + ~is_tools_sr:false; + + Xapi_sr_operations.update_allowed_operations ~__context ~self:sr_ref; + (* Return ref of newly created sr *) + sr_ref + with Db_exn.Uniqueness_constraint_violation("SR", "uuid", _) -> + raise (Api_errors.Server_error (Api_errors.sr_uuid_exists, [uuid])) let make ~__context ~host ~device_config ~physical_size ~name_label ~name_description ~_type ~content_type ~sm_config = - raise (Api_errors.Server_error (Api_errors.message_deprecated, [])) + raise (Api_errors.Server_error (Api_errors.message_deprecated, [])) let get_pbds ~__context ~self ~attached ~master_pos = - let master = Helpers.get_master ~__context in - let master_condition = Eq (Field "host", Literal (Ref.string_of master)) in - let sr_condition = Eq (Field "SR", Literal (Ref.string_of self)) in - let plugged_condition = Eq (Field "currently_attached", Literal (string_of_bool attached)) in - let all = List.fold_left (fun acc p -> And (acc, p)) True in - let master_pbds = Db.PBD.get_refs_where ~__context - ~expr:(all [master_condition; sr_condition; plugged_condition]) in - let slave_pbds = Db.PBD.get_refs_where ~__context - ~expr:(all [Not master_condition; sr_condition; plugged_condition]) in - match master_pos with - | `First -> master_pbds @ slave_pbds - | `Last -> slave_pbds @ master_pbds + let master = Helpers.get_master ~__context in + let master_condition = Eq (Field "host", 
Literal (Ref.string_of master)) in + let sr_condition = Eq (Field "SR", Literal (Ref.string_of self)) in + let plugged_condition = Eq (Field "currently_attached", Literal (string_of_bool attached)) in + let all = List.fold_left (fun acc p -> And (acc, p)) True in + let master_pbds = Db.PBD.get_refs_where ~__context + ~expr:(all [master_condition; sr_condition; plugged_condition]) in + let slave_pbds = Db.PBD.get_refs_where ~__context + ~expr:(all [Not master_condition; sr_condition; plugged_condition]) in + match master_pos with + | `First -> master_pbds @ slave_pbds + | `Last -> slave_pbds @ master_pbds let probe ~__context ~host ~device_config ~_type ~sm_config = - debug "SR.probe sm_config=[ %s ]" (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); - let _type = String.lowercase _type in - let open Storage_interface in - let open Storage_access in - - let queue = !Storage_interface.queue_name ^ "." ^ _type in - let uri () = Storage_interface.uri () ^ ".d/" ^ _type in - let rpc = external_rpc queue uri in - let module Client = Storage_interface.Client(struct let rpc = rpc end) in - let dbg = Context.string_of_task __context in - - transform_storage_exn - (fun () -> - match Client.SR.probe ~dbg ~queue ~device_config ~sm_config with - | Raw x -> x - | Probe _ as x -> Xmlrpc.to_string (rpc_of_probe_result x) - ) + debug "SR.probe sm_config=[ %s ]" (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); + let _type = String.lowercase _type in + let open Storage_interface in + let open Storage_access in + + let queue = !Storage_interface.queue_name ^ "." 
^ _type in + let uri () = Storage_interface.uri () ^ ".d/" ^ _type in + let rpc = external_rpc queue uri in + let module Client = Storage_interface.Client(struct let rpc = rpc end) in + let dbg = Context.string_of_task __context in + + transform_storage_exn + (fun () -> + match Client.SR.probe ~dbg ~queue ~device_config ~sm_config with + | Raw x -> x + | Probe _ as x -> Xmlrpc.to_string (rpc_of_probe_result x) + ) (* Create actually makes the SR on disk, and introduces it into db, and creates PDB record for current host *) let create ~__context ~host ~device_config ~(physical_size:int64) ~name_label ~name_description - ~_type ~content_type ~shared ~sm_config = - Helpers.assert_rolling_upgrade_not_in_progress ~__context ; - debug "SR.create name_label=%s sm_config=[ %s ]" name_label (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); - (* This breaks the udev SR which doesn't support sr_probe *) + ~_type ~content_type ~shared ~sm_config = + Helpers.assert_rolling_upgrade_not_in_progress ~__context ; + debug "SR.create name_label=%s sm_config=[ %s ]" name_label (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); + (* This breaks the udev SR which doesn't support sr_probe *) (* let probe_result = probe ~__context ~host ~device_config ~_type ~sm_config in begin @@ -210,159 +210,159 @@ let create ~__context ~host ~device_config ~(physical_size:int64) ~name_label ~ | _ -> () end; *) - let sr_uuid = Uuid.make_uuid() in - let sr_uuid_str = Uuid.to_string sr_uuid in - (* Create the SR in the database before creating on disk, so the backends can read the sm_config field. 
If an error happens here - we have to clean up the record.*) - let sr_ref = - introduce ~__context ~uuid:sr_uuid_str ~name_label - ~name_description ~_type ~content_type ~shared ~sm_config - in - - let pbds = - if shared then - let create_on_host host = - Xapi_pbd.create ~__context ~sR:sr_ref ~device_config ~host ~other_config:[] - in - let master = Helpers.get_master ~__context in - let hosts = master :: (List.filter (fun x -> x <> master) (Db.Host.get_all ~__context)) in - List.map create_on_host hosts - else - [Xapi_pbd.create_thishost ~__context ~sR:sr_ref ~device_config ~currently_attached:false ] - in - begin - try - Storage_access.create_sr ~__context ~sr:sr_ref ~name_label ~name_description ~physical_size - with e -> - Db.SR.destroy ~__context ~self:sr_ref; - List.iter (fun pbd -> Db.PBD.destroy ~__context ~self:pbd) pbds; - raise e - end; - Helpers.call_api_functions ~__context - (fun rpc session_id -> - List.iter - (fun self -> - try - Client.PBD.plug ~rpc ~session_id ~self - with e -> warn "Could not plug PBD '%s': %s" (Db.PBD.get_uuid ~__context ~self) (Printexc.to_string e)) - pbds); - sr_ref + let sr_uuid = Uuid.make_uuid() in + let sr_uuid_str = Uuid.to_string sr_uuid in + (* Create the SR in the database before creating on disk, so the backends can read the sm_config field. 
If an error happens here + we have to clean up the record.*) + let sr_ref = + introduce ~__context ~uuid:sr_uuid_str ~name_label + ~name_description ~_type ~content_type ~shared ~sm_config + in + + let pbds = + if shared then + let create_on_host host = + Xapi_pbd.create ~__context ~sR:sr_ref ~device_config ~host ~other_config:[] + in + let master = Helpers.get_master ~__context in + let hosts = master :: (List.filter (fun x -> x <> master) (Db.Host.get_all ~__context)) in + List.map create_on_host hosts + else + [Xapi_pbd.create_thishost ~__context ~sR:sr_ref ~device_config ~currently_attached:false ] + in + begin + try + Storage_access.create_sr ~__context ~sr:sr_ref ~name_label ~name_description ~physical_size + with e -> + Db.SR.destroy ~__context ~self:sr_ref; + List.iter (fun pbd -> Db.PBD.destroy ~__context ~self:pbd) pbds; + raise e + end; + Helpers.call_api_functions ~__context + (fun rpc session_id -> + List.iter + (fun self -> + try + Client.PBD.plug ~rpc ~session_id ~self + with e -> warn "Could not plug PBD '%s': %s" (Db.PBD.get_uuid ~__context ~self) (Printexc.to_string e)) + pbds); + sr_ref let check_no_pbds_attached ~__context ~sr = - let all_pbds_attached_to_this_sr = - Db.PBD.get_records_where ~__context ~expr:(And(Eq(Field "SR", Literal (Ref.string_of sr)), Eq(Field "currently_attached", Literal "true"))) in - if List.length all_pbds_attached_to_this_sr > 0 - then raise (Api_errors.Server_error(Api_errors.sr_has_pbd, [ Ref.string_of sr ])) + let all_pbds_attached_to_this_sr = + Db.PBD.get_records_where ~__context ~expr:(And(Eq(Field "SR", Literal (Ref.string_of sr)), Eq(Field "currently_attached", Literal "true"))) in + if List.length all_pbds_attached_to_this_sr > 0 + then raise (Api_errors.Server_error(Api_errors.sr_has_pbd, [ Ref.string_of sr ])) let find_or_create_rrd_vdi ~__context ~sr = - let open Db_filter_types in - match Db.VDI.get_refs_where ~__context ~expr:(And ( - Eq (Field "SR", Literal (Ref.string_of sr)), - Eq (Field "type", 
Literal "rrd") - )) - with - | [] -> begin - let virtual_size = Int64.of_int Xapi_vdi_helpers.VDI_CStruct.vdi_size in - let vdi = Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VDI.create ~rpc ~session_id - ~name_label:"SR-stats VDI" ~name_description:"Disk stores SR-level RRDs" ~sR:sr ~virtual_size - ~_type:`rrd ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[]) - in - debug "New SR-stats VDI created vdi=%s on sr=%s" (Ref.string_of vdi) (Ref.string_of sr); - vdi - end - | vdi :: _ -> - debug "Found existing SR-stats VDI vdi=%s on sr=%s" (Ref.string_of vdi) (Ref.string_of sr); - vdi + let open Db_filter_types in + match Db.VDI.get_refs_where ~__context ~expr:(And ( + Eq (Field "SR", Literal (Ref.string_of sr)), + Eq (Field "type", Literal "rrd") + )) + with + | [] -> begin + let virtual_size = Int64.of_int Xapi_vdi_helpers.VDI_CStruct.vdi_size in + let vdi = Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VDI.create ~rpc ~session_id + ~name_label:"SR-stats VDI" ~name_description:"Disk stores SR-level RRDs" ~sR:sr ~virtual_size + ~_type:`rrd ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:[] ~tags:[]) + in + debug "New SR-stats VDI created vdi=%s on sr=%s" (Ref.string_of vdi) (Ref.string_of sr); + vdi + end + | vdi :: _ -> + debug "Found existing SR-stats VDI vdi=%s on sr=%s" (Ref.string_of vdi) (Ref.string_of sr); + vdi let should_manage_stats ~__context sr = - let sr_record = Db.SR.get_record_internal ~__context ~self:sr in - let sr_features = Xapi_sr_operations.features_of_sr ~__context sr_record in - Smint.(has_capability Sr_stats sr_features) - && Helpers.i_am_srmaster ~__context ~sr + let sr_record = Db.SR.get_record_internal ~__context ~self:sr in + let sr_features = Xapi_sr_operations.features_of_sr ~__context sr_record in + Smint.(has_capability Sr_stats sr_features) + && Helpers.i_am_srmaster ~__context ~sr let maybe_push_sr_rrds ~__context ~sr 
= - if should_manage_stats ~__context sr then - let vdi = find_or_create_rrd_vdi ~__context ~sr in - match Xapi_vdi_helpers.read_raw ~__context ~vdi with - | None -> debug "Stats VDI has no SR RRDs" - | Some x -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let tmp_path = Filename.temp_file "push_sr_rrds" ".gz" in - finally (fun () -> - Unixext.write_string_to_file tmp_path x; - Rrdd.push_sr_rrd ~sr_uuid ~path:tmp_path - ) (fun () -> Unixext.unlink_safe tmp_path) + if should_manage_stats ~__context sr then + let vdi = find_or_create_rrd_vdi ~__context ~sr in + match Xapi_vdi_helpers.read_raw ~__context ~vdi with + | None -> debug "Stats VDI has no SR RRDs" + | Some x -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + let tmp_path = Filename.temp_file "push_sr_rrds" ".gz" in + finally (fun () -> + Unixext.write_string_to_file tmp_path x; + Rrdd.push_sr_rrd ~sr_uuid ~path:tmp_path + ) (fun () -> Unixext.unlink_safe tmp_path) let maybe_copy_sr_rrds ~__context ~sr = - if should_manage_stats ~__context sr then - let vdi = find_or_create_rrd_vdi ~__context ~sr in - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - try - let archive_path = Rrdd.archive_sr_rrd ~sr_uuid in - let contents = Unixext.string_of_file archive_path in - Xapi_vdi_helpers.write_raw ~__context ~vdi ~text:contents - with Rrd_interface.Archive_failed(msg) -> - warn "Archiving of SR RRDs to stats VDI failed: %s" msg + if should_manage_stats ~__context sr then + let vdi = find_or_create_rrd_vdi ~__context ~sr in + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + try + let archive_path = Rrdd.archive_sr_rrd ~sr_uuid in + let contents = Unixext.string_of_file archive_path in + Xapi_vdi_helpers.write_raw ~__context ~vdi ~text:contents + with Rrd_interface.Archive_failed(msg) -> + warn "Archiving of SR RRDs to stats VDI failed: %s" msg (* Remove SR record from database without attempting to remove SR from disk. 
Fail if any PBD still is attached (plugged); force the user to unplug it first. *) let forget ~__context ~sr = - (* NB we fail if ANY host is connected to this SR *) - check_no_pbds_attached ~__context ~sr; - List.iter (fun self -> Xapi_pbd.destroy ~__context ~self) (Db.SR.get_PBDs ~__context ~self:sr); - let vdis = Db.VDI.get_refs_where ~__context ~expr:(Eq(Field "SR", Literal (Ref.string_of sr))) in - List.iter (fun vdi -> Db.VDI.destroy ~__context ~self:vdi) vdis; - Db.SR.destroy ~__context ~self:sr + (* NB we fail if ANY host is connected to this SR *) + check_no_pbds_attached ~__context ~sr; + List.iter (fun self -> Xapi_pbd.destroy ~__context ~self) (Db.SR.get_PBDs ~__context ~self:sr); + let vdis = Db.VDI.get_refs_where ~__context ~expr:(Eq(Field "SR", Literal (Ref.string_of sr))) in + List.iter (fun vdi -> Db.VDI.destroy ~__context ~self:vdi) vdis; + Db.SR.destroy ~__context ~self:sr (** Remove SR from disk and remove SR record from database. (This operation uses the SR's associated - PBD record on current host to determine device_config reqd by sr backend) *) + PBD record on current host to determine device_config reqd by sr backend) *) let destroy ~__context ~sr = - check_no_pbds_attached ~__context ~sr; - let pbds = Db.SR.get_PBDs ~__context ~self:sr in - - (* raise exception if the 'indestructible' flag is set in other_config *) - let oc = Db.SR.get_other_config ~__context ~self:sr in - if (List.mem_assoc "indestructible" oc) && (List.assoc "indestructible" oc = "true") then - raise (Api_errors.Server_error(Api_errors.sr_indestructible, [ Ref.string_of sr ])); - - (* raise exception if SR is being used as local_cache_sr *) - let all_hosts = Db.Host.get_all ~__context in - List.iter - (fun host -> - let local_cache_sr = Db.Host.get_local_cache_sr ~__context ~self:host in - if local_cache_sr = sr then - raise (Api_errors.Server_error(Api_errors.sr_is_cache_sr, [ Ref.string_of host ])); - ) all_hosts; - - let vdis_to_destroy = - if should_manage_stats 
~__context sr then [find_or_create_rrd_vdi ~__context ~sr] - else [] in - - Storage_access.destroy_sr ~__context ~sr ~and_vdis:vdis_to_destroy; - - (* The sr_delete may have deleted some VDI records *) - let vdis = Db.SR.get_VDIs ~__context ~self:sr in - let sm_cfg = Db.SR.get_sm_config ~__context ~self:sr in - - Xapi_secret.clean_out_passwds ~__context sm_cfg; - List.iter (fun self -> Xapi_pbd.destroy ~__context ~self) pbds; - List.iter (fun vdi -> Db.VDI.destroy ~__context ~self:vdi) vdis; - Db.SR.destroy ~__context ~self:sr + check_no_pbds_attached ~__context ~sr; + let pbds = Db.SR.get_PBDs ~__context ~self:sr in + + (* raise exception if the 'indestructible' flag is set in other_config *) + let oc = Db.SR.get_other_config ~__context ~self:sr in + if (List.mem_assoc "indestructible" oc) && (List.assoc "indestructible" oc = "true") then + raise (Api_errors.Server_error(Api_errors.sr_indestructible, [ Ref.string_of sr ])); + + (* raise exception if SR is being used as local_cache_sr *) + let all_hosts = Db.Host.get_all ~__context in + List.iter + (fun host -> + let local_cache_sr = Db.Host.get_local_cache_sr ~__context ~self:host in + if local_cache_sr = sr then + raise (Api_errors.Server_error(Api_errors.sr_is_cache_sr, [ Ref.string_of host ])); + ) all_hosts; + + let vdis_to_destroy = + if should_manage_stats ~__context sr then [find_or_create_rrd_vdi ~__context ~sr] + else [] in + + Storage_access.destroy_sr ~__context ~sr ~and_vdis:vdis_to_destroy; + + (* The sr_delete may have deleted some VDI records *) + let vdis = Db.SR.get_VDIs ~__context ~self:sr in + let sm_cfg = Db.SR.get_sm_config ~__context ~self:sr in + + Xapi_secret.clean_out_passwds ~__context sm_cfg; + List.iter (fun self -> Xapi_pbd.destroy ~__context ~self) pbds; + List.iter (fun vdi -> Db.VDI.destroy ~__context ~self:vdi) vdis; + Db.SR.destroy ~__context ~self:sr let update ~__context ~sr = - let open Storage_access in - let task = Context.get_task_id __context in - let open Storage_interface 
in - let module C = Client(struct let rpc = rpc end) in - transform_storage_exn - (fun () -> - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let sr_info = C.SR.stat ~dbg:(Ref.string_of task) ~sr:sr' in - Db.SR.set_physical_size ~__context ~self:sr ~value:sr_info.total_space; - Db.SR.set_physical_utilisation ~__context ~self:sr ~value:(Int64.sub sr_info.total_space sr_info.free_space); - Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered; - ) + let open Storage_access in + let task = Context.get_task_id __context in + let open Storage_interface in + let module C = Client(struct let rpc = rpc end) in + transform_storage_exn + (fun () -> + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let sr_info = C.SR.stat ~dbg:(Ref.string_of task) ~sr:sr' in + Db.SR.set_physical_size ~__context ~self:sr ~value:sr_info.total_space; + Db.SR.set_physical_utilisation ~__context ~self:sr ~value:(Int64.sub sr_info.total_space sr_info.free_space); + Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered; + ) let get_supported_types ~__context = Sm.supported_drivers () @@ -371,321 +371,321 @@ module StringMap = Map.Make(struct type t = string let compare = compare end) (* Update VDI records in the database to be in sync with new information from a storage backend. 
*) let update_vdis ~__context ~sr db_vdis vdi_infos = - let open Storage_interface in - let db_vdi_map = List.fold_left - (fun m (r, v) -> - StringMap.add v.API.vDI_location (r, v) m - ) StringMap.empty - db_vdis in - let scan_vdi_map = List.fold_left - (fun m v -> StringMap.add v.vdi v m) StringMap.empty vdi_infos in - let to_delete = StringMap.merge (fun loc db scan -> match loc, db, scan with - | loc, Some (r, v), None -> Some r - | _, _, _ -> None - ) db_vdi_map scan_vdi_map in - let to_create = StringMap.merge (fun loc db scan -> match loc, db, scan with - | loc, None, Some v -> Some v - | _, _, _ -> None - ) db_vdi_map scan_vdi_map in - let to_update = StringMap.merge (fun loc db scan -> match loc, db, scan with - | loc, Some (r, v), Some vi -> Some (r, v, vi) - | _, _, _ -> None - ) db_vdi_map scan_vdi_map in - - let find_vdi db_vdi_map loc = - if StringMap.mem loc db_vdi_map - then fst (StringMap.find loc db_vdi_map) - else Ref.null in - - let get_is_tools_iso vdi = - List.mem_assoc "xs-tools" vdi.sm_config && List.assoc "xs-tools" vdi.sm_config = "true" in - - (* Delete ones which have gone away *) - StringMap.iter - (fun loc r -> - debug "Forgetting VDI: %s" (Ref.string_of r); - Db.VDI.destroy ~__context ~self:r - ) to_delete; - (* Create the new ones *) - let db_vdi_map = StringMap.fold - (fun loc vdi m -> - let ref = Ref.make () in - let uuid = match vdi.uuid with - | Some x -> Uuid.of_string x - | None -> Uuid.make_uuid () in - - debug "Creating VDI: %s (ref=%s)" (string_of_vdi_info vdi) (Ref.string_of ref); - Db.VDI.create ~__context ~ref ~uuid:(Uuid.string_of_uuid uuid) - ~name_label:vdi.name_label ~name_description:vdi.name_description - ~current_operations:[] ~allowed_operations:[] - ~is_a_snapshot:vdi.is_a_snapshot - ~snapshot_of:(find_vdi db_vdi_map vdi.snapshot_of) - ~snapshot_time:(Date.of_string vdi.snapshot_time) - ~sR:sr ~virtual_size:vdi.virtual_size - ~physical_utilisation:vdi.physical_utilisation - ~_type:(try 
Storage_utils.vdi_type_of_string vdi.ty with _ -> `user) - ~sharable:false ~read_only:vdi.read_only - ~xenstore_data:[] ~sm_config:[] - ~other_config:[] ~storage_lock:false ~location:vdi.vdi - ~managed:true ~missing:false ~parent:Ref.null ~tags:[] - ~on_boot:`persist ~allow_caching:false - ~metadata_of_pool:(Ref.of_string vdi.metadata_of_pool) - ~metadata_latest:false - ~is_tools_iso:(get_is_tools_iso vdi); - StringMap.add vdi.vdi (ref, Db.VDI.get_record ~__context ~self:ref) m - ) to_create db_vdi_map in - (* Update the ones which already exist *) - StringMap.iter - (fun loc (r, v, (vi: vdi_info)) -> - if v.API.vDI_name_label <> vi.name_label then begin - debug "%s name_label <- %s" (Ref.string_of r) vi.name_label; - Db.VDI.set_name_label ~__context ~self:r ~value:vi.name_label - end; - if v.API.vDI_name_description <> vi.name_description then begin - debug "%s name_description <- %s" (Ref.string_of r) vi.name_description; - Db.VDI.set_name_description ~__context ~self:r ~value:vi.name_description - end; - let ty = (try Storage_utils.vdi_type_of_string vi.ty with _ -> `user) in - if v.API.vDI_type <> ty then begin - debug "%s type <- %s" (Ref.string_of r) vi.ty; - Db.VDI.set_type ~__context ~self:r ~value:ty - end; - let mop = Ref.of_string vi.metadata_of_pool in - if v.API.vDI_metadata_of_pool <> mop then begin - debug "%s metadata_of_pool <- %s" (Ref.string_of r) vi.metadata_of_pool; - Db.VDI.set_metadata_of_pool ~__context ~self:r ~value:mop - end; - if v.API.vDI_is_a_snapshot <> vi.is_a_snapshot then begin - debug "%s is_a_snapshot <- %b" (Ref.string_of r) vi.is_a_snapshot; - Db.VDI.set_is_a_snapshot ~__context ~self:r ~value:vi.is_a_snapshot - end; - if v.API.vDI_snapshot_time <> Date.of_string vi.snapshot_time then begin - debug "%s snapshot_time <- %s" (Ref.string_of r) vi.snapshot_time; - Db.VDI.set_snapshot_time ~__context ~self:r ~value:(Date.of_string vi.snapshot_time) - end; - let snapshot_of = find_vdi db_vdi_map vi.snapshot_of in - if 
v.API.vDI_snapshot_of <> snapshot_of then begin - debug "%s snapshot_of <- %s" (Ref.string_of r) (Ref.string_of snapshot_of); - Db.VDI.set_snapshot_of ~__context ~self:r ~value:snapshot_of - end; - if v.API.vDI_read_only <> vi.read_only then begin - debug "%s read_only <- %b" (Ref.string_of r) vi.read_only; - Db.VDI.set_read_only ~__context ~self:r ~value:vi.read_only - end; - if v.API.vDI_virtual_size <> vi.virtual_size then begin - debug "%s virtual_size <- %Ld" (Ref.string_of r) vi.virtual_size; - Db.VDI.set_virtual_size ~__context ~self:r ~value:vi.virtual_size - end; - if v.API.vDI_physical_utilisation <> vi.physical_utilisation then begin - debug "%s physical_utilisation <- %Ld" (Ref.string_of r) vi.physical_utilisation; - Db.VDI.set_physical_utilisation ~__context ~self:r ~value:vi.physical_utilisation - end; - let is_tools_iso = get_is_tools_iso vi in - if v.API.vDI_is_tools_iso <> is_tools_iso then begin - debug "%s is_tools_iso <- %b" (Ref.string_of r) is_tools_iso; - Db.VDI.set_is_tools_iso ~__context ~self:r ~value:is_tools_iso - end - ) to_update + let open Storage_interface in + let db_vdi_map = List.fold_left + (fun m (r, v) -> + StringMap.add v.API.vDI_location (r, v) m + ) StringMap.empty + db_vdis in + let scan_vdi_map = List.fold_left + (fun m v -> StringMap.add v.vdi v m) StringMap.empty vdi_infos in + let to_delete = StringMap.merge (fun loc db scan -> match loc, db, scan with + | loc, Some (r, v), None -> Some r + | _, _, _ -> None + ) db_vdi_map scan_vdi_map in + let to_create = StringMap.merge (fun loc db scan -> match loc, db, scan with + | loc, None, Some v -> Some v + | _, _, _ -> None + ) db_vdi_map scan_vdi_map in + let to_update = StringMap.merge (fun loc db scan -> match loc, db, scan with + | loc, Some (r, v), Some vi -> Some (r, v, vi) + | _, _, _ -> None + ) db_vdi_map scan_vdi_map in + + let find_vdi db_vdi_map loc = + if StringMap.mem loc db_vdi_map + then fst (StringMap.find loc db_vdi_map) + else Ref.null in + + let 
get_is_tools_iso vdi = + List.mem_assoc "xs-tools" vdi.sm_config && List.assoc "xs-tools" vdi.sm_config = "true" in + + (* Delete ones which have gone away *) + StringMap.iter + (fun loc r -> + debug "Forgetting VDI: %s" (Ref.string_of r); + Db.VDI.destroy ~__context ~self:r + ) to_delete; + (* Create the new ones *) + let db_vdi_map = StringMap.fold + (fun loc vdi m -> + let ref = Ref.make () in + let uuid = match vdi.uuid with + | Some x -> Uuid.of_string x + | None -> Uuid.make_uuid () in + + debug "Creating VDI: %s (ref=%s)" (string_of_vdi_info vdi) (Ref.string_of ref); + Db.VDI.create ~__context ~ref ~uuid:(Uuid.string_of_uuid uuid) + ~name_label:vdi.name_label ~name_description:vdi.name_description + ~current_operations:[] ~allowed_operations:[] + ~is_a_snapshot:vdi.is_a_snapshot + ~snapshot_of:(find_vdi db_vdi_map vdi.snapshot_of) + ~snapshot_time:(Date.of_string vdi.snapshot_time) + ~sR:sr ~virtual_size:vdi.virtual_size + ~physical_utilisation:vdi.physical_utilisation + ~_type:(try Storage_utils.vdi_type_of_string vdi.ty with _ -> `user) + ~sharable:false ~read_only:vdi.read_only + ~xenstore_data:[] ~sm_config:[] + ~other_config:[] ~storage_lock:false ~location:vdi.vdi + ~managed:true ~missing:false ~parent:Ref.null ~tags:[] + ~on_boot:`persist ~allow_caching:false + ~metadata_of_pool:(Ref.of_string vdi.metadata_of_pool) + ~metadata_latest:false + ~is_tools_iso:(get_is_tools_iso vdi); + StringMap.add vdi.vdi (ref, Db.VDI.get_record ~__context ~self:ref) m + ) to_create db_vdi_map in + (* Update the ones which already exist *) + StringMap.iter + (fun loc (r, v, (vi: vdi_info)) -> + if v.API.vDI_name_label <> vi.name_label then begin + debug "%s name_label <- %s" (Ref.string_of r) vi.name_label; + Db.VDI.set_name_label ~__context ~self:r ~value:vi.name_label + end; + if v.API.vDI_name_description <> vi.name_description then begin + debug "%s name_description <- %s" (Ref.string_of r) vi.name_description; + Db.VDI.set_name_description ~__context ~self:r 
~value:vi.name_description + end; + let ty = (try Storage_utils.vdi_type_of_string vi.ty with _ -> `user) in + if v.API.vDI_type <> ty then begin + debug "%s type <- %s" (Ref.string_of r) vi.ty; + Db.VDI.set_type ~__context ~self:r ~value:ty + end; + let mop = Ref.of_string vi.metadata_of_pool in + if v.API.vDI_metadata_of_pool <> mop then begin + debug "%s metadata_of_pool <- %s" (Ref.string_of r) vi.metadata_of_pool; + Db.VDI.set_metadata_of_pool ~__context ~self:r ~value:mop + end; + if v.API.vDI_is_a_snapshot <> vi.is_a_snapshot then begin + debug "%s is_a_snapshot <- %b" (Ref.string_of r) vi.is_a_snapshot; + Db.VDI.set_is_a_snapshot ~__context ~self:r ~value:vi.is_a_snapshot + end; + if v.API.vDI_snapshot_time <> Date.of_string vi.snapshot_time then begin + debug "%s snapshot_time <- %s" (Ref.string_of r) vi.snapshot_time; + Db.VDI.set_snapshot_time ~__context ~self:r ~value:(Date.of_string vi.snapshot_time) + end; + let snapshot_of = find_vdi db_vdi_map vi.snapshot_of in + if v.API.vDI_snapshot_of <> snapshot_of then begin + debug "%s snapshot_of <- %s" (Ref.string_of r) (Ref.string_of snapshot_of); + Db.VDI.set_snapshot_of ~__context ~self:r ~value:snapshot_of + end; + if v.API.vDI_read_only <> vi.read_only then begin + debug "%s read_only <- %b" (Ref.string_of r) vi.read_only; + Db.VDI.set_read_only ~__context ~self:r ~value:vi.read_only + end; + if v.API.vDI_virtual_size <> vi.virtual_size then begin + debug "%s virtual_size <- %Ld" (Ref.string_of r) vi.virtual_size; + Db.VDI.set_virtual_size ~__context ~self:r ~value:vi.virtual_size + end; + if v.API.vDI_physical_utilisation <> vi.physical_utilisation then begin + debug "%s physical_utilisation <- %Ld" (Ref.string_of r) vi.physical_utilisation; + Db.VDI.set_physical_utilisation ~__context ~self:r ~value:vi.physical_utilisation + end; + let is_tools_iso = get_is_tools_iso vi in + if v.API.vDI_is_tools_iso <> is_tools_iso then begin + debug "%s is_tools_iso <- %b" (Ref.string_of r) is_tools_iso; + 
Db.VDI.set_is_tools_iso ~__context ~self:r ~value:is_tools_iso + end + ) to_update (* Perform a scan of this locally-attached SR *) let scan ~__context ~sr = - let open Storage_access in - let task = Context.get_task_id __context in - let open Storage_interface in - let module C = Client(struct let rpc = rpc end) in - let sr' = Ref.string_of sr in - transform_storage_exn - (fun () -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let vs = C.SR.scan ~dbg:(Ref.string_of task) ~sr:sr_uuid in - let db_vdis = Db.VDI.get_records_where ~__context ~expr:(Eq(Field "SR", Literal sr')) in - update_vdis ~__context ~sr:sr db_vdis vs; - let sr_info = C.SR.stat ~dbg:(Ref.string_of task) ~sr:sr_uuid in - let virtual_allocation = List.fold_left Int64.add 0L (List.map (fun v -> v.virtual_size) vs) in - Db.SR.set_virtual_allocation ~__context ~self:sr ~value:virtual_allocation; - Db.SR.set_physical_size ~__context ~self:sr ~value:sr_info.total_space; - Db.SR.set_physical_utilisation ~__context ~self:sr ~value:(Int64.sub sr_info.total_space sr_info.free_space); - Db.SR.remove_from_other_config ~__context ~self:sr ~key:"dirty"; - Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered; - ) + let open Storage_access in + let task = Context.get_task_id __context in + let open Storage_interface in + let module C = Client(struct let rpc = rpc end) in + let sr' = Ref.string_of sr in + transform_storage_exn + (fun () -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + let vs = C.SR.scan ~dbg:(Ref.string_of task) ~sr:sr_uuid in + let db_vdis = Db.VDI.get_records_where ~__context ~expr:(Eq(Field "SR", Literal sr')) in + update_vdis ~__context ~sr:sr db_vdis vs; + let sr_info = C.SR.stat ~dbg:(Ref.string_of task) ~sr:sr_uuid in + let virtual_allocation = List.fold_left Int64.add 0L (List.map (fun v -> v.virtual_size) vs) in + Db.SR.set_virtual_allocation ~__context ~self:sr ~value:virtual_allocation; + Db.SR.set_physical_size ~__context ~self:sr 
~value:sr_info.total_space; + Db.SR.set_physical_utilisation ~__context ~self:sr ~value:(Int64.sub sr_info.total_space sr_info.free_space); + Db.SR.remove_from_other_config ~__context ~self:sr ~key:"dirty"; + Db.SR.set_clustered ~__context ~self:sr ~value:sr_info.clustered; + ) let set_shared ~__context ~sr ~value = - if value then - (* We can always set an SR to be shared... *) - Db.SR.set_shared ~__context ~self:sr ~value - else - begin - let pbds = Db.PBD.get_all ~__context in - let pbds = List.filter (fun pbd -> Db.PBD.get_SR ~__context ~self:pbd = sr) pbds in - if List.length pbds > 1 then - raise (Api_errors.Server_error (Api_errors.sr_has_multiple_pbds,List.map (fun pbd -> Ref.string_of pbd) pbds)); - Db.SR.set_shared ~__context ~self:sr ~value - end + if value then + (* We can always set an SR to be shared... *) + Db.SR.set_shared ~__context ~self:sr ~value + else + begin + let pbds = Db.PBD.get_all ~__context in + let pbds = List.filter (fun pbd -> Db.PBD.get_SR ~__context ~self:pbd = sr) pbds in + if List.length pbds > 1 then + raise (Api_errors.Server_error (Api_errors.sr_has_multiple_pbds,List.map (fun pbd -> Ref.string_of pbd) pbds)); + Db.SR.set_shared ~__context ~self:sr ~value + end let set_name_label ~__context ~sr ~value = - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - transform_storage_exn - (fun () -> - C.SR.set_name_label ~dbg:(Ref.string_of task) ~sr:sr' ~new_name_label:value - ); - update ~__context ~sr + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + transform_storage_exn + (fun () -> + C.SR.set_name_label ~dbg:(Ref.string_of task) ~sr:sr' 
~new_name_label:value + ); + update ~__context ~sr let set_name_description ~__context ~sr ~value = - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - transform_storage_exn - (fun () -> - C.SR.set_name_description ~dbg:(Ref.string_of task) ~sr:sr' ~new_name_description:value - ); - update ~__context ~sr + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + transform_storage_exn + (fun () -> + C.SR.set_name_description ~dbg:(Ref.string_of task) ~sr:sr' ~new_name_description:value + ); + update ~__context ~sr let set_virtual_allocation ~__context ~self ~value = - Db.SR.set_virtual_allocation ~__context ~self ~value + Db.SR.set_virtual_allocation ~__context ~self ~value let set_physical_size ~__context ~self ~value = - Db.SR.set_physical_size ~__context ~self ~value + Db.SR.set_physical_size ~__context ~self ~value let set_physical_utilisation ~__context ~self ~value = - Db.SR.set_physical_utilisation ~__context ~self ~value + Db.SR.set_physical_utilisation ~__context ~self ~value let assert_can_host_ha_statefile ~__context ~sr = - let cluster_stack = Cluster_stack_constraints.choose_cluster_stack ~__context in - Xha_statefile.assert_sr_can_host_statefile ~__context ~sr ~cluster_stack + let cluster_stack = Cluster_stack_constraints.choose_cluster_stack ~__context in + Xha_statefile.assert_sr_can_host_statefile ~__context ~sr ~cluster_stack let assert_supports_database_replication ~__context ~sr = - (* Check that each host has a PBD to this SR *) - let pbds = Db.SR.get_PBDs ~__context ~self:sr in - let connected_hosts = List.setify (List.map (fun self -> Db.PBD.get_host ~__context ~self) 
pbds) in - let all_hosts = Db.Host.get_all ~__context in - if List.length connected_hosts < (List.length all_hosts) then begin - error "Cannot enable database replication to SR %s: some hosts lack a PBD: [ %s ]" - (Ref.string_of sr) - (String.concat "; " (List.map Ref.string_of (List.set_difference all_hosts connected_hosts))); - raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) - end; - (* Check that each PBD is plugged in *) - List.iter (fun self -> - if not(Db.PBD.get_currently_attached ~__context ~self) then begin - error "Cannot enable database replication to SR %s: PBD %s is not plugged" - (Ref.string_of sr) (Ref.string_of self); - (* Same exception is used in this case (see Helpers.assert_pbd_is_plugged) *) - raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) - end) pbds; - (* Check the exported capabilities of the SR's SM plugin *) - let srtype = Db.SR.get_type ~__context ~self:sr in - match Db.SM.get_internal_records_where ~__context ~expr:(Eq (Field "type", Literal srtype)) with - | [] -> - (* This should never happen because the PBDs are plugged in *) - raise (Api_errors.Server_error(Api_errors.internal_error, [ "SR does not have corresponding SM record"; Ref.string_of sr; srtype ])) - | (_, sm) :: _ -> - if not (List.mem_assoc "SR_METADATA" sm.Db_actions.sM_features) - then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of sr])) + (* Check that each host has a PBD to this SR *) + let pbds = Db.SR.get_PBDs ~__context ~self:sr in + let connected_hosts = List.setify (List.map (fun self -> Db.PBD.get_host ~__context ~self) pbds) in + let all_hosts = Db.Host.get_all ~__context in + if List.length connected_hosts < (List.length all_hosts) then begin + error "Cannot enable database replication to SR %s: some hosts lack a PBD: [ %s ]" + (Ref.string_of sr) + (String.concat "; " (List.map Ref.string_of (List.set_difference all_hosts connected_hosts))); + raise 
(Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) + end; + (* Check that each PBD is plugged in *) + List.iter (fun self -> + if not(Db.PBD.get_currently_attached ~__context ~self) then begin + error "Cannot enable database replication to SR %s: PBD %s is not plugged" + (Ref.string_of sr) (Ref.string_of self); + (* Same exception is used in this case (see Helpers.assert_pbd_is_plugged) *) + raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) + end) pbds; + (* Check the exported capabilities of the SR's SM plugin *) + let srtype = Db.SR.get_type ~__context ~self:sr in + match Db.SM.get_internal_records_where ~__context ~expr:(Eq (Field "type", Literal srtype)) with + | [] -> + (* This should never happen because the PBDs are plugged in *) + raise (Api_errors.Server_error(Api_errors.internal_error, [ "SR does not have corresponding SM record"; Ref.string_of sr; srtype ])) + | (_, sm) :: _ -> + if not (List.mem_assoc "SR_METADATA" sm.Db_actions.sM_features) + then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of sr])) (* Metadata replication to SRs *) let find_or_create_metadata_vdi ~__context ~sr = - let pool = Helpers.get_pool ~__context in - let vdi_can_be_used vdi = - Db.VDI.get_type ~__context ~self:vdi = `metadata && - Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool && - Db.VDI.get_virtual_size ~__context ~self:vdi >= Redo_log.minimum_vdi_size - in - match (List.filter vdi_can_be_used (Db.SR.get_VDIs ~__context ~self:sr)) with - | vdi :: _ -> - (* Found a suitable VDI - try to use it *) - debug "Using VDI [%s:%s] for metadata replication" - (Db.VDI.get_name_label ~__context ~self:vdi) (Db.VDI.get_uuid ~__context ~self:vdi); - vdi - | [] -> - (* Did not find a suitable VDI *) - debug "Creating a new VDI for metadata replication."; - let vdi = Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.VDI.create ~rpc ~session_id ~name_label:"Metadata for DR" - 
~name_description:"Used for disaster recovery" - ~sR:sr ~virtual_size:Redo_log.minimum_vdi_size ~_type:`metadata ~sharable:false - ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:Redo_log.redo_log_sm_config ~tags:[]) - in - Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false; - Db.VDI.set_metadata_of_pool ~__context ~self:vdi ~value:pool; - (* Call vdi_update to make sure the value of metadata_of_pool is persisted. *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.VDI.update ~rpc ~session_id ~vdi); - vdi + let pool = Helpers.get_pool ~__context in + let vdi_can_be_used vdi = + Db.VDI.get_type ~__context ~self:vdi = `metadata && + Db.VDI.get_metadata_of_pool ~__context ~self:vdi = pool && + Db.VDI.get_virtual_size ~__context ~self:vdi >= Redo_log.minimum_vdi_size + in + match (List.filter vdi_can_be_used (Db.SR.get_VDIs ~__context ~self:sr)) with + | vdi :: _ -> + (* Found a suitable VDI - try to use it *) + debug "Using VDI [%s:%s] for metadata replication" + (Db.VDI.get_name_label ~__context ~self:vdi) (Db.VDI.get_uuid ~__context ~self:vdi); + vdi + | [] -> + (* Did not find a suitable VDI *) + debug "Creating a new VDI for metadata replication."; + let vdi = Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.VDI.create ~rpc ~session_id ~name_label:"Metadata for DR" + ~name_description:"Used for disaster recovery" + ~sR:sr ~virtual_size:Redo_log.minimum_vdi_size ~_type:`metadata ~sharable:false + ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:Redo_log.redo_log_sm_config ~tags:[]) + in + Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false; + Db.VDI.set_metadata_of_pool ~__context ~self:vdi ~value:pool; + (* Call vdi_update to make sure the value of metadata_of_pool is persisted. 
*) + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.VDI.update ~rpc ~session_id ~vdi); + vdi let enable_database_replication ~__context ~sr = - Pool_features.assert_enabled ~__context ~f:Features.DR; - assert_supports_database_replication ~__context ~sr; - let get_vdi_callback = (fun () -> find_or_create_metadata_vdi ~__context ~sr) in - Xapi_vdi_helpers.enable_database_replication ~__context ~get_vdi_callback + Pool_features.assert_enabled ~__context ~f:Features.DR; + assert_supports_database_replication ~__context ~sr; + let get_vdi_callback = (fun () -> find_or_create_metadata_vdi ~__context ~sr) in + Xapi_vdi_helpers.enable_database_replication ~__context ~get_vdi_callback (* Disable metadata replication to all metadata VDIs in this SR. *) let disable_database_replication ~__context ~sr = - let metadata_vdis = List.filter - (fun vdi -> - Db.VDI.get_type ~__context ~self:vdi = `metadata && - (Db.VDI.get_metadata_of_pool ~__context ~self:vdi = Helpers.get_pool ~__context)) - (Db.SR.get_VDIs ~__context ~self:sr) - in - List.iter - (fun vdi -> - Xapi_vdi_helpers.disable_database_replication ~__context ~vdi; - (* The VDI may have VBDs hanging around other than those created by the database replication code. *) - (* They must be destroyed before the VDI can be destroyed. *) - Xapi_vdi_helpers.destroy_all_vbds ~__context ~vdi; - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.VDI.destroy ~rpc ~session_id ~self:vdi) - ) - metadata_vdis + let metadata_vdis = List.filter + (fun vdi -> + Db.VDI.get_type ~__context ~self:vdi = `metadata && + (Db.VDI.get_metadata_of_pool ~__context ~self:vdi = Helpers.get_pool ~__context)) + (Db.SR.get_VDIs ~__context ~self:sr) + in + List.iter + (fun vdi -> + Xapi_vdi_helpers.disable_database_replication ~__context ~vdi; + (* The VDI may have VBDs hanging around other than those created by the database replication code. *) + (* They must be destroyed before the VDI can be destroyed. 
*) + Xapi_vdi_helpers.destroy_all_vbds ~__context ~vdi; + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.VDI.destroy ~rpc ~session_id ~self:vdi) + ) + metadata_vdis let create_new_blob ~__context ~sr ~name ~mime_type ~public = - let blob = Xapi_blob.create ~__context ~mime_type ~public in - Db.SR.add_to_blobs ~__context ~self:sr ~key:name ~value:blob; - blob + let blob = Xapi_blob.create ~__context ~mime_type ~public in + Db.SR.add_to_blobs ~__context ~self:sr ~key:name ~value:blob; + blob let physical_utilisation_thread ~__context () = - let module SRMap = - Map.Make(struct type t = [`SR] Ref.t let compare = compare end) in - - let sr_cache : bool SRMap.t ref = ref SRMap.empty in - - let srs_to_update () = - let plugged_srs = Helpers.get_all_plugged_srs ~__context in - (* Remove SRs that are no longer plugged *) - sr_cache := SRMap.filter (fun sr _ -> List.mem sr plugged_srs) !sr_cache; - (* Cache wether we should manage stats for newly plugged SRs *) - sr_cache := List.fold_left (fun m sr -> - if SRMap.mem sr m then m - else SRMap.add sr (should_manage_stats ~__context sr) m - ) !sr_cache plugged_srs; - SRMap.(filter (fun _ b -> b) !sr_cache |> bindings) |> List.map fst in - - while true do - Thread.delay 120.; - try - List.iter (fun sr -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - try - let value = Rrdd.query_sr_ds ~sr_uuid ~ds_name:"physical_utilisation" |> Int64.of_float in - Db.SR.set_physical_utilisation ~__context ~self:sr ~value - with Rrd_interface.Internal_error("Not_found") -> - debug "Cannot update physical utilisation for SR %s: RRD unavailable" sr_uuid - ) (srs_to_update ()) - with e -> warn "Exception in SR physical utilisation scanning thread: %s" (Printexc.to_string e) - done + let module SRMap = + Map.Make(struct type t = [`SR] Ref.t let compare = compare end) in + + let sr_cache : bool SRMap.t ref = ref SRMap.empty in + + let srs_to_update () = + let plugged_srs = Helpers.get_all_plugged_srs ~__context in + (* 
Remove SRs that are no longer plugged *) + sr_cache := SRMap.filter (fun sr _ -> List.mem sr plugged_srs) !sr_cache; + (* Cache wether we should manage stats for newly plugged SRs *) + sr_cache := List.fold_left (fun m sr -> + if SRMap.mem sr m then m + else SRMap.add sr (should_manage_stats ~__context sr) m + ) !sr_cache plugged_srs; + SRMap.(filter (fun _ b -> b) !sr_cache |> bindings) |> List.map fst in + + while true do + Thread.delay 120.; + try + List.iter (fun sr -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + try + let value = Rrdd.query_sr_ds ~sr_uuid ~ds_name:"physical_utilisation" |> Int64.of_float in + Db.SR.set_physical_utilisation ~__context ~self:sr ~value + with Rrd_interface.Internal_error("Not_found") -> + debug "Cannot update physical utilisation for SR %s: RRD unavailable" sr_uuid + ) (srs_to_update ()) + with e -> warn "Exception in SR physical utilisation scanning thread: %s" (Printexc.to_string e) + done (* APIs for accessing SR level stats *) let get_data_sources ~__context ~sr = - List.map Rrdd_helper.to_API_data_source (Rrdd.query_possible_sr_dss ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr)) + List.map Rrdd_helper.to_API_data_source (Rrdd.query_possible_sr_dss ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr)) let record_data_source ~__context ~sr ~data_source = - Rrdd.add_sr_ds ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr) - ~ds_name:data_source + Rrdd.add_sr_ds ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr) + ~ds_name:data_source let query_data_source ~__context ~sr ~data_source = Rrdd.query_sr_ds ~sr_uuid:(Db.SR.get_uuid ~__context ~self:sr) ~ds_name:data_source diff --git a/ocaml/xapi/xapi_sr_operations.ml b/ocaml/xapi/xapi_sr_operations.ml index 76699a26211..877e1ec2256 100644 --- a/ocaml/xapi/xapi_sr_operations.ml +++ b/ocaml/xapi/xapi_sr_operations.ml @@ -13,8 +13,8 @@ *) (** Module that defines API functions for SR objects * @group XenAPI functions - *) - +*) + open Printf open Stdext open Threadext @@ -35,11 +35,11 @@ open 
D open Record_util -let all_ops : API.storage_operations_set = +let all_ops : API.storage_operations_set = [ `scan; `destroy; `forget; `plug; `unplug; `vdi_create; `vdi_destroy; `vdi_resize; `vdi_clone; `vdi_snapshot; `vdi_mirror; `vdi_introduce; `update; `pbd_create; `pbd_destroy ] -let sm_cap_table = +let sm_cap_table = [ `vdi_create, Smint.Vdi_create; `vdi_destroy, Smint.Vdi_delete; `vdi_resize, Smint.Vdi_resize; @@ -52,25 +52,25 @@ let sm_cap_table = type table = (API.storage_operations, ((string * (string list)) option)) Hashtbl.t let features_of_sr_internal ~__context ~_type = - let open Db_filter_types in - match Db.SM.get_internal_records_where ~__context ~expr:(Eq (Field "type", Literal _type)) with - | [] -> - [] - | (_, sm) :: _ -> - Listext.List.filter_map - (fun (name, v) -> - try - Some (List.assoc name Smint.string_to_capability_table, v) - with Not_found -> - None - ) sm.Db_actions.sM_features + let open Db_filter_types in + match Db.SM.get_internal_records_where ~__context ~expr:(Eq (Field "type", Literal _type)) with + | [] -> + [] + | (_, sm) :: _ -> + Listext.List.filter_map + (fun (name, v) -> + try + Some (List.assoc name Smint.string_to_capability_table, v) + with Not_found -> + None + ) sm.Db_actions.sM_features let features_of_sr ~__context record = - features_of_sr_internal ~__context ~_type:record.Db_actions.sR_type + features_of_sr_internal ~__context ~_type:record.Db_actions.sR_type (** Returns a table of operations -> API error options (None if the operation would be ok) * If op is specified, the table may omit reporting errors for ops other than that one. 
*) -let valid_operations ~__context ?op record _ref' : table = +let valid_operations ~__context ?op record _ref' : table = let _ref = Ref.string_of _ref' in let current_ops = record.Db_actions.sR_current_operations in @@ -78,8 +78,8 @@ let valid_operations ~__context ?op record _ref' : table = List.iter (fun x -> Hashtbl.replace table x None) all_ops; let set_errors (code: string) (params: string list) (ops: API.storage_operations_set) = List.iter (fun op -> - if Hashtbl.find table op = None - then Hashtbl.replace table op (Some(code, params))) ops in + if Hashtbl.find table op = None + then Hashtbl.replace table op (Some(code, params))) ops in (* Policy: Anyone may attach and detach VDIs in parallel but we serialise @@ -95,13 +95,13 @@ let valid_operations ~__context ?op record _ref' : table = let sm_features = if record.Db_actions.sR_is_tools_sr then List.filter - (fun f -> not Smint.(List.mem (capability_of_feature f) [Vdi_create; Vdi_delete])) - sm_features + (fun f -> not Smint.(List.mem (capability_of_feature f) [Vdi_create; Vdi_delete])) + sm_features else sm_features in - let forbidden_by_backend = + let forbidden_by_backend = List.filter (fun op -> List.mem_assoc op sm_cap_table - && not (Smint.has_capability (List.assoc op sm_cap_table) sm_features)) + && not (Smint.has_capability (List.assoc op sm_cap_table) sm_features)) all_ops in set_errors Api_errors.sr_operation_not_supported [ _ref ] forbidden_by_backend in @@ -136,31 +136,31 @@ let valid_operations ~__context ?op record _ref' : table = let check_parallel_ops ~__context record = let safe_to_parallelise = [ ] in let current_ops = List.setify (List.map snd current_ops) in - + (* If there are any current operations, all the non_parallelisable operations must definitely be stopped *) if current_ops <> [] then set_errors Api_errors.other_operation_in_progress - [ "SR"; _ref; sr_operation_to_string (List.hd current_ops) ] - (List.set_difference all_ops safe_to_parallelise); + [ "SR"; _ref; 
sr_operation_to_string (List.hd current_ops) ] + (List.set_difference all_ops safe_to_parallelise); - let all_are_parallelisable = List.fold_left (&&) true - (List.map (fun op -> List.mem op safe_to_parallelise) current_ops) in - (* If not all are parallelisable (eg a vdi_resize), ban the otherwise + let all_are_parallelisable = List.fold_left (&&) true + (List.map (fun op -> List.mem op safe_to_parallelise) current_ops) in + (* If not all are parallelisable (eg a vdi_resize), ban the otherwise parallelisable operations too *) if not(all_are_parallelisable) then set_errors Api_errors.other_operation_in_progress - [ "SR"; _ref; sr_operation_to_string (List.hd current_ops) ] - safe_to_parallelise + [ "SR"; _ref; sr_operation_to_string (List.hd current_ops) ] + safe_to_parallelise in let check_cluster_stack_compatible ~__context record = (* Check whether there are any conflicts with HA that prevent us from * plugging a PBD for this SR *) (try - Cluster_stack_constraints.assert_cluster_stack_compatible ~__context _ref' - with Api_errors.Server_error (e, args) -> - set_errors e args [`plug]) + Cluster_stack_constraints.assert_cluster_stack_compatible ~__context _ref' + with Api_errors.Server_error (e, args) -> + set_errors e args [`plug]) in (* List of (operations * function which checks for errors relevant to those operations) *) @@ -179,10 +179,10 @@ let valid_operations ~__context ?op record _ref' : table = | Some op -> List.filter (fun (ops, _) -> List.mem op ops) relevant_functions in List.iter (fun (_, f) -> f ~__context record) relevant_functions; - + table -let throw_error (table: table) op = +let throw_error (table: table) op = if not(Hashtbl.mem table op) then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_sr.assert_operation_valid unknown operation: %s" (sr_operation_to_string op) ])); @@ -190,11 +190,11 @@ let throw_error (table: table) op = | Some (code, params) -> raise (Api_errors.Server_error(code, params)) | None -> () 
-let assert_operation_valid ~__context ~self ~(op:API.storage_operations) = +let assert_operation_valid ~__context ~self ~(op:API.storage_operations) = let all = Db.SR.get_record_internal ~__context ~self in let table = valid_operations ~__context ~op all self in throw_error table op - + let update_allowed_operations ~__context ~self : unit = let all = Db.SR.get_record_internal ~__context ~self in let valid = valid_operations ~__context all self in @@ -202,7 +202,7 @@ let update_allowed_operations ~__context ~self : unit = Db.SR.set_allowed_operations ~__context ~self ~value:keys (** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = +let cancel_task ~__context ~self ~task_id = let all = List.map fst (Db.SR.get_current_operations ~__context ~self) in if List.mem task_id all then begin @@ -218,35 +218,35 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) let sr_health_check ~__context ~self = - if Helpers.i_am_srmaster ~__context ~sr:self then - let dbg = Ref.string_of (Context.get_task_id __context) in - let info = C.SR.stat dbg (Db.SR.get_uuid ~__context ~self) in - if info.Storage_interface.clustered && info.Storage_interface.health = Storage_interface.Recovering then begin - Helpers.call_api_functions ~__context (fun rpc session_id -> - let task = Client.Task.create ~rpc ~session_id - ~label:Xapi_globs.sr_health_check_task_label ~description:(Ref.string_of self) in - Xapi_host_helpers.update_allowed_operations_all_hosts ~__context; - let _ = Thread.create (fun () -> - let rec loop () = - Thread.delay 30.; - let info = C.SR.stat dbg (Db.SR.get_uuid ~__context ~self) in - if not (Db.Task.get_status ~__context ~self:task = `cancelling) && - info.Storage_interface.clustered && info.Storage_interface.health = Storage_interface.Recovering - then - loop () - else begin - Db.Task.destroy ~__context ~self:task; - 
Xapi_host_helpers.update_allowed_operations_all_hosts ~__context - end - in - loop () - ) - in () - ) - end + if Helpers.i_am_srmaster ~__context ~sr:self then + let dbg = Ref.string_of (Context.get_task_id __context) in + let info = C.SR.stat dbg (Db.SR.get_uuid ~__context ~self) in + if info.Storage_interface.clustered && info.Storage_interface.health = Storage_interface.Recovering then begin + Helpers.call_api_functions ~__context (fun rpc session_id -> + let task = Client.Task.create ~rpc ~session_id + ~label:Xapi_globs.sr_health_check_task_label ~description:(Ref.string_of self) in + Xapi_host_helpers.update_allowed_operations_all_hosts ~__context; + let _ = Thread.create (fun () -> + let rec loop () = + Thread.delay 30.; + let info = C.SR.stat dbg (Db.SR.get_uuid ~__context ~self) in + if not (Db.Task.get_status ~__context ~self:task = `cancelling) && + info.Storage_interface.clustered && info.Storage_interface.health = Storage_interface.Recovering + then + loop () + else begin + Db.Task.destroy ~__context ~self:task; + Xapi_host_helpers.update_allowed_operations_all_hosts ~__context + end + in + loop () + ) + in () + ) + end let stop_health_check_thread ~__context ~self = - if Helpers.i_am_srmaster ~__context ~sr:self then - let tasks = Helpers.find_health_check_task ~__context ~sr:self in - List.iter (fun task -> Db.Task.set_status ~__context ~self:task ~value:`cancelling) tasks + if Helpers.i_am_srmaster ~__context ~sr:self then + let tasks = Helpers.find_health_check_task ~__context ~sr:self in + List.iter (fun task -> Db.Task.set_status ~__context ~self:task ~value:`cancelling) tasks diff --git a/ocaml/xapi/xapi_stats.ml b/ocaml/xapi/xapi_stats.ml index b55ae0421af..7a391eba6f3 100644 --- a/ocaml/xapi/xapi_stats.ml +++ b/ocaml/xapi/xapi_stats.ml @@ -16,40 +16,40 @@ open Rrdd_plugin module D = Debug.Make(struct let name = "xapi_stats" end) let generate_master_stats ~__context = - let session_count = - Db.Session.get_all ~__context - |> List.length - |> 
Int64.of_int - in - let session_count_ds = - Rrd.Host, - Ds.ds_make - ~name:"pool_session_count" - ~description:"Number of sessions" - ~value:(Rrd.VT_Int64 session_count) - ~ty:Rrd.Gauge - ~default:true - ~min:0.0 - ~units:"sessions" - () - in - let task_count = Db.Task.get_all ~__context - |> List.length - |> Int64.of_int - in - let task_count_ds = - Rrd.Host, - Ds.ds_make - ~name:"pool_task_count" - ~description:"Number of tasks" - ~value:(Rrd.VT_Int64 task_count) - ~ty:Rrd.Gauge - ~default:true - ~min:0.0 - ~units:"tasks" - () - in - [session_count_ds; task_count_ds] + let session_count = + Db.Session.get_all ~__context + |> List.length + |> Int64.of_int + in + let session_count_ds = + Rrd.Host, + Ds.ds_make + ~name:"pool_session_count" + ~description:"Number of sessions" + ~value:(Rrd.VT_Int64 session_count) + ~ty:Rrd.Gauge + ~default:true + ~min:0.0 + ~units:"sessions" + () + in + let task_count = Db.Task.get_all ~__context + |> List.length + |> Int64.of_int + in + let task_count_ds = + Rrd.Host, + Ds.ds_make + ~name:"pool_task_count" + ~description:"Number of tasks" + ~value:(Rrd.VT_Int64 task_count) + ~ty:Rrd.Gauge + ~default:true + ~min:0.0 + ~units:"tasks" + () + in + [session_count_ds; task_count_ds] let gc_debug = ref true let previous_oldness = ref 0 @@ -57,80 +57,80 @@ let previous_free_words = ref 0 let previous_live_words = ref 0 let generate_gc_stats () = - let gcstat = - if !gc_debug then ( - if !previous_oldness > 5 then ( - let stat = Gc.stat () in - previous_free_words := stat.Gc.free_words; - previous_live_words := stat.Gc.live_words; - previous_oldness := 0; - stat - ) else ( - incr previous_oldness; - {(Gc.quick_stat ()) with - Gc.free_words = !previous_free_words; - Gc.live_words = !previous_live_words;} - ) - ) else Gc.quick_stat () - in - let xapigrad_kib = - (gcstat.Gc.minor_words +. gcstat.Gc.major_words -. gcstat.Gc.promoted_words) - /. 256. 
- in - let xapitotal_kib = Int64.of_int (gcstat.Gc.heap_words / 256) in - let xapiactualfree_kib = Int64.of_int (gcstat.Gc.free_words / 256) in - let xapiactuallive_kib = Int64.of_int (gcstat.Gc.live_words / 256) in - [ - (Rrd.Host, Ds.ds_make ~name:"xapi_memory_usage_kib" ~units:"KiB" - ~description:"Total memory allocated used by xapi daemon" - ~value:(Rrd.VT_Int64 xapitotal_kib) ~ty:Rrd.Gauge ~min:0.0 - ~default:true ()); - (Rrd.Host, Ds.ds_make ~name:"xapi_free_memory_kib" ~units:"KiB" - ~description:"Free memory available to the xapi daemon" - ~value:(Rrd.VT_Int64 xapiactualfree_kib) ~ty:Rrd.Gauge ~min:0.0 - ~default:true ()); - (Rrd.Host, Ds.ds_make ~name:"xapi_live_memory_kib" ~units:"KiB" - ~description:"Live memory used by xapi daemon" - ~value:(Rrd.VT_Int64 xapiactuallive_kib) ~ty:Rrd.Gauge ~min:0.0 - ~default:true ()); - (Rrd.Host, Ds.ds_make ~name:"xapi_allocation_kib" ~units:"KiB" - ~description:"Memory allocation done by the xapi daemon" - ~value:(Rrd.VT_Float xapigrad_kib) ~ty:Rrd.Derive ~min:0.0 - ~default:true ()); - ] + let gcstat = + if !gc_debug then ( + if !previous_oldness > 5 then ( + let stat = Gc.stat () in + previous_free_words := stat.Gc.free_words; + previous_live_words := stat.Gc.live_words; + previous_oldness := 0; + stat + ) else ( + incr previous_oldness; + {(Gc.quick_stat ()) with + Gc.free_words = !previous_free_words; + Gc.live_words = !previous_live_words;} + ) + ) else Gc.quick_stat () + in + let xapigrad_kib = + (gcstat.Gc.minor_words +. gcstat.Gc.major_words -. gcstat.Gc.promoted_words) + /. 256. 
+ in + let xapitotal_kib = Int64.of_int (gcstat.Gc.heap_words / 256) in + let xapiactualfree_kib = Int64.of_int (gcstat.Gc.free_words / 256) in + let xapiactuallive_kib = Int64.of_int (gcstat.Gc.live_words / 256) in + [ + (Rrd.Host, Ds.ds_make ~name:"xapi_memory_usage_kib" ~units:"KiB" + ~description:"Total memory allocated used by xapi daemon" + ~value:(Rrd.VT_Int64 xapitotal_kib) ~ty:Rrd.Gauge ~min:0.0 + ~default:true ()); + (Rrd.Host, Ds.ds_make ~name:"xapi_free_memory_kib" ~units:"KiB" + ~description:"Free memory available to the xapi daemon" + ~value:(Rrd.VT_Int64 xapiactualfree_kib) ~ty:Rrd.Gauge ~min:0.0 + ~default:true ()); + (Rrd.Host, Ds.ds_make ~name:"xapi_live_memory_kib" ~units:"KiB" + ~description:"Live memory used by xapi daemon" + ~value:(Rrd.VT_Int64 xapiactuallive_kib) ~ty:Rrd.Gauge ~min:0.0 + ~default:true ()); + (Rrd.Host, Ds.ds_make ~name:"xapi_allocation_kib" ~units:"KiB" + ~description:"Memory allocation done by the xapi daemon" + ~value:(Rrd.VT_Float xapigrad_kib) ~ty:Rrd.Derive ~min:0.0 + ~default:true ()); + ] let generate_other_stats () = - let open_fds = - Utils.list_directory_entries_unsafe "/proc/self/fd" - |> List.length - |> Int64.of_int - in - let open_fds_ds = - Rrd.Host, - Ds.ds_make - ~name:"xapi_open_fds" - ~description:"Number of open file descriptors held by xapi" - ~value:(Rrd.VT_Int64 open_fds) - ~ty:Rrd.Gauge - ~default:true - ~min:0.0 - ~units:"file descriptors" - () - in - [open_fds_ds] + let open_fds = + Utils.list_directory_entries_unsafe "/proc/self/fd" + |> List.length + |> Int64.of_int + in + let open_fds_ds = + Rrd.Host, + Ds.ds_make + ~name:"xapi_open_fds" + ~description:"Number of open file descriptors held by xapi" + ~value:(Rrd.VT_Int64 open_fds) + ~ty:Rrd.Gauge + ~default:true + ~min:0.0 + ~units:"file descriptors" + () + in + [open_fds_ds] let generate_stats ~__context ~master = - let master_only_stats = - if master - then generate_master_stats ~__context - else [] - in - let gc_stats = generate_gc_stats () in 
- let other_stats = generate_other_stats () in - List.fold_left - (fun acc stats -> List.rev_append acc stats) - [] - [master_only_stats; gc_stats; other_stats] + let master_only_stats = + if master + then generate_master_stats ~__context + else [] + in + let gc_stats = generate_gc_stats () in + let other_stats = generate_other_stats () in + List.fold_left + (fun acc stats -> List.rev_append acc stats) + [] + [master_only_stats; gc_stats; other_stats] let reporter_cache : Reporter.t option ref = ref None let reporter_m = Mutex.create () @@ -140,30 +140,30 @@ let reporter_m = Mutex.create () let shared_page_count = 1 let start () = - let __context = Context.make "xapi_stats" in - let master = (Pool_role.is_master ()) in - Stdext.Threadext.Mutex.execute reporter_m - (fun () -> - match !reporter_cache with - | Some _ -> () - | None -> - let reporter = - Reporter.start_async - (module D : Debug.DEBUG) - ~uid:"xapi-stats" - ~neg_shift:0.5 - ~target:(Reporter.Local shared_page_count) - ~protocol:Rrd_interface.V2 - ~dss_f:(fun () -> generate_stats ~__context ~master) - in - reporter_cache := (Some reporter)) + let __context = Context.make "xapi_stats" in + let master = (Pool_role.is_master ()) in + Stdext.Threadext.Mutex.execute reporter_m + (fun () -> + match !reporter_cache with + | Some _ -> () + | None -> + let reporter = + Reporter.start_async + (module D : Debug.DEBUG) + ~uid:"xapi-stats" + ~neg_shift:0.5 + ~target:(Reporter.Local shared_page_count) + ~protocol:Rrd_interface.V2 + ~dss_f:(fun () -> generate_stats ~__context ~master) + in + reporter_cache := (Some reporter)) let stop () = - Stdext.Threadext.Mutex.execute reporter_m - (fun () -> - match !reporter_cache with - | None -> () - | Some reporter -> begin - Reporter.cancel reporter; - reporter_cache := None - end) + Stdext.Threadext.Mutex.execute reporter_m + (fun () -> + match !reporter_cache with + | None -> () + | Some reporter -> begin + Reporter.cancel reporter; + reporter_cache := None + end) diff --git 
a/ocaml/xapi/xapi_subject.ml b/ocaml/xapi/xapi_subject.ml index 7bc7688c7d4..94721e07f11 100644 --- a/ocaml/xapi/xapi_subject.ml +++ b/ocaml/xapi/xapi_subject.ml @@ -13,229 +13,229 @@ *) (** Module that defines API functions for Subject objects * @group XenAPI functions - *) - +*) + module D = Debug.Make(struct let name="xapi_subject" end) open D let run_hook_script_after_subject_add () = - (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) - (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) - (* potentially create different sshd configuration files in different hosts of the pool. *) - Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_add" - (fun __context -> - Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_subject_add - ) - ) - ) + (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) + (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) + (* potentially create different sshd configuration files in different hosts of the pool. 
*) + Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_add" + (fun __context -> + Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_subject_add + ) + ) + ) let asynchronously_run_hook_script_after_subject_add = - At_least_once_more.make "running after-subject-add hook script" run_hook_script_after_subject_add + At_least_once_more.make "running after-subject-add hook script" run_hook_script_after_subject_add let create ~__context ~subject_identifier ~other_config = - (* If at least one of the hosts uses AD external auth, then assert that the AD feature is enabled *) - let hosts = Db.Host.get_all ~__context in - let auth_types = List.map (fun self -> Db.Host.get_external_auth_type ~__context ~self) hosts in - if List.exists (fun x -> x = Extauth.auth_type_AD_Likewise) auth_types then - Pool_features.assert_enabled ~__context ~f:Features.AD; - - (* we need to find if subject is already in the pool *) - let subjects = Db.Subject.get_all_records ~__context in - if List.exists (fun (subj,record) -> (* visits each subject in the table o(n) *) - let subject_id_in_db = record.API.subject_subject_identifier in - (subject_identifier = subject_id_in_db) (* is it the subject we are looking for? *) - ) subjects - then - begin - (* we found an already existing user with the same subject identifier. 
*) - (* we should not add another one with the same subject id *) - debug "subject-id %s already exists in pool" subject_identifier; - raise (Api_errors.Server_error(Api_errors.subject_already_exists, [])) - end - else - (* + (* If at least one of the hosts uses AD external auth, then assert that the AD feature is enabled *) + let hosts = Db.Host.get_all ~__context in + let auth_types = List.map (fun self -> Db.Host.get_external_auth_type ~__context ~self) hosts in + if List.exists (fun x -> x = Extauth.auth_type_AD_Likewise) auth_types then + Pool_features.assert_enabled ~__context ~f:Features.AD; + + (* we need to find if subject is already in the pool *) + let subjects = Db.Subject.get_all_records ~__context in + if List.exists (fun (subj,record) -> (* visits each subject in the table o(n) *) + let subject_id_in_db = record.API.subject_subject_identifier in + (subject_identifier = subject_id_in_db) (* is it the subject we are looking for? *) + ) subjects + then + begin + (* we found an already existing user with the same subject identifier. *) + (* we should not add another one with the same subject id *) + debug "subject-id %s already exists in pool" subject_identifier; + raise (Api_errors.Server_error(Api_errors.subject_already_exists, [])) + end + else + (* (* one of other_config's fields MUST be 'subject_name' (see interface requirement: ocaml/auth/auth_signature.ml) *) (* any other name-value pair is optional *) - if not (List.mem_assoc "subject_name" other_config) + if not (List.mem_assoc "subject_name" other_config) then let msg = "" (*(String.concat " " (List.map (fun (a,b) -> Printf.sprintf "(%s:%s)" a b) other_config))*) in raise (Api_errors.Server_error(Api_errors.subject_name_not_provided, [])) else *) - (* add the new subject to the db *) - let ref=Ref.make() in - let uuid=Uuid.to_string (Uuid.make_uuid()) in - - (* CP-1224: Free Edition: Newly created subjects will have the Pool Administrator role. 
*) - (* CP-1224: Paid-for Edition: Newly created subjects will have an empty role. *) - let default_roles = - if (Pool_features.is_enabled ~__context Features.RBAC) - then (* paid-for edition: we can only create a subject with no roles*) - [] - else (*free edition: one fixed role of pool-admin only*) - Rbac_static.get_refs [Rbac_static.role_pool_admin] - in - - Db.Subject.create ~__context ~ref ~uuid ~subject_identifier ~other_config - ~roles:default_roles; - - (* CP-709: call extauth hook-script after subject.add *) - (* we fork this call in a new thread so that subject.add *) - (* does not have to wait for the script to finish in all hosts of the pool *) - (* optimization to minimize number of concurrent runs of idempotent functions *) - At_least_once_more.again asynchronously_run_hook_script_after_subject_add; - - ref - + (* add the new subject to the db *) + let ref=Ref.make() in + let uuid=Uuid.to_string (Uuid.make_uuid()) in + + (* CP-1224: Free Edition: Newly created subjects will have the Pool Administrator role. *) + (* CP-1224: Paid-for Edition: Newly created subjects will have an empty role. 
*) + let default_roles = + if (Pool_features.is_enabled ~__context Features.RBAC) + then (* paid-for edition: we can only create a subject with no roles*) + [] + else (*free edition: one fixed role of pool-admin only*) + Rbac_static.get_refs [Rbac_static.role_pool_admin] + in + + Db.Subject.create ~__context ~ref ~uuid ~subject_identifier ~other_config + ~roles:default_roles; + + (* CP-709: call extauth hook-script after subject.add *) + (* we fork this call in a new thread so that subject.add *) + (* does not have to wait for the script to finish in all hosts of the pool *) + (* optimization to minimize number of concurrent runs of idempotent functions *) + At_least_once_more.again asynchronously_run_hook_script_after_subject_add; + + ref + let run_hook_script_after_subject_remove () = - (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) - (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) - (* potentially create different sshd configuration files in different hosts of the pool. *) - Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_remove" - (fun __context -> - Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_subject_remove - ) - ) - ) + (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) + (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) + (* potentially create different sshd configuration files in different hosts of the pool. 
*) + Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_remove" + (fun __context -> + Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_subject_remove + ) + ) + ) let asynchronously_run_hook_script_after_subject_remove = - At_least_once_more.make "running after-subject-remove hook script" run_hook_script_after_subject_remove - -let destroy ~__context ~self = - - Db.Subject.destroy ~__context ~self; - - (* CP-709: call extauth hook-script after subject.remove *) - (* we fork this call in a new thread so that subject.add *) - (* does not have to wait for the script to finish in all hosts of the pool *) - (* optimization to minimize number of concurrent runs of idempotent functions *) - At_least_once_more.again asynchronously_run_hook_script_after_subject_remove + At_least_once_more.make "running after-subject-remove hook script" run_hook_script_after_subject_remove + +let destroy ~__context ~self = + + Db.Subject.destroy ~__context ~self; + + (* CP-709: call extauth hook-script after subject.remove *) + (* we fork this call in a new thread so that subject.add *) + (* does not have to wait for the script to finish in all hosts of the pool *) + (* optimization to minimize number of concurrent runs of idempotent functions *) + At_least_once_more.again asynchronously_run_hook_script_after_subject_remove let update ~__context ~self = - let subject_identifier = Db.Subject.get_subject_identifier ~__context ~self in - (* query external directory service *) - (* this might raise an exception *) - let subject_info = Xapi_auth.get_subject_information_from_identifier ~__context ~subject_identifier in - (* update locally the fresh information received from external directory service *) - Db.Subject.set_other_config ~__context ~self ~value:subject_info + let subject_identifier = Db.Subject.get_subject_identifier ~__context ~self in + (* 
query external directory service *) + (* this might raise an exception *) + let subject_info = Xapi_auth.get_subject_information_from_identifier ~__context ~subject_identifier in + (* update locally the fresh information received from external directory service *) + Db.Subject.set_other_config ~__context ~self ~value:subject_info let update_all_subjects ~__context = - (* checks if external authentication is enabled, otherwise it's useless to try to do the update *) - let host = Helpers.get_localhost ~__context in - let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in - if auth_type = "" - then begin (* external authentication is disabled *) - (*debug "External authentication is disabled during update_all_subjects";*) - end - else (* external authentication is enabled *) - let subjects = Db.Subject.get_all ~__context in - (* visits each subject in the table o(n) *) - List.iter (fun subj -> - (* uses a best-effort attempt to update the subject information *) - (* therefore, if an exception was raised, just ignore it *) - try - update ~__context ~self:subj - with - | e -> begin - debug "Error trying to update subject %s: %s" - (Db.Subject.get_subject_identifier ~__context ~self:subj) - (ExnHelper.string_of_exn e) - (* ignore this exception e, do not raise it again *) - end - ) subjects + (* checks if external authentication is enabled, otherwise it's useless to try to do the update *) + let host = Helpers.get_localhost ~__context in + let auth_type = Db.Host.get_external_auth_type ~__context ~self:host in + if auth_type = "" + then begin (* external authentication is disabled *) + (*debug "External authentication is disabled during update_all_subjects";*) + end + else (* external authentication is enabled *) + let subjects = Db.Subject.get_all ~__context in + (* visits each subject in the table o(n) *) + List.iter (fun subj -> + (* uses a best-effort attempt to update the subject information *) + (* therefore, if an exception was raised, just ignore 
it *) + try + update ~__context ~self:subj + with + | e -> begin + debug "Error trying to update subject %s: %s" + (Db.Subject.get_subject_identifier ~__context ~self:subj) + (ExnHelper.string_of_exn e) + (* ignore this exception e, do not raise it again *) + end + ) subjects (* This function returns all permissions associated with a subject *) let get_permissions_name_label ~__context ~self = - (* for each role in subject.roles: - fold get_all_permissions ~__context ~role - setify - *) - Stdext.Listext.List.setify - (List.fold_left - (fun accu role -> - List.rev_append - (Xapi_role.get_permissions_name_label ~__context ~self:role) - accu - ) - [] - (Db.Subject.get_roles ~__context ~self) - ) + (* for each role in subject.roles: + fold get_all_permissions ~__context ~role + setify + *) + Stdext.Listext.List.setify + (List.fold_left + (fun accu role -> + List.rev_append + (Xapi_role.get_permissions_name_label ~__context ~self:role) + accu + ) + [] + (Db.Subject.get_roles ~__context ~self) + ) let run_hook_script_after_subject_roles_update () = - (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) - (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) - (* potentially create different sshd configuration files in different hosts of the pool. *) - Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> - ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_roles_update" - (fun __context -> - Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_roles_update - ) - ) - ) + (* CP-825: Serialize execution of pool-enable-extauth and pool-disable-extauth *) + (* We should not call the hook script while enabling/disabling the pool's extauth, since that will *) + (* potentially create different sshd configuration files in different hosts of the pool. 
*) + Stdext.Threadext.Mutex.execute Xapi_globs.serialize_pool_enable_disable_extauth (fun () -> + ignore (Server_helpers.exec_with_new_task "run_hook_script_after_subject_roles_update" + (fun __context -> + Extauth.call_extauth_hook_script_in_pool ~__context Extauth.event_name_after_roles_update + ) + ) + ) let asynchronously_run_hook_script_after_subject_roles_update = - At_least_once_more.make - "running after-subject-roles-update hook script" - run_hook_script_after_subject_roles_update + At_least_once_more.make + "running after-subject-roles-update hook script" + run_hook_script_after_subject_roles_update let add_to_roles ~__context ~self ~role = - - (* CP-1224: Free Edition: Attempts to add or remove roles *) - (* will fail with a LICENSE_RESTRICTION error.*) - Pool_features.assert_enabled ~__context ~f:Features.RBAC; - - if (Xapi_role.is_valid_role ~__context ~role) - then - begin - if (List.mem role (Db.Subject.get_roles ~__context ~self)) - then - begin - debug "subject %s already has role %s" - (Db.Subject.get_subject_identifier - ~__context - ~self - ) - (Ref.string_of role); - raise (Api_errors.Server_error - (Api_errors.role_already_exists, [])) - end - else - begin - Db.Subject.add_roles ~__context ~self ~value:role; - (* CP-710: call extauth hook-script after subject.add_roles *) - At_least_once_more.again - asynchronously_run_hook_script_after_subject_roles_update - end - end - else - begin - debug "role %s is not valid" (Ref.string_of role); - raise (Api_errors.Server_error(Api_errors.role_not_found, [])) - end + + (* CP-1224: Free Edition: Attempts to add or remove roles *) + (* will fail with a LICENSE_RESTRICTION error.*) + Pool_features.assert_enabled ~__context ~f:Features.RBAC; + + if (Xapi_role.is_valid_role ~__context ~role) + then + begin + if (List.mem role (Db.Subject.get_roles ~__context ~self)) + then + begin + debug "subject %s already has role %s" + (Db.Subject.get_subject_identifier + ~__context + ~self + ) + (Ref.string_of role); + 
raise (Api_errors.Server_error + (Api_errors.role_already_exists, [])) + end + else + begin + Db.Subject.add_roles ~__context ~self ~value:role; + (* CP-710: call extauth hook-script after subject.add_roles *) + At_least_once_more.again + asynchronously_run_hook_script_after_subject_roles_update + end + end + else + begin + debug "role %s is not valid" (Ref.string_of role); + raise (Api_errors.Server_error(Api_errors.role_not_found, [])) + end let remove_from_roles ~__context ~self ~role = - (* CP-1224: Free Edition: Attempts to add or remove roles *) - (* will fail with a LICENSE_RESTRICTION error.*) - Pool_features.assert_enabled ~__context ~f:Features.RBAC; - - if (List.mem role (Db.Subject.get_roles ~__context ~self)) - then - begin - Db.Subject.remove_roles ~__context ~self ~value:role; - (* CP-710: call extauth hook-script after subject.remove_roles *) - At_least_once_more.again - asynchronously_run_hook_script_after_subject_roles_update - end - else - begin - debug "subject %s does not have role %s" - (Db.Subject.get_subject_identifier - ~__context - ~self - ) - (Ref.string_of role); - raise (Api_errors.Server_error (Api_errors.role_not_found, [])) - end + (* CP-1224: Free Edition: Attempts to add or remove roles *) + (* will fail with a LICENSE_RESTRICTION error.*) + Pool_features.assert_enabled ~__context ~f:Features.RBAC; + + if (List.mem role (Db.Subject.get_roles ~__context ~self)) + then + begin + Db.Subject.remove_roles ~__context ~self ~value:role; + (* CP-710: call extauth hook-script after subject.remove_roles *) + At_least_once_more.again + asynchronously_run_hook_script_after_subject_roles_update + end + else + begin + debug "subject %s does not have role %s" + (Db.Subject.get_subject_identifier + ~__context + ~self + ) + (Ref.string_of role); + raise (Api_errors.Server_error (Api_errors.role_not_found, [])) + end diff --git a/ocaml/xapi/xapi_support.ml b/ocaml/xapi/xapi_support.ml index ae05beedaf8..7032153b8c9 100644 --- 
a/ocaml/xapi/xapi_support.ml +++ b/ocaml/xapi/xapi_support.ml @@ -18,22 +18,22 @@ let support_url = "ftp://support.xensource.com/uploads/" (* URL to which the crashdump/whatever will be uploaded *) let upload_url name = - let uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in - Printf.sprintf "%s%s-%s" support_url uuid name + let uuid = Xapi_inventory.lookup Xapi_inventory._installation_uuid in + Printf.sprintf "%s%s-%s" support_url uuid name open Forkhelpers let do_upload label file url options = - let proxy = - if List.mem_assoc "http_proxy" options - then List.assoc "http_proxy" options - else try Unix.getenv "http_proxy" with _ -> "" in + let proxy = + if List.mem_assoc "http_proxy" options + then List.assoc "http_proxy" options + else try Unix.getenv "http_proxy" with _ -> "" in - match with_logfile_fd label - (fun log_fd -> - let pid = safe_close_and_exec None (Some log_fd) (Some log_fd) [] !Xapi_globs.upload_wrapper [file; url; proxy] in - waitpid_fail_if_bad_exit pid) with - | Success _ -> debug "Upload succeeded" - | Failure (log, exn) -> - debug "Upload failed, output: %s" log; - raise exn + match with_logfile_fd label + (fun log_fd -> + let pid = safe_close_and_exec None (Some log_fd) (Some log_fd) [] !Xapi_globs.upload_wrapper [file; url; proxy] in + waitpid_fail_if_bad_exit pid) with + | Success _ -> debug "Upload succeeded" + | Failure (log, exn) -> + debug "Upload failed, output: %s" log; + raise exn diff --git a/ocaml/xapi/xapi_sync.ml b/ocaml/xapi/xapi_sync.ml index 5b26ba95899..a01a12afc60 100644 --- a/ocaml/xapi/xapi_sync.ml +++ b/ocaml/xapi/xapi_sync.ml @@ -22,57 +22,57 @@ open Threadext let sync_lock = Mutex.create () let sync_host ~__context host = - Mutex.execute sync_lock (fun () -> - try - let localhost = host = !Xapi_globs.localhost_ref - and host_has_storage = not (List.mem_assoc Xapi_globs.host_no_local_storage (Db.Host.get_other_config ~__context ~self:host)) in + Mutex.execute sync_lock (fun () -> + try + let localhost = 
host = !Xapi_globs.localhost_ref + and host_has_storage = not (List.mem_assoc Xapi_globs.host_no_local_storage (Db.Host.get_other_config ~__context ~self:host)) in - if (not localhost) && host_has_storage then begin - let address = Db.Host.get_address ~__context ~self:host in - debug "Beginning sync with host at address: %s" address; + if (not localhost) && host_has_storage then begin + let address = Db.Host.get_address ~__context ~self:host in + debug "Beginning sync with host at address: %s" address; - let localpath = Printf.sprintf "%s/" Xapi_globs.xapi_blob_location - and remotepath = Printf.sprintf "%s:%s" address Xapi_globs.xapi_blob_location - and session = Xapi_session.slave_login ~__context ~host:(Helpers.get_localhost ~__context) ~psecret:!Xapi_globs.pool_secret in - Unix.putenv "XSH_SESSION" (Ref.string_of session); - Unix.putenv "XSH_SSL_LEGACY" (string_of_bool (Db.Host.get_ssl_legacy ~__context ~self:host)); - (match !Xapi_globs.ciphersuites_good_outbound with - | Some c -> Unix.putenv "XSH_GOOD_CIPHERSUITES" c - | None -> raise (Api_errors.Server_error (Api_errors.internal_error,["Xapi_sync found no good ciphersuites in Xapi_globs."])) - ); - Unix.putenv "XSH_LEGACY_CIPHERSUITES" !Xapi_globs.ciphersuites_legacy_outbound; + let localpath = Printf.sprintf "%s/" Xapi_globs.xapi_blob_location + and remotepath = Printf.sprintf "%s:%s" address Xapi_globs.xapi_blob_location + and session = Xapi_session.slave_login ~__context ~host:(Helpers.get_localhost ~__context) ~psecret:!Xapi_globs.pool_secret in + Unix.putenv "XSH_SESSION" (Ref.string_of session); + Unix.putenv "XSH_SSL_LEGACY" (string_of_bool (Db.Host.get_ssl_legacy ~__context ~self:host)); + (match !Xapi_globs.ciphersuites_good_outbound with + | Some c -> Unix.putenv "XSH_GOOD_CIPHERSUITES" c + | None -> raise (Api_errors.Server_error (Api_errors.internal_error,["Xapi_sync found no good ciphersuites in Xapi_globs."])) + ); + Unix.putenv "XSH_LEGACY_CIPHERSUITES" 
!Xapi_globs.ciphersuites_legacy_outbound; - let output,log = Forkhelpers.execute_command_get_output - ~env:(Unix.environment ()) - "/usr/bin/rsync" - ["--delete";"--stats";"-az";localpath;remotepath;"-e"; !Xapi_globs.xsh] in - debug "sync output: \n%s" output; - debug "log output: '%s'" log; + let output,log = Forkhelpers.execute_command_get_output + ~env:(Unix.environment ()) + "/usr/bin/rsync" + ["--delete";"--stats";"-az";localpath;remotepath;"-e"; !Xapi_globs.xsh] in + debug "sync output: \n%s" output; + debug "log output: '%s'" log; - (* Store the last blob sync time in the Host.other_config *) - (try Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.last_blob_sync_time with _ -> ()); - Db.Host.add_to_other_config ~__context ~self:host ~key:Xapi_globs.last_blob_sync_time ~value:(string_of_float (Unix.gettimeofday ())); - end + (* Store the last blob sync time in the Host.other_config *) + (try Db.Host.remove_from_other_config ~__context ~self:host ~key:Xapi_globs.last_blob_sync_time with _ -> ()); + Db.Host.add_to_other_config ~__context ~self:host ~key:Xapi_globs.last_blob_sync_time ~value:(string_of_float (Unix.gettimeofday ())); + end - else begin - debug "Ignoring host synchronise: localhost=%b host_has_storage=%b" localhost host_has_storage - end; + else begin + debug "Ignoring host synchronise: localhost=%b host_has_storage=%b" localhost host_has_storage + end; - with Forkhelpers.Spawn_internal_error(log,output,status) -> - (* Do we think the host is supposed to be online? *) - let online = - try - let m = Db.Host.get_metrics ~__context ~self:host in - Db.Host_metrics.get_live ~__context ~self:m - with _ -> false in + with Forkhelpers.Spawn_internal_error(log,output,status) -> + (* Do we think the host is supposed to be online? 
*) + let online = + try + let m = Db.Host.get_metrics ~__context ~self:host in + Db.Host_metrics.get_live ~__context ~self:m + with _ -> false in - (* In rolling upgrade mode we would also expect a failure *) - let rolling_upgrade = Helpers.rolling_upgrade_in_progress ~__context in - if online && not rolling_upgrade - then error "Unexpected failure synchronising blobs to host %s; log='%s'; output='%s'" (Ref.string_of host) log output; - ) + (* In rolling upgrade mode we would also expect a failure *) + let rolling_upgrade = Helpers.rolling_upgrade_in_progress ~__context in + if online && not rolling_upgrade + then error "Unexpected failure synchronising blobs to host %s; log='%s'; output='%s'" (Ref.string_of host) log output; + ) let do_sync () = - Server_helpers.exec_with_new_task "blob sync" (fun __context -> - let hosts = Db.Host.get_all ~__context in - List.iter (sync_host ~__context) hosts) + Server_helpers.exec_with_new_task "blob sync" (fun __context -> + let hosts = Db.Host.get_all ~__context in + List.iter (sync_host ~__context) hosts) diff --git a/ocaml/xapi/xapi_task.ml b/ocaml/xapi/xapi_task.ml index 1e267c39a8e..02e9c1c8407 100644 --- a/ocaml/xapi/xapi_task.ml +++ b/ocaml/xapi/xapi_task.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for Task objects * @group XenAPI functions - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -21,28 +21,28 @@ open D let get_allowed_messages ~__context ~self = [] let create ~__context ~label ~description = - (* This call will have a dummy task ID already but we need to make a fresh one *) + (* This call will have a dummy task ID already but we need to make a fresh one *) let subtask_of = Context.get_task_id __context in let session_id = try Some (Context.get_session_id __context) with _->None in - let c = Context.make ?session_id ~task_description:description ~subtask_of ~task_in_database:true label in - let t = Context.get_task_id c in + let c = Context.make ?session_id 
~task_description:description ~subtask_of ~task_in_database:true label in + let t = Context.get_task_id c in (*info "Task.create ref = %s; label = %s" (Ref.string_of t) label;*) - t + t let destroy ~__context ~self = TaskHelper.assert_can_destroy ~__context self; - if TaskHelper.status_is_completed (Db.Task.get_status ~__context ~self) + if TaskHelper.status_is_completed (Db.Task.get_status ~__context ~self) then Db.Task.destroy ~__context ~self else Db.Task.add_to_current_operations ~__context ~self ~key:"task" ~value:`destroy let cancel ~__context ~task = - let localhost = Helpers.get_localhost ~__context in - let forwarded_to = Db.Task.get_forwarded_to ~__context ~self:task in - if Db.is_valid_ref __context forwarded_to && (localhost <> forwarded_to) - then failwith (Printf.sprintf "Task.cancel not forwarded to the correct host (expecting %s but this is %s)" - (Db.Host.get_hostname ~__context ~self:forwarded_to) (Db.Host.get_hostname ~__context ~self:localhost) - ); - TaskHelper.assert_can_destroy ~__context task; - Db.Task.set_current_operations ~__context ~self:task ~value:[(Ref.string_of (Context.get_task_id __context)), `cancel]; - if not(Xapi_xenops.task_cancel ~__context ~self:task) && not(Storage_access.task_cancel ~__context ~self:task) - then info "Task.cancel is falling back to polling" + let localhost = Helpers.get_localhost ~__context in + let forwarded_to = Db.Task.get_forwarded_to ~__context ~self:task in + if Db.is_valid_ref __context forwarded_to && (localhost <> forwarded_to) + then failwith (Printf.sprintf "Task.cancel not forwarded to the correct host (expecting %s but this is %s)" + (Db.Host.get_hostname ~__context ~self:forwarded_to) (Db.Host.get_hostname ~__context ~self:localhost) + ); + TaskHelper.assert_can_destroy ~__context task; + Db.Task.set_current_operations ~__context ~self:task ~value:[(Ref.string_of (Context.get_task_id __context)), `cancel]; + if not(Xapi_xenops.task_cancel ~__context ~self:task) && 
not(Storage_access.task_cancel ~__context ~self:task) + then info "Task.cancel is falling back to polling" diff --git a/ocaml/xapi/xapi_templates.ml b/ocaml/xapi/xapi_templates.ml index 719a80982f6..fe9b2cbd3fd 100644 --- a/ocaml/xapi/xapi_templates.ml +++ b/ocaml/xapi/xapi_templates.ml @@ -13,13 +13,13 @@ *) (** * @group Virtual-Machine Management - *) +*) (** Here we define a template to be a VM with 'is_a_template = true' which, - when initially booted after having been cloned, inspects its own - configuration (stored by the UI/CLI in VM.other_config) and uses the API - to provision disks, make filesystems, perform any install steps and then - leave the VM in a state such that it comes up properly on subsequent reboots. *) + when initially booted after having been cloned, inspects its own + configuration (stored by the UI/CLI in VM.other_config) and uses the API + to provision disks, make filesystems, perform any install steps and then + leave the VM in a state such that it comes up properly on subsequent reboots. 
*) (** Should make a dummy one of these for in-guest installers: *) module D = Debug.Make(struct let name="xapi" end) @@ -27,43 +27,43 @@ open D (** A record which describes a disk provision request *) type disk = { device: string; (** device inside the guest eg xvda *) - size: int64; (** size in bytes *) - sr: string; (** name or UUID of the SR in which to make the disk *) - bootable: bool; - _type: API.vdi_type - } + size: int64; (** size in bytes *) + sr: string; (** name or UUID of the SR in which to make the disk *) + bootable: bool; + _type: API.vdi_type + } (** A record which describes the template *) type template = { disks: disk list; - post_install_script: string option } + post_install_script: string option } (** The disk records are marshalled as XML *) open Xml let string2vdi_type s = match s with - "system" -> `system - | "user" -> `user - | "ephemeral" -> `ephemeral - | "suspend" -> `suspend - | "crashdump" -> `crashdump - | _ -> assert false + "system" -> `system + | "user" -> `user + | "ephemeral" -> `ephemeral + | "suspend" -> `suspend + | "crashdump" -> `crashdump + | _ -> assert false exception Parse_failure let disk_of_xml = function | Element("disk", params, []) -> - begin - try - let device = List.assoc "device" params - and size = List.assoc "size" params - and sr = List.assoc "sr" params - and bootable = List.assoc "bootable" params - and _type = try string2vdi_type (List.assoc "type" params) with _ -> `system - in - { device = device; size = Int64.of_string size; sr = sr; - bootable = (bootable = "true"); _type = _type} - with _ -> raise Parse_failure - end + begin + try + let device = List.assoc "device" params + and size = List.assoc "size" params + and sr = List.assoc "sr" params + and bootable = List.assoc "bootable" params + and _type = try string2vdi_type (List.assoc "type" params) with _ -> `system + in + { device = device; size = Int64.of_string size; sr = sr; + bootable = (bootable = "true"); _type = _type} + with _ -> raise 
Parse_failure + end | _ -> raise Parse_failure let disks_of_xml = function | Element("provision", [], disks) -> List.map disk_of_xml disks @@ -78,7 +78,7 @@ let post_install_key = "postinstall" open Client (** From a VM reference, return an 'install' record option. *) -let get_template_record rpc session_id vm = +let get_template_record rpc session_id vm = let other_config = Client.VM.get_other_config rpc session_id vm in let disks = if List.mem_assoc disks_key other_config then disks_of_xml (Xml.parse_string (List.assoc disks_key other_config)) else [] in @@ -86,35 +86,35 @@ let get_template_record rpc session_id vm = then Some (List.assoc post_install_key other_config) else None in if disks = [] && script = None then None - else Some { disks = disks; post_install_script = script } + else Some { disks = disks; post_install_script = script } (** A special bootloader which takes care of the initial boot -- fakeserver only *) let bootloader = "installer" (** Returns true if the given VM is actually a template and must be pre-installed *) -let needs_to_be_installed rpc session_id vm = +let needs_to_be_installed rpc session_id vm = get_template_record rpc session_id vm <> None (** For a VM and a disk record, create a VDI, VBD and return the VBD. Pass in the logging functions to avoid having to link this module against the log library. Hopefully we can link this code directly into the in-guest installer. 
*) -let create_disk rpc session_id vm sm_config disk = - let sr = +let create_disk rpc session_id vm sm_config disk = + let sr = try - Client.SR.get_by_uuid rpc session_id disk.sr + Client.SR.get_by_uuid rpc session_id disk.sr with _ -> D.error "Unable to find SR (uuid: %s) to provision the disk" disk.sr; raise (Api_errors.Server_error (Api_errors.uuid_invalid, ["sr"; disk.sr ])) in debug "Provisioning VDI for new VM"; let vdi = Client.VDI.create ~rpc ~session_id - ~name_label:"" ~name_description:"Created by template provisioner" - ~sR:sr ~virtual_size:disk.size - ~_type:disk._type ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in + ~name_label:"" ~name_description:"Created by template provisioner" + ~sR:sr ~virtual_size:disk.size + ~_type:disk._type ~sharable:false ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in let vbd_ref = Client.VBD.create ~rpc ~session_id - ~vM:vm ~vDI:vdi ~userdevice:disk.device ~bootable:disk.bootable ~mode:`RW ~_type:`Disk - ~unpluggable:(disk._type <> `system) - ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~other_config:[Xapi_globs.owner_key,""] in + ~vM:vm ~vDI:vdi ~userdevice:disk.device ~bootable:disk.bootable ~mode:`RW ~_type:`Disk + ~unpluggable:(disk._type <> `system) + ~empty:false ~qos_algorithm_type:"" ~qos_algorithm_params:[] ~other_config:[Xapi_globs.owner_key,""] in let device=Client.VBD.get_userdevice ~rpc ~session_id ~self:vbd_ref in Client.VDI.set_name_label ~rpc ~session_id ~self:vdi ~value:device; vbd_ref @@ -122,7 +122,7 @@ let create_disk rpc session_id vm sm_config disk = (** For a given VM, if it needs to be installed, create each disk and return the optional post-install script and a list of created VBDs *) -let pre_install rpc session_id vm = +let pre_install rpc session_id vm = debug "Performing pre_install actions (ie creating disks)"; (* driver params for each call - vmhint and epochhint for netapp *) let vmuuid = 
Client.VM.get_uuid rpc session_id vm in @@ -130,15 +130,15 @@ let pre_install rpc session_id vm = [ Xapi_globs._sm_vm_hint, vmuuid ] in match get_template_record rpc session_id vm with | Some { disks = disks; post_install_script = script } -> - let vbds = List.map (create_disk rpc session_id vm sm_config) disks in - script, vbds + let vbds = List.map (create_disk rpc session_id vm sm_config) disks in + script, vbds | None -> - None, [] + None, [] (** For a given VM, perform post-install tidy-up (ie remove keys from other_config which would cause the template to be installed twice) *) -let post_install rpc session_id vm = +let post_install rpc session_id vm = debug "Performing post_install actions (ie removing template information from VM)"; (try Client.VM.remove_from_other_config rpc session_id vm disks_key with _ -> ()); (try Client.VM.remove_from_other_config rpc session_id vm post_install_key with _ -> ()) - + diff --git a/ocaml/xapi/xapi_templates_install.ml b/ocaml/xapi/xapi_templates_install.ml index f18d7abe267..bed4245bd65 100644 --- a/ocaml/xapi/xapi_templates_install.ml +++ b/ocaml/xapi/xapi_templates_install.ml @@ -13,7 +13,7 @@ *) (** * @group Virtual-Machine Management - *) +*) open Stdext open Pervasiveext @@ -35,7 +35,7 @@ let is_whitelisted script = let assert_script_is_whitelisted script = if not (is_whitelisted script) then raise (Api_errors.Server_error (Api_errors.permission_denied, [ - (Printf.sprintf "illegal provision script %s" script)])) + (Printf.sprintf "illegal provision script %s" script)])) (** Execute the post install script of 'vm' having attached all the vbds to the 'install_vm' *) let post_install_script rpc session_id __context install_vm vm (script, vbds) = @@ -47,59 +47,59 @@ let post_install_script rpc session_id __context install_vm vm (script, vbds) = match script with | None -> () (* nothing to do *) | Some script -> - assert_script_is_whitelisted script; - let vdis = List.map (fun self -> Client.VBD.get_VDI rpc session_id 
self) vbds in - with_vbds rpc session_id __context install_vm vdis `RW - (fun install_vm_vbds -> - let devices = List.map - (fun (install_vm_vbd, vbd) -> - let hvm = Client.VM.get_HVM_boot_policy rpc session_id vm <> "" in - let device = Vbdops.translate_vbd_device vbd (Client.VBD.get_userdevice rpc session_id vbd) hvm in - Device_number.to_linux_device device, - "/dev/" ^ (Client.VBD.get_device rpc session_id install_vm_vbd)) (List.combine install_vm_vbds vbds) in - let env = ("vm", Ref.string_of vm) :: devices in - let env = List.map (fun (k, v) -> k ^ "=" ^ v) env in - debug "Executing script %s with env %s" script (String.concat "; " env); + assert_script_is_whitelisted script; + let vdis = List.map (fun self -> Client.VBD.get_VDI rpc session_id self) vbds in + with_vbds rpc session_id __context install_vm vdis `RW + (fun install_vm_vbds -> + let devices = List.map + (fun (install_vm_vbd, vbd) -> + let hvm = Client.VM.get_HVM_boot_policy rpc session_id vm <> "" in + let device = Vbdops.translate_vbd_device vbd (Client.VBD.get_userdevice rpc session_id vbd) hvm in + Device_number.to_linux_device device, + "/dev/" ^ (Client.VBD.get_device rpc session_id install_vm_vbd)) (List.combine install_vm_vbds vbds) in + let env = ("vm", Ref.string_of vm) :: devices in + let env = List.map (fun (k, v) -> k ^ "=" ^ v) env in + debug "Executing script %s with env %s" script (String.concat "; " env); + + match with_logfile_fd "install-log" + (fun log -> + let pid = safe_close_and_exec ~env:(Array.of_list env) None (Some log) (Some log) [] script [] in + let starttime = Unix.time () in + let rec update_progress () = + (* Check for cancelling *) + if TaskHelper.is_cancelling ~__context + then + begin + Unix.kill (Forkhelpers.getpid pid) Sys.sigterm; + let _ = Forkhelpers.waitpid pid in + TaskHelper.raise_cancelled ~__context + end; + + let (newpid,status) = Forkhelpers.waitpid_nohang pid in + if newpid <> 0 + then + (match status with + | Unix.WEXITED 0 -> (newpid,status) + | 
(Unix.WEXITED n|Unix.WSIGNALED n|Unix.WSTOPPED n) -> raise (Subprocess_failed n)) + else + begin + Thread.delay 1.0; + refresh_session (); + let curtime = Unix.time () in + let elapsed = curtime -. starttime in + let f x = 0.1 +. (0.9 -. 0.9 *. exp (-. elapsed /. 60.0)) in + let progress = f elapsed in + TaskHelper.set_progress ~__context progress; + update_progress () + end + in update_progress () + ) with + | Success _ -> debug "Install script exited successfully." + | Failure(log, Subprocess_failed n) -> + error "post_install_script failed: message='%s' (assuming this was because the disk was too small)" log; + raise (Api_errors.Server_error (Api_errors.provision_failed_out_of_space, [])) + | Failure(log, exn) -> + raise exn + ) + - match with_logfile_fd "install-log" - (fun log -> - let pid = safe_close_and_exec ~env:(Array.of_list env) None (Some log) (Some log) [] script [] in - let starttime = Unix.time () in - let rec update_progress () = - (* Check for cancelling *) - if TaskHelper.is_cancelling ~__context - then - begin - Unix.kill (Forkhelpers.getpid pid) Sys.sigterm; - let _ = Forkhelpers.waitpid pid in - TaskHelper.raise_cancelled ~__context - end; - - let (newpid,status) = Forkhelpers.waitpid_nohang pid in - if newpid <> 0 - then - (match status with - | Unix.WEXITED 0 -> (newpid,status) - | (Unix.WEXITED n|Unix.WSIGNALED n|Unix.WSTOPPED n) -> raise (Subprocess_failed n)) - else - begin - Thread.delay 1.0; - refresh_session (); - let curtime = Unix.time () in - let elapsed = curtime -. starttime in - let f x = 0.1 +. (0.9 -. 0.9 *. exp (-. elapsed /. 60.0)) in - let progress = f elapsed in - TaskHelper.set_progress ~__context progress; - update_progress () - end - in update_progress () - ) with - | Success _ -> debug "Install script exited successfully." 
- | Failure(log, Subprocess_failed n) -> - error "post_install_script failed: message='%s' (assuming this was because the disk was too small)" log; - raise (Api_errors.Server_error (Api_errors.provision_failed_out_of_space, [])) - | Failure(log, exn) -> - raise exn - ) - - diff --git a/ocaml/xapi/xapi_tunnel.ml b/ocaml/xapi/xapi_tunnel.ml index f4194fbd0a9..c903d0e3d23 100644 --- a/ocaml/xapi/xapi_tunnel.ml +++ b/ocaml/xapi/xapi_tunnel.ml @@ -11,65 +11,65 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -module D = Debug.Make(struct let name="xapi" end) +module D = Debug.Make(struct let name="xapi" end) open D open Db_filter_types -let choose_tunnel_device_name ~__context ~host = - (* list all the tunnel access PIFs on this host *) - let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( - Eq (Field "host", Literal (Ref.string_of host)), - Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) - )) in - let devices = List.map (fun self -> Db.PIF.get_device ~__context ~self) pifs in - let rec choose n = - let name = Printf.sprintf "tunnel%d" n in - if List.mem name devices - then choose (n + 1) - else name in - choose 0 +let choose_tunnel_device_name ~__context ~host = + (* list all the tunnel access PIFs on this host *) + let pifs = Db.PIF.get_refs_where ~__context ~expr:(And ( + Eq (Field "host", Literal (Ref.string_of host)), + Not (Eq (Field "tunnel_access_PIF_of", Literal "()")) + )) in + let devices = List.map (fun self -> Db.PIF.get_device ~__context ~self) pifs in + let rec choose n = + let name = Printf.sprintf "tunnel%d" n in + if List.mem name devices + then choose (n + 1) + else name in + choose 0 let create_internal ~__context ~transport_PIF ~network ~host = - let tunnel = Ref.make () in - let access_PIF = Ref.make () in - let device = choose_tunnel_device_name ~__context ~host in - let device_name = device in - let mAC = Xapi_vif_helpers.gen_mac (0, Uuid.to_string 
(Uuid.make_uuid ())) in - let metrics = Db.PIF.get_metrics ~__context ~self:transport_PIF in - Db.PIF.create ~__context ~ref:access_PIF ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~device ~device_name ~network ~host ~mAC ~mTU:(-1L) ~vLAN:(-1L) ~metrics - ~physical:false ~currently_attached:false - ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null - ~vLAN_master_of:Ref.null ~management:false ~other_config:[] ~disallow_unplug:false ~ipv6_configuration_mode:`None - ~iPv6:[""] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true ~properties:[] ~capabilities:[]; - Db.Tunnel.create ~__context ~ref:tunnel ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~access_PIF ~transport_PIF ~status:["active", "false"] ~other_config:[]; - tunnel, access_PIF + let tunnel = Ref.make () in + let access_PIF = Ref.make () in + let device = choose_tunnel_device_name ~__context ~host in + let device_name = device in + let mAC = Xapi_vif_helpers.gen_mac (0, Uuid.to_string (Uuid.make_uuid ())) in + let metrics = Db.PIF.get_metrics ~__context ~self:transport_PIF in + Db.PIF.create ~__context ~ref:access_PIF ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~device ~device_name ~network ~host ~mAC ~mTU:(-1L) ~vLAN:(-1L) ~metrics + ~physical:false ~currently_attached:false + ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null + ~vLAN_master_of:Ref.null ~management:false ~other_config:[] ~disallow_unplug:false ~ipv6_configuration_mode:`None + ~iPv6:[""] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true ~properties:[] ~capabilities:[]; + Db.Tunnel.create ~__context ~ref:tunnel ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~access_PIF ~transport_PIF ~status:["active", "false"] ~other_config:[]; + tunnel, access_PIF let create ~__context ~transport_PIF ~network = - if Db.PIF.get_managed ~__context ~self:transport_PIF <> true then - raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of 
transport_PIF])); - if Db.PIF.get_bond_slave_of ~__context ~self:transport_PIF <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_add_tunnel_to_bond_slave, [Ref.string_of transport_PIF])); - let host = Db.PIF.get_host ~__context ~self:transport_PIF in - Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; - let hosts = Db.Host.get_all ~__context in - List.iter - (fun h -> - let v = Db.Host.get_software_version ~__context ~self:h in - if not (List.mem_assoc "network_backend" v && List.assoc "network_backend" v = "openvswitch") then - raise (Api_errors.Server_error (Api_errors.openvswitch_not_active, [])); - ) hosts; - if Db.PIF.get_tunnel_access_PIF_of ~__context ~self:transport_PIF <> [] then - raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of transport_PIF])); - let tunnel, access_PIF = create_internal ~__context ~transport_PIF ~network ~host in - Xapi_pif.plug ~__context ~self:access_PIF; - tunnel - + if Db.PIF.get_managed ~__context ~self:transport_PIF <> true then + raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of transport_PIF])); + if Db.PIF.get_bond_slave_of ~__context ~self:transport_PIF <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_add_tunnel_to_bond_slave, [Ref.string_of transport_PIF])); + let host = Db.PIF.get_host ~__context ~self:transport_PIF in + Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; + let hosts = Db.Host.get_all ~__context in + List.iter + (fun h -> + let v = Db.Host.get_software_version ~__context ~self:h in + if not (List.mem_assoc "network_backend" v && List.assoc "network_backend" v = "openvswitch") then + raise (Api_errors.Server_error (Api_errors.openvswitch_not_active, [])); + ) hosts; + if Db.PIF.get_tunnel_access_PIF_of ~__context ~self:transport_PIF <> [] then + raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of transport_PIF])); + let tunnel, access_PIF = create_internal ~__context 
~transport_PIF ~network ~host in + Xapi_pif.plug ~__context ~self:access_PIF; + tunnel + let destroy ~__context ~self = - let pif = Db.Tunnel.get_access_PIF ~__context ~self in - Xapi_pif.unplug ~__context ~self:pif; - Db.PIF.destroy ~__context ~self:pif; - Db.Tunnel.destroy ~__context ~self + let pif = Db.Tunnel.get_access_PIF ~__context ~self in + Xapi_pif.unplug ~__context ~self:pif; + Db.PIF.destroy ~__context ~self:pif; + Db.Tunnel.destroy ~__context ~self diff --git a/ocaml/xapi/xapi_tunnel.mli b/ocaml/xapi/xapi_tunnel.mli index 9a69bcb1742..d98b58e8b76 100644 --- a/ocaml/xapi/xapi_tunnel.mli +++ b/ocaml/xapi/xapi_tunnel.mli @@ -13,7 +13,7 @@ *) (** Module that defines API functions for tunnels * @group Networking - *) +*) (** Create a tunnel for a given transport PIF and network *) val create : diff --git a/ocaml/xapi/xapi_udhcpd.ml b/ocaml/xapi/xapi_udhcpd.ml index 00e35e8e0f3..a2452f813e3 100644 --- a/ocaml/xapi/xapi_udhcpd.ml +++ b/ocaml/xapi/xapi_udhcpd.ml @@ -30,208 +30,208 @@ let pxe_server_key = "pxe_server" let (|>) x f = f x module Ip = struct - type t = int * int * int * int with rpc - - exception Invalid_ip of t - - let check ((a, b, c, d) as ip) = - if a >= 256 || b >= 256 || c >= 256 || d >=256 then raise (Invalid_ip ip) else ip - - let string_of (a, b, c, d) = Printf.sprintf "%d.%d.%d.%d" a b c d - let of_string s = Scanf.sscanf s "%d.%d.%d.%d" (fun a b c d -> check (a,b,c,d)) - - (** [succ ip] returns the "next" address after [ip] *) - let succ (a, b, c, d) = - let (a, b, c, d) = (a, b, c, d + 1) in - let (a, b, c, d) = if d < 256 then (a, b, c, d) else (a, b, c + 1, 0) in - let (a, b, c, d) = if c < 256 then (a, b, c, d) else (a, b + 1, 0, d) in - let (a, b, c, d) = if b < 256 then (a, b, c, d) else (a + 1, 0, c, d) in - check (a, b, c, d) - - (** [gt a b] returns true iff [a] is later than [b] in the sequence *) - let gt (a, b, c, d) (a', b', c', d') = - (a > a') || ((a = a') && (b > b')) || ((a = a') && (b = b') && (c > c')) || ((a = a') && 
(b = b') && (c = c') && (d > d')) - - (** [first a b f] returns [Some x] where [x] is the first address in the sequence from - [a] to [b] where [f x] is true if it exists, and [None] otherwise. *) - let rec first a b f = - if gt a b then None - else - if f a then Some a - else first (succ a) b f + type t = int * int * int * int with rpc + + exception Invalid_ip of t + + let check ((a, b, c, d) as ip) = + if a >= 256 || b >= 256 || c >= 256 || d >=256 then raise (Invalid_ip ip) else ip + + let string_of (a, b, c, d) = Printf.sprintf "%d.%d.%d.%d" a b c d + let of_string s = Scanf.sscanf s "%d.%d.%d.%d" (fun a b c d -> check (a,b,c,d)) + + (** [succ ip] returns the "next" address after [ip] *) + let succ (a, b, c, d) = + let (a, b, c, d) = (a, b, c, d + 1) in + let (a, b, c, d) = if d < 256 then (a, b, c, d) else (a, b, c + 1, 0) in + let (a, b, c, d) = if c < 256 then (a, b, c, d) else (a, b + 1, 0, d) in + let (a, b, c, d) = if b < 256 then (a, b, c, d) else (a + 1, 0, c, d) in + check (a, b, c, d) + + (** [gt a b] returns true iff [a] is later than [b] in the sequence *) + let gt (a, b, c, d) (a', b', c', d') = + (a > a') || ((a = a') && (b > b')) || ((a = a') && (b = b') && (c > c')) || ((a = a') && (b = b') && (c = c') && (d > d')) + + (** [first a b f] returns [Some x] where [x] is the first address in the sequence from + [a] to [b] where [f x] is true if it exists, and [None] otherwise. *) + let rec first a b f = + if gt a b then None + else + if f a then Some a + else first (succ a) b f end -type static_lease = { - mac : string; - ip : Ip.t; - vif : string; (* API.ref_VIF *) - network : string; (* API.ref_network *) +type static_lease = { + mac : string; + ip : Ip.t; + vif : string; (* API.ref_VIF *) + network : string; (* API.ref_network *) } with rpc type static_leases = static_lease list with rpc (** List of static leases. Protected by mutex below. 
*) -let assigned = ref [] +let assigned = ref [] (** Updates the assigned_ips field of networks in xapi's database *) let update_db_nolock ~__context = - let loc_assigned = !assigned in - let networks = List.map (fun lease -> lease.network) loc_assigned |> Listext.List.setify in - let update_network net = - let cur_assigned = Db.Network.get_assigned_ips ~__context ~self:(Ref.of_string net) in - let cur_vifs = List.filter (fun lease -> lease.network = net) loc_assigned - |> List.map (fun l -> Ref.of_string l.vif) in - let db_vifs = List.map fst cur_assigned in - let new_lease_vifs = Listext.List.set_difference cur_vifs db_vifs in - let released_lease_vifs = Listext.List.set_difference db_vifs cur_vifs in - List.iter (fun new_lease_vif -> - let lease = - List.find (fun x -> x.vif = Ref.string_of new_lease_vif) loc_assigned in - Db.Network.add_to_assigned_ips ~__context ~self:(Ref.of_string net) - ~key:new_lease_vif ~value:(Ip.string_of lease.ip)) new_lease_vifs; - List.iter (fun released_lease_vif -> - Db.Network.remove_from_assigned_ips ~__context ~self:(Ref.of_string net) ~key:released_lease_vif - ) released_lease_vifs - in - List.iter update_network networks + let loc_assigned = !assigned in + let networks = List.map (fun lease -> lease.network) loc_assigned |> Listext.List.setify in + let update_network net = + let cur_assigned = Db.Network.get_assigned_ips ~__context ~self:(Ref.of_string net) in + let cur_vifs = List.filter (fun lease -> lease.network = net) loc_assigned + |> List.map (fun l -> Ref.of_string l.vif) in + let db_vifs = List.map fst cur_assigned in + let new_lease_vifs = Listext.List.set_difference cur_vifs db_vifs in + let released_lease_vifs = Listext.List.set_difference db_vifs cur_vifs in + List.iter (fun new_lease_vif -> + let lease = + List.find (fun x -> x.vif = Ref.string_of new_lease_vif) loc_assigned in + Db.Network.add_to_assigned_ips ~__context ~self:(Ref.of_string net) + ~key:new_lease_vif ~value:(Ip.string_of lease.ip)) 
new_lease_vifs; + List.iter (fun released_lease_vif -> + Db.Network.remove_from_assigned_ips ~__context ~self:(Ref.of_string net) ~key:released_lease_vif + ) released_lease_vifs + in + List.iter update_network networks (** Called on startup to reload the leases database *) let load_db_nolock () = - let s = Unixext.string_of_file !Xapi_globs.udhcpd_leases_db in - let rpc = Jsonrpc.of_string s in - assigned := static_leases_of_rpc rpc; - info "Host internal management network successfully loaded DHCP leases db from %s" !Xapi_globs.udhcpd_leases_db + let s = Unixext.string_of_file !Xapi_globs.udhcpd_leases_db in + let rpc = Jsonrpc.of_string s in + assigned := static_leases_of_rpc rpc; + info "Host internal management network successfully loaded DHCP leases db from %s" !Xapi_globs.udhcpd_leases_db (** Called before every update to save the leases database *) let save_db_nolock () = - let rpc = rpc_of_static_leases !assigned in - let s = Jsonrpc.to_string rpc in - Unixext.write_string_to_file !Xapi_globs.udhcpd_leases_db s + let rpc = rpc_of_static_leases !assigned in + let s = Jsonrpc.to_string rpc in + Unixext.write_string_to_file !Xapi_globs.udhcpd_leases_db s module Udhcpd_conf = struct - type t = { - interface: string; - subnet: string; - router: Ip.t; - leases: static_leases; - } - - let make ~__context leases router = - let network = Helpers.get_guest_installer_network ~__context in - let interface = Db.Network.get_bridge ~__context ~self:network in - let other_config = Db.Network.get_other_config ~__context ~self:network in - let subnet = List.assoc "netmask" other_config in - { - interface; - subnet; - router; - leases; - } - - let to_string ~__context t = - let skel = Unixext.string_of_file !Xapi_globs.udhcpd_skel in - let interface = Printf.sprintf "interface\t%s" t.interface in - let subnet = Printf.sprintf "option\tsubnet\t%s" t.subnet in - let router = Printf.sprintf "option\trouter\t%s" (Ip.string_of t.router) in - let pxe = Printf.sprintf 
"siaddr\t%s\nboot_file\t/pxelinux.0" (Ip.string_of t.router) in - let string_of_lease l = - Printf.sprintf "static_lease\t%s\t%s # %s\n" l.mac (Ip.string_of l.ip) l.vif in - let leases = List.map string_of_lease t.leases in - let network = Helpers.get_guest_installer_network ~__context in - let other_config = Db.Network.get_other_config ~__context ~self:network in - let include_gw = - try not (List.assoc ip_disable_gw_key other_config = "true") - with Not_found -> true in - let include_pxe = - try List.assoc pxe_server_key other_config = "true" - with Not_found -> false in - let config_list = - skel - :: interface - :: subnet - :: (if include_gw then [router] else []) - @ (if include_pxe then [pxe] else []) - @ leases in - String.concat "\n" config_list + type t = { + interface: string; + subnet: string; + router: Ip.t; + leases: static_leases; + } + + let make ~__context leases router = + let network = Helpers.get_guest_installer_network ~__context in + let interface = Db.Network.get_bridge ~__context ~self:network in + let other_config = Db.Network.get_other_config ~__context ~self:network in + let subnet = List.assoc "netmask" other_config in + { + interface; + subnet; + router; + leases; + } + + let to_string ~__context t = + let skel = Unixext.string_of_file !Xapi_globs.udhcpd_skel in + let interface = Printf.sprintf "interface\t%s" t.interface in + let subnet = Printf.sprintf "option\tsubnet\t%s" t.subnet in + let router = Printf.sprintf "option\trouter\t%s" (Ip.string_of t.router) in + let pxe = Printf.sprintf "siaddr\t%s\nboot_file\t/pxelinux.0" (Ip.string_of t.router) in + let string_of_lease l = + Printf.sprintf "static_lease\t%s\t%s # %s\n" l.mac (Ip.string_of l.ip) l.vif in + let leases = List.map string_of_lease t.leases in + let network = Helpers.get_guest_installer_network ~__context in + let other_config = Db.Network.get_other_config ~__context ~self:network in + let include_gw = + try not (List.assoc ip_disable_gw_key other_config = "true") + with 
Not_found -> true in + let include_pxe = + try List.assoc pxe_server_key other_config = "true" + with Not_found -> false in + let config_list = + skel + :: interface + :: subnet + :: (if include_gw then [router] else []) + @ (if include_pxe then [pxe] else []) + @ leases in + String.concat "\n" config_list end let write_config_nolock ~__context ip_router = - let config = Udhcpd_conf.make ~__context (!assigned) ip_router in - Unixext.unlink_safe !Xapi_globs.udhcpd_conf; - Unixext.write_string_to_file !Xapi_globs.udhcpd_conf (Udhcpd_conf.to_string ~__context config) + let config = Udhcpd_conf.make ~__context (!assigned) ip_router in + Unixext.unlink_safe !Xapi_globs.udhcpd_conf; + Unixext.write_string_to_file !Xapi_globs.udhcpd_conf (Udhcpd_conf.to_string ~__context config) let restart_nolock () = - let pid = try Unixext.pidfile_read !Xapi_globs.udhcpd_pidfile with _ -> None in - Opt.iter Unixext.kill_and_wait pid; - let (_: string * string) = execute_command_get_output !Xapi_globs.busybox [ "udhcpd"; !Xapi_globs.udhcpd_conf ] in - () + let pid = try Unixext.pidfile_read !Xapi_globs.udhcpd_pidfile with _ -> None in + Opt.iter Unixext.kill_and_wait pid; + let (_: string * string) = execute_command_get_output !Xapi_globs.busybox [ "udhcpd"; !Xapi_globs.udhcpd_conf ] in + () let find_lease_nolock vif = - try Some (List.find (fun l -> l.vif = vif) !assigned) - with Not_found -> None + try Some (List.find (fun l -> l.vif = vif) !assigned) + with Not_found -> None (* We only expire leases when the VIFs are *destroyed* from the database. 
Otherwise we get into trouble with sequences like VM.suspend, VM.resume *) let gc_leases_nolock ~__context = - let vif_still_exists l = Db.is_valid_ref __context (Ref.of_string l.vif) in - let good, bad = List.partition vif_still_exists !assigned in - List.iter (fun l -> - info "Host internal management network removing lease for VIF %s -> %s" l.vif (Ip.string_of l.ip) - ) bad; - assigned := good + let vif_still_exists l = Db.is_valid_ref __context (Ref.of_string l.vif) in + let good, bad = List.partition vif_still_exists !assigned in + List.iter (fun l -> + info "Host internal management network removing lease for VIF %s -> %s" l.vif (Ip.string_of l.ip) + ) bad; + assigned := good let maybe_add_lease_nolock ~__context vif = - let network = Helpers.get_host_internal_management_network ~__context in - if network = Db.VIF.get_network ~__context ~self:vif - then begin - let other_config = Db.Network.get_other_config ~__context ~self:network in - if not(List.mem_assoc ip_begin_key other_config) || not(List.mem_assoc ip_end_key other_config) - then failwith (Printf.sprintf "Host internal management network %s other_config has no ip_begin/ip_end keys" (Ref.string_of network)); - - let ip_begin = Ip.of_string (List.assoc ip_begin_key other_config) - and ip_end = Ip.of_string (List.assoc ip_end_key other_config) in - match find_lease_nolock (Ref.string_of vif) with - | Some l -> - info "VIF %s on host-internal management network already has lease: %s" - (Ref.string_of vif) (Ip.string_of l.ip); - restart_nolock () - | None -> - gc_leases_nolock ~__context; - let mac = Db.VIF.get_MAC ~__context ~self:vif in - (* NB ip_begin is the address on the bridge itself *) - begin match Ip.first (Ip.succ ip_begin) ip_end - (fun ip -> List.filter (fun l -> l.ip = ip) !assigned = []) with - | Some ip -> - assigned := {mac; ip; vif = Ref.string_of vif; network = Ref.string_of network} :: !assigned; - save_db_nolock (); - update_db_nolock ~__context; - write_config_nolock ~__context 
ip_begin; - restart_nolock () - | None -> - error "VM on guest installer network, but not IPs available"; - failwith "No IP addresses left" - end - end + let network = Helpers.get_host_internal_management_network ~__context in + if network = Db.VIF.get_network ~__context ~self:vif + then begin + let other_config = Db.Network.get_other_config ~__context ~self:network in + if not(List.mem_assoc ip_begin_key other_config) || not(List.mem_assoc ip_end_key other_config) + then failwith (Printf.sprintf "Host internal management network %s other_config has no ip_begin/ip_end keys" (Ref.string_of network)); + + let ip_begin = Ip.of_string (List.assoc ip_begin_key other_config) + and ip_end = Ip.of_string (List.assoc ip_end_key other_config) in + match find_lease_nolock (Ref.string_of vif) with + | Some l -> + info "VIF %s on host-internal management network already has lease: %s" + (Ref.string_of vif) (Ip.string_of l.ip); + restart_nolock () + | None -> + gc_leases_nolock ~__context; + let mac = Db.VIF.get_MAC ~__context ~self:vif in + (* NB ip_begin is the address on the bridge itself *) + begin match Ip.first (Ip.succ ip_begin) ip_end + (fun ip -> List.filter (fun l -> l.ip = ip) !assigned = []) with + | Some ip -> + assigned := {mac; ip; vif = Ref.string_of vif; network = Ref.string_of network} :: !assigned; + save_db_nolock (); + update_db_nolock ~__context; + write_config_nolock ~__context ip_begin; + restart_nolock () + | None -> + error "VM on guest installer network, but not IPs available"; + failwith "No IP addresses left" + end + end let mutex = Mutex.create () let maybe_add_lease ~__context vif = - Helpers.log_exn_continue (Printf.sprintf "maybe_add_lease VIF:%s" (Ref.string_of vif)) (fun () -> - Mutex.execute mutex (fun () -> - maybe_add_lease_nolock ~__context vif - ) - ) () + Helpers.log_exn_continue (Printf.sprintf "maybe_add_lease VIF:%s" (Ref.string_of vif)) (fun () -> + Mutex.execute mutex (fun () -> + maybe_add_lease_nolock ~__context vif + ) + ) () let 
get_ip ~__context vif = - let vif = Ref.string_of vif in - Mutex.execute mutex (fun () -> - Opt.map (fun l -> l.ip) (find_lease_nolock vif) - ) + let vif = Ref.string_of vif in + Mutex.execute mutex (fun () -> + Opt.map (fun l -> l.ip) (find_lease_nolock vif) + ) let init () = - Mutex.execute mutex (fun () -> - try load_db_nolock () - with e -> - info "Caught exception %s loading %s: creating new empty leases database" (Printexc.to_string e) !Xapi_globs.udhcpd_leases_db; - assigned := [] - ) + Mutex.execute mutex (fun () -> + try load_db_nolock () + with e -> + info "Caught exception %s loading %s: creating new empty leases database" (Printexc.to_string e) !Xapi_globs.udhcpd_leases_db; + assigned := [] + ) diff --git a/ocaml/xapi/xapi_upgrade.ml b/ocaml/xapi/xapi_upgrade.ml index 419b25b2820..66207e17aa7 100644 --- a/ocaml/xapi/xapi_upgrade.ml +++ b/ocaml/xapi/xapi_upgrade.ml @@ -17,8 +17,8 @@ open D (* High-level functions called when rolling upgrade 'starts' and 'stops' where start and stop are defined by the logic in db_gc.ml. 
*) -let start () = +let start () = () -let stop () = +let stop () = () diff --git a/ocaml/xapi/xapi_user.ml b/ocaml/xapi/xapi_user.ml index e9d2e5a9fba..bb9346f0faa 100644 --- a/ocaml/xapi/xapi_user.ml +++ b/ocaml/xapi/xapi_user.ml @@ -13,16 +13,16 @@ *) (** Module that defines API functions for User objects * @group XenAPI functions - *) +*) let get_allowed_messages ~__context ~self = [] let create ~__context ~short_name ~fullname ~other_config = - let uuid = Uuid.make_uuid () in - let ref = Ref.make () in - Db.User.create ~__context ~ref ~uuid:(Uuid.to_string uuid) - ~short_name ~fullname ~other_config; - ref + let uuid = Uuid.make_uuid () in + let ref = Ref.make () in + Db.User.create ~__context ~ref ~uuid:(Uuid.to_string uuid) + ~short_name ~fullname ~other_config; + ref let destroy ~__context ~self = - Db.User.destroy ~__context ~self + Db.User.destroy ~__context ~self diff --git a/ocaml/xapi/xapi_vbd.ml b/ocaml/xapi/xapi_vbd.ml index ce2efdcac01..7e669d0128f 100644 --- a/ocaml/xapi/xapi_vbd.ml +++ b/ocaml/xapi/xapi_vbd.ml @@ -13,7 +13,7 @@ *) (** Module that defines API functions for VBD objects * @group XenAPI functions - *) +*) open Stdext open Xstringext @@ -22,71 +22,71 @@ open Vbdops open Threadext open D -let assert_operation_valid ~__context ~self ~(op:API.vbd_operations) = +let assert_operation_valid ~__context ~self ~(op:API.vbd_operations) = assert_operation_valid ~__context ~self ~op let update_allowed_operations ~__context ~self : unit = update_allowed_operations ~__context ~self -let assert_attachable ~__context ~self : unit = +let assert_attachable ~__context ~self : unit = assert_attachable ~__context ~self let set_mode ~__context ~self ~value = - let vm = Db.VBD.get_VM ~__context ~self in - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vm ~expected:`Halted; - Db.VBD.set_mode ~__context ~self ~value + let vm = Db.VBD.get_VM ~__context ~self in + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vm ~expected:`Halted; + 
Db.VBD.set_mode ~__context ~self ~value let plug ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in - let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in - let hvm = Helpers.has_booted_hvm ~__context ~self:vm in - if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd then begin - debug "VBD.plug of loopback VBD '%s'" (Ref.string_of self); - Storage_access.attach_and_activate ~__context ~vbd:self ~domid ~hvm - (fun attach_info -> - let params = attach_info.Storage_interface.params in - let prefix = "/dev/" in - let prefix_len = String.length prefix in - let path = String.sub params prefix_len (String.length params - prefix_len) in - Db.VBD.set_device ~__context ~self ~value:path; - Db.VBD.set_currently_attached ~__context ~self ~value:true; - ) - end - else begin - (* CA-83260: prevent HVM guests having readonly disk VBDs *) - let dev_type = Db.VBD.get_type ~__context ~self in - let mode = Db.VBD.get_mode ~__context ~self in - if hvm && dev_type <> `CD && mode = `RO then - raise (Api_errors.Server_error(Api_errors.disk_vbd_must_be_readwrite_for_hvm, [ Ref.string_of self ])); - Xapi_xenops.vbd_plug ~__context ~self - end + let vm = Db.VBD.get_VM ~__context ~self in + let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in + let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in + let hvm = Helpers.has_booted_hvm ~__context ~self:vm in + if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd then begin + debug "VBD.plug of loopback VBD '%s'" (Ref.string_of self); + Storage_access.attach_and_activate ~__context ~vbd:self ~domid ~hvm + (fun attach_info -> + let params = attach_info.Storage_interface.params in + let prefix = "/dev/" in + let prefix_len = String.length prefix in + let path = String.sub params prefix_len (String.length params - prefix_len) in + 
Db.VBD.set_device ~__context ~self ~value:path; + Db.VBD.set_currently_attached ~__context ~self ~value:true; + ) + end + else begin + (* CA-83260: prevent HVM guests having readonly disk VBDs *) + let dev_type = Db.VBD.get_type ~__context ~self in + let mode = Db.VBD.get_mode ~__context ~self in + if hvm && dev_type <> `CD && mode = `RO then + raise (Api_errors.Server_error(Api_errors.disk_vbd_must_be_readwrite_for_hvm, [ Ref.string_of self ])); + Xapi_xenops.vbd_plug ~__context ~self + end let unplug ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in - if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd then begin - debug "VBD.unplug of loopback VBD '%s'" (Ref.string_of self); - let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in - Storage_access.deactivate_and_detach ~__context ~vbd:self ~domid; - Db.VBD.set_currently_attached ~__context ~self ~value:false - end - else Xapi_xenops.vbd_unplug ~__context ~self false + let vm = Db.VBD.get_VM ~__context ~self in + let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in + if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd then begin + debug "VBD.unplug of loopback VBD '%s'" (Ref.string_of self); + let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in + Storage_access.deactivate_and_detach ~__context ~vbd:self ~domid; + Db.VBD.set_currently_attached ~__context ~self ~value:false + end + else Xapi_xenops.vbd_unplug ~__context ~self false let unplug_force ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in - if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd - then unplug ~__context ~self - else Xapi_xenops.vbd_unplug ~__context ~self true + let vm = Db.VBD.get_VM ~__context ~self in + 
let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in + if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd + then unplug ~__context ~self + else Xapi_xenops.vbd_unplug ~__context ~self true let unplug_force_no_safety_check ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in - if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd - then unplug ~__context ~self - else Xapi_xenops.vbd_unplug ~__context ~self true + let vm = Db.VBD.get_VM ~__context ~self in + let force_loopback_vbd = Helpers.force_loopback_vbd ~__context in + if System_domains.storage_driver_domain_of_vbd ~__context ~vbd:self = vm && not force_loopback_vbd + then unplug ~__context ~self + else Xapi_xenops.vbd_unplug ~__context ~self true (** Hold this mutex while resolving the 'autodetect' device names to prevent two concurrent VBD.creates racing with each other and choosing the same device. 
For simplicity keep this @@ -96,112 +96,112 @@ let autodetect_mutex = Mutex.create () (** VBD.create doesn't require any interaction with xen *) let create ~__context ~vM ~vDI ~userdevice ~bootable ~mode ~_type ~unpluggable ~empty - ~other_config ~qos_algorithm_type ~qos_algorithm_params = - - if not empty then begin - let vdi_type = Db.VDI.get_type ~__context ~self:vDI in - if not(List.mem vdi_type [ `system; `user; `ephemeral; `suspend; `crashdump; `metadata; `rrd]) - then raise (Api_errors.Server_error(Api_errors.vdi_incompatible_type, [ Ref.string_of vDI; Record_util.vdi_type_to_string vdi_type ])) - end; - - (* All "CD" VBDs must be readonly *) - if _type = `CD && mode <> `RO - then raise (Api_errors.Server_error(Api_errors.vbd_cds_must_be_readonly, [])); - (* Only "CD" VBDs may be empty *) - if _type <> `CD && empty - then raise (Api_errors.Server_error(Api_errors.vbd_not_removable_media, [ "in constructor" ])); - - (* Prevent RW VBDs being created pointing to RO VDIs *) - if mode = `RW && Db.VDI.get_read_only ~__context ~self:vDI - then raise (Api_errors.Server_error(Api_errors.vdi_readonly, [ Ref.string_of vDI ])); - - (* CA-75697: Disallow VBD.create on a VM that's in the middle of a migration *) - debug "Checking whether there's a migrate in progress..."; - let vm_current_ops = Listext.List.setify (List.map snd (Db.VM.get_current_operations ~__context ~self:vM)) in - let migrate_ops = [ `migrate_send; `pool_migrate ] in - let migrate_ops_in_progress = List.filter (fun op -> List.mem op vm_current_ops) migrate_ops in - match migrate_ops_in_progress with - | op::_ -> - raise (Api_errors.Server_error( - Api_errors.other_operation_in_progress, - [ "VM"; Ref.string_of vM; Record_util.vm_operation_to_string op ])); - | _ -> - - Mutex.execute autodetect_mutex (fun () -> - let possibilities = Xapi_vm_helpers.allowed_VBD_devices ~__context ~vm:vM ~_type in - - if not (valid_device userdevice) || (userdevice = "autodetect" && possibilities = []) then - raise 
(Api_errors.Server_error (Api_errors.invalid_device,[userdevice])); - - (* Resolve the "autodetect" into a fixed device name now *) - let userdevice = - if userdevice = "autodetect" - then match _type with - (* already checked for [] above *) - | `Floppy -> Device_number.to_linux_device (List.hd possibilities) - | `CD | `Disk -> string_of_int (Device_number.to_disk_number (List.hd possibilities)) - else userdevice - in - - let uuid = Uuid.make_uuid () in - let ref = Ref.make () in - debug "VBD.create (device = %s; uuid = %s; ref = %s)" - userdevice (Uuid.string_of_uuid uuid) (Ref.string_of ref); - - (* Check that the device is definitely unique. If the requested device is numerical - (eg 1) then we 'expand' it into other possible names (eg 'hdb' 'xvdb') to detect - all possible clashes. *) - let userdevices = Xapi_vm_helpers.possible_VBD_devices_of_string userdevice in - let existing_devices = Xapi_vm_helpers.all_used_VBD_devices ~__context ~self:vM in - if Listext.List.intersect userdevices existing_devices <> [] - then raise (Api_errors.Server_error (Api_errors.device_already_exists, [userdevice])); - - (* Make people aware that non-shared disks make VMs not agile *) - if not empty then assert_doesnt_make_vm_non_agile ~__context ~vm:vM ~vdi:vDI; - - let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Date.of_float 0.) 
- ~other_config:[]; - - (* Enable the SM driver to specify a VBD backend kind for the VDI *) - let other_config = - try - let vdi_sc = Db.VDI.get_sm_config ~__context ~self:vDI in - let k = Xapi_globs.vbd_backend_key in - let v = List.assoc k vdi_sc in - (k, v) :: other_config - with _ -> other_config - in - - Db.VBD.create ~__context ~ref ~uuid:(Uuid.to_string uuid) - ~current_operations:[] ~allowed_operations:[] ~storage_lock:false - ~vM ~vDI ~userdevice ~device:"" ~bootable ~mode ~_type ~unpluggable - ~empty ~reserved:false ~qos_algorithm_type ~qos_algorithm_params - ~qos_supported_algorithms:[] ~currently_attached:false - ~status_code:Int64.zero ~status_detail:"" ~runtime_properties:[] - ~other_config ~metrics; - update_allowed_operations ~__context ~self:ref; - ref - ) + ~other_config ~qos_algorithm_type ~qos_algorithm_params = + + if not empty then begin + let vdi_type = Db.VDI.get_type ~__context ~self:vDI in + if not(List.mem vdi_type [ `system; `user; `ephemeral; `suspend; `crashdump; `metadata; `rrd]) + then raise (Api_errors.Server_error(Api_errors.vdi_incompatible_type, [ Ref.string_of vDI; Record_util.vdi_type_to_string vdi_type ])) + end; + + (* All "CD" VBDs must be readonly *) + if _type = `CD && mode <> `RO + then raise (Api_errors.Server_error(Api_errors.vbd_cds_must_be_readonly, [])); + (* Only "CD" VBDs may be empty *) + if _type <> `CD && empty + then raise (Api_errors.Server_error(Api_errors.vbd_not_removable_media, [ "in constructor" ])); + + (* Prevent RW VBDs being created pointing to RO VDIs *) + if mode = `RW && Db.VDI.get_read_only ~__context ~self:vDI + then raise (Api_errors.Server_error(Api_errors.vdi_readonly, [ Ref.string_of vDI ])); + + (* CA-75697: Disallow VBD.create on a VM that's in the middle of a migration *) + debug "Checking whether there's a migrate in progress..."; + let vm_current_ops = Listext.List.setify (List.map snd (Db.VM.get_current_operations ~__context ~self:vM)) in + let migrate_ops = [ `migrate_send; 
`pool_migrate ] in + let migrate_ops_in_progress = List.filter (fun op -> List.mem op vm_current_ops) migrate_ops in + match migrate_ops_in_progress with + | op::_ -> + raise (Api_errors.Server_error( + Api_errors.other_operation_in_progress, + [ "VM"; Ref.string_of vM; Record_util.vm_operation_to_string op ])); + | _ -> + + Mutex.execute autodetect_mutex (fun () -> + let possibilities = Xapi_vm_helpers.allowed_VBD_devices ~__context ~vm:vM ~_type in + + if not (valid_device userdevice) || (userdevice = "autodetect" && possibilities = []) then + raise (Api_errors.Server_error (Api_errors.invalid_device,[userdevice])); + + (* Resolve the "autodetect" into a fixed device name now *) + let userdevice = + if userdevice = "autodetect" + then match _type with + (* already checked for [] above *) + | `Floppy -> Device_number.to_linux_device (List.hd possibilities) + | `CD | `Disk -> string_of_int (Device_number.to_disk_number (List.hd possibilities)) + else userdevice + in + + let uuid = Uuid.make_uuid () in + let ref = Ref.make () in + debug "VBD.create (device = %s; uuid = %s; ref = %s)" + userdevice (Uuid.string_of_uuid uuid) (Ref.string_of ref); + + (* Check that the device is definitely unique. If the requested device is numerical + (eg 1) then we 'expand' it into other possible names (eg 'hdb' 'xvdb') to detect + all possible clashes. *) + let userdevices = Xapi_vm_helpers.possible_VBD_devices_of_string userdevice in + let existing_devices = Xapi_vm_helpers.all_used_VBD_devices ~__context ~self:vM in + if Listext.List.intersect userdevices existing_devices <> [] + then raise (Api_errors.Server_error (Api_errors.device_already_exists, [userdevice])); + + (* Make people aware that non-shared disks make VMs not agile *) + if not empty then assert_doesnt_make_vm_non_agile ~__context ~vm:vM ~vdi:vDI; + + let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.VBD_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid + ~io_read_kbs:0. 
~io_write_kbs:0. ~last_updated:(Date.of_float 0.) + ~other_config:[]; + + (* Enable the SM driver to specify a VBD backend kind for the VDI *) + let other_config = + try + let vdi_sc = Db.VDI.get_sm_config ~__context ~self:vDI in + let k = Xapi_globs.vbd_backend_key in + let v = List.assoc k vdi_sc in + (k, v) :: other_config + with _ -> other_config + in + + Db.VBD.create ~__context ~ref ~uuid:(Uuid.to_string uuid) + ~current_operations:[] ~allowed_operations:[] ~storage_lock:false + ~vM ~vDI ~userdevice ~device:"" ~bootable ~mode ~_type ~unpluggable + ~empty ~reserved:false ~qos_algorithm_type ~qos_algorithm_params + ~qos_supported_algorithms:[] ~currently_attached:false + ~status_code:Int64.zero ~status_detail:"" ~runtime_properties:[] + ~other_config ~metrics; + update_allowed_operations ~__context ~self:ref; + ref + ) let destroy ~__context ~self = destroy ~__context ~self (** Throws VBD_NOT_REMOVABLE_ERROR if the VBD doesn't represent removable media. Currently this just means "CD" but might change in future? 
*) let assert_removable ~__context ~vbd = - if not(Helpers.is_removable ~__context ~vbd) - then raise (Api_errors.Server_error(Api_errors.vbd_not_removable_media, [ Ref.string_of vbd ])) + if not(Helpers.is_removable ~__context ~vbd) + then raise (Api_errors.Server_error(Api_errors.vbd_not_removable_media, [ Ref.string_of vbd ])) (** Throws VBD_NOT_EMPTY if the VBD already has a VDI *) let assert_empty ~__context ~vbd = - if not(Db.VBD.get_empty ~__context ~self:vbd) - then raise (Api_errors.Server_error(Api_errors.vbd_not_empty, [ Ref.string_of vbd ])) + if not(Db.VBD.get_empty ~__context ~self:vbd) + then raise (Api_errors.Server_error(Api_errors.vbd_not_empty, [ Ref.string_of vbd ])) (** Throws VBD_IS_EMPTY if the VBD has no VDI *) let assert_not_empty ~__context ~vbd = - if Db.VBD.get_empty ~__context ~self:vbd - then raise (Api_errors.Server_error(Api_errors.vbd_is_empty, [ Ref.string_of vbd ])) + if Db.VBD.get_empty ~__context ~self:vbd + then raise (Api_errors.Server_error(Api_errors.vbd_is_empty, [ Ref.string_of vbd ])) (** Throws BAD_POWER_STATE if the VM is suspended *) let assert_not_suspended ~__context ~vm = @@ -211,34 +211,34 @@ let assert_not_suspended ~__context ~vm = raise (Api_errors.Server_error(Api_errors.vm_bad_power_state, error_params)) let assert_ok_to_insert ~__context ~vbd ~vdi = - let vm = Db.VBD.get_VM ~__context ~self:vbd in - assert_not_suspended ~__context ~vm; - assert_removable ~__context ~vbd; - assert_empty ~__context ~vbd; - Xapi_vdi_helpers.assert_managed ~__context ~vdi; - assert_doesnt_make_vm_non_agile ~__context ~vm ~vdi + let vm = Db.VBD.get_VM ~__context ~self:vbd in + assert_not_suspended ~__context ~vm; + assert_removable ~__context ~vbd; + assert_empty ~__context ~vbd; + Xapi_vdi_helpers.assert_managed ~__context ~vdi; + assert_doesnt_make_vm_non_agile ~__context ~vm ~vdi let insert ~__context ~vbd ~vdi = - assert_ok_to_insert ~__context ~vbd ~vdi; - Xapi_xenops.vbd_insert ~__context ~self:vbd ~vdi + assert_ok_to_insert 
~__context ~vbd ~vdi; + Xapi_xenops.vbd_insert ~__context ~self:vbd ~vdi let assert_ok_to_eject ~__context ~vbd = - let vm = Db.VBD.get_VM ~__context ~self:vbd in - assert_removable ~__context ~vbd; - assert_not_empty ~__context ~vbd; - assert_not_suspended ~__context ~vm + let vm = Db.VBD.get_VM ~__context ~self:vbd in + assert_removable ~__context ~vbd; + assert_not_empty ~__context ~vbd; + assert_not_suspended ~__context ~vm let eject ~__context ~vbd = - assert_ok_to_eject ~__context ~vbd; - Xapi_xenops.vbd_eject ~__context ~self:vbd + assert_ok_to_eject ~__context ~vbd; + Xapi_xenops.vbd_eject ~__context ~self:vbd open Threadext open Pervasiveext open Fun let pause ~__context ~self = - let vdi = Db.VBD.get_VDI ~__context ~self in - let sr = Db.VDI.get_SR ~__context ~self:vdi |> Ref.string_of in - raise (Api_errors.Server_error(Api_errors.sr_operation_not_supported, [ sr ])) + let vdi = Db.VBD.get_VDI ~__context ~self in + let sr = Db.VDI.get_SR ~__context ~self:vdi |> Ref.string_of in + raise (Api_errors.Server_error(Api_errors.sr_operation_not_supported, [ sr ])) let unpause = pause diff --git a/ocaml/xapi/xapi_vbd_helpers.ml b/ocaml/xapi/xapi_vbd_helpers.ml index 3592d10599f..44a8bb98447 100644 --- a/ocaml/xapi/xapi_vbd_helpers.ml +++ b/ocaml/xapi/xapi_vbd_helpers.ml @@ -13,7 +13,7 @@ *) (** Common code between the fake and real servers for dealing with VBDs * @group Storage - *) +*) open Stdext open Listext @@ -29,17 +29,17 @@ open D open Record_util let all_ops : API.vbd_operations_set = [ `attach; `eject; `unplug; `unplug_force; `insert; `plug; - `pause; `unpause ] + `pause; `unpause ] type table = (API.vbd_operations, ((string * (string list)) option)) Hashtbl.t (** Returns a table of operations -> API error options (None if the operation would be ok) - The flag 'expensive_sharing_checks' indicates whether to perform the VDI sharing checks. + The flag 'expensive_sharing_checks' indicates whether to perform the VDI sharing checks. 
We avoid performing these calculations on server start (CA-20808) and therefore end up with a slightly underconservative allowed_operations. This is still safe because we always perform these calculations in the error path. - *) -let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = +*) +let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = let _ref = Ref.string_of _ref' in let current_ops = List.setify (List.map snd record.Db_actions.vBD_current_operations) in (* Policy: @@ -48,7 +48,7 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = * Look up every other VBD pointing to the same VDI as this one and generate the subset for which either currently-attached is true or current_operations is non-empty. With reference to the VDI.sharable and VDI.read_only flags, perform the sharing - check. + check. * Consider the powerstate of the VM * Exempt the control domain from current-operations checking * NB must skip empty VBDs *) @@ -56,8 +56,8 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = List.iter (fun x -> Hashtbl.replace table x None) all_ops; let set_errors (code: string) (params: string list) (ops: API.vbd_operations_set) = List.iter (fun op -> - if Hashtbl.find table op = None - then Hashtbl.replace table op (Some(code, params))) ops in + if Hashtbl.find table op = None + then Hashtbl.replace table op (Some(code, params))) ops in let vm = Db.VBD.get_VM ~__context ~self:_ref' in let vm_r = Db.VM.get_record ~__context ~self:vm in @@ -68,24 +68,24 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = (* Any current_operations preclude everything that isn't safe to parallelise *) if current_ops <> [] then begin let concurrent_op = List.hd current_ops in - set_errors Api_errors.other_operation_in_progress - [ "VBD"; _ref; vbd_operation_to_string concurrent_op ] + set_errors Api_errors.other_operation_in_progress + [ 
"VBD"; _ref; vbd_operation_to_string concurrent_op ] (List.set_difference all_ops safe_to_parallelise); end; (* If not all operations are parallisable then preclude pause *) - let all_are_parallelisable = List.fold_left (&&) true - (List.map (fun op -> List.mem op safe_to_parallelise) current_ops) in - (* If not all are parallelisable, ban the otherwise + let all_are_parallelisable = List.fold_left (&&) true + (List.map (fun op -> List.mem op safe_to_parallelise) current_ops) in + (* If not all are parallelisable, ban the otherwise parallelisable operations too *) if not(all_are_parallelisable) then set_errors Api_errors.other_operation_in_progress - [ "VBD"; _ref; vbd_operation_to_string (List.hd current_ops) ] - [ `pause ]; + [ "VBD"; _ref; vbd_operation_to_string (List.hd current_ops) ] + [ `pause ]; (* If something other than `pause `unpause *and* `attach (for VM.reboot, see CA-24282) then disallow unpause *) if List.set_difference current_ops (`attach :: safe_to_parallelise) <> [] then set_errors Api_errors.other_operation_in_progress - [ "VBD"; _ref; vbd_operation_to_string (List.hd current_ops) ] - [ `unpause ]; + [ "VBD"; _ref; vbd_operation_to_string (List.hd current_ops) ] + [ `unpause ]; (* Drives marked as not unpluggable cannot be unplugged *) @@ -98,59 +98,59 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = (* Empty devices cannot be ejected *) let empty = Db.VBD.get_empty ~__context ~self:_ref' in - if empty + if empty then set_errors Api_errors.vbd_is_empty [ _ref ] [ `eject ] - else set_errors Api_errors.vbd_not_empty [ _ref ] [ `insert ]; + else set_errors Api_errors.vbd_not_empty [ _ref ] [ `insert ]; (* VM must be online to support plug/unplug *) let power_state = Db.VM.get_power_state ~__context ~self:vm in let plugged = record.Db_actions.vBD_currently_attached || record.Db_actions.vBD_reserved in (match power_state, plugged with - | `Running, true -> set_errors Api_errors.device_already_attached [ _ref ] [ 
`plug ] - | `Running, false -> set_errors Api_errors.device_already_detached [ _ref ] [ `unplug; `unplug_force ] - | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in - (* If not Running, always block these operations: *) - let bad_ops = [ `plug; `unplug; `unplug_force ] in - (* However allow VBD pause and unpause if the VM is paused: *) - let bad_ops' = if power_state = `Paused then bad_ops else `pause :: `unpause :: bad_ops in - set_errors Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; actual ] bad_ops'); + | `Running, true -> set_errors Api_errors.device_already_attached [ _ref ] [ `plug ] + | `Running, false -> set_errors Api_errors.device_already_detached [ _ref ] [ `unplug; `unplug_force ] + | _, _ -> + let actual = Record_util.power_to_string power_state in + let expected = Record_util.power_to_string `Running in + (* If not Running, always block these operations: *) + let bad_ops = [ `plug; `unplug; `unplug_force ] in + (* However allow VBD pause and unpause if the VM is paused: *) + let bad_ops' = if power_state = `Paused then bad_ops else `pause :: `unpause :: bad_ops in + set_errors Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; actual ] bad_ops'); (* VBD plug/unplug must fail for current_operations * like [clean_shutdown; hard_shutdown; suspend; pause] on VM *) let vm_current_ops = Db.VM.get_current_operations ~__context ~self:vm in List.iter (fun (task,op) -> - if List.mem op [ `clean_shutdown; `hard_shutdown; `suspend; `pause ] then begin - let current_op_str = "Current operation on VM:" ^ (Ref.string_of vm) ^ " is " - ^ (Record_util.vm_operation_to_string op) in - set_errors Api_errors.operation_not_allowed [ current_op_str ] [ `plug; `unplug ] - end - ) vm_current_ops; + if List.mem op [ `clean_shutdown; `hard_shutdown; `suspend; `pause ] then begin + let current_op_str = "Current operation on VM:" ^ (Ref.string_of vm) ^ " is " + ^ 
(Record_util.vm_operation_to_string op) in + set_errors Api_errors.operation_not_allowed [ current_op_str ] [ `plug; `unplug ] + end + ) vm_current_ops; (* HVM guests MAY support plug/unplug IF they have PV drivers. Assume * all drivers have such support unless they specify that they do not. *) (* They can only eject/insert CDs not plug/unplug *) let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in - let vm_gmr = try Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:vm_gm) with _ -> None in + let vm_gmr = try Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:vm_gm) with _ -> None in if power_state = `Running && Helpers.has_booted_hvm ~__context ~self:vm then begin let plug_ops = [ `plug; `unplug; `unplug_force ] in let fallback () = match Xapi_pv_driver_version.make_error_opt (Xapi_pv_driver_version.of_guest_metrics vm_gmr) vm with - | Some(code, params) -> set_errors code params plug_ops - | None -> () in + | Some(code, params) -> set_errors code params plug_ops + | None -> () in (match vm_gmr with - | None -> fallback () - | Some gmr -> - (match gmr.Db_actions.vM_guest_metrics_can_use_hotplug_vbd with - | `yes -> () (* Drivers have made an explicit claim of support. *) - | `no -> set_errors Api_errors.operation_not_allowed ["VM states it does not support VBD hotplug."] plug_ops - | `unspecified -> fallback ()) + | None -> fallback () + | Some gmr -> + (match gmr.Db_actions.vM_guest_metrics_can_use_hotplug_vbd with + | `yes -> () (* Drivers have made an explicit claim of support. 
*) + | `no -> set_errors Api_errors.operation_not_allowed ["VM states it does not support VBD hotplug."] plug_ops + | `unspecified -> fallback ()) ); if record.Db_actions.vBD_type = `CD - then set_errors Api_errors.operation_not_allowed - [ "HVM CDROMs cannot be hotplugged/unplugged, only inserted or ejected" ] plug_ops + then set_errors Api_errors.operation_not_allowed + [ "HVM CDROMs cannot be hotplugged/unplugged, only inserted or ejected" ] plug_ops end; (* When a VM is suspended, no operations are allowed for CD. *) @@ -167,57 +167,57 @@ let valid_operations ~expensive_sharing_checks ~__context record _ref' : table = if not vdi_record.Db_actions.vDI_managed then set_errors Api_errors.vdi_not_managed [ _ref ] all_ops; - let vdi_operations_besides_copy = - List.exists - (fun (_, operation) -> operation <> `copy) - vdi_record.Db_actions.vDI_current_operations - in + let vdi_operations_besides_copy = + List.exists + (fun (_, operation) -> operation <> `copy) + vdi_record.Db_actions.vDI_current_operations + in if vdi_operations_besides_copy then begin debug "VBD operation %s not allowed because VDI.current-operations = [ %s ]" - (String.concat ";" (List.map vbd_operation_to_string current_ops)) - (String.concat "; " - (List.map (fun (task, op) -> task ^ " -> " ^ (vdi_operation_to_string op)) vdi_record.Db_actions.vDI_current_operations)); + (String.concat ";" (List.map vbd_operation_to_string current_ops)) + (String.concat "; " + (List.map (fun (task, op) -> task ^ " -> " ^ (vdi_operation_to_string op)) vdi_record.Db_actions.vDI_current_operations)); let concurrent_op = snd (List.hd vdi_record.Db_actions.vDI_current_operations) in set_errors Api_errors.other_operation_in_progress - [ "VDI"; Ref.string_of vdi; vdi_operation_to_string concurrent_op] [ `attach; `plug; `insert ] + [ "VDI"; Ref.string_of vdi; vdi_operation_to_string concurrent_op] [ `attach; `plug; `insert ] end; - if not record.Db_actions.vBD_currently_attached && expensive_sharing_checks + if not 
record.Db_actions.vBD_currently_attached && expensive_sharing_checks then begin (* Perform the sharing checks *) (* Careful to not count this VBD and be careful to be robust to parallel deletions of unrelated VBDs *) - let vbd_records = - let vbds = List.filter (fun vbd -> vbd <> _ref') vdi_record.Db_actions.vDI_VBDs in - List.concat (List.map (fun self -> try [ Db.VBD.get_record_internal ~__context ~self ] with _ -> []) vbds) in - let pointing_to_a_suspended_VM vbd = - Db.VM.get_power_state ~__context ~self:(vbd.Db_actions.vBD_VM) = `Suspended in - let pointing_to_a_system_domain vbd = - System_domains.get_is_system_domain ~__context ~self:(vbd.Db_actions.vBD_VM) in - - let vbds_to_check = List.filter - (fun self -> - not (pointing_to_a_suspended_VM self) (* these are really offline *) - && not (pointing_to_a_system_domain self) (* these can share the disk safely *) - && ( - self.Db_actions.vBD_currently_attached - || self.Db_actions.vBD_reserved - || self.Db_actions.vBD_current_operations <> [] - ) - ) vbd_records in - let someones_got_rw_access = - try let (_: Db_actions.vBD_t) = List.find (fun vbd -> vbd.Db_actions.vBD_mode = `RW) vbds_to_check in true with _ -> false - in - let need_write = record.Db_actions.vBD_mode = `RW in - (* Read-only access doesn't require VDI to be marked sharable *) - if not(vdi_record.Db_actions.vDI_sharable) && (not is_system_domain) - && (someones_got_rw_access || need_write && vbds_to_check <> []) - then set_errors Api_errors.vdi_in_use [ Ref.string_of vdi ] [ `attach; `insert; `plug ]; - if need_write && vdi_record.Db_actions.vDI_read_only - then set_errors Api_errors.vdi_readonly [ Ref.string_of vdi ] [ `attach; `insert; `plug ] - end + let vbd_records = + let vbds = List.filter (fun vbd -> vbd <> _ref') vdi_record.Db_actions.vDI_VBDs in + List.concat (List.map (fun self -> try [ Db.VBD.get_record_internal ~__context ~self ] with _ -> []) vbds) in + let pointing_to_a_suspended_VM vbd = + Db.VM.get_power_state ~__context 
~self:(vbd.Db_actions.vBD_VM) = `Suspended in + let pointing_to_a_system_domain vbd = + System_domains.get_is_system_domain ~__context ~self:(vbd.Db_actions.vBD_VM) in + + let vbds_to_check = List.filter + (fun self -> + not (pointing_to_a_suspended_VM self) (* these are really offline *) + && not (pointing_to_a_system_domain self) (* these can share the disk safely *) + && ( + self.Db_actions.vBD_currently_attached + || self.Db_actions.vBD_reserved + || self.Db_actions.vBD_current_operations <> [] + ) + ) vbd_records in + let someones_got_rw_access = + try let (_: Db_actions.vBD_t) = List.find (fun vbd -> vbd.Db_actions.vBD_mode = `RW) vbds_to_check in true with _ -> false + in + let need_write = record.Db_actions.vBD_mode = `RW in + (* Read-only access doesn't require VDI to be marked sharable *) + if not(vdi_record.Db_actions.vDI_sharable) && (not is_system_domain) + && (someones_got_rw_access || need_write && vbds_to_check <> []) + then set_errors Api_errors.vdi_in_use [ Ref.string_of vdi ] [ `attach; `insert; `plug ]; + if need_write && vdi_record.Db_actions.vDI_read_only + then set_errors Api_errors.vdi_readonly [ Ref.string_of vdi ] [ `attach; `insert; `plug ] + end end; (* empty *) table -let throw_error (table: table) op = +let throw_error (table: table) op = if not(Hashtbl.mem table op) then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_vbd_helpers.assert_operation_valid unknown operation: %s" (vbd_operation_to_string op) ])); @@ -225,12 +225,12 @@ let throw_error (table: table) op = | Some (code, params) -> raise (Api_errors.Server_error(code, params)) | None -> () -let assert_operation_valid ~__context ~self ~(op:API.vbd_operations) = +let assert_operation_valid ~__context ~self ~(op:API.vbd_operations) = let all = Db.VBD.get_record_internal ~__context ~self in let table = valid_operations ~expensive_sharing_checks:true ~__context all self in throw_error table op - -let assert_attachable ~__context ~self = + +let 
assert_attachable ~__context ~self = let all = Db.VBD.get_record_internal ~__context ~self in let table = valid_operations ~expensive_sharing_checks:true ~__context all self in throw_error table `attach @@ -239,13 +239,13 @@ let assert_doesnt_make_vm_non_agile ~__context ~vm ~vdi = let pool = Helpers.get_pool ~__context in let properly_shared = Agility.is_sr_properly_shared ~__context ~self:(Db.VDI.get_SR ~__context ~self:vdi) in if true - && Db.Pool.get_ha_enabled ~__context ~self:pool - && not(Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) - && Helpers.is_xha_protected ~__context ~self:vm - && not properly_shared then begin - warn "Attaching VDI %s makes VM %s not agile" (Ref.string_of vdi) (Ref.string_of vm); - raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) - end + && Db.Pool.get_ha_enabled ~__context ~self:pool + && not(Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) + && Helpers.is_xha_protected ~__context ~self:vm + && not properly_shared then begin + warn "Attaching VDI %s makes VM %s not agile" (Ref.string_of vdi) (Ref.string_of vm); + raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) + end let update_allowed_operations ~__context ~self : unit = let all = Db.VBD.get_record_internal ~__context ~self in @@ -254,7 +254,7 @@ let update_allowed_operations ~__context ~self : unit = Db.VBD.set_allowed_operations ~__context ~self ~value:keys (** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = +let cancel_task ~__context ~self ~task_id = let all = List.map fst (Db.VBD.get_current_operations ~__context ~self) in if List.mem task_id all then begin @@ -267,7 +267,7 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = let set = (fun value -> Db.VBD.set_current_operations ~__context ~self ~value) in Helpers.cancel_tasks ~__context ~ops ~all_tasks_in_db ~task_ids ~set -let clear_current_operations 
~__context ~self = +let clear_current_operations ~__context ~self = if (Db.VBD.get_current_operations ~__context ~self)<>[] then begin Db.VBD.set_current_operations ~__context ~self ~value:[]; @@ -280,77 +280,77 @@ let clear_current_operations ~__context ~self = (** Check if the device string has the right form *) let valid_device dev = let check_rest rest = (* checks the rest of the device name = [] is ok, or a number is ok *) - if rest=[] - then true + if rest=[] + then true else try ignore(int_of_string (String.implode rest)); true with _ -> false in - dev = "autodetect" || - match String.explode dev with - | 's' :: 'd' :: ('a'..'p') :: rest -> check_rest rest - | 'x' :: 'v' :: 'd' :: ('a'..'p') :: rest -> check_rest rest - | 'h' :: 'd' :: ('a'..'p') :: rest -> check_rest rest - | _ -> try let n = int_of_string dev in n >= 0 || n <16 with _ -> false + dev = "autodetect" || + match String.explode dev with + | 's' :: 'd' :: ('a'..'p') :: rest -> check_rest rest + | 'x' :: 'v' :: 'd' :: ('a'..'p') :: rest -> check_rest rest + | 'h' :: 'd' :: ('a'..'p') :: rest -> check_rest rest + | _ -> try let n = int_of_string dev in n >= 0 || n <16 with _ -> false (** VBD.destroy doesn't require any interaction with xen *) let destroy ~__context ~self = - debug "VBD.destroy (uuid = %s; ref = %s)" (Db.VBD.get_uuid ~__context ~self) (Ref.string_of self); - let r = Db.VBD.get_record_internal ~__context ~self in - let vm = r.Db_actions.vBD_VM in - - (* Force the user to unplug first *) - if r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved - then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, - [Printf.sprintf "VBD '%s' still attached to '%s'" r.Db_actions.vBD_uuid (Db.VM.get_uuid __context vm)])); - - let metrics = Db.VBD.get_metrics ~__context ~self in - (* Don't let a failure to destroy the metrics stop us *) - Helpers.log_exn_continue "VBD_metrics.destroy" - (fun self -> Db.VBD_metrics.destroy ~__context ~self) metrics; - Db.VBD.destroy 
~__context ~self + debug "VBD.destroy (uuid = %s; ref = %s)" (Db.VBD.get_uuid ~__context ~self) (Ref.string_of self); + let r = Db.VBD.get_record_internal ~__context ~self in + let vm = r.Db_actions.vBD_VM in + + (* Force the user to unplug first *) + if r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved + then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, + [Printf.sprintf "VBD '%s' still attached to '%s'" r.Db_actions.vBD_uuid (Db.VM.get_uuid __context vm)])); + + let metrics = Db.VBD.get_metrics ~__context ~self in + (* Don't let a failure to destroy the metrics stop us *) + Helpers.log_exn_continue "VBD_metrics.destroy" + (fun self -> Db.VBD_metrics.destroy ~__context ~self) metrics; + Db.VBD.destroy ~__context ~self (** Type of a function which does the actual hotplug/ hotunplug *) type do_hotplug_fn = __context:Context.t -> vbd:API.ref_VBD -> unit (* copy a vbd *) let copy ~__context ?vdi ~vm vbd = - let all = Db.VBD.get_record ~__context ~self:vbd in - let new_vbd = Ref.make () in - let vbd_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let metrics = Ref.make () in - let metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let vdi = Pervasiveext.default all.API.vBD_VDI vdi in - Db.VBD_metrics.create ~__context - ~ref:metrics - ~uuid:metrics_uuid - ~io_read_kbs:0. - ~io_write_kbs:0. - ~last_updated:(Date.of_float 0.) 
- ~other_config:[]; - Db.VBD.create ~__context - ~ref:new_vbd - ~uuid:vbd_uuid - ~allowed_operations:[] - ~current_operations:[] - ~storage_lock:false - ~vM:vm - ~vDI:vdi - ~empty:(all.API.vBD_empty || vdi = Ref.null) - ~reserved:false - ~userdevice:all.API.vBD_userdevice - ~device:all.API.vBD_device - ~bootable:all.API.vBD_bootable - ~mode:all.API.vBD_mode - ~currently_attached:all.API.vBD_currently_attached - ~status_code:0L - ~_type:all.API.vBD_type - ~unpluggable:all.API.vBD_unpluggable - ~status_detail:"" - ~other_config:all.API.vBD_other_config - ~qos_algorithm_type:all.API.vBD_qos_algorithm_type - ~qos_algorithm_params:all.API.vBD_qos_algorithm_params - ~qos_supported_algorithms:[] - ~runtime_properties:[] - ~metrics:metrics; - new_vbd + let all = Db.VBD.get_record ~__context ~self:vbd in + let new_vbd = Ref.make () in + let vbd_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let metrics = Ref.make () in + let metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let vdi = Pervasiveext.default all.API.vBD_VDI vdi in + Db.VBD_metrics.create ~__context + ~ref:metrics + ~uuid:metrics_uuid + ~io_read_kbs:0. + ~io_write_kbs:0. + ~last_updated:(Date.of_float 0.) 
+ ~other_config:[]; + Db.VBD.create ~__context + ~ref:new_vbd + ~uuid:vbd_uuid + ~allowed_operations:[] + ~current_operations:[] + ~storage_lock:false + ~vM:vm + ~vDI:vdi + ~empty:(all.API.vBD_empty || vdi = Ref.null) + ~reserved:false + ~userdevice:all.API.vBD_userdevice + ~device:all.API.vBD_device + ~bootable:all.API.vBD_bootable + ~mode:all.API.vBD_mode + ~currently_attached:all.API.vBD_currently_attached + ~status_code:0L + ~_type:all.API.vBD_type + ~unpluggable:all.API.vBD_unpluggable + ~status_detail:"" + ~other_config:all.API.vBD_other_config + ~qos_algorithm_type:all.API.vBD_qos_algorithm_type + ~qos_algorithm_params:all.API.vBD_qos_algorithm_params + ~qos_supported_algorithms:[] + ~runtime_properties:[] + ~metrics:metrics; + new_vbd diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index 0325b7687ef..bdabcaf0cb4 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -13,8 +13,8 @@ *) (** Module that defines API functions for VDI objects * @group XenAPI functions - *) - +*) + module D=Debug.Make(struct let name="xapi" end) open D @@ -28,203 +28,203 @@ open Printf (** Checks to see if an operation is valid in this state. Returns Some exception if not and None if everything is ok. *) let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_records=[]) ha_enabled record _ref' op = - let _ref = Ref.string_of _ref' in - let current_ops = record.Db_actions.vDI_current_operations in - let reset_on_boot = record.Db_actions.vDI_on_boot = `reset in - - (* Policy: - 1. any current_operation besides copy implies exclusivity; fail everything - else; except vdi mirroring is in current operations and destroy is performed - as part of vdi_pool_migrate. - 2. if a copy is ongoing, don't fail with other_operation_in_progress, as - blocked operations could then get stuck behind a long-running copy. - Instead, rely on the blocked_by_attach check further down to decide - whether an operation should be allowed. - 3. 
if doing a VM start then assume the sharing check is done elsewhere - (so VMs may share disks but our operations cannot) - 4. for other operations, fail if any VBD has currently-attached=true or any VBD - has a current_operation itself - 5. HA prevents you from deleting statefiles or metadata volumes - *) - (* Don't fail with other_operation_in_progress if VDI mirroring is in progress - * and destroy is called as part of VDI mirroring *) - let is_vdi_mirroring_in_progress = (List.exists (fun (_, op) -> op = `mirror) current_ops) && (op = `destroy) in - if (List.exists (fun (_, op) -> op <> `copy) current_ops) && not is_vdi_mirroring_in_progress - then Some(Api_errors.other_operation_in_progress,["VDI"; _ref]) - else - (* check to see whether it's a local cd drive *) - let sr = record.Db_actions.vDI_SR in - let sr_type = Db.SR.get_type ~__context ~self:sr in - let is_tools_sr = Db.SR.get_is_tools_sr ~__context ~self:sr in - - (* Check to see if any PBDs are attached *) - let open Db_filter_types in - let pbds_attached = match pbd_records with - | [] -> Db.PBD.get_records_where ~__context ~expr:(And(Eq(Field "SR", Literal (Ref.string_of sr)), Eq(Field "currently_attached", Literal "true"))) - | _ -> List.filter (fun (_, pbd_record) -> (pbd_record.API.pBD_SR = sr) && pbd_record.API.pBD_currently_attached) pbd_records - in - if (List.length pbds_attached = 0) && List.mem op [`resize;] - then Some(Api_errors.sr_no_pbds, [Ref.string_of sr]) - else - (* check to see whether VBDs exist which are using this VDI *) - - (* Only a 'live' operation can be performed if there are active (even RO) devices *) - let my_active_vbd_records = match vbd_records with - | [] -> List.map snd (Db.VBD.get_internal_records_where ~__context - ~expr:( - And(Eq (Field "VDI", Literal _ref), - Or( - Eq (Field "currently_attached", Literal "true"), - Eq (Field "reserved", Literal "true"))) - )) - | _ -> List.map snd (List.filter (fun (_, vbd_record) -> - vbd_record.Db_actions.vBD_VDI = _ref' && 
(vbd_record.Db_actions.vBD_currently_attached || vbd_record.Db_actions.vBD_reserved) - ) vbd_records) - in - let my_active_rw_vbd_records = List.filter - (fun vbd -> vbd.Db_actions.vBD_mode = `RW) - my_active_vbd_records - in - - (* VBD operations (plug/unplug) (which should be transient) cause us to serialise *) - let my_has_current_operation_vbd_records = match vbd_records with - | [] -> List.map snd (Db.VBD.get_internal_records_where ~__context - ~expr:( - And(Eq (Field "VDI", Literal _ref), Not (Eq (Field "current_operations", Literal "()"))) - )) - | _ -> List.map snd (List.filter (fun (_, vbd_record) -> - vbd_record.Db_actions.vBD_VDI = _ref' && vbd_record.Db_actions.vBD_current_operations <> [] - ) vbd_records) - in - - (* If the VBD is currently_attached then some operations can still be performed ie: - VDI.clone (if the VM is suspended we have to have the 'allow_clone_suspended_vm'' flag) - VDI.snapshot; VDI.resize_online; 'blocked' (CP-831) *) - let operation_can_be_performed_live = match op with - | `snapshot | `resize_online | `blocked | `clone | `mirror -> true - | _ -> false in - - let operation_can_be_performed_with_ro_attach = - operation_can_be_performed_live || - (match op with - | `copy -> true - | _ -> false) - in - - (* NB RO vs RW sharing checks are done in xapi_vbd.ml *) - - let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in - - let blocked_by_attach = - if operation_can_be_performed_live - then false - else begin - if operation_can_be_performed_with_ro_attach - then (my_active_rw_vbd_records <> []) - else (my_active_vbd_records <> []) - end - in - if blocked_by_attach - then Some (Api_errors.vdi_in_use,[_ref; (Record_util.vdi_operation_to_string op)]) - else if my_has_current_operation_vbd_records <> [] - then Some (Api_errors.other_operation_in_progress, [ "VDI"; _ref ]) - else ( - match op with - | `forget -> - if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] - then 
Some (Api_errors.ha_is_enabled, []) - else if List.mem record.Db_actions.vDI_type [ `rrd ] - then Some (Api_errors.vdi_has_rrds, [_ref]) - else None - | `destroy -> - if sr_type = "udev" - then Some (Api_errors.vdi_is_a_physical_device, [_ref]) - else - if is_tools_sr - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else if List.mem record.Db_actions.vDI_type [ `rrd ] - then Some (Api_errors.vdi_has_rrds, [_ref]) - else - if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] - then Some (Api_errors.ha_is_enabled, []) - else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_enable_in_progress ~__context - then Some (Api_errors.ha_enable_in_progress, []) - else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_disable_in_progress ~__context - then Some (Api_errors.ha_disable_in_progress, []) - else - if not Smint.(has_capability Vdi_delete sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `resize -> - if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] - then Some (Api_errors.ha_is_enabled, []) - else - if not Smint.(has_capability Vdi_resize sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `update -> - if not Smint.(has_capability Vdi_update sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `resize_online -> - if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] - then Some (Api_errors.ha_is_enabled, []) - else - if not Smint.(has_capability Vdi_resize_online sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `generate_config -> - if not Smint.(has_capability Vdi_generate_config sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - 
| `snapshot when record.Db_actions.vDI_sharable -> - Some (Api_errors.vdi_is_sharable, [ _ref ]) - | `snapshot when reset_on_boot -> - Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation, []) - | `snapshot -> - if List.exists (fun (_, op) -> op = `copy) current_ops - then Some (Api_errors.operation_not_allowed, ["Snapshot operation not allowed during copy."]) - else None - | `copy -> - if List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] - then Some (Api_errors.operation_not_allowed, ["VDI containing HA statefile or redo log cannot be copied (check the VDI's allowed operations)."]) - else None - | `clone -> - if not Smint.(has_capability Vdi_clone sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `mirror -> - if not Smint.(has_capability Vdi_mirror sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | _ -> None - ) - -let assert_operation_valid ~__context ~self ~(op:API.vdi_operations) = + let _ref = Ref.string_of _ref' in + let current_ops = record.Db_actions.vDI_current_operations in + let reset_on_boot = record.Db_actions.vDI_on_boot = `reset in + + (* Policy: + 1. any current_operation besides copy implies exclusivity; fail everything + else; except vdi mirroring is in current operations and destroy is performed + as part of vdi_pool_migrate. + 2. if a copy is ongoing, don't fail with other_operation_in_progress, as + blocked operations could then get stuck behind a long-running copy. + Instead, rely on the blocked_by_attach check further down to decide + whether an operation should be allowed. + 3. if doing a VM start then assume the sharing check is done elsewhere + (so VMs may share disks but our operations cannot) + 4. for other operations, fail if any VBD has currently-attached=true or any VBD + has a current_operation itself + 5. 
HA prevents you from deleting statefiles or metadata volumes + *) + (* Don't fail with other_operation_in_progress if VDI mirroring is in progress + * and destroy is called as part of VDI mirroring *) + let is_vdi_mirroring_in_progress = (List.exists (fun (_, op) -> op = `mirror) current_ops) && (op = `destroy) in + if (List.exists (fun (_, op) -> op <> `copy) current_ops) && not is_vdi_mirroring_in_progress + then Some(Api_errors.other_operation_in_progress,["VDI"; _ref]) + else + (* check to see whether it's a local cd drive *) + let sr = record.Db_actions.vDI_SR in + let sr_type = Db.SR.get_type ~__context ~self:sr in + let is_tools_sr = Db.SR.get_is_tools_sr ~__context ~self:sr in + + (* Check to see if any PBDs are attached *) + let open Db_filter_types in + let pbds_attached = match pbd_records with + | [] -> Db.PBD.get_records_where ~__context ~expr:(And(Eq(Field "SR", Literal (Ref.string_of sr)), Eq(Field "currently_attached", Literal "true"))) + | _ -> List.filter (fun (_, pbd_record) -> (pbd_record.API.pBD_SR = sr) && pbd_record.API.pBD_currently_attached) pbd_records + in + if (List.length pbds_attached = 0) && List.mem op [`resize;] + then Some(Api_errors.sr_no_pbds, [Ref.string_of sr]) + else + (* check to see whether VBDs exist which are using this VDI *) + + (* Only a 'live' operation can be performed if there are active (even RO) devices *) + let my_active_vbd_records = match vbd_records with + | [] -> List.map snd (Db.VBD.get_internal_records_where ~__context + ~expr:( + And(Eq (Field "VDI", Literal _ref), + Or( + Eq (Field "currently_attached", Literal "true"), + Eq (Field "reserved", Literal "true"))) + )) + | _ -> List.map snd (List.filter (fun (_, vbd_record) -> + vbd_record.Db_actions.vBD_VDI = _ref' && (vbd_record.Db_actions.vBD_currently_attached || vbd_record.Db_actions.vBD_reserved) + ) vbd_records) + in + let my_active_rw_vbd_records = List.filter + (fun vbd -> vbd.Db_actions.vBD_mode = `RW) + my_active_vbd_records + in + + (* VBD 
operations (plug/unplug) (which should be transient) cause us to serialise *) + let my_has_current_operation_vbd_records = match vbd_records with + | [] -> List.map snd (Db.VBD.get_internal_records_where ~__context + ~expr:( + And(Eq (Field "VDI", Literal _ref), Not (Eq (Field "current_operations", Literal "()"))) + )) + | _ -> List.map snd (List.filter (fun (_, vbd_record) -> + vbd_record.Db_actions.vBD_VDI = _ref' && vbd_record.Db_actions.vBD_current_operations <> [] + ) vbd_records) + in + + (* If the VBD is currently_attached then some operations can still be performed ie: + VDI.clone (if the VM is suspended we have to have the 'allow_clone_suspended_vm'' flag) + VDI.snapshot; VDI.resize_online; 'blocked' (CP-831) *) + let operation_can_be_performed_live = match op with + | `snapshot | `resize_online | `blocked | `clone | `mirror -> true + | _ -> false in + + let operation_can_be_performed_with_ro_attach = + operation_can_be_performed_live || + (match op with + | `copy -> true + | _ -> false) + in + + (* NB RO vs RW sharing checks are done in xapi_vbd.ml *) + + let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in + + let blocked_by_attach = + if operation_can_be_performed_live + then false + else begin + if operation_can_be_performed_with_ro_attach + then (my_active_rw_vbd_records <> []) + else (my_active_vbd_records <> []) + end + in + if blocked_by_attach + then Some (Api_errors.vdi_in_use,[_ref; (Record_util.vdi_operation_to_string op)]) + else if my_has_current_operation_vbd_records <> [] + then Some (Api_errors.other_operation_in_progress, [ "VDI"; _ref ]) + else ( + match op with + | `forget -> + if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] + then Some (Api_errors.ha_is_enabled, []) + else if List.mem record.Db_actions.vDI_type [ `rrd ] + then Some (Api_errors.vdi_has_rrds, [_ref]) + else None + | `destroy -> + if sr_type = "udev" + then Some (Api_errors.vdi_is_a_physical_device, 
[_ref]) + else + if is_tools_sr + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else if List.mem record.Db_actions.vDI_type [ `rrd ] + then Some (Api_errors.vdi_has_rrds, [_ref]) + else + if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] + then Some (Api_errors.ha_is_enabled, []) + else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_enable_in_progress ~__context + then Some (Api_errors.ha_enable_in_progress, []) + else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_disable_in_progress ~__context + then Some (Api_errors.ha_disable_in_progress, []) + else + if not Smint.(has_capability Vdi_delete sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `resize -> + if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] + then Some (Api_errors.ha_is_enabled, []) + else + if not Smint.(has_capability Vdi_resize sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `update -> + if not Smint.(has_capability Vdi_update sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `resize_online -> + if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] + then Some (Api_errors.ha_is_enabled, []) + else + if not Smint.(has_capability Vdi_resize_online sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `generate_config -> + if not Smint.(has_capability Vdi_generate_config sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `snapshot when record.Db_actions.vDI_sharable -> + Some (Api_errors.vdi_is_sharable, [ _ref ]) + | `snapshot when reset_on_boot -> + Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation, []) + | `snapshot -> + if List.exists 
(fun (_, op) -> op = `copy) current_ops + then Some (Api_errors.operation_not_allowed, ["Snapshot operation not allowed during copy."]) + else None + | `copy -> + if List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] + then Some (Api_errors.operation_not_allowed, ["VDI containing HA statefile or redo log cannot be copied (check the VDI's allowed operations)."]) + else None + | `clone -> + if not Smint.(has_capability Vdi_clone sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `mirror -> + if not Smint.(has_capability Vdi_mirror sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | _ -> None + ) + +let assert_operation_valid ~__context ~self ~(op:API.vdi_operations) = let pool = Helpers.get_pool ~__context in let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:pool in let all = Db.VDI.get_record_internal ~__context ~self in match check_operation_error ~__context ha_enabled all self op with - None -> () - | Some (a,b) -> raise (Api_errors.Server_error (a,b)) + None -> () + | Some (a,b) -> raise (Api_errors.Server_error (a,b)) let update_allowed_operations_internal ~__context ~self ~sr_records ~pbd_records ~vbd_records = let pool = Helpers.get_pool ~__context in let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:pool in let all = Db.VDI.get_record_internal ~__context ~self in - let allowed = + let allowed = let check x = match check_operation_error ~__context ~sr_records ~pbd_records ~vbd_records ha_enabled all self x with None -> [ x ] | _ -> [] in - List.fold_left (fun accu op -> check op @ accu) [] - [ `snapshot; `copy; `clone; `destroy; `resize; `update; `generate_config; `resize_online; `forget ] in + List.fold_left (fun accu op -> check op @ accu) [] + [ `snapshot; `copy; `clone; `destroy; `resize; `update; `generate_config; `resize_online; `forget ] in Db.VDI.set_allowed_operations ~__context ~self ~value:allowed let 
update_allowed_operations ~__context ~self : unit = - update_allowed_operations_internal ~__context ~self ~sr_records:[] ~pbd_records:[] ~vbd_records:[] + update_allowed_operations_internal ~__context ~self ~sr_records:[] ~pbd_records:[] ~vbd_records:[] (** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = +let cancel_task ~__context ~self ~task_id = let all = List.map fst (Db.VDI.get_current_operations ~__context ~self) in if List.mem task_id all then begin @@ -241,21 +241,21 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = (** Helper function to create a new VDI record with all fields copied from an original, except ref and *_operations, UUID and others supplied as optional arguments. - If a new UUID is not supplied, a fresh one is generated. + If a new UUID is not supplied, a fresh one is generated. storage_lock defaults to false. Parent defaults to Ref.null. - *) +*) (*let clone_record ~uuid ?name_label ?name_description ?sR ?virtual_size ?location ?physical_utilisation ?_type ?sharable ?read_only ?storage_lock ?other_config ?parent ?xenstore_data ?sm_config ~current_operations ~__context ~original () = let a = Db.VDI.get_record_internal ~__context ~self:original in let r = Ref.make () in - Db.VDI.create ~__context ~ref:r + Db.VDI.create ~__context ~ref:r ~uuid:(Uuid.to_string uuid) ~name_label:(default a.Db_actions.vDI_name_label name_label) ~name_description:(default a.Db_actions.vDI_name_description name_description) ~allowed_operations:[] ~current_operations - ~sR:(default a.Db_actions.vDI_SR sR) + ~sR:(default a.Db_actions.vDI_SR sR) ~virtual_size:(default a.Db_actions.vDI_virtual_size virtual_size) ~physical_utilisation:(default a.Db_actions.vDI_physical_utilisation physical_utilisation) ~_type:(default a.Db_actions.vDI_type _type) @@ -269,63 +269,63 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = ~parent:(default Ref.null parent); r*) -let require_uuid 
vdi_info = +let require_uuid vdi_info = match vdi_info.Smint.vdi_info_uuid with | Some uuid -> uuid - | None -> failwith "SM backend failed to return field" + | None -> failwith "SM backend failed to return field" (* This function updates xapi's database for a single VDI. The row will be created if it doesn't exist *) let update_vdi_db ~__context ~sr newvdi = - let open Storage_interface in - let open Db_filter_types in - let expr = And(Eq(Field "location", Literal newvdi.vdi), Eq(Field "SR", Literal (Ref.string_of sr))) in - let db_vdis = Db.VDI.get_records_where ~__context ~expr in - Xapi_sr.update_vdis ~__context ~sr db_vdis [ newvdi ]; - match Db.VDI.get_records_where ~__context ~expr with - | (vdi, _) :: _ -> vdi - | [] -> failwith (Printf.sprintf "newvdi failed to create a VDI for %s" (string_of_vdi_info newvdi)) + let open Storage_interface in + let open Db_filter_types in + let expr = And(Eq(Field "location", Literal newvdi.vdi), Eq(Field "SR", Literal (Ref.string_of sr))) in + let db_vdis = Db.VDI.get_records_where ~__context ~expr in + Xapi_sr.update_vdis ~__context ~sr db_vdis [ newvdi ]; + match Db.VDI.get_records_where ~__context ~expr with + | (vdi, _) :: _ -> vdi + | [] -> failwith (Printf.sprintf "newvdi failed to create a VDI for %s" (string_of_vdi_info newvdi)) let create ~__context ~name_label ~name_description - ~sR ~virtual_size ~_type - ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags = - Sm.assert_pbd_is_plugged ~__context ~sr:sR; - - (* XXX: unify with record_util.vdi_type_to_string *) - let vdi_type = match _type with - | `crashdump -> "crashdump" - | `ephemeral -> "ephemeral" - | `ha_statefile -> "ha_statefile" - | `metadata -> "metadata" - | `redo_log -> "redo_log" - | `suspend -> "suspend" - | `system -> "system" - | `user -> "user" - | `rrd -> "rrd" in + ~sR ~virtual_size ~_type + ~sharable ~read_only ~other_config ~xenstore_data ~sm_config ~tags = + Sm.assert_pbd_is_plugged ~__context ~sr:sR; - let open Storage_access 
in - let task = Context.get_task_id __context in - let open Storage_interface in - let vdi_info = { - default_vdi_info with - name_label = name_label; - name_description = name_description; - ty = vdi_type; - read_only = read_only; - virtual_size = virtual_size; - sm_config = sm_config; - } in - let module C = Client(struct let rpc = rpc end) in - let sm_vdi = transform_storage_exn - (fun () -> C.VDI.create ~dbg:(Ref.string_of task) ~sr:(Db.SR.get_uuid ~__context ~self:sR) ~vdi_info) in - if virtual_size < sm_vdi.virtual_size - then info "sr:%s vdi:%s requested virtual size %Ld < actual virtual size %Ld" (Ref.string_of sR) sm_vdi.vdi virtual_size sm_vdi.virtual_size; - let db_vdi = update_vdi_db ~__context ~sr:sR sm_vdi in - Db.VDI.set_other_config ~__context ~self:db_vdi ~value:other_config; - Db.VDI.set_sharable ~__context ~self:db_vdi ~value:sharable; - Db.VDI.set_tags ~__context ~self:db_vdi ~value:tags; - Db.VDI.set_xenstore_data ~__context ~self:db_vdi ~value:xenstore_data; - update_allowed_operations ~__context ~self:db_vdi; - db_vdi + (* XXX: unify with record_util.vdi_type_to_string *) + let vdi_type = match _type with + | `crashdump -> "crashdump" + | `ephemeral -> "ephemeral" + | `ha_statefile -> "ha_statefile" + | `metadata -> "metadata" + | `redo_log -> "redo_log" + | `suspend -> "suspend" + | `system -> "system" + | `user -> "user" + | `rrd -> "rrd" in + + let open Storage_access in + let task = Context.get_task_id __context in + let open Storage_interface in + let vdi_info = { + default_vdi_info with + name_label = name_label; + name_description = name_description; + ty = vdi_type; + read_only = read_only; + virtual_size = virtual_size; + sm_config = sm_config; + } in + let module C = Client(struct let rpc = rpc end) in + let sm_vdi = transform_storage_exn + (fun () -> C.VDI.create ~dbg:(Ref.string_of task) ~sr:(Db.SR.get_uuid ~__context ~self:sR) ~vdi_info) in + if virtual_size < sm_vdi.virtual_size + then info "sr:%s vdi:%s requested virtual size 
%Ld < actual virtual size %Ld" (Ref.string_of sR) sm_vdi.vdi virtual_size sm_vdi.virtual_size; + let db_vdi = update_vdi_db ~__context ~sr:sR sm_vdi in + Db.VDI.set_other_config ~__context ~self:db_vdi ~value:other_config; + Db.VDI.set_sharable ~__context ~self:db_vdi ~value:sharable; + Db.VDI.set_tags ~__context ~self:db_vdi ~value:tags; + Db.VDI.set_xenstore_data ~__context ~self:db_vdi ~value:xenstore_data; + update_allowed_operations ~__context ~self:db_vdi; + db_vdi (* Make the database record only *) let introduce_dbonly ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of = @@ -339,13 +339,13 @@ let introduce_dbonly ~__context ~uuid ~name_label ~name_description ~sR ~_type begin try Scanf.sscanf uuid "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" - (fun _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -> ()) + (fun _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ -> ()) with _ -> raise (Api_errors.Server_error (Api_errors.uuid_invalid, [ "VDI"; uuid ])) end; let ref = Ref.make() in debug "VDI.introduce read_only = %b" read_only; Db.VDI.create ~__context ~ref ~uuid:uuid - ~name_label ~name_description + ~name_label ~name_description ~current_operations:[] ~allowed_operations:[] ~is_a_snapshot ~snapshot_of ~snapshot_time ~sR ~virtual_size @@ -363,34 +363,34 @@ let internal_db_introduce ~__context ~uuid ~name_label ~name_description ~sR ~_t let ref = introduce_dbonly ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of in update_allowed_operations ~__context ~self:ref; ref - + let pool_introduce = internal_db_introduce let db_introduce = internal_db_introduce -let db_forget ~__context ~vdi = +let db_forget 
~__context ~vdi = debug "db_forget uuid=%s" (Db.VDI.get_uuid ~__context ~self:vdi); Db.VDI.destroy ~__context ~self:vdi let introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharable ~read_only ~other_config ~location ~xenstore_data ~sm_config ~managed ~virtual_size ~physical_utilisation ~metadata_of_pool ~is_a_snapshot ~snapshot_time ~snapshot_of = let open Storage_access in let open Storage_interface in - debug "introduce uuid=%s name_label=%s sm_config=[ %s ]" uuid name_label (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); + debug "introduce uuid=%s name_label=%s sm_config=[ %s ]" uuid name_label (String.concat "; " (List.map (fun (k, v) -> k ^ " = " ^ v) sm_config)); Sm.assert_pbd_is_plugged ~__context ~sr:sR; (* Verify that the location field is unique in this SR - ignore if the vdi is being introduced with same location*) List.iter (fun vdi -> if Db.VDI.get_location ~__context ~self:vdi = location - && Db.VDI.get_uuid ~__context ~self:vdi <> uuid + && Db.VDI.get_uuid ~__context ~self:vdi <> uuid then raise (Api_errors.Server_error (Api_errors.location_not_unique, [ Ref.string_of sR; location ])) ) (Db.SR.get_VDIs ~__context ~self:sR); - let task = Context.get_task_id __context in + let task = Context.get_task_id __context in let sr' = Db.SR.get_uuid ~__context ~self:sR in let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in Sm.assert_pbd_is_plugged ~__context ~sr:sR; let vdi_info = transform_storage_exn (fun () -> - C.VDI.introduce ~dbg:(Ref.string_of task) ~sr:sr' ~uuid ~sm_config ~location + C.VDI.introduce ~dbg:(Ref.string_of task) ~sr:sr' ~uuid ~sm_config ~location ) in let ref = update_vdi_db ~__context ~sr:sR vdi_info in @@ -410,10 +410,10 @@ let introduce ~__context ~uuid ~name_label ~name_description ~sR ~_type ~sharabl update_allowed_operations ~__context ~self:ref; ref -let update ~__context ~vdi = +let update ~__context ~vdi = let vdi_loc = Db.VDI.get_location ~__context 
~self:vdi in debug "update ref=%s location=%s" (Ref.string_of vdi) vdi_loc; - let task = Context.get_task_id __context in + let task = Context.get_task_id __context in let sR = Db.VDI.get_SR ~__context ~self:vdi in let sr' = Db.SR.get_uuid ~__context ~self:sR in let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in @@ -429,28 +429,28 @@ open Client snapshot operation (e.g. vmhint for NetAPP) *) let snapshot_and_clone call_f ~__context ~vdi ~driver_params = - let sR = Db.VDI.get_SR ~__context ~self:vdi in + let sR = Db.VDI.get_SR ~__context ~self:vdi in Sm.assert_pbd_is_plugged ~__context ~sr:sR; Xapi_vdi_helpers.assert_managed ~__context ~vdi; let a = Db.VDI.get_record_internal ~__context ~self:vdi in - let call_snapshot () = - let open Storage_access in - let task = Context.get_task_id __context in - let open Storage_interface in - let vdi' = Db.VDI.get_location ~__context ~self:vdi in - let vdi_info = { - default_vdi_info with - vdi = vdi'; - name_label = a.Db_actions.vDI_name_label; - name_description = a.Db_actions.vDI_name_description; - sm_config = driver_params; - snapshot_time = Date.to_string (Date.of_float (Unix.gettimeofday ())); - } in - let sr' = Db.SR.get_uuid ~__context ~self:sR in - (* We don't use transform_storage_exn because of the clone/copy fallback below *) - let new_vdi = call_f ~dbg:(Ref.string_of task) ~sr:sr' ~vdi_info in - update_vdi_db ~__context ~sr:sR new_vdi + let call_snapshot () = + let open Storage_access in + let task = Context.get_task_id __context in + let open Storage_interface in + let vdi' = Db.VDI.get_location ~__context ~self:vdi in + let vdi_info = { + default_vdi_info with + vdi = vdi'; + name_label = a.Db_actions.vDI_name_label; + name_description = a.Db_actions.vDI_name_description; + sm_config = driver_params; + snapshot_time = Date.to_string (Date.of_float (Unix.gettimeofday ())); + } in + let sr' = Db.SR.get_uuid ~__context ~self:sR in + (* We don't use transform_storage_exn because of 
the clone/copy fallback below *) + let new_vdi = call_f ~dbg:(Ref.string_of task) ~sr:sr' ~vdi_info in + update_vdi_db ~__context ~sr:sR new_vdi in (* While we don't have blkback support for pause/unpause we only do this @@ -469,314 +469,314 @@ let snapshot_and_clone call_f ~__context ~vdi ~driver_params = newvdi let snapshot ~__context ~vdi ~driver_params = - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - let newvdi = Storage_access.transform_storage_exn - (fun () -> - try - snapshot_and_clone C.VDI.snapshot ~__context ~vdi ~driver_params - with Storage_interface.Unimplemented _ -> - debug "Backend reported not implemented despite it offering the feature"; - raise (Api_errors.Server_error(Api_errors.unimplemented_in_sm_backend, [ Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi) ])) - ) in - (* Record the fact this is a snapshot *) - Db.VDI.set_is_a_snapshot ~__context ~self:newvdi ~value:true; - Db.VDI.set_snapshot_of ~__context ~self:newvdi ~value:vdi; - - update_allowed_operations ~__context ~self:newvdi; - newvdi + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + let newvdi = Storage_access.transform_storage_exn + (fun () -> + try + snapshot_and_clone C.VDI.snapshot ~__context ~vdi ~driver_params + with Storage_interface.Unimplemented _ -> + debug "Backend reported not implemented despite it offering the feature"; + raise (Api_errors.Server_error(Api_errors.unimplemented_in_sm_backend, [ Ref.string_of (Db.VDI.get_SR ~__context ~self:vdi) ])) + ) in + (* Record the fact this is a snapshot *) + Db.VDI.set_is_a_snapshot ~__context ~self:newvdi ~value:true; + Db.VDI.set_snapshot_of ~__context ~self:newvdi ~value:vdi; + + update_allowed_operations ~__context ~self:newvdi; + newvdi let destroy ~__context ~self = - let sr = Db.VDI.get_SR ~__context ~self in - let location = Db.VDI.get_location ~__context ~self in + let sr = Db.VDI.get_SR ~__context ~self in + let location = 
Db.VDI.get_location ~__context ~self in Sm.assert_pbd_is_plugged ~__context ~sr; Xapi_vdi_helpers.assert_managed ~__context ~vdi:self; let vbds = Db.VDI.get_VBDs ~__context ~self in - let attached_vbds = List.filter - (fun vbd-> - let r = Db.VBD.get_record_internal ~__context ~self:vbd in - r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved) vbds in - if attached_vbds<>[] then - raise (Api_errors.Server_error (Api_errors.vdi_in_use, [(Ref.string_of self); "destroy" ])) - else - begin - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let module C = Client(struct let rpc = rpc end) in - transform_storage_exn - (fun () -> - C.VDI.destroy ~dbg:(Ref.string_of task) ~sr:(Db.SR.get_uuid ~__context ~self:sr) ~vdi:location - ); - if Db.is_valid_ref __context self - then Db.VDI.destroy ~__context ~self; - - (* destroy all the VBDs now rather than wait for the GC thread. This helps - prevent transient glitches but doesn't totally prevent races. 
*) - List.iter (fun vbd -> - Helpers.log_exn_continue (Printf.sprintf "destroying VBD: %s" (Ref.string_of vbd)) - (fun vbd -> Db.VBD.destroy ~__context ~self:vbd) vbd) vbds; - (* Db.VDI.destroy ~__context ~self *) - end - -let resize_online ~__context ~vdi ~size = + let attached_vbds = List.filter + (fun vbd-> + let r = Db.VBD.get_record_internal ~__context ~self:vbd in + r.Db_actions.vBD_currently_attached || r.Db_actions.vBD_reserved) vbds in + if attached_vbds<>[] then + raise (Api_errors.Server_error (Api_errors.vdi_in_use, [(Ref.string_of self); "destroy" ])) + else + begin + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let module C = Client(struct let rpc = rpc end) in + transform_storage_exn + (fun () -> + C.VDI.destroy ~dbg:(Ref.string_of task) ~sr:(Db.SR.get_uuid ~__context ~self:sr) ~vdi:location + ); + if Db.is_valid_ref __context self + then Db.VDI.destroy ~__context ~self; + + (* destroy all the VBDs now rather than wait for the GC thread. This helps + prevent transient glitches but doesn't totally prevent races. 
*) + List.iter (fun vbd -> + Helpers.log_exn_continue (Printf.sprintf "destroying VBD: %s" (Ref.string_of vbd)) + (fun vbd -> Db.VBD.destroy ~__context ~self:vbd) vbd) vbds; + (* Db.VDI.destroy ~__context ~self *) + end + +let resize_online ~__context ~vdi ~size = Sm.assert_pbd_is_plugged ~__context ~sr:(Db.VDI.get_SR ~__context ~self:vdi); Xapi_vdi_helpers.assert_managed ~__context ~vdi; Storage_access.transform_storage_exn (fun () -> - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - let sr = Db.VDI.get_SR ~__context ~self:vdi in - let sr = Db.SR.get_uuid ~__context ~self:sr in - let vdi' = Db.VDI.get_location ~__context ~self:vdi in - let dbg = Ref.string_of (Context.get_task_id __context) in - let new_size = C.VDI.resize ~dbg ~sr ~vdi:vdi' ~new_size:size in - Db.VDI.set_virtual_size ~__context ~self:vdi ~value:new_size + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + let sr = Db.VDI.get_SR ~__context ~self:vdi in + let sr = Db.SR.get_uuid ~__context ~self:sr in + let vdi' = Db.VDI.get_location ~__context ~self:vdi in + let dbg = Ref.string_of (Context.get_task_id __context) in + let new_size = C.VDI.resize ~dbg ~sr ~vdi:vdi' ~new_size:size in + Db.VDI.set_virtual_size ~__context ~self:vdi ~value:new_size ) let resize = resize_online -let generate_config ~__context ~host ~vdi = +let generate_config ~__context ~host ~vdi = Sm.assert_pbd_is_plugged ~__context ~sr:(Db.VDI.get_SR ~__context ~self:vdi); Xapi_vdi_helpers.assert_managed ~__context ~vdi; Sm.call_sm_vdi_functions ~__context ~vdi (fun srconf srtype sr -> - Sm.vdi_generate_config srconf srtype sr vdi) + Sm.vdi_generate_config srconf srtype sr vdi) let clone ~__context ~vdi ~driver_params = - Storage_access.transform_storage_exn - (fun () -> - try - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - snapshot_and_clone C.VDI.clone ~__context ~vdi ~driver_params - with Storage_interface.Unimplemented 
_ -> - debug "Backend does not implement VDI clone: doing it ourselves"; - let a = Db.VDI.get_record_internal ~__context ~self:vdi in - let newvdi = create ~__context - ~name_label:a.Db_actions.vDI_name_label - ~name_description:a.Db_actions.vDI_name_description - ~sR:a.Db_actions.vDI_SR - ~virtual_size:a.Db_actions.vDI_virtual_size - ~_type:a.Db_actions.vDI_type - ~sharable:a.Db_actions.vDI_sharable - ~read_only:a.Db_actions.vDI_read_only - ~other_config:a.Db_actions.vDI_other_config - ~xenstore_data:a.Db_actions.vDI_xenstore_data - ~sm_config:[] ~tags:[] - in - (try - (* Remove the vdi_clone from the SR's current operations, this prevents the whole - SR being locked for the duration of the slow copy *) - let task_id = Ref.string_of (Context.get_task_id __context) in - Db.SR.remove_from_current_operations ~__context ~self:a.Db_actions.vDI_SR ~key:task_id; - Xapi_sr_operations.update_allowed_operations ~__context ~self:a.Db_actions.vDI_SR; - (* Remove the clone from the VDI's current operations since the dom0 block-attach - will protect the VDI anyway. There's no point refreshing the VDI's allowed operations - because they're going to change when the VBD.plug happens. 
*) - Db.VDI.remove_from_current_operations ~__context ~self:vdi ~key:task_id; - - Sm_fs_ops.copy_vdi ~__context vdi newvdi; - - Db.VDI.remove_from_current_operations ~__context ~self:newvdi ~key:task_id; - update_allowed_operations ~__context ~self:newvdi; - - newvdi - with e -> - debug "Caught failure during copy, deleting VDI"; - destroy ~__context ~self:newvdi; - raise e) - ) + Storage_access.transform_storage_exn + (fun () -> + try + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + snapshot_and_clone C.VDI.clone ~__context ~vdi ~driver_params + with Storage_interface.Unimplemented _ -> + debug "Backend does not implement VDI clone: doing it ourselves"; + let a = Db.VDI.get_record_internal ~__context ~self:vdi in + let newvdi = create ~__context + ~name_label:a.Db_actions.vDI_name_label + ~name_description:a.Db_actions.vDI_name_description + ~sR:a.Db_actions.vDI_SR + ~virtual_size:a.Db_actions.vDI_virtual_size + ~_type:a.Db_actions.vDI_type + ~sharable:a.Db_actions.vDI_sharable + ~read_only:a.Db_actions.vDI_read_only + ~other_config:a.Db_actions.vDI_other_config + ~xenstore_data:a.Db_actions.vDI_xenstore_data + ~sm_config:[] ~tags:[] + in + (try + (* Remove the vdi_clone from the SR's current operations, this prevents the whole + SR being locked for the duration of the slow copy *) + let task_id = Ref.string_of (Context.get_task_id __context) in + Db.SR.remove_from_current_operations ~__context ~self:a.Db_actions.vDI_SR ~key:task_id; + Xapi_sr_operations.update_allowed_operations ~__context ~self:a.Db_actions.vDI_SR; + (* Remove the clone from the VDI's current operations since the dom0 block-attach + will protect the VDI anyway. There's no point refreshing the VDI's allowed operations + because they're going to change when the VBD.plug happens. 
*) + Db.VDI.remove_from_current_operations ~__context ~self:vdi ~key:task_id; + + Sm_fs_ops.copy_vdi ~__context vdi newvdi; + + Db.VDI.remove_from_current_operations ~__context ~self:newvdi ~key:task_id; + update_allowed_operations ~__context ~self:newvdi; + + newvdi + with e -> + debug "Caught failure during copy, deleting VDI"; + destroy ~__context ~self:newvdi; + raise e) + ) let copy ~__context ~vdi ~sr ~base_vdi ~into_vdi = - Xapi_vdi_helpers.assert_managed ~__context ~vdi; - let task_id = Ref.string_of (Context.get_task_id __context) in - let src = Db.VDI.get_record ~__context ~self:vdi in - - (* If 'into' is a valid VDI then we will write into that. - Otherwise we'll create a fresh VDI in 'sr'. *) - - (* Note that we should destroy 'dst' on failure IFF we created it - here. We really ought to have a persistent log of cleanup actions, - since this will get lost over process restart. *) - let vdi_to_cleanup = ref None in - try - let dst = - if Db.is_valid_ref __context into_vdi - then into_vdi - else - (* When creating a new VDI, clone as many properties of the - original as we can. If we're not cloning a property, please - explain why in a comment. *) - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let new_vdi = Client.VDI.create ~rpc ~session_id - ~name_label:src.API.vDI_name_label - ~name_description:src.API.vDI_name_description - ~sR:sr - ~virtual_size:src.API.vDI_virtual_size - ~_type:src.API.vDI_type - ~sharable:src.API.vDI_sharable - (* CA-64962: Always create a RW VDI such that copy operation works with RO source VDI as well *) - ~read_only:false - ~other_config:src.API.vDI_other_config - ~xenstore_data:src.API.vDI_xenstore_data - (* The SM layer stores things like locks (!) 
here, don't clone a locked lock *) - ~sm_config:[] - ~tags:src.API.vDI_tags in - vdi_to_cleanup := Some new_vdi; - if src.API.vDI_on_boot = `reset then begin - try Client.VDI.set_on_boot ~rpc ~session_id ~self:new_vdi ~value:(`reset) with _ -> () - end; - Db.VDI.set_allow_caching ~__context ~self:new_vdi ~value:src.API.vDI_allow_caching; - new_vdi - ) in - (* Check the destination VDI is suitable to receive the data. *) - let dst_r = Db.VDI.get_record __context dst in - if dst_r.API.vDI_read_only then begin - error "VDI.copy: cannot copy into a read-only VDI: %s" (Ref.string_of dst); - raise (Api_errors.Server_error(Api_errors.vdi_readonly, [ Ref.string_of dst ])) - end; - if dst_r.API.vDI_virtual_size < src.API.vDI_virtual_size then begin - error "VDI.copy: cannot copy a VDI (%s) of size %Ld into a VDI (%s) of size %Ld" - (Ref.string_of vdi) src.API.vDI_virtual_size (Ref.string_of dst) dst_r.API.vDI_virtual_size; - raise (Api_errors.Server_error(Api_errors.vdi_too_small, [ Ref.string_of dst; Int64.to_string src.API.vDI_virtual_size ])) - end; - let base = - if Db.is_valid_ref __context base_vdi - then Some base_vdi - else None in - - Sm_fs_ops.copy_vdi ~__context ?base vdi dst; - - Db.VDI.remove_from_current_operations ~__context ~self:dst ~key:task_id; - update_allowed_operations ~__context ~self:dst; - - dst - with e -> - begin match !vdi_to_cleanup with - | Some vdi -> - error "Caught %s during VDI.copy; cleaning up created VDI %s" (Printexc.to_string e) (Ref.string_of vdi); - Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VDI.destroy rpc session_id vdi) - | None -> () - end; - raise e - -let force_unlock ~__context ~vdi = + Xapi_vdi_helpers.assert_managed ~__context ~vdi; + let task_id = Ref.string_of (Context.get_task_id __context) in + let src = Db.VDI.get_record ~__context ~self:vdi in + + (* If 'into' is a valid VDI then we will write into that. + Otherwise we'll create a fresh VDI in 'sr'. 
*) + + (* Note that we should destroy 'dst' on failure IFF we created it + here. We really ought to have a persistent log of cleanup actions, + since this will get lost over process restart. *) + let vdi_to_cleanup = ref None in + try + let dst = + if Db.is_valid_ref __context into_vdi + then into_vdi + else + (* When creating a new VDI, clone as many properties of the + original as we can. If we're not cloning a property, please + explain why in a comment. *) + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let new_vdi = Client.VDI.create ~rpc ~session_id + ~name_label:src.API.vDI_name_label + ~name_description:src.API.vDI_name_description + ~sR:sr + ~virtual_size:src.API.vDI_virtual_size + ~_type:src.API.vDI_type + ~sharable:src.API.vDI_sharable + (* CA-64962: Always create a RW VDI such that copy operation works with RO source VDI as well *) + ~read_only:false + ~other_config:src.API.vDI_other_config + ~xenstore_data:src.API.vDI_xenstore_data + (* The SM layer stores things like locks (!) here, don't clone a locked lock *) + ~sm_config:[] + ~tags:src.API.vDI_tags in + vdi_to_cleanup := Some new_vdi; + if src.API.vDI_on_boot = `reset then begin + try Client.VDI.set_on_boot ~rpc ~session_id ~self:new_vdi ~value:(`reset) with _ -> () + end; + Db.VDI.set_allow_caching ~__context ~self:new_vdi ~value:src.API.vDI_allow_caching; + new_vdi + ) in + (* Check the destination VDI is suitable to receive the data. 
*) + let dst_r = Db.VDI.get_record __context dst in + if dst_r.API.vDI_read_only then begin + error "VDI.copy: cannot copy into a read-only VDI: %s" (Ref.string_of dst); + raise (Api_errors.Server_error(Api_errors.vdi_readonly, [ Ref.string_of dst ])) + end; + if dst_r.API.vDI_virtual_size < src.API.vDI_virtual_size then begin + error "VDI.copy: cannot copy a VDI (%s) of size %Ld into a VDI (%s) of size %Ld" + (Ref.string_of vdi) src.API.vDI_virtual_size (Ref.string_of dst) dst_r.API.vDI_virtual_size; + raise (Api_errors.Server_error(Api_errors.vdi_too_small, [ Ref.string_of dst; Int64.to_string src.API.vDI_virtual_size ])) + end; + let base = + if Db.is_valid_ref __context base_vdi + then Some base_vdi + else None in + + Sm_fs_ops.copy_vdi ~__context ?base vdi dst; + + Db.VDI.remove_from_current_operations ~__context ~self:dst ~key:task_id; + update_allowed_operations ~__context ~self:dst; + + dst + with e -> + begin match !vdi_to_cleanup with + | Some vdi -> + error "Caught %s during VDI.copy; cleaning up created VDI %s" (Printexc.to_string e) (Ref.string_of vdi); + Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VDI.destroy rpc session_id vdi) + | None -> () + end; + raise e + +let force_unlock ~__context ~vdi = raise (Api_errors.Server_error(Api_errors.message_deprecated,[])) let set_sharable ~__context ~self ~value = - Db.VDI.set_sharable ~__context ~self ~value - + Db.VDI.set_sharable ~__context ~self ~value + let set_managed ~__context ~self ~value = Db.VDI.set_managed ~__context ~self ~value -let set_read_only ~__context ~self ~value = +let set_read_only ~__context ~self ~value = Db.VDI.set_read_only ~__context ~self ~value -let set_missing ~__context ~self ~value = +let set_missing ~__context ~self ~value = Db.VDI.set_missing ~__context ~self ~value - -let set_virtual_size ~__context ~self ~value = + +let set_virtual_size ~__context ~self ~value = Db.VDI.set_virtual_size ~__context ~self ~value -let set_physical_utilisation ~__context 
~self ~value = +let set_physical_utilisation ~__context ~self ~value = Db.VDI.set_physical_utilisation ~__context ~self ~value let set_is_a_snapshot ~__context ~self ~value = - Db.VDI.set_is_a_snapshot ~__context ~self ~value + Db.VDI.set_is_a_snapshot ~__context ~self ~value let set_snapshot_of ~__context ~self ~value = - Db.VDI.set_snapshot_of ~__context ~self ~value + Db.VDI.set_snapshot_of ~__context ~self ~value let set_snapshot_time ~__context ~self ~value = - Db.VDI.set_snapshot_time ~__context ~self ~value + Db.VDI.set_snapshot_time ~__context ~self ~value let set_metadata_of_pool ~__context ~self ~value = - Db.VDI.set_metadata_of_pool ~__context ~self ~value + Db.VDI.set_metadata_of_pool ~__context ~self ~value let set_on_boot ~__context ~self ~value = - let sr = Db.VDI.get_SR ~__context ~self in - let sr_record = Db.SR.get_record_internal ~__context ~self:sr in - let sm_features = Xapi_sr_operations.features_of_sr ~__context sr_record in - - if not Smint.(has_capability Vdi_reset_on_boot sm_features) then - raise (Api_errors.Server_error(Api_errors.sr_operation_not_supported,[Ref.string_of sr])); - Sm.assert_pbd_is_plugged ~__context ~sr; - - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let vdi' = Db.VDI.get_location ~__context ~self in - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - transform_storage_exn - (fun () -> - C.VDI.set_persistent ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~persistent:(value = `persist); - ); - - Db.VDI.set_on_boot ~__context ~self ~value + let sr = Db.VDI.get_SR ~__context ~self in + let sr_record = Db.SR.get_record_internal ~__context ~self:sr in + let sm_features = Xapi_sr_operations.features_of_sr ~__context sr_record in + + if not Smint.(has_capability Vdi_reset_on_boot sm_features) then + raise (Api_errors.Server_error(Api_errors.sr_operation_not_supported,[Ref.string_of 
sr])); + Sm.assert_pbd_is_plugged ~__context ~sr; + + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let vdi' = Db.VDI.get_location ~__context ~self in + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + transform_storage_exn + (fun () -> + C.VDI.set_persistent ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~persistent:(value = `persist); + ); + + Db.VDI.set_on_boot ~__context ~self ~value let set_allow_caching ~__context ~self ~value = - Db.VDI.set_allow_caching ~__context ~self ~value + Db.VDI.set_allow_caching ~__context ~self ~value let set_name_label ~__context ~self ~value = - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let sr = Db.VDI.get_SR ~__context ~self in - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let vdi' = Db.VDI.get_location ~__context ~self in - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - transform_storage_exn - (fun () -> - C.VDI.set_name_label ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~new_name_label:value - ); - update ~__context ~vdi:self + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let sr = Db.VDI.get_SR ~__context ~self in + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let vdi' = Db.VDI.get_location ~__context ~self in + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + transform_storage_exn + (fun () -> + C.VDI.set_name_label ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~new_name_label:value + ); + update ~__context ~vdi:self let set_name_description ~__context ~self ~value = - let open Storage_access in - let open Storage_interface in - let task = Context.get_task_id __context in - let sr = Db.VDI.get_SR ~__context ~self in - let sr' = Db.SR.get_uuid ~__context ~self:sr in - let vdi' 
= Db.VDI.get_location ~__context ~self in - let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in - transform_storage_exn - (fun () -> - C.VDI.set_name_description ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~new_name_description:value - ); - update ~__context ~vdi:self + let open Storage_access in + let open Storage_interface in + let task = Context.get_task_id __context in + let sr = Db.VDI.get_SR ~__context ~self in + let sr' = Db.SR.get_uuid ~__context ~self:sr in + let vdi' = Db.VDI.get_location ~__context ~self in + let module C = Storage_interface.Client(struct let rpc = Storage_access.rpc end) in + transform_storage_exn + (fun () -> + C.VDI.set_name_description ~dbg:(Ref.string_of task) ~sr:sr' ~vdi:vdi' ~new_name_description:value + ); + update ~__context ~vdi:self let checksum ~__context ~self = - let do_checksum f = Digest.to_hex (Digest.file f) in - Helpers.call_api_functions ~__context - (fun rpc session_id -> Sm_fs_ops.with_block_attached_device __context rpc session_id self `RO do_checksum) + let do_checksum f = Digest.to_hex (Digest.file f) in + Helpers.call_api_functions ~__context + (fun rpc session_id -> Sm_fs_ops.with_block_attached_device __context rpc session_id self `RO do_checksum) (* Functions for opening foreign databases on VDIs *) let open_database ~__context ~self = - let vdi_type = Db.VDI.get_type ~__context ~self in - if vdi_type <> `metadata then - raise (Api_errors.Server_error(Api_errors.vdi_incompatible_type, - [Ref.string_of self; Record_util.vdi_type_to_string vdi_type])); - try - let db_ref = - Some (Xapi_vdi_helpers.database_ref_of_vdi ~__context ~vdi:self) in - (* Create a new session to query the database, and associate it with the db ref *) - debug "%s" "Creating readonly session"; - Xapi_session.create_readonly_session ~__context - ~uname:"disaster-recovery" ~db_ref - with e -> - let error = Printexc.to_string e in - let reason = match e with - | Db_exn.DBCache_NotFound(_, _, _) -> "Database 
does not match local schema." - | _ -> error - in - debug "Caught %s while trying to open database." error; - raise (Api_errors.Server_error(Api_errors.could_not_import_database, [reason])) + let vdi_type = Db.VDI.get_type ~__context ~self in + if vdi_type <> `metadata then + raise (Api_errors.Server_error(Api_errors.vdi_incompatible_type, + [Ref.string_of self; Record_util.vdi_type_to_string vdi_type])); + try + let db_ref = + Some (Xapi_vdi_helpers.database_ref_of_vdi ~__context ~vdi:self) in + (* Create a new session to query the database, and associate it with the db ref *) + debug "%s" "Creating readonly session"; + Xapi_session.create_readonly_session ~__context + ~uname:"disaster-recovery" ~db_ref + with e -> + let error = Printexc.to_string e in + let reason = match e with + | Db_exn.DBCache_NotFound(_, _, _) -> "Database does not match local schema." + | _ -> error + in + debug "Caught %s while trying to open database." error; + raise (Api_errors.Server_error(Api_errors.could_not_import_database, [reason])) let read_database_pool_uuid ~__context ~self = - match Xapi_dr.read_vdi_cache_record ~vdi:self with - | Some (_, uuid) -> uuid - | None -> "" + match Xapi_dr.read_vdi_cache_record ~vdi:self with + | Some (_, uuid) -> uuid + | None -> "" (* let pool_migrate = "See Xapi_vm_migrate.vdi_pool_migrate!" 
*) diff --git a/ocaml/xapi/xapi_vdi_helpers.ml b/ocaml/xapi/xapi_vdi_helpers.ml index b7350145e5d..23a2dbcdfea 100644 --- a/ocaml/xapi/xapi_vdi_helpers.ml +++ b/ocaml/xapi/xapi_vdi_helpers.ml @@ -13,7 +13,7 @@ *) (** * @group Storage - *) +*) open Client open Db_cache_types @@ -25,7 +25,7 @@ module D=Debug.Make(struct let name="xapi" end) open D (* CA-26514: Block operations on 'unmanaged' VDIs *) -let assert_managed ~__context ~vdi = +let assert_managed ~__context ~vdi = if not (Db.VDI.get_managed ~__context ~self:vdi) then raise (Api_errors.Server_error(Api_errors.vdi_not_managed, [ Ref.string_of vdi ])) @@ -33,215 +33,215 @@ let assert_managed ~__context ~vdi = let redo_log_lifecycle_mutex = Mutex.create () let metadata_replication : ((API.ref_VDI, (API.ref_VBD * Redo_log.redo_log)) Hashtbl.t) = - Hashtbl.create Xapi_globs.redo_log_max_instances + Hashtbl.create Xapi_globs.redo_log_max_instances let get_master_dom0 ~__context = - let master = Helpers.get_master ~__context in - Db.Host.get_control_domain ~__context ~self:master + let master = Helpers.get_master ~__context in + Db.Host.get_control_domain ~__context ~self:master (* Unplug and destroy any existing VBDs owned by the VDI. *) let destroy_all_vbds ~__context ~vdi = - let existing_vbds = Db.VDI.get_VBDs ~__context ~self:vdi in - Helpers.call_api_functions ~__context - (fun rpc session_id -> List.iter - (fun vbd -> - if Client.VBD.get_currently_attached ~session_id ~rpc ~self:vbd then begin - (* In the case of HA failover, attempting to unplug the previous master's VBD will timeout as the host is uncontactable. *) - try - Attach_helpers.safe_unplug rpc session_id vbd - with Api_errors.Server_error(code, _) when code = Api_errors.cannot_contact_host -> - debug "VBD.unplug attempt on metadata VDI %s timed out - assuming that this is an HA failover and that the previous master is now dead." 
- (Db.VDI.get_uuid ~__context ~self:vdi) - end; - (* Meanwhile, HA should mark the previous master as dead and set the VBD as detached. *) - (* If the VBD is not detached by now, VBD.destroy will fail and we will give up. *) - Client.VBD.destroy ~rpc ~session_id ~self:vbd) - existing_vbds) + let existing_vbds = Db.VDI.get_VBDs ~__context ~self:vdi in + Helpers.call_api_functions ~__context + (fun rpc session_id -> List.iter + (fun vbd -> + if Client.VBD.get_currently_attached ~session_id ~rpc ~self:vbd then begin + (* In the case of HA failover, attempting to unplug the previous master's VBD will timeout as the host is uncontactable. *) + try + Attach_helpers.safe_unplug rpc session_id vbd + with Api_errors.Server_error(code, _) when code = Api_errors.cannot_contact_host -> + debug "VBD.unplug attempt on metadata VDI %s timed out - assuming that this is an HA failover and that the previous master is now dead." + (Db.VDI.get_uuid ~__context ~self:vdi) + end; + (* Meanwhile, HA should mark the previous master as dead and set the VBD as detached. *) + (* If the VBD is not detached by now, VBD.destroy will fail and we will give up. *) + Client.VBD.destroy ~rpc ~session_id ~self:vbd) + existing_vbds) (* Create and plug a VBD from the VDI, then create a redo log and point it at the block device. *) let enable_database_replication ~__context ~get_vdi_callback = - Mutex.execute redo_log_lifecycle_mutex (fun () -> - (* Check that the number of metadata redo_logs isn't already at the limit. *) - (* There should never actually be more redo_logs than the limit! 
*) - if Hashtbl.length metadata_replication >= Xapi_globs.redo_log_max_instances then - raise (Api_errors.Server_error(Api_errors.no_more_redo_logs_allowed, [])); - let vdi = get_vdi_callback () in - let vdi_uuid = Db.VDI.get_uuid ~__context ~self:vdi in - if Hashtbl.mem metadata_replication vdi then - debug "Metadata is already being replicated to VDI %s" vdi_uuid - else begin - debug "Attempting to enable metadata replication to VDI %s" vdi_uuid; - let dom0 = get_master_dom0 ~__context in - (* We've established that metadata is not being replicated to this VDI, so it should be safe to do this. *) - destroy_all_vbds ~__context ~vdi; - (* Create and plug vbd *) - let vbd = Helpers.call_api_functions ~__context (fun rpc session_id -> - let vbd = Client.VBD.create ~rpc ~session_id ~vM:dom0 ~empty:false ~vDI:vdi - ~userdevice:"autodetect" ~bootable:false ~mode:`RW ~_type:`Disk - ~unpluggable:true ~qos_algorithm_type:"" ~qos_algorithm_params:[] - ~other_config:[] - in - Client.VBD.plug ~rpc ~session_id ~self:vbd; - vbd) - in - (* This needs to be done in a thread, otherwise the redo_log will hang when attempting the DB write. 
*) - let state_change_callback = - Some (fun new_state -> - ignore (Thread.create (fun () -> - Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:new_state) ())) - in - (* Enable redo_log and point it at the new device *) - let log_name = Printf.sprintf "DR redo log for VDI %s" vdi_uuid in - let log = Redo_log.create ~name:log_name ~state_change_callback ~read_only:false in - let device = Db.VBD.get_device ~__context ~self:vbd in - try - Redo_log.enable_block log ("/dev/" ^ device); - Hashtbl.add metadata_replication vdi (vbd, log); - let vbd_uuid = Db.VBD.get_uuid ~__context ~self:vbd in - Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:true; - debug "Redo log started on VBD %s" vbd_uuid - with e -> - Redo_log.shutdown log; - Redo_log.delete log; - Helpers.call_api_functions ~__context (fun rpc session_id -> - Client.VBD.unplug ~rpc ~session_id ~self:vbd); - raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, - [Printexc.to_string e])) - end - ) + Mutex.execute redo_log_lifecycle_mutex (fun () -> + (* Check that the number of metadata redo_logs isn't already at the limit. *) + (* There should never actually be more redo_logs than the limit! *) + if Hashtbl.length metadata_replication >= Xapi_globs.redo_log_max_instances then + raise (Api_errors.Server_error(Api_errors.no_more_redo_logs_allowed, [])); + let vdi = get_vdi_callback () in + let vdi_uuid = Db.VDI.get_uuid ~__context ~self:vdi in + if Hashtbl.mem metadata_replication vdi then + debug "Metadata is already being replicated to VDI %s" vdi_uuid + else begin + debug "Attempting to enable metadata replication to VDI %s" vdi_uuid; + let dom0 = get_master_dom0 ~__context in + (* We've established that metadata is not being replicated to this VDI, so it should be safe to do this. 
*) + destroy_all_vbds ~__context ~vdi; + (* Create and plug vbd *) + let vbd = Helpers.call_api_functions ~__context (fun rpc session_id -> + let vbd = Client.VBD.create ~rpc ~session_id ~vM:dom0 ~empty:false ~vDI:vdi + ~userdevice:"autodetect" ~bootable:false ~mode:`RW ~_type:`Disk + ~unpluggable:true ~qos_algorithm_type:"" ~qos_algorithm_params:[] + ~other_config:[] + in + Client.VBD.plug ~rpc ~session_id ~self:vbd; + vbd) + in + (* This needs to be done in a thread, otherwise the redo_log will hang when attempting the DB write. *) + let state_change_callback = + Some (fun new_state -> + ignore (Thread.create (fun () -> + Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:new_state) ())) + in + (* Enable redo_log and point it at the new device *) + let log_name = Printf.sprintf "DR redo log for VDI %s" vdi_uuid in + let log = Redo_log.create ~name:log_name ~state_change_callback ~read_only:false in + let device = Db.VBD.get_device ~__context ~self:vbd in + try + Redo_log.enable_block log ("/dev/" ^ device); + Hashtbl.add metadata_replication vdi (vbd, log); + let vbd_uuid = Db.VBD.get_uuid ~__context ~self:vbd in + Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:true; + debug "Redo log started on VBD %s" vbd_uuid + with e -> + Redo_log.shutdown log; + Redo_log.delete log; + Helpers.call_api_functions ~__context (fun rpc session_id -> + Client.VBD.unplug ~rpc ~session_id ~self:vbd); + raise (Api_errors.Server_error(Api_errors.cannot_enable_redo_log, + [Printexc.to_string e])) + end + ) (* Shut down the redo log, then unplug and destroy the VBD. *) let disable_database_replication ~__context ~vdi = - Mutex.execute redo_log_lifecycle_mutex (fun () -> - debug "Attempting to disable metadata replication on VDI [%s:%s]." - (Db.VDI.get_name_label ~__context ~self:vdi) (Db.VDI.get_uuid ~__context ~self:vdi); - if not(Hashtbl.mem metadata_replication vdi) then - debug "Metadata is not being replicated to this VDI." 
- else begin - let (vbd, log) = Hashtbl.find metadata_replication vdi in - Redo_log.shutdown log; - Redo_log.disable log; - (* Check the recorded VBD still exists before trying to unplug and destroy it. *) - if Db.is_valid_ref __context vbd then begin - Helpers.call_api_functions ~__context (fun rpc session_id -> - try - Attach_helpers.safe_unplug rpc session_id vbd; - Client.VBD.destroy ~rpc ~session_id ~self:vbd - with e -> - debug "Caught %s while trying to dispose of VBD %s." (Printexc.to_string e) (Ref.string_of vbd)); - end; - Hashtbl.remove metadata_replication vdi; - Redo_log.delete log; - Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false - end - ) + Mutex.execute redo_log_lifecycle_mutex (fun () -> + debug "Attempting to disable metadata replication on VDI [%s:%s]." + (Db.VDI.get_name_label ~__context ~self:vdi) (Db.VDI.get_uuid ~__context ~self:vdi); + if not(Hashtbl.mem metadata_replication vdi) then + debug "Metadata is not being replicated to this VDI." + else begin + let (vbd, log) = Hashtbl.find metadata_replication vdi in + Redo_log.shutdown log; + Redo_log.disable log; + (* Check the recorded VBD still exists before trying to unplug and destroy it. *) + if Db.is_valid_ref __context vbd then begin + Helpers.call_api_functions ~__context (fun rpc session_id -> + try + Attach_helpers.safe_unplug rpc session_id vbd; + Client.VBD.destroy ~rpc ~session_id ~self:vbd + with e -> + debug "Caught %s while trying to dispose of VBD %s." (Printexc.to_string e) (Ref.string_of vbd)); + end; + Hashtbl.remove metadata_replication vdi; + Redo_log.delete log; + Db.VDI.set_metadata_latest ~__context ~self:vdi ~value:false + end + ) let database_open_mutex = Mutex.create () (* Extract a database from a VDI. 
*) let database_ref_of_vdi ~__context ~vdi = - let database_ref_of_device device = - let log = Redo_log.create ~name:"Foreign database redo log" ~state_change_callback:None ~read_only:true in - debug "Enabling redo_log with device reason [%s]" device; - Redo_log.enable_block log device; - let db = Database.make (Datamodel_schema.of_datamodel ()) in - let db_ref = Db_ref.in_memory (ref (ref db)) in - Redo_log_usage.read_from_redo_log log Xapi_globs.foreign_metadata_db db_ref; - Redo_log.delete log; - (* Upgrade database to the local schema. *) - (* Reindex database to make sure is_valid_ref works. *) - Db_ref.update_database db_ref - (Db_upgrade.generic_database_upgrade - ++ Database.reindex - ++ (Db_backend.blow_away_non_persistent_fields (Datamodel_schema.of_datamodel ()))); - db_ref - in - Mutex.execute database_open_mutex - (fun () -> Helpers.call_api_functions ~__context - (fun rpc session_id -> Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RW database_ref_of_device)) + let database_ref_of_device device = + let log = Redo_log.create ~name:"Foreign database redo log" ~state_change_callback:None ~read_only:true in + debug "Enabling redo_log with device reason [%s]" device; + Redo_log.enable_block log device; + let db = Database.make (Datamodel_schema.of_datamodel ()) in + let db_ref = Db_ref.in_memory (ref (ref db)) in + Redo_log_usage.read_from_redo_log log Xapi_globs.foreign_metadata_db db_ref; + Redo_log.delete log; + (* Upgrade database to the local schema. *) + (* Reindex database to make sure is_valid_ref works. 
*) + Db_ref.update_database db_ref + (Db_upgrade.generic_database_upgrade + ++ Database.reindex + ++ (Db_backend.blow_away_non_persistent_fields (Datamodel_schema.of_datamodel ()))); + db_ref + in + Mutex.execute database_open_mutex + (fun () -> Helpers.call_api_functions ~__context + (fun rpc session_id -> Sm_fs_ops.with_block_attached_device __context rpc session_id vdi `RW database_ref_of_device)) module VDI_CStruct = struct - let magic_number = 0x7ada7adal - let magic_number_offset = 0 - let version = 1l - let version_offset = 4 - let length_offset = 8 - let data_offset = 12 - let vdi_format_length = 12 (* VDI format takes 12bytes *) - let vdi_size = 4194304 (* 4MiB *) - let default_offset = 0 - - (* Set the magic number *) - let set_magic_number cstruct = - Cstruct.BE.set_uint32 cstruct magic_number_offset magic_number - - (* Get the magic number *) - let get_magic_number cstruct = - Cstruct.BE.get_uint32 cstruct magic_number_offset - - (* Set the version *) - let set_version cstruct = - Cstruct.BE.set_uint32 cstruct version_offset version - - (* Set the data length *) - let set_data_length cstruct len = - Cstruct.BE.set_uint32 cstruct length_offset len - - (* Get the data length *) - let get_data_length cstruct = - Cstruct.BE.get_uint32 cstruct length_offset - - (* Write the string to the cstruct *) - let write cstruct text text_len = - Cstruct.blit_from_string text default_offset cstruct data_offset text_len; - set_data_length cstruct (Int32.of_int text_len) - - (* Read the string from the cstruct *) - let read cstruct = - let curr_len = Int32.to_int (get_data_length cstruct) in - let curr_text = String.make curr_len '\000' in - Cstruct.blit_to_string cstruct data_offset curr_text default_offset curr_len; - curr_text - - (* Format the cstruct for the first time *) - let format cstruct = - set_magic_number cstruct; - set_version cstruct + let magic_number = 0x7ada7adal + let magic_number_offset = 0 + let version = 1l + let version_offset = 4 + let 
length_offset = 8 + let data_offset = 12 + let vdi_format_length = 12 (* VDI format takes 12bytes *) + let vdi_size = 4194304 (* 4MiB *) + let default_offset = 0 + + (* Set the magic number *) + let set_magic_number cstruct = + Cstruct.BE.set_uint32 cstruct magic_number_offset magic_number + + (* Get the magic number *) + let get_magic_number cstruct = + Cstruct.BE.get_uint32 cstruct magic_number_offset + + (* Set the version *) + let set_version cstruct = + Cstruct.BE.set_uint32 cstruct version_offset version + + (* Set the data length *) + let set_data_length cstruct len = + Cstruct.BE.set_uint32 cstruct length_offset len + + (* Get the data length *) + let get_data_length cstruct = + Cstruct.BE.get_uint32 cstruct length_offset + + (* Write the string to the cstruct *) + let write cstruct text text_len = + Cstruct.blit_from_string text default_offset cstruct data_offset text_len; + set_data_length cstruct (Int32.of_int text_len) + + (* Read the string from the cstruct *) + let read cstruct = + let curr_len = Int32.to_int (get_data_length cstruct) in + let curr_text = String.make curr_len '\000' in + Cstruct.blit_to_string cstruct data_offset curr_text default_offset curr_len; + curr_text + + (* Format the cstruct for the first time *) + let format cstruct = + set_magic_number cstruct; + set_version cstruct end let write_raw ~__context ~vdi ~text = - if String.length text >= VDI_CStruct.(vdi_size - vdi_format_length) then - let error_msg = - Printf.sprintf "Cannot write %d bytes to raw VDI. 
Capacity = %d bytes" - (String.length text) VDI_CStruct.(vdi_size - vdi_format_length) in - ignore (failwith error_msg); - Helpers.call_api_functions ~__context - (fun rpc session_id -> Sm_fs_ops.with_open_block_attached_device __context rpc session_id vdi `RW - (fun fd -> - let contents = Unixext.really_read_string fd VDI_CStruct.vdi_size in - let cstruct = Cstruct.of_string contents in - if (VDI_CStruct.get_magic_number cstruct) <> VDI_CStruct.magic_number then - VDI_CStruct.format cstruct; - VDI_CStruct.write cstruct text (String.length text); - Unix.ftruncate fd 0; - Unixext.seek_to fd 0 |> ignore; - Unixext.really_write_string fd (VDI_CStruct.read cstruct); - ) - ) + if String.length text >= VDI_CStruct.(vdi_size - vdi_format_length) then + let error_msg = + Printf.sprintf "Cannot write %d bytes to raw VDI. Capacity = %d bytes" + (String.length text) VDI_CStruct.(vdi_size - vdi_format_length) in + ignore (failwith error_msg); + Helpers.call_api_functions ~__context + (fun rpc session_id -> Sm_fs_ops.with_open_block_attached_device __context rpc session_id vdi `RW + (fun fd -> + let contents = Unixext.really_read_string fd VDI_CStruct.vdi_size in + let cstruct = Cstruct.of_string contents in + if (VDI_CStruct.get_magic_number cstruct) <> VDI_CStruct.magic_number then + VDI_CStruct.format cstruct; + VDI_CStruct.write cstruct text (String.length text); + Unix.ftruncate fd 0; + Unixext.seek_to fd 0 |> ignore; + Unixext.really_write_string fd (VDI_CStruct.read cstruct); + ) + ) let read_raw ~__context ~vdi = - Helpers.call_api_functions ~__context - (fun rpc session_id -> Sm_fs_ops.with_open_block_attached_device __context rpc session_id vdi `RW - (fun fd -> - let contents = Unixext.really_read_string fd VDI_CStruct.vdi_size in - let cstruct = Cstruct.of_string contents in - if (VDI_CStruct.get_magic_number cstruct) <> VDI_CStruct.magic_number then begin - debug "Attempted read from raw VDI but VDI not formatted: returning None"; - None - end - else - Some 
(VDI_CStruct.read cstruct) - ) - ) + Helpers.call_api_functions ~__context + (fun rpc session_id -> Sm_fs_ops.with_open_block_attached_device __context rpc session_id vdi `RW + (fun fd -> + let contents = Unixext.really_read_string fd VDI_CStruct.vdi_size in + let cstruct = Cstruct.of_string contents in + if (VDI_CStruct.get_magic_number cstruct) <> VDI_CStruct.magic_number then begin + debug "Attempted read from raw VDI but VDI not formatted: returning None"; + None + end + else + Some (VDI_CStruct.read cstruct) + ) + ) diff --git a/ocaml/xapi/xapi_vgpu.ml b/ocaml/xapi/xapi_vgpu.ml index fbc850fff00..87f6ee45503 100644 --- a/ocaml/xapi/xapi_vgpu.ml +++ b/ocaml/xapi/xapi_vgpu.ml @@ -19,64 +19,64 @@ let m = Mutex.create () (* Only allow device = "0" for now, as we support just a single vGPU per VM *) let valid_device device = - device = "0" + device = "0" let create ~__context ~vM ~gPU_group ~device ~other_config ~_type = - let vgpu = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - if not (Pool_features.is_enabled ~__context Features.GPU) then - raise (Api_errors.Server_error (Api_errors.feature_restricted, [])); - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vM ~expected:`Halted; - if not(valid_device device) then - raise (Api_errors.Server_error (Api_errors.invalid_device, [device])); + let vgpu = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + if not (Pool_features.is_enabled ~__context Features.GPU) then + raise (Api_errors.Server_error (Api_errors.feature_restricted, [])); + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self:vM ~expected:`Halted; + if not(valid_device device) then + raise (Api_errors.Server_error (Api_errors.invalid_device, [device])); - (* For backwards compatibility, convert Ref.null into the passthrough type. 
*) - let _type = - if _type = Ref.null - then Xapi_vgpu_type.find_or_create ~__context Xapi_vgpu_type.passthrough_gpu - else begin - if Db.is_valid_ref __context _type - then _type - else raise (Api_errors.Server_error - (Api_errors.invalid_value, ["type"; Ref.string_of _type])) - end - in + (* For backwards compatibility, convert Ref.null into the passthrough type. *) + let _type = + if _type = Ref.null + then Xapi_vgpu_type.find_or_create ~__context Xapi_vgpu_type.passthrough_gpu + else begin + if Db.is_valid_ref __context _type + then _type + else raise (Api_errors.Server_error + (Api_errors.invalid_value, ["type"; Ref.string_of _type])) + end + in - Stdext.Threadext.Mutex.execute m (fun () -> - (* Check to make sure the device is unique *) - let all = Db.VM.get_VGPUs ~__context ~self:vM in - let all_devices = List.map (fun self -> Db.VGPU.get_device ~__context ~self) all in - if List.mem device all_devices then - raise (Api_errors.Server_error (Api_errors.device_already_exists, [device])); + Stdext.Threadext.Mutex.execute m (fun () -> + (* Check to make sure the device is unique *) + let all = Db.VM.get_VGPUs ~__context ~self:vM in + let all_devices = List.map (fun self -> Db.VGPU.get_device ~__context ~self) all in + if List.mem device all_devices then + raise (Api_errors.Server_error (Api_errors.device_already_exists, [device])); - Db.VGPU.create ~__context ~ref:vgpu ~uuid ~vM ~gPU_group ~device - ~currently_attached:false ~other_config ~_type ~resident_on:Ref.null - ~scheduled_to_be_resident_on:Ref.null; - ); - debug "VGPU ref='%s' created (VM = '%s', type = '%s')" (Ref.string_of vgpu) (Ref.string_of vM) (Ref.string_of _type); - vgpu + Db.VGPU.create ~__context ~ref:vgpu ~uuid ~vM ~gPU_group ~device + ~currently_attached:false ~other_config ~_type ~resident_on:Ref.null + ~scheduled_to_be_resident_on:Ref.null; + ); + debug "VGPU ref='%s' created (VM = '%s', type = '%s')" (Ref.string_of vgpu) (Ref.string_of vM) (Ref.string_of _type); + vgpu let destroy 
~__context ~self = - let vm = Db.VGPU.get_VM ~__context ~self in - if Helpers.is_running ~__context ~self:vm then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, ["vGPU currently attached to a running VM"])); - Db.VGPU.destroy ~__context ~self + let vm = Db.VGPU.get_VM ~__context ~self in + if Helpers.is_running ~__context ~self:vm then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, ["vGPU currently attached to a running VM"])); + Db.VGPU.destroy ~__context ~self let atomic_set_resident_on ~__context ~self ~value = assert false let copy ~__context ~vm vgpu = - let all = Db.VGPU.get_record ~__context ~self:vgpu in - let vgpu = create ~__context - ~device:all.API.vGPU_device - ~gPU_group:all.API.vGPU_GPU_group - ~vM:vm - ~other_config:all.API.vGPU_other_config - ~_type:all.API.vGPU_type - in - if all.API.vGPU_currently_attached then - Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:true; - vgpu + let all = Db.VGPU.get_record ~__context ~self:vgpu in + let vgpu = create ~__context + ~device:all.API.vGPU_device + ~gPU_group:all.API.vGPU_GPU_group + ~vM:vm + ~other_config:all.API.vGPU_other_config + ~_type:all.API.vGPU_type + in + if all.API.vGPU_currently_attached then + Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:true; + vgpu let requires_passthrough ~__context ~self = - let _type = Db.VGPU.get_type ~__context ~self in - Xapi_vgpu_type.requires_passthrough ~__context ~self:_type + let _type = Db.VGPU.get_type ~__context ~self in + Xapi_vgpu_type.requires_passthrough ~__context ~self:_type diff --git a/ocaml/xapi/xapi_vgpu.mli b/ocaml/xapi/xapi_vgpu.mli index 35868db8803..749c13a33d4 100644 --- a/ocaml/xapi/xapi_vgpu.mli +++ b/ocaml/xapi/xapi_vgpu.mli @@ -13,7 +13,7 @@ *) (** Module that defines API functions for VGPU objects * @group Graphics - *) +*) (** Create a VGPU. 
*) val create : @@ -29,10 +29,10 @@ val destroy : __context:Context.t -> self:[ `VGPU ] Ref.t -> unit (** Clear a VGPU's scheduled_to_be_resident_on field and set its resident_on * field. This should always run on the pool master. *) val atomic_set_resident_on : - __context:Context.t -> - self:[ `VGPU ] Ref.t -> - value:[ `PGPU ] Ref.t -> - unit + __context:Context.t -> + self:[ `VGPU ] Ref.t -> + value:[ `PGPU ] Ref.t -> + unit (** Duplicate a VGPU. *) val copy : diff --git a/ocaml/xapi/xapi_vgpu_type.ml b/ocaml/xapi/xapi_vgpu_type.ml index c479ff0b656..5fe473972c2 100644 --- a/ocaml/xapi/xapi_vgpu_type.ml +++ b/ocaml/xapi/xapi_vgpu_type.ml @@ -20,523 +20,523 @@ open Stdext.Xstringext exception Parse_error of exn module Identifier = struct - let version = 1 + let version = 1 - type nvidia_id = { - pdev_id : int; - psubdev_id : int option; - vdev_id : int; - vsubdev_id : int; - } + type nvidia_id = { + pdev_id : int; + psubdev_id : int option; + vdev_id : int; + vsubdev_id : int; + } - type gvt_g_id = { - pdev_id : int; - low_gm_sz : int64; - high_gm_sz : int64; - fence_sz : int64; - monitor_config_file : string option; - } + type gvt_g_id = { + pdev_id : int; + low_gm_sz : int64; + high_gm_sz : int64; + fence_sz : int64; + monitor_config_file : string option; + } - type t = - | Passthrough - | Nvidia of nvidia_id - | GVT_g of gvt_g_id + type t = + | Passthrough + | Nvidia of nvidia_id + | GVT_g of gvt_g_id - (* Create a unique string for each possible value of type t. This value - * functions as a primary key for the VGPU_type object, so we can use it - * to decide whether a relevant VGPU_type already exists in the database. 
*) - let to_string id = - let data = - match id with - | Passthrough -> "passthrough" - | Nvidia nvidia_id -> - Printf.sprintf "nvidia,%04x,%s,%04x,%04x" - nvidia_id.pdev_id - (match nvidia_id.psubdev_id with - | Some id -> Printf.sprintf "%04x" id - | None -> "") - nvidia_id.vdev_id - nvidia_id.vsubdev_id - | GVT_g gvt_g_id -> - Printf.sprintf "gvt-g,%04x,%Lx,%Lx,%Lx,%s" - gvt_g_id.pdev_id - gvt_g_id.low_gm_sz - gvt_g_id.high_gm_sz - gvt_g_id.fence_sz - (match gvt_g_id.monitor_config_file with - | Some path -> path - | None -> "") - in - Printf.sprintf "%04d:%s" version data + (* Create a unique string for each possible value of type t. This value + * functions as a primary key for the VGPU_type object, so we can use it + * to decide whether a relevant VGPU_type already exists in the database. *) + let to_string id = + let data = + match id with + | Passthrough -> "passthrough" + | Nvidia nvidia_id -> + Printf.sprintf "nvidia,%04x,%s,%04x,%04x" + nvidia_id.pdev_id + (match nvidia_id.psubdev_id with + | Some id -> Printf.sprintf "%04x" id + | None -> "") + nvidia_id.vdev_id + nvidia_id.vsubdev_id + | GVT_g gvt_g_id -> + Printf.sprintf "gvt-g,%04x,%Lx,%Lx,%Lx,%s" + gvt_g_id.pdev_id + gvt_g_id.low_gm_sz + gvt_g_id.high_gm_sz + gvt_g_id.fence_sz + (match gvt_g_id.monitor_config_file with + | Some path -> path + | None -> "") + in + Printf.sprintf "%04d:%s" version data - let to_implementation : (t -> API.vgpu_type_implementation) = function - | Passthrough -> `passthrough - | Nvidia _ -> `nvidia - | GVT_g _ -> `gvt_g + let to_implementation : (t -> API.vgpu_type_implementation) = function + | Passthrough -> `passthrough + | Nvidia _ -> `nvidia + | GVT_g _ -> `gvt_g end type vgpu_type = { - vendor_name : string; - model_name : string; - framebuffer_size : int64; - max_heads : int64; - max_resolution_x : int64; - max_resolution_y : int64; - size : int64; - internal_config : (string * string) list; - identifier : Identifier.t; - experimental : bool; + vendor_name : 
string; + model_name : string; + framebuffer_size : int64; + max_heads : int64; + max_resolution_x : int64; + max_resolution_y : int64; + size : int64; + internal_config : (string * string) list; + identifier : Identifier.t; + experimental : bool; } let passthrough_gpu = { - vendor_name = ""; - model_name = "passthrough"; - framebuffer_size = 0L; - max_heads = 0L; - max_resolution_x = 0L; - max_resolution_y = 0L; - size = 0L; - internal_config = []; - identifier = Identifier.Passthrough; - experimental = false; + vendor_name = ""; + model_name = "passthrough"; + framebuffer_size = 0L; + max_heads = 0L; + max_resolution_x = 0L; + max_resolution_y = 0L; + size = 0L; + internal_config = []; + identifier = Identifier.Passthrough; + experimental = false; } let create ~__context ~vendor_name ~model_name ~framebuffer_size ~max_heads - ~max_resolution_x ~max_resolution_y ~size ~internal_config ~implementation - ~identifier ~experimental = - let ref = Ref.make () in - let uuid = Uuidm.to_string (Uuidm.create `V4) in - Db.VGPU_type.create ~__context ~ref ~uuid ~vendor_name ~model_name - ~framebuffer_size ~max_heads ~max_resolution_x ~max_resolution_y - ~size ~internal_config ~implementation ~identifier ~experimental; - debug "VGPU_type ref='%s' created (vendor_name = '%s'; model_name = '%s')" - (Ref.string_of ref) vendor_name model_name; - ref + ~max_resolution_x ~max_resolution_y ~size ~internal_config ~implementation + ~identifier ~experimental = + let ref = Ref.make () in + let uuid = Uuidm.to_string (Uuidm.create `V4) in + Db.VGPU_type.create ~__context ~ref ~uuid ~vendor_name ~model_name + ~framebuffer_size ~max_heads ~max_resolution_x ~max_resolution_y + ~size ~internal_config ~implementation ~identifier ~experimental; + debug "VGPU_type ref='%s' created (vendor_name = '%s'; model_name = '%s')" + (Ref.string_of ref) vendor_name model_name; + ref let find_and_update ~__context vgpu_type = - let identifier_string = Identifier.to_string vgpu_type.identifier in - let fail 
() = - failwith "Error: Multiple vGPU types exist with the same configuration." in - let open Db_filter_types in - let new_expr = Eq (Field "identifier", Literal identifier_string) in - let old_expr = And - ((Eq (Field "vendor_name", Literal vgpu_type.vendor_name), - (Eq (Field "model_name", Literal vgpu_type.model_name)))) - in - (* First try to look up by identifier. *) - match Db.VGPU_type.get_internal_records_where ~__context ~expr:new_expr with - | [vgpu_type_ref, rc] -> begin - (* If looking up by identifier succeeds, update that VGPU_type's vendor_name - * and model_name. *) - if vgpu_type.vendor_name <> rc.Db_actions.vGPU_type_vendor_name then - Db.VGPU_type.set_vendor_name ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.vendor_name; - if vgpu_type.model_name <> rc.Db_actions.vGPU_type_model_name then - Db.VGPU_type.set_model_name ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.model_name; - let new_rc = Db_actions.({rc with - vGPU_type_vendor_name = vgpu_type.vendor_name; - vGPU_type_model_name = vgpu_type.model_name; - }) in - Some (vgpu_type_ref, new_rc) - end - | [] -> begin - (* If looking up by identifier fails, try to the old method (vendor name - * and model name). If this finds a VGPU_type, update its identifier - * field. *) - match Db.VGPU_type.get_internal_records_where ~__context ~expr:old_expr with - | [vgpu_type_ref, rc] -> begin - Db.VGPU_type.set_identifier ~__context - ~self:vgpu_type_ref - ~value:identifier_string; - let new_rc = {rc with - Db_actions.vGPU_type_identifier = identifier_string; - } in - Some (vgpu_type_ref, new_rc) - end - | [] -> None - | _ -> fail () - end - | _ -> fail () + let identifier_string = Identifier.to_string vgpu_type.identifier in + let fail () = + failwith "Error: Multiple vGPU types exist with the same configuration." 
in + let open Db_filter_types in + let new_expr = Eq (Field "identifier", Literal identifier_string) in + let old_expr = And + ((Eq (Field "vendor_name", Literal vgpu_type.vendor_name), + (Eq (Field "model_name", Literal vgpu_type.model_name)))) + in + (* First try to look up by identifier. *) + match Db.VGPU_type.get_internal_records_where ~__context ~expr:new_expr with + | [vgpu_type_ref, rc] -> begin + (* If looking up by identifier succeeds, update that VGPU_type's vendor_name + * and model_name. *) + if vgpu_type.vendor_name <> rc.Db_actions.vGPU_type_vendor_name then + Db.VGPU_type.set_vendor_name ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.vendor_name; + if vgpu_type.model_name <> rc.Db_actions.vGPU_type_model_name then + Db.VGPU_type.set_model_name ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.model_name; + let new_rc = Db_actions.({rc with + vGPU_type_vendor_name = vgpu_type.vendor_name; + vGPU_type_model_name = vgpu_type.model_name; + }) in + Some (vgpu_type_ref, new_rc) + end + | [] -> begin + (* If looking up by identifier fails, try to the old method (vendor name + * and model name). If this finds a VGPU_type, update its identifier + * field. *) + match Db.VGPU_type.get_internal_records_where ~__context ~expr:old_expr with + | [vgpu_type_ref, rc] -> begin + Db.VGPU_type.set_identifier ~__context + ~self:vgpu_type_ref + ~value:identifier_string; + let new_rc = {rc with + Db_actions.vGPU_type_identifier = identifier_string; + } in + Some (vgpu_type_ref, new_rc) + end + | [] -> None + | _ -> fail () + end + | _ -> fail () let find_or_create ~__context vgpu_type = - let implementation = Identifier.to_implementation vgpu_type.identifier in - match (find_and_update ~__context vgpu_type) with - | Some (vgpu_type_ref, rc) -> - (* Update anything about the VGPU type which might have changed since we - * last read the config file. 
*) - if vgpu_type.framebuffer_size <> rc.Db_actions.vGPU_type_framebuffer_size then - Db.VGPU_type.set_framebuffer_size ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.framebuffer_size; - if vgpu_type.max_heads <> rc.Db_actions.vGPU_type_max_heads then - Db.VGPU_type.set_max_heads ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.max_heads; - if vgpu_type.max_resolution_x <> rc.Db_actions.vGPU_type_max_resolution_x then - Db.VGPU_type.set_max_resolution_x ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.max_resolution_x; - if vgpu_type.max_resolution_y <> rc.Db_actions.vGPU_type_max_resolution_x then - Db.VGPU_type.set_max_resolution_y ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.max_resolution_y; - if vgpu_type.size <> rc.Db_actions.vGPU_type_size then - Db.VGPU_type.set_size ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.size; - if vgpu_type.internal_config <> rc.Db_actions.vGPU_type_internal_config then - Db.VGPU_type.set_internal_config ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.internal_config; - if implementation <> rc.Db_actions.vGPU_type_implementation then - Db.VGPU_type.set_implementation ~__context - ~self:vgpu_type_ref - ~value:implementation; - if vgpu_type.experimental <> rc.Db_actions.vGPU_type_experimental then - Db.VGPU_type.set_experimental ~__context - ~self:vgpu_type_ref - ~value:vgpu_type.experimental; - vgpu_type_ref - | None -> - create ~__context ~vendor_name:vgpu_type.vendor_name - ~model_name:vgpu_type.model_name - ~framebuffer_size:vgpu_type.framebuffer_size - ~max_heads:vgpu_type.max_heads - ~max_resolution_x:vgpu_type.max_resolution_x - ~max_resolution_y:vgpu_type.max_resolution_y - ~size:vgpu_type.size - ~internal_config:vgpu_type.internal_config - ~implementation - ~identifier:(Identifier.to_string vgpu_type.identifier) - ~experimental:vgpu_type.experimental + let implementation = Identifier.to_implementation vgpu_type.identifier in + match (find_and_update ~__context vgpu_type) with + | Some 
(vgpu_type_ref, rc) -> + (* Update anything about the VGPU type which might have changed since we + * last read the config file. *) + if vgpu_type.framebuffer_size <> rc.Db_actions.vGPU_type_framebuffer_size then + Db.VGPU_type.set_framebuffer_size ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.framebuffer_size; + if vgpu_type.max_heads <> rc.Db_actions.vGPU_type_max_heads then + Db.VGPU_type.set_max_heads ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.max_heads; + if vgpu_type.max_resolution_x <> rc.Db_actions.vGPU_type_max_resolution_x then + Db.VGPU_type.set_max_resolution_x ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.max_resolution_x; + if vgpu_type.max_resolution_y <> rc.Db_actions.vGPU_type_max_resolution_x then + Db.VGPU_type.set_max_resolution_y ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.max_resolution_y; + if vgpu_type.size <> rc.Db_actions.vGPU_type_size then + Db.VGPU_type.set_size ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.size; + if vgpu_type.internal_config <> rc.Db_actions.vGPU_type_internal_config then + Db.VGPU_type.set_internal_config ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.internal_config; + if implementation <> rc.Db_actions.vGPU_type_implementation then + Db.VGPU_type.set_implementation ~__context + ~self:vgpu_type_ref + ~value:implementation; + if vgpu_type.experimental <> rc.Db_actions.vGPU_type_experimental then + Db.VGPU_type.set_experimental ~__context + ~self:vgpu_type_ref + ~value:vgpu_type.experimental; + vgpu_type_ref + | None -> + create ~__context ~vendor_name:vgpu_type.vendor_name + ~model_name:vgpu_type.model_name + ~framebuffer_size:vgpu_type.framebuffer_size + ~max_heads:vgpu_type.max_heads + ~max_resolution_x:vgpu_type.max_resolution_x + ~max_resolution_y:vgpu_type.max_resolution_y + ~size:vgpu_type.size + ~internal_config:vgpu_type.internal_config + ~implementation + ~identifier:(Identifier.to_string vgpu_type.identifier) + ~experimental:vgpu_type.experimental module Nvidia = 
struct - let nvidia_conf_dir = "/usr/share/nvidia/vgx" - let nvidia_vendor_id = 0x10de + let nvidia_conf_dir = "/usr/share/nvidia/vgx" + let nvidia_vendor_id = 0x10de - type vgpu_conf = { - identifier : Identifier.nvidia_id; - framebufferlength : int64; - num_heads : int64; - max_instance : int64; - max_x : int64; - max_y : int64; - file_path : string; - } + type vgpu_conf = { + identifier : Identifier.nvidia_id; + framebufferlength : int64; + num_heads : int64; + max_instance : int64; + max_x : int64; + max_y : int64; + file_path : string; + } - let of_conf_file file_path = - try - let conf = Stdext.Unixext.read_lines file_path in - let args = List.filter - (fun s -> not (String.startswith "#" s || s = "")) conf in - let args = List.map (String.strip String.isspace) args in - (* Expecting space separated key value entries *) - let args = List.map - (fun s -> - match (String.split ' ' s ~limit:2) with - | k :: [v] -> (k, v) - | _ -> ("", "") - ) args in - (* plugin0.pdev_id will either be just the physical device id, or of the - * form "device_id:subdevice_id" *) - let pdev_id, psubdev_id = - let pdev_id_data = (List.assoc "plugin0.pdev_id" args) in - try - Scanf.sscanf pdev_id_data "\"0x%x:0x%x\"" - (fun pdev_id psubdev_id -> pdev_id, Some psubdev_id) - with Scanf.Scan_failure _ -> - Scanf.sscanf pdev_id_data "\"0x%x\"" - (fun pdev_id -> pdev_id, None) - in - (* NVIDIA key is "device_id:subdevice_id", N.B. 
not subvendor id *) - Scanf.sscanf (List.assoc "plugin0.vdev_id" args) "\"0x%x:0x%x\"" (fun vdev_id vsubdev_id -> - Scanf.sscanf (List.assoc "plugin0.max_resolution" args) "%Ldx%Ld" (fun max_x max_y -> - let framebufferlength = Int64.of_string - (List.assoc "plugin0.framebufferlength" args) in - let num_heads = Int64.of_string - (List.assoc "plugin0.num_heads" args) in - let max_instance = Int64.of_string - (List.assoc "plugin0.max_instance" args) in - let identifier = Identifier.({ - pdev_id; - psubdev_id; - vdev_id; - vsubdev_id; - }) in - {identifier; framebufferlength; - num_heads; max_instance; max_x; max_y; file_path} - ) - ) - with e -> - raise (Parse_error e) + let of_conf_file file_path = + try + let conf = Stdext.Unixext.read_lines file_path in + let args = List.filter + (fun s -> not (String.startswith "#" s || s = "")) conf in + let args = List.map (String.strip String.isspace) args in + (* Expecting space separated key value entries *) + let args = List.map + (fun s -> + match (String.split ' ' s ~limit:2) with + | k :: [v] -> (k, v) + | _ -> ("", "") + ) args in + (* plugin0.pdev_id will either be just the physical device id, or of the + * form "device_id:subdevice_id" *) + let pdev_id, psubdev_id = + let pdev_id_data = (List.assoc "plugin0.pdev_id" args) in + try + Scanf.sscanf pdev_id_data "\"0x%x:0x%x\"" + (fun pdev_id psubdev_id -> pdev_id, Some psubdev_id) + with Scanf.Scan_failure _ -> + Scanf.sscanf pdev_id_data "\"0x%x\"" + (fun pdev_id -> pdev_id, None) + in + (* NVIDIA key is "device_id:subdevice_id", N.B. 
not subvendor id *) + Scanf.sscanf (List.assoc "plugin0.vdev_id" args) "\"0x%x:0x%x\"" (fun vdev_id vsubdev_id -> + Scanf.sscanf (List.assoc "plugin0.max_resolution" args) "%Ldx%Ld" (fun max_x max_y -> + let framebufferlength = Int64.of_string + (List.assoc "plugin0.framebufferlength" args) in + let num_heads = Int64.of_string + (List.assoc "plugin0.num_heads" args) in + let max_instance = Int64.of_string + (List.assoc "plugin0.max_instance" args) in + let identifier = Identifier.({ + pdev_id; + psubdev_id; + vdev_id; + vsubdev_id; + }) in + {identifier; framebufferlength; + num_heads; max_instance; max_x; max_y; file_path} + ) + ) + with e -> + raise (Parse_error e) - let read_config_dir conf_dir = - let rec read_configs ac = function - | [] -> ac - | conf_file::tl -> - try - read_configs (of_conf_file conf_file :: ac) tl - with Parse_error e -> - error "Ignoring error parsing %s: %s\n%s\n" conf_file - (Printexc.to_string e) (Printexc.get_backtrace ()); - read_configs ac tl - in - let conf_files = Array.to_list (Sys.readdir conf_dir) in - debug "Reading NVIDIA vGPU config files %s/{%s}" - conf_dir (String.concat ", " conf_files); - read_configs [] - (List.map (fun conf -> String.concat "/" [conf_dir; conf]) conf_files) + let read_config_dir conf_dir = + let rec read_configs ac = function + | [] -> ac + | conf_file::tl -> + try + read_configs (of_conf_file conf_file :: ac) tl + with Parse_error e -> + error "Ignoring error parsing %s: %s\n%s\n" conf_file + (Printexc.to_string e) (Printexc.get_backtrace ()); + read_configs ac tl + in + let conf_files = Array.to_list (Sys.readdir conf_dir) in + debug "Reading NVIDIA vGPU config files %s/{%s}" + conf_dir (String.concat ", " conf_files); + read_configs [] + (List.map (fun conf -> String.concat "/" [conf_dir; conf]) conf_files) - let relevant_vgpu_types pci_dev_id subsystem_device_id = - let open Identifier in - let vgpu_confs = try read_config_dir nvidia_conf_dir with _ -> [] in - let relevant_vgpu_confs = - 
List.filter - (fun c -> - let device_id_matches = (c.identifier.pdev_id = pci_dev_id) in - let subsystem_device_id_matches = - (* If the config file doesn't specify a physical subdevice ID, then - * the config file is valid for this device no matter the device's - * subsystem device ID. - * - * If the config file does specify a physical subdevice ID, then the - * corresponding ID of the card must match. *) - match subsystem_device_id, c.identifier.psubdev_id with - | _, None -> true - | None, Some _ -> false - | Some device_id, Some conf_id -> device_id = conf_id - in - device_id_matches && subsystem_device_id_matches) - vgpu_confs - in - debug "Relevant confs = [ %s ]" - (String.concat "; " (List.map (fun c -> - Printf.sprintf - "{pdev_id:%04x; psubdev_id:%s; vdev_id:%04x; vsubdev_id:%04x; framebufferlength:0x%Lx}" - c.identifier.pdev_id - (match c.identifier.psubdev_id with - | None -> "Any" - | Some id -> Printf.sprintf "%04x" id) - c.identifier.vdev_id - c.identifier.vsubdev_id - c.framebufferlength) - relevant_vgpu_confs)); - let rec build_vgpu_types pci_access ac = function - | [] -> ac - | conf::tl -> - debug "Pci.lookup_subsystem_device_name: vendor=%04x device=%04x subdev=%04x" - nvidia_vendor_id conf.identifier.vdev_id conf.identifier.vsubdev_id; - let vendor_name = Pci.lookup_vendor_name pci_access nvidia_vendor_id - and model_name = - Pci.lookup_subsystem_device_name pci_access nvidia_vendor_id - conf.identifier.vdev_id nvidia_vendor_id conf.identifier.vsubdev_id - and framebuffer_size = conf.framebufferlength - and max_heads = conf.num_heads - and max_resolution_x = conf.max_x - and max_resolution_y = conf.max_y - and size = Int64.div Constants.pgpu_default_size conf.max_instance - and internal_config = [Xapi_globs.vgpu_config_key, conf.file_path] - and identifier = Nvidia conf.identifier - and experimental = false in - let vgpu_type = { - vendor_name; model_name; framebuffer_size; max_heads; - max_resolution_x; max_resolution_y; size; internal_config; 
- identifier; experimental} - in - build_vgpu_types pci_access (vgpu_type :: ac) tl - in - Pci.with_access (fun a -> build_vgpu_types a [] relevant_vgpu_confs) + let relevant_vgpu_types pci_dev_id subsystem_device_id = + let open Identifier in + let vgpu_confs = try read_config_dir nvidia_conf_dir with _ -> [] in + let relevant_vgpu_confs = + List.filter + (fun c -> + let device_id_matches = (c.identifier.pdev_id = pci_dev_id) in + let subsystem_device_id_matches = + (* If the config file doesn't specify a physical subdevice ID, then + * the config file is valid for this device no matter the device's + * subsystem device ID. + * + * If the config file does specify a physical subdevice ID, then the + * corresponding ID of the card must match. *) + match subsystem_device_id, c.identifier.psubdev_id with + | _, None -> true + | None, Some _ -> false + | Some device_id, Some conf_id -> device_id = conf_id + in + device_id_matches && subsystem_device_id_matches) + vgpu_confs + in + debug "Relevant confs = [ %s ]" + (String.concat "; " (List.map (fun c -> + Printf.sprintf + "{pdev_id:%04x; psubdev_id:%s; vdev_id:%04x; vsubdev_id:%04x; framebufferlength:0x%Lx}" + c.identifier.pdev_id + (match c.identifier.psubdev_id with + | None -> "Any" + | Some id -> Printf.sprintf "%04x" id) + c.identifier.vdev_id + c.identifier.vsubdev_id + c.framebufferlength) + relevant_vgpu_confs)); + let rec build_vgpu_types pci_access ac = function + | [] -> ac + | conf::tl -> + debug "Pci.lookup_subsystem_device_name: vendor=%04x device=%04x subdev=%04x" + nvidia_vendor_id conf.identifier.vdev_id conf.identifier.vsubdev_id; + let vendor_name = Pci.lookup_vendor_name pci_access nvidia_vendor_id + and model_name = + Pci.lookup_subsystem_device_name pci_access nvidia_vendor_id + conf.identifier.vdev_id nvidia_vendor_id conf.identifier.vsubdev_id + and framebuffer_size = conf.framebufferlength + and max_heads = conf.num_heads + and max_resolution_x = conf.max_x + and max_resolution_y = conf.max_y + 
and size = Int64.div Constants.pgpu_default_size conf.max_instance + and internal_config = [Xapi_globs.vgpu_config_key, conf.file_path] + and identifier = Nvidia conf.identifier + and experimental = false in + let vgpu_type = { + vendor_name; model_name; framebuffer_size; max_heads; + max_resolution_x; max_resolution_y; size; internal_config; + identifier; experimental} + in + build_vgpu_types pci_access (vgpu_type :: ac) tl + in + Pci.with_access (fun a -> build_vgpu_types a [] relevant_vgpu_confs) - let find_or_create_supported_types ~__context ~pci = - let dev_id = Xapi_pci.int_of_id (Db.PCI.get_device_id ~__context ~self:pci) in - let subsystem_dev_id = - match Db.PCI.get_subsystem_device_id ~__context ~self:pci with - | "" -> None - | id_string -> Some (Xapi_pci.int_of_id id_string) - in - debug "dev_id = %s" (Printf.sprintf "%04x" dev_id); - let relevant_types = relevant_vgpu_types dev_id subsystem_dev_id in - debug "Relevant vGPU configurations for pgpu = [ %s ]" - (String.concat "; " - (List.map (fun vt -> vt.model_name) relevant_types)); - let vgpu_types = List.map - (fun v -> find_or_create ~__context v) relevant_types in - let passthrough_gpu_type = find_or_create ~__context passthrough_gpu in - passthrough_gpu_type :: vgpu_types + let find_or_create_supported_types ~__context ~pci = + let dev_id = Xapi_pci.int_of_id (Db.PCI.get_device_id ~__context ~self:pci) in + let subsystem_dev_id = + match Db.PCI.get_subsystem_device_id ~__context ~self:pci with + | "" -> None + | id_string -> Some (Xapi_pci.int_of_id id_string) + in + debug "dev_id = %s" (Printf.sprintf "%04x" dev_id); + let relevant_types = relevant_vgpu_types dev_id subsystem_dev_id in + debug "Relevant vGPU configurations for pgpu = [ %s ]" + (String.concat "; " + (List.map (fun vt -> vt.model_name) relevant_types)); + let vgpu_types = List.map + (fun v -> find_or_create ~__context v) relevant_types in + let passthrough_gpu_type = find_or_create ~__context passthrough_gpu in + 
passthrough_gpu_type :: vgpu_types end module Intel = struct - let intel_vendor_id = 0x8086 + let intel_vendor_id = 0x8086 - let ( *** ) = Int64.mul - let ( /// ) = Int64.div - let ( +++ ) = Int64.add - let ( --- ) = Int64.sub - let mib x = List.fold_left Int64.mul x [1024L; 1024L] + let ( *** ) = Int64.mul + let ( /// ) = Int64.div + let ( +++ ) = Int64.add + let ( --- ) = Int64.sub + let mib x = List.fold_left Int64.mul x [1024L; 1024L] - type vgpu_conf = { - identifier : Identifier.gvt_g_id; - experimental : bool; - model_name : string; - framebufferlength : int64; - num_heads : int64; - max_x : int64; - max_y : int64; - } + type vgpu_conf = { + identifier : Identifier.gvt_g_id; + experimental : bool; + model_name : string; + framebufferlength : int64; + num_heads : int64; + max_x : int64; + max_y : int64; + } - let read_whitelist_line ~line = - try - Some (Scanf.sscanf - line - "%04x experimental=%c name='%s@' low_gm_sz=%Ld high_gm_sz=%Ld fence_sz=%Ld framebuffer_sz=%Ld max_heads=%Ld resolution=%Ldx%Ld monitor_config_file=%s" - (fun pdev_id - experimental - model_name - low_gm_sz - high_gm_sz - fence_sz - framebuffer_sz - num_heads - max_x - max_y - monitor_config_file -> - { - identifier = Identifier.({ - pdev_id; - low_gm_sz; - high_gm_sz; - fence_sz; - monitor_config_file = Some monitor_config_file; - }); - experimental = - (match experimental with - | '0' -> false - | _ -> true); - model_name; - framebufferlength = mib framebuffer_sz; - num_heads; - max_x; - max_y; - })) - with e-> begin - error "Failed to read whitelist line: '%s' %s" - line (Printexc.to_string e); - None - end + let read_whitelist_line ~line = + try + Some (Scanf.sscanf + line + "%04x experimental=%c name='%s@' low_gm_sz=%Ld high_gm_sz=%Ld fence_sz=%Ld framebuffer_sz=%Ld max_heads=%Ld resolution=%Ldx%Ld monitor_config_file=%s" + (fun pdev_id + experimental + model_name + low_gm_sz + high_gm_sz + fence_sz + framebuffer_sz + num_heads + max_x + max_y + monitor_config_file -> + { + 
identifier = Identifier.({ + pdev_id; + low_gm_sz; + high_gm_sz; + fence_sz; + monitor_config_file = Some monitor_config_file; + }); + experimental = + (match experimental with + | '0' -> false + | _ -> true); + model_name; + framebufferlength = mib framebuffer_sz; + num_heads; + max_x; + max_y; + })) + with e-> begin + error "Failed to read whitelist line: '%s' %s" + line (Printexc.to_string e); + None + end - let read_whitelist ~whitelist ~device_id = - if Sys.file_exists whitelist then begin - Stdext.Unixext.file_lines_fold - Identifier.(fun acc line -> - match read_whitelist_line ~line with - | Some conf when conf.identifier.pdev_id = device_id -> conf :: acc - | _ -> acc) - [] - whitelist - end else [] + let read_whitelist ~whitelist ~device_id = + if Sys.file_exists whitelist then begin + Stdext.Unixext.file_lines_fold + Identifier.(fun acc line -> + match read_whitelist_line ~line with + | Some conf when conf.identifier.pdev_id = device_id -> conf :: acc + | _ -> acc) + [] + whitelist + end else [] - let make_vgpu_types ~__context ~pci ~whitelist = - let open Xenops_interface.Pci in - let device_id = - Db.PCI.get_device_id ~__context ~self:pci - |> Xapi_pci.int_of_id - in - let address = - Db.PCI.get_pci_id ~__context ~self:pci - |> address_of_string - in - let whitelist = read_whitelist ~whitelist ~device_id in - let vendor_name, device = - Pci.(with_access (fun access -> - let vendor_name = lookup_vendor_name access intel_vendor_id in - let device = - List.find - (fun device -> - (device.Pci_dev.domain = address.domain) && - (device.Pci_dev.bus = address.bus) && - (device.Pci_dev.dev = address.dev) && - (device.Pci_dev.func = address.fn)) - (get_devices access) - in - vendor_name, device)) - in - let bar_size = - List.nth device.Pci.Pci_dev.size 2 - |> Int64.of_nativeint - in - List.map - Identifier.(fun conf -> - let vgpus_per_pgpu = - bar_size /// 1024L /// 1024L - /// conf.identifier.low_gm_sz - --- 1L - in - let vgpu_size = Constants.pgpu_default_size 
/// vgpus_per_pgpu in - { - vendor_name; - model_name = conf.model_name; - framebuffer_size = conf.framebufferlength; - max_heads = conf.num_heads; - max_resolution_x = conf.max_x; - max_resolution_y = conf.max_y; - size = vgpu_size; - internal_config = [ - Xapi_globs.vgt_low_gm_sz, Int64.to_string conf.identifier.low_gm_sz; - Xapi_globs.vgt_high_gm_sz, Int64.to_string conf.identifier.high_gm_sz; - Xapi_globs.vgt_fence_sz, Int64.to_string conf.identifier.fence_sz; - ] @ ( - match conf.identifier.monitor_config_file with - | Some monitor_config_file -> - [Xapi_globs.vgt_monitor_config_file, monitor_config_file] - | None -> [] - ); - identifier = GVT_g conf.identifier; - experimental = conf.experimental; - }) - whitelist + let make_vgpu_types ~__context ~pci ~whitelist = + let open Xenops_interface.Pci in + let device_id = + Db.PCI.get_device_id ~__context ~self:pci + |> Xapi_pci.int_of_id + in + let address = + Db.PCI.get_pci_id ~__context ~self:pci + |> address_of_string + in + let whitelist = read_whitelist ~whitelist ~device_id in + let vendor_name, device = + Pci.(with_access (fun access -> + let vendor_name = lookup_vendor_name access intel_vendor_id in + let device = + List.find + (fun device -> + (device.Pci_dev.domain = address.domain) && + (device.Pci_dev.bus = address.bus) && + (device.Pci_dev.dev = address.dev) && + (device.Pci_dev.func = address.fn)) + (get_devices access) + in + vendor_name, device)) + in + let bar_size = + List.nth device.Pci.Pci_dev.size 2 + |> Int64.of_nativeint + in + List.map + Identifier.(fun conf -> + let vgpus_per_pgpu = + bar_size /// 1024L /// 1024L + /// conf.identifier.low_gm_sz + --- 1L + in + let vgpu_size = Constants.pgpu_default_size /// vgpus_per_pgpu in + { + vendor_name; + model_name = conf.model_name; + framebuffer_size = conf.framebufferlength; + max_heads = conf.num_heads; + max_resolution_x = conf.max_x; + max_resolution_y = conf.max_y; + size = vgpu_size; + internal_config = [ + Xapi_globs.vgt_low_gm_sz, 
Int64.to_string conf.identifier.low_gm_sz; + Xapi_globs.vgt_high_gm_sz, Int64.to_string conf.identifier.high_gm_sz; + Xapi_globs.vgt_fence_sz, Int64.to_string conf.identifier.fence_sz; + ] @ ( + match conf.identifier.monitor_config_file with + | Some monitor_config_file -> + [Xapi_globs.vgt_monitor_config_file, monitor_config_file] + | None -> [] + ); + identifier = GVT_g conf.identifier; + experimental = conf.experimental; + }) + whitelist - let find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden = - let types = - let passthrough_types = - if is_system_display_device && (is_host_display_enabled || not is_pci_hidden) - then [] - else [passthrough_gpu] - in - passthrough_types @ - (make_vgpu_types ~__context ~pci ~whitelist:!Xapi_globs.gvt_g_whitelist) - in - List.map (find_or_create ~__context) types + let find_or_create_supported_types ~__context ~pci + ~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden = + let types = + let passthrough_types = + if is_system_display_device && (is_host_display_enabled || not is_pci_hidden) + then [] + else [passthrough_gpu] + in + passthrough_types @ + (make_vgpu_types ~__context ~pci ~whitelist:!Xapi_globs.gvt_g_whitelist) + in + List.map (find_or_create ~__context) types end let find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden = - let vendor_id = - Db.PCI.get_vendor_id ~__context ~self:pci - |> Xapi_pci.int_of_id - in - if vendor_id = Nvidia.nvidia_vendor_id - then begin - if is_system_display_device then [] - else Nvidia.find_or_create_supported_types ~__context ~pci - end - else if vendor_id = Intel.intel_vendor_id - then - Intel.find_or_create_supported_types ~__context ~pci - ~is_system_display_device - ~is_host_display_enabled - ~is_pci_hidden - else begin - if is_system_display_device then [] - else [find_or_create ~__context passthrough_gpu] - end + 
~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden = + let vendor_id = + Db.PCI.get_vendor_id ~__context ~self:pci + |> Xapi_pci.int_of_id + in + if vendor_id = Nvidia.nvidia_vendor_id + then begin + if is_system_display_device then [] + else Nvidia.find_or_create_supported_types ~__context ~pci + end + else if vendor_id = Intel.intel_vendor_id + then + Intel.find_or_create_supported_types ~__context ~pci + ~is_system_display_device + ~is_host_display_enabled + ~is_pci_hidden + else begin + if is_system_display_device then [] + else [find_or_create ~__context passthrough_gpu] + end let requires_passthrough ~__context ~self = - Db.VGPU_type.get_implementation ~__context ~self = `passthrough + Db.VGPU_type.get_implementation ~__context ~self = `passthrough diff --git a/ocaml/xapi/xapi_vif.ml b/ocaml/xapi/xapi_vif.ml index e62a8981677..b2d854f4d9b 100644 --- a/ocaml/xapi/xapi_vif.ml +++ b/ocaml/xapi/xapi_vif.ml @@ -18,174 +18,174 @@ open Xapi_vif_helpers module D = Debug.Make(struct let name="xapi" end) open D -let assert_operation_valid ~__context ~self ~(op:API.vif_operations) = +let assert_operation_valid ~__context ~self ~(op:API.vif_operations) = assert_operation_valid ~__context ~self ~op let update_allowed_operations ~__context ~self : unit = update_allowed_operations ~__context ~self let plug ~__context ~self = - Xapi_xenops.vif_plug ~__context ~self + Xapi_xenops.vif_plug ~__context ~self let unplug ~__context ~self = - Xapi_xenops.vif_unplug ~__context ~self false + Xapi_xenops.vif_unplug ~__context ~self false let unplug_force ~__context ~self = - Xapi_xenops.vif_unplug ~__context ~self true + Xapi_xenops.vif_unplug ~__context ~self true let create ~__context ~device ~network ~vM - ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params ~locking_mode ~ipv4_allowed ~ipv6_allowed : API.ref_VIF = + ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params ~locking_mode ~ipv4_allowed ~ipv6_allowed : API.ref_VIF = create 
~__context ~device ~network ~vM ~currently_attached:false ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params ~locking_mode ~ipv4_allowed ~ipv6_allowed ~ipv4_configuration_mode:`None ~ipv4_addresses:[] ~ipv4_gateway:"" - ~ipv6_configuration_mode:`None ~ipv6_addresses:[] ~ipv6_gateway:"" + ~ipv6_configuration_mode:`None ~ipv6_addresses:[] ~ipv6_gateway:"" let destroy ~__context ~self = destroy ~__context ~self let device_active ~__context ~self = - let vif_rec = Db.VIF.get_record ~__context ~self in - let vm_rec = Db.VM.get_record ~__context ~self:vif_rec.API.vIF_VM in - let attached = vif_rec.API.vIF_currently_attached in - let suspended = vm_rec.API.vM_power_state = `Suspended in - attached && not suspended + let vif_rec = Db.VIF.get_record ~__context ~self in + let vm_rec = Db.VM.get_record ~__context ~self:vif_rec.API.vIF_VM in + let attached = vif_rec.API.vIF_currently_attached in + let suspended = vm_rec.API.vM_power_state = `Suspended in + attached && not suspended let refresh_filtering_rules ~__context ~self = - if device_active ~__context ~self - then Xapi_xenops.vif_set_locking_mode ~__context ~self + if device_active ~__context ~self + then Xapi_xenops.vif_set_locking_mode ~__context ~self (* This function moves a dom0 vif device from one bridge to another, without involving the guest, * so it also works on guests that do not support hot(un)plug of VIFs. 
*) let move_internal ~__context ~network ?active vif = - debug "Moving VIF %s to network %s" (Db.VIF.get_uuid ~__context ~self:vif) - (Db.Network.get_uuid ~__context ~self:network); - let active = - match active with - | None -> device_active ~__context ~self:vif - | Some x -> x - in - Db.VIF.set_network ~__context ~self:vif ~value:network; - if active - then Xapi_xenops.vif_move ~__context ~self:vif network + debug "Moving VIF %s to network %s" (Db.VIF.get_uuid ~__context ~self:vif) + (Db.Network.get_uuid ~__context ~self:network); + let active = + match active with + | None -> device_active ~__context ~self:vif + | Some x -> x + in + Db.VIF.set_network ~__context ~self:vif ~value:network; + if active + then Xapi_xenops.vif_move ~__context ~self:vif network let move ~__context ~self ~network = - let active = device_active ~__context ~self in - if active - then begin - let vm = Db.VIF.get_VM ~__context ~self in - let host = Db.VM.get_resident_on ~__context ~self:vm in - try Xapi_network_attach_helpers.assert_can_see_named_networks ~__context ~vm:vm ~host:host [network] with - | Api_errors.Server_error (name, _) - when name = Api_errors.vm_requires_net -> - raise (Api_errors.Server_error ( - Api_errors.host_cannot_attach_network, [ - Ref.string_of host; Ref.string_of network ])) - end; - move_internal ~__context ~network ~active self + let active = device_active ~__context ~self in + if active + then begin + let vm = Db.VIF.get_VM ~__context ~self in + let host = Db.VM.get_resident_on ~__context ~self:vm in + try Xapi_network_attach_helpers.assert_can_see_named_networks ~__context ~vm:vm ~host:host [network] with + | Api_errors.Server_error (name, _) + when name = Api_errors.vm_requires_net -> + raise (Api_errors.Server_error ( + Api_errors.host_cannot_attach_network, [ + Ref.string_of host; Ref.string_of network ])) + end; + move_internal ~__context ~network ~active self let change_locking_config ~__context ~self ~licence_check f = - if licence_check then 
assert_locking_licensed ~__context; - f (); - refresh_filtering_rules ~__context ~self + if licence_check then assert_locking_licensed ~__context; + f (); + refresh_filtering_rules ~__context ~self let get_effective_locking_mode ~__context ~self vif_mode : API.vif_locking_mode = - match vif_mode with - | `network_default -> - let network = Db.VIF.get_network ~__context ~self in - Db.Network.get_default_locking_mode ~__context ~self:network - | other -> other + match vif_mode with + | `network_default -> + let network = Db.VIF.get_network ~__context ~self in + Db.Network.get_default_locking_mode ~__context ~self:network + | other -> other let set_locking_mode ~__context ~self ~value = - let effective_locking_mode = get_effective_locking_mode ~__context ~self value in - if effective_locking_mode = `locked then - Helpers.assert_vswitch_controller_not_active ~__context; - change_locking_config ~__context ~self - ~licence_check:(effective_locking_mode = `locked) - (fun () -> Db.VIF.set_locking_mode ~__context ~self ~value) + let effective_locking_mode = get_effective_locking_mode ~__context ~self value in + if effective_locking_mode = `locked then + Helpers.assert_vswitch_controller_not_active ~__context; + change_locking_config ~__context ~self + ~licence_check:(effective_locking_mode = `locked) + (fun () -> Db.VIF.set_locking_mode ~__context ~self ~value) let set_ipv4_allowed ~__context ~self ~value = - let setified_value = List.setify value in - change_locking_config ~__context ~self ~licence_check:(setified_value <> []) - (fun () -> - List.iter (Helpers.assert_is_valid_ip `ipv4 "ipv4_allowed") setified_value; - Db.VIF.set_ipv4_allowed ~__context ~self ~value:setified_value) + let setified_value = List.setify value in + change_locking_config ~__context ~self ~licence_check:(setified_value <> []) + (fun () -> + List.iter (Helpers.assert_is_valid_ip `ipv4 "ipv4_allowed") setified_value; + Db.VIF.set_ipv4_allowed ~__context ~self ~value:setified_value) let 
add_ipv4_allowed ~__context ~self ~value = - change_locking_config ~__context ~self ~licence_check:true - (fun () -> - Helpers.assert_is_valid_ip `ipv4 "ipv4_allowed" value; - Db.VIF.add_ipv4_allowed ~__context ~self ~value) + change_locking_config ~__context ~self ~licence_check:true + (fun () -> + Helpers.assert_is_valid_ip `ipv4 "ipv4_allowed" value; + Db.VIF.add_ipv4_allowed ~__context ~self ~value) let remove_ipv4_allowed ~__context ~self ~value = - change_locking_config ~__context ~self ~licence_check:false - (fun () -> Db.VIF.remove_ipv4_allowed ~__context ~self ~value) + change_locking_config ~__context ~self ~licence_check:false + (fun () -> Db.VIF.remove_ipv4_allowed ~__context ~self ~value) let set_ipv6_allowed ~__context ~self ~value = - let setified_value = List.setify value in - change_locking_config ~__context ~self ~licence_check:(setified_value <> []) - (fun () -> - List.iter (Helpers.assert_is_valid_ip `ipv6 "ipv6_allowed") setified_value; - Db.VIF.set_ipv6_allowed ~__context ~self ~value:setified_value) + let setified_value = List.setify value in + change_locking_config ~__context ~self ~licence_check:(setified_value <> []) + (fun () -> + List.iter (Helpers.assert_is_valid_ip `ipv6 "ipv6_allowed") setified_value; + Db.VIF.set_ipv6_allowed ~__context ~self ~value:setified_value) let add_ipv6_allowed ~__context ~self ~value = - change_locking_config ~__context ~self ~licence_check:true - (fun () -> - Helpers.assert_is_valid_ip `ipv6 "ipv6_allowed" value; - Db.VIF.add_ipv6_allowed ~__context ~self ~value) + change_locking_config ~__context ~self ~licence_check:true + (fun () -> + Helpers.assert_is_valid_ip `ipv6 "ipv6_allowed" value; + Db.VIF.add_ipv6_allowed ~__context ~self ~value) let remove_ipv6_allowed ~__context ~self ~value = - change_locking_config ~__context ~self ~licence_check:false - (fun () -> Db.VIF.remove_ipv6_allowed ~__context ~self ~value) + change_locking_config ~__context ~self ~licence_check:false + (fun () -> 
Db.VIF.remove_ipv6_allowed ~__context ~self ~value) let assert_has_feature_static_ip_setting ~__context ~self = - let feature = "feature-static-ip-setting" in - let vm = Db.VIF.get_VM ~__context ~self in - let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in - try - let other = Db.VM_guest_metrics.get_other ~__context ~self:vm_gm in - if List.assoc feature other <> "1" then - failwith "not found" - with _ -> - raise Api_errors.(Server_error (vm_lacks_feature, [Ref.string_of vm])) + let feature = "feature-static-ip-setting" in + let vm = Db.VIF.get_VM ~__context ~self in + let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in + try + let other = Db.VM_guest_metrics.get_other ~__context ~self:vm_gm in + if List.assoc feature other <> "1" then + failwith "not found" + with _ -> + raise Api_errors.(Server_error (vm_lacks_feature, [Ref.string_of vm])) let assert_no_locking_mode_conflict ~__context ~self kind address = - let vif_locking_mode = Db.VIF.get_locking_mode ~__context ~self in - if get_effective_locking_mode ~__context ~self vif_locking_mode = `locked then - let get = if kind = `ipv4 then Db.VIF.get_ipv4_allowed else Db.VIF.get_ipv6_allowed in - let allowed = get ~__context ~self in - match Helpers.parse_cidr kind address with - | None -> () - | Some (address', _) -> - if not (List.mem address' allowed) then - raise Api_errors.(Server_error (address_violates_locking_constraint, [address])) + let vif_locking_mode = Db.VIF.get_locking_mode ~__context ~self in + if get_effective_locking_mode ~__context ~self vif_locking_mode = `locked then + let get = if kind = `ipv4 then Db.VIF.get_ipv4_allowed else Db.VIF.get_ipv6_allowed in + let allowed = get ~__context ~self in + match Helpers.parse_cidr kind address with + | None -> () + | Some (address', _) -> + if not (List.mem address' allowed) then + raise Api_errors.(Server_error (address_violates_locking_constraint, [address])) let configure_ipv4 ~__context ~self ~mode ~address ~gateway = - if mode = `Static 
then begin - Pool_features.assert_enabled ~__context ~f:Features.Guest_ip_setting; - Helpers.assert_is_valid_cidr `ipv4 "address" address; - assert_no_locking_mode_conflict ~__context ~self `ipv4 address; - if gateway <> "" then - Helpers.assert_is_valid_ip `ipv4 "gateway" gateway; - end; - assert_has_feature_static_ip_setting ~__context ~self; - - Db.VIF.set_ipv4_configuration_mode ~__context ~self ~value:mode; - Db.VIF.set_ipv4_addresses ~__context ~self ~value:[address]; - Db.VIF.set_ipv4_gateway ~__context ~self ~value:gateway; - if device_active ~__context ~self then - Xapi_xenops.vif_set_ipv4_configuration ~__context ~self + if mode = `Static then begin + Pool_features.assert_enabled ~__context ~f:Features.Guest_ip_setting; + Helpers.assert_is_valid_cidr `ipv4 "address" address; + assert_no_locking_mode_conflict ~__context ~self `ipv4 address; + if gateway <> "" then + Helpers.assert_is_valid_ip `ipv4 "gateway" gateway; + end; + assert_has_feature_static_ip_setting ~__context ~self; + + Db.VIF.set_ipv4_configuration_mode ~__context ~self ~value:mode; + Db.VIF.set_ipv4_addresses ~__context ~self ~value:[address]; + Db.VIF.set_ipv4_gateway ~__context ~self ~value:gateway; + if device_active ~__context ~self then + Xapi_xenops.vif_set_ipv4_configuration ~__context ~self let configure_ipv6 ~__context ~self ~mode ~address ~gateway = - if mode = `Static then begin - Pool_features.assert_enabled ~__context ~f:Features.Guest_ip_setting; - Helpers.assert_is_valid_cidr `ipv6 "address" address; - assert_no_locking_mode_conflict ~__context ~self `ipv6 address; - if gateway <> "" then - Helpers.assert_is_valid_ip `ipv6 "gateway" gateway; - end; - assert_has_feature_static_ip_setting ~__context ~self; - - Db.VIF.set_ipv6_configuration_mode ~__context ~self ~value:mode; - Db.VIF.set_ipv6_addresses ~__context ~self ~value:[address]; - Db.VIF.set_ipv6_gateway ~__context ~self ~value:gateway; - if device_active ~__context ~self then - Xapi_xenops.vif_set_ipv6_configuration 
~__context ~self + if mode = `Static then begin + Pool_features.assert_enabled ~__context ~f:Features.Guest_ip_setting; + Helpers.assert_is_valid_cidr `ipv6 "address" address; + assert_no_locking_mode_conflict ~__context ~self `ipv6 address; + if gateway <> "" then + Helpers.assert_is_valid_ip `ipv6 "gateway" gateway; + end; + assert_has_feature_static_ip_setting ~__context ~self; + + Db.VIF.set_ipv6_configuration_mode ~__context ~self ~value:mode; + Db.VIF.set_ipv6_addresses ~__context ~self ~value:[address]; + Db.VIF.set_ipv6_gateway ~__context ~self ~value:gateway; + if device_active ~__context ~self then + Xapi_xenops.vif_set_ipv6_configuration ~__context ~self diff --git a/ocaml/xapi/xapi_vif.mli b/ocaml/xapi/xapi_vif.mli index ba2ac7a6800..6847f6edcba 100644 --- a/ocaml/xapi/xapi_vif.mli +++ b/ocaml/xapi/xapi_vif.mli @@ -13,20 +13,20 @@ *) (** Module that defines API functions for VIF objects * @group Networking - *) +*) (** -A {i VIF} object in the datamodel represents a virtual interface. -{ul -{- A VIF is used by a VM, and appears to a VM as a real network interface. A VIF has a MAC address.} -{- The [VIF.currently_attached] field reflects whether a virtual interface is currently {i plugged} into its VM, meaning it is visible to the VM. - {ul - {- A VIF cannot be [currently_attached] when its VM is halted.} - {- When a VM starts up, its VIFs are automatically attached; when a VM shuts down, VIFs become detached.} - {- A VIF can be hot-plugged or hot-unplugged if its VM is running {i and} the VM has PV-drivers installed.} - }} -{- A VIF can be attached to a Network (bridge) to connect it to a PIF (physical interface).} -} + A {i VIF} object in the datamodel represents a virtual interface. + {ul + {- A VIF is used by a VM, and appears to a VM as a real network interface. A VIF has a MAC address.} + {- The [VIF.currently_attached] field reflects whether a virtual interface is currently {i plugged} into its VM, meaning it is visible to the VM. 
+ {ul + {- A VIF cannot be [currently_attached] when its VM is halted.} + {- When a VM starts up, its VIFs are automatically attached; when a VM shuts down, VIFs become detached.} + {- A VIF can be hot-plugged or hot-unplugged if its VM is running {i and} the VM has PV-drivers installed.} + }} + {- A VIF can be attached to a Network (bridge) to connect it to a PIF (physical interface).} + } *) (** {2 API functions} *) @@ -51,7 +51,7 @@ val create : other_config:(string * string) list -> qos_algorithm_type:string -> qos_algorithm_params:(string * string) list -> - locking_mode:API.vif_locking_mode -> + locking_mode:API.vif_locking_mode -> ipv4_allowed:string list -> ipv6_allowed:string list -> API.ref_VIF @@ -62,24 +62,24 @@ val destroy : __context:Context.t -> self:[ `VIF ] Ref.t -> unit (** Move a VIF to another Network. *) val move_internal : - __context:Context.t -> - network:[ `network ] Ref.t -> - ?active:bool -> - [ `VIF ] Ref.t -> - unit + __context:Context.t -> + network:[ `network ] Ref.t -> + ?active:bool -> + [ `VIF ] Ref.t -> + unit (** Move a VIF to another Network. *) val move : - __context:Context.t -> - self:[ `VIF ] Ref.t -> - network:[ `network ] Ref.t -> - unit + __context:Context.t -> + self:[ `VIF ] Ref.t -> + network:[ `network ] Ref.t -> + unit (** Throw error if the given operation is not in the list of allowed operations. * Implemented by {!Xapi_vif_helpers.assert_operation_valid} *) val assert_operation_valid : __context:Context.t -> self:[ `VIF ] Ref.t -> op:API.vif_operations -> unit - + (** Update the [PIF.allowed_operations] field. * Implemented by {!Xapi_vif_helpers.update_allowed_operations} *) val update_allowed_operations : @@ -88,43 +88,43 @@ val update_allowed_operations : (** Set the locking mode of this VIF. * Update the firewall rules associated with this VIF, if it is plugged. 
*) val set_locking_mode : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:API.vif_locking_mode -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:API.vif_locking_mode -> unit (** Set the list of IPv4 addresses allowed to use this VIF. *) val set_ipv4_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string list -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string list -> unit (** Associate an IPv4 address with this VIF. *) val add_ipv4_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit (** Remove an IPv4 address from this VIF. *) val remove_ipv4_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit (** Set the list of IPv6 addresses allowed to use this VIF. *) val set_ipv6_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string list -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string list -> unit (** Associate an IPv6 address with this VIF. *) val add_ipv6_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit (** Remove an IPv6 address from this VIF. 
*) val remove_ipv6_allowed : - __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit + __context:Context.t -> self:[ `VIF ] Ref.t -> value:string -> unit (** Change the IP configuration of a VIF *) val configure_ipv4 : - __context:Context.t -> - self:[ `VIF ] Ref.t -> - mode:[`None | `Static] -> - address:string -> gateway:string -> unit + __context:Context.t -> + self:[ `VIF ] Ref.t -> + mode:[`None | `Static] -> + address:string -> gateway:string -> unit (** Change the IP configuration of a VIF *) val configure_ipv6 : - __context:Context.t -> - self:[ `VIF ] Ref.t -> - mode:[`None | `Static] -> - address:string -> gateway:string -> unit + __context:Context.t -> + self:[ `VIF ] Ref.t -> + mode:[`None | `Static] -> + address:string -> gateway:string -> unit diff --git a/ocaml/xapi/xapi_vif_helpers.ml b/ocaml/xapi/xapi_vif_helpers.ml index efb5590106b..e8814b778fe 100644 --- a/ocaml/xapi/xapi_vif_helpers.ml +++ b/ocaml/xapi/xapi_vif_helpers.ml @@ -27,7 +27,7 @@ let all_ops : API.vif_operations_set = [ `attach; `plug; `unplug ] type table = (API.vif_operations, ((string * (string list)) option)) Hashtbl.t (** Returns a table of operations -> API error options (None if the operation would be ok) *) -let valid_operations ~__context record _ref' : table = +let valid_operations ~__context record _ref' : table = let _ref = Ref.string_of _ref' in let current_ops = record.Db_actions.vIF_current_operations in (* Policy: @@ -39,18 +39,18 @@ let valid_operations ~__context record _ref' : table = List.iter (fun x -> Hashtbl.replace table x None) all_ops; let set_errors (code: string) (params: string list) (ops: API.vif_operations_set) = List.iter (fun op -> - if Hashtbl.find table op = None - then Hashtbl.replace table op (Some(code, params))) ops in + if Hashtbl.find table op = None + then Hashtbl.replace table op (Some(code, params))) ops in let vm = Db.VIF.get_VM ~__context ~self:_ref' in (* Any current_operations preclude everything else *) if current_ops <> [] 
then begin debug "No operations are valid because current-operations = [ %s ]" - (String.concat "; " - (List.map (fun (task, op) -> task ^ " -> " ^ (vif_operation_to_string op)) current_ops)); + (String.concat "; " + (List.map (fun (task, op) -> task ^ " -> " ^ (vif_operation_to_string op)) current_ops)); let concurrent_op = snd (List.hd current_ops) in - set_errors Api_errors.other_operation_in_progress + set_errors Api_errors.other_operation_in_progress [ "VIF"; _ref; vif_operation_to_string concurrent_op ] all_ops; end; @@ -58,23 +58,23 @@ let valid_operations ~__context record _ref' : table = let power_state = Db.VM.get_power_state ~__context ~self:vm in let plugged = record.Db_actions.vIF_currently_attached || record.Db_actions.vIF_reserved in (match power_state, plugged with - | `Running, true -> set_errors Api_errors.device_already_attached [ _ref ] [ `plug ] - | `Running, false -> set_errors Api_errors.device_already_detached [ _ref ] [ `unplug ] - | _, _ -> - let actual = Record_util.power_to_string power_state in - let expected = Record_util.power_to_string `Running in - set_errors Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; actual ] [ `plug; `unplug ]); + | `Running, true -> set_errors Api_errors.device_already_attached [ _ref ] [ `plug ] + | `Running, false -> set_errors Api_errors.device_already_detached [ _ref ] [ `unplug ] + | _, _ -> + let actual = Record_util.power_to_string power_state in + let expected = Record_util.power_to_string `Running in + set_errors Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; actual ] [ `plug; `unplug ]); (* VIF plug/unplug must fail for current_operations * like [clean_shutdown; hard_shutdown; suspend; pause] on VM *) let vm_current_ops = Db.VM.get_current_operations ~__context ~self:vm in List.iter (fun (task,op) -> - if List.mem op [ `clean_shutdown; `hard_shutdown; `suspend; `pause ] then begin - let current_op_str = "Current operation on VM:" ^ (Ref.string_of vm) ^ " is " - ^ 
(Record_util.vm_operation_to_string op) in - set_errors Api_errors.operation_not_allowed [ current_op_str ] [ `plug; `unplug ] - end - ) vm_current_ops; + if List.mem op [ `clean_shutdown; `hard_shutdown; `suspend; `pause ] then begin + let current_op_str = "Current operation on VM:" ^ (Ref.string_of vm) ^ " is " + ^ (Record_util.vm_operation_to_string op) in + set_errors Api_errors.operation_not_allowed [ current_op_str ] [ `plug; `unplug ] + end + ) vm_current_ops; (* HVM guests MAY support plug/unplug IF they have PV drivers. Assume * all drivers have such support unless they specify that they do not. *) @@ -84,21 +84,21 @@ let valid_operations ~__context record _ref' : table = then ( let fallback () = match Xapi_pv_driver_version.make_error_opt (Xapi_pv_driver_version.of_guest_metrics vm_gmr) vm with - | Some(code, params) -> set_errors code params [ `plug; `unplug ] - | None -> () in + | Some(code, params) -> set_errors code params [ `plug; `unplug ] + | None -> () in match vm_gmr with - | None -> fallback () - | Some gmr -> ( + | None -> fallback () + | Some gmr -> ( match gmr.Db_actions.vM_guest_metrics_can_use_hotplug_vif with - | `yes -> () (* Drivers have made an explicit claim of support. *) - | `no -> set_errors Api_errors.operation_not_allowed ["VM states it does not support VIF hotplug."] [`plug; `unplug] - | `unspecified -> fallback ()) + | `yes -> () (* Drivers have made an explicit claim of support. 
*) + | `no -> set_errors Api_errors.operation_not_allowed ["VM states it does not support VIF hotplug."] [`plug; `unplug] + | `unspecified -> fallback ()) ); table -let throw_error (table: table) op = +let throw_error (table: table) op = if not(Hashtbl.mem table op) then raise (Api_errors.Server_error(Api_errors.internal_error, [ Printf.sprintf "xapi_vif_helpers.assert_operation_valid unknown operation: %s" (vif_operation_to_string op) ])); @@ -106,16 +106,16 @@ let throw_error (table: table) op = | Some (code, params) -> raise (Api_errors.Server_error(code, params)) | None -> () -let assert_operation_valid ~__context ~self ~(op:API.vif_operations) = +let assert_operation_valid ~__context ~self ~(op:API.vif_operations) = let all = Db.VIF.get_record_internal ~__context ~self in let table = valid_operations ~__context all self in throw_error table op -let assert_attachable ~__context ~self = +let assert_attachable ~__context ~self = let all = Db.VIF.get_record_internal ~__context ~self in let table = valid_operations ~__context all self in throw_error table `attach - + let update_allowed_operations ~__context ~self : unit = let all = Db.VIF.get_record_internal ~__context ~self in let valid = valid_operations ~__context all self in @@ -123,7 +123,7 @@ let update_allowed_operations ~__context ~self : unit = Db.VIF.set_allowed_operations ~__context ~self ~value:keys (** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = +let cancel_task ~__context ~self ~task_id = let all = List.map fst (Db.VIF.get_current_operations ~__context ~self) in if List.mem task_id all then begin @@ -136,7 +136,7 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = let set = (fun value -> Db.VIF.set_current_operations ~__context ~self ~value) in Helpers.cancel_tasks ~__context ~ops ~all_tasks_in_db ~task_ids ~set -let clear_current_operations ~__context ~self = +let clear_current_operations ~__context ~self = if 
(Db.VIF.get_current_operations ~__context ~self)<>[] then begin Db.VIF.set_current_operations ~__context ~self ~value:[]; @@ -157,124 +157,124 @@ let gen_mac(dev, seed) = let mac_data_1 = hashed_seed in let mac_data_2 = Digest.string hashed_seed in let take_byte n s = Char.code (String.get s n) in - Record_util.mac_from_int_array - [| take_byte 0 mac_data_1; - take_byte 1 mac_data_1; - take_byte 2 mac_data_1; - take_byte 3 mac_data_1; - take_byte 1 mac_data_2; - take_byte 2 mac_data_2; |] + Record_util.mac_from_int_array + [| take_byte 0 mac_data_1; + take_byte 1 mac_data_1; + take_byte 2 mac_data_1; + take_byte 3 mac_data_1; + take_byte 1 mac_data_2; + take_byte 2 mac_data_2; |] let assert_locking_licensed ~__context = - Pool_features.assert_enabled ~__context ~f:Features.VIF_locking + Pool_features.assert_enabled ~__context ~f:Features.VIF_locking let m = Mutex.create () (* prevents duplicate VIFs being created by accident *) let create ~__context ~device ~network ~vM - ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params - ~currently_attached ~locking_mode ~ipv4_allowed ~ipv6_allowed + ~mAC ~mTU ~other_config ~qos_algorithm_type ~qos_algorithm_params + ~currently_attached ~locking_mode ~ipv4_allowed ~ipv6_allowed + ~ipv4_configuration_mode ~ipv4_addresses ~ipv4_gateway + ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway : API.ref_VIF = + let () = debug "VIF.create running" in + + if locking_mode = `locked || ipv4_allowed <> [] || ipv6_allowed <> [] then + assert_locking_licensed ~__context; + + let uuid = Uuid.make_uuid () in + let ref = Ref.make () in + + let vm_mac_seed = + try Some (List.assoc Xapi_globs.mac_seed (Db.VM.get_other_config ~__context ~self:vM)) with _ -> None in + + if not(valid_device device) + then raise (Api_errors.Server_error (Api_errors.invalid_device,[device])); + + let mAC, mAC_autogenerated = + match vm_mac_seed with + Some seed -> + debug "Found mac_seed on VM: supplied MAC parameter = '%s'" mAC; + if mAC="" then 
gen_mac(int_of_string device, seed), true else mAC, false + | None -> + debug "Did not find mac_seed on VM"; + mAC, false in + + if not (Helpers.is_valid_MAC mAC) then + raise (Api_errors.Server_error (Api_errors.mac_invalid, [mAC])); + + (* Make people aware that non-shared networks being added to VMs makes them not agile *) + let pool = Helpers.get_pool ~__context in + if true + && Db.Pool.get_ha_enabled ~__context ~self:pool + && not(Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) + && Helpers.is_xha_protected ~__context ~self:vM + && not(Agility.is_network_properly_shared ~__context ~self:network) then begin + warn "Creating VIF %s makes VM %s not agile" (Ref.string_of ref) (Ref.string_of vM); + raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) + end; + + (* Check to make sure the device is unique *) + Stdext.Threadext.Mutex.execute m + (fun () -> + let all = Db.VM.get_VIFs ~__context ~self:vM in + let all_devices = List.map (fun self -> Db.VIF.get_device ~__context ~self) all in + if List.mem device all_devices + then raise (Api_errors.Server_error (Api_errors.device_already_exists, [device])); + + let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.VIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid + ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Stdext.Date.of_float 0.) 
~other_config:[]; + + let (_:unit) = Db.VIF.create ~__context ~ref ~uuid:(Uuid.to_string uuid) + ~current_operations:[] ~allowed_operations:[] ~reserved:false + ~device ~network ~vM ~mAC ~mAC_autogenerated ~mTU + ~qos_algorithm_type ~qos_algorithm_params ~qos_supported_algorithms:[] + ~currently_attached + ~status_code:0L ~status_detail:"" + ~runtime_properties:[] ~other_config + ~metrics ~locking_mode + ~ipv4_allowed ~ipv6_allowed ~ipv4_configuration_mode ~ipv4_addresses ~ipv4_gateway - ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway : API.ref_VIF = - let () = debug "VIF.create running" in - - if locking_mode = `locked || ipv4_allowed <> [] || ipv6_allowed <> [] then - assert_locking_licensed ~__context; - - let uuid = Uuid.make_uuid () in - let ref = Ref.make () in - - let vm_mac_seed = - try Some (List.assoc Xapi_globs.mac_seed (Db.VM.get_other_config ~__context ~self:vM)) with _ -> None in - - if not(valid_device device) - then raise (Api_errors.Server_error (Api_errors.invalid_device,[device])); - - let mAC, mAC_autogenerated = - match vm_mac_seed with - Some seed -> - debug "Found mac_seed on VM: supplied MAC parameter = '%s'" mAC; - if mAC="" then gen_mac(int_of_string device, seed), true else mAC, false - | None -> - debug "Did not find mac_seed on VM"; - mAC, false in - - if not (Helpers.is_valid_MAC mAC) then - raise (Api_errors.Server_error (Api_errors.mac_invalid, [mAC])); - - (* Make people aware that non-shared networks being added to VMs makes them not agile *) - let pool = Helpers.get_pool ~__context in - if true - && Db.Pool.get_ha_enabled ~__context ~self:pool - && not(Db.Pool.get_ha_allow_overcommit ~__context ~self:pool) - && Helpers.is_xha_protected ~__context ~self:vM - && not(Agility.is_network_properly_shared ~__context ~self:network) then begin - warn "Creating VIF %s makes VM %s not agile" (Ref.string_of ref) (Ref.string_of vM); - raise (Api_errors.Server_error(Api_errors.ha_operation_would_break_failover_plan, [])) - end; - - (* 
Check to make sure the device is unique *) - Stdext.Threadext.Mutex.execute m - (fun () -> - let all = Db.VM.get_VIFs ~__context ~self:vM in - let all_devices = List.map (fun self -> Db.VIF.get_device ~__context ~self) all in - if List.mem device all_devices - then raise (Api_errors.Server_error (Api_errors.device_already_exists, [device])); - - let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.VIF_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~io_read_kbs:0. ~io_write_kbs:0. ~last_updated:(Stdext.Date.of_float 0.) ~other_config:[]; - - let (_:unit) = Db.VIF.create ~__context ~ref ~uuid:(Uuid.to_string uuid) - ~current_operations:[] ~allowed_operations:[] ~reserved:false - ~device ~network ~vM ~mAC ~mAC_autogenerated ~mTU - ~qos_algorithm_type ~qos_algorithm_params ~qos_supported_algorithms:[] - ~currently_attached - ~status_code:0L ~status_detail:"" - ~runtime_properties:[] ~other_config - ~metrics ~locking_mode - ~ipv4_allowed ~ipv6_allowed - ~ipv4_configuration_mode ~ipv4_addresses ~ipv4_gateway - ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway in () - ); - update_allowed_operations ~__context ~self:ref; - debug "VIF ref='%s' created (VM = '%s'; MAC address = '%s')" (Ref.string_of ref) (Ref.string_of vM) mAC; - ref + ~ipv6_configuration_mode ~ipv6_addresses ~ipv6_gateway in () + ); + update_allowed_operations ~__context ~self:ref; + debug "VIF ref='%s' created (VM = '%s'; MAC address = '%s')" (Ref.string_of ref) (Ref.string_of vM) mAC; + ref let destroy ~__context ~self = - debug "VIF.destroy"; - let vm = Db.VIF.get_VM ~__context ~self in + debug "VIF.destroy"; + let vm = Db.VIF.get_VM ~__context ~self in + + if Helpers.is_running ~__context ~self:vm + && (Db.VIF.get_currently_attached ~__context ~self) + then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["VIF currently attached to a running VM"])); - if Helpers.is_running ~__context ~self:vm - && (Db.VIF.get_currently_attached 
~__context ~self) - then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["VIF currently attached to a running VM"])); + let metrics = Db.VIF.get_metrics ~__context ~self in + (* Don't let a failure to destroy the metrics stop us *) + Helpers.log_exn_continue "VIF_metrics.destroy" + (fun self -> if Db.is_valid_ref __context self then Db.VIF_metrics.destroy ~__context ~self) metrics; - let metrics = Db.VIF.get_metrics ~__context ~self in - (* Don't let a failure to destroy the metrics stop us *) - Helpers.log_exn_continue "VIF_metrics.destroy" - (fun self -> if Db.is_valid_ref __context self then Db.VIF_metrics.destroy ~__context ~self) metrics; - - Db.VIF.destroy ~__context ~self + Db.VIF.destroy ~__context ~self (* copy a vif *) let copy ~__context ~vm ~preserve_mac_address vif = - let all = Db.VIF.get_record ~__context ~self:vif in - create ~__context - ~device:all.API.vIF_device - ~network:all.API.vIF_network - ~currently_attached:all.API.vIF_currently_attached - ~vM:vm - ~mAC:(if preserve_mac_address then all.API.vIF_MAC else "") (* leave blank == generate new mac from vm random seed *) - ~mTU:all.API.vIF_MTU - ~other_config:all.API.vIF_other_config - ~qos_algorithm_type:all.API.vIF_qos_algorithm_type - ~qos_algorithm_params:all.API.vIF_qos_algorithm_params - ~locking_mode:all.API.vIF_locking_mode - ~ipv4_allowed:all.API.vIF_ipv4_allowed - ~ipv6_allowed:all.API.vIF_ipv6_allowed - ~ipv4_configuration_mode:all.API.vIF_ipv4_configuration_mode - ~ipv4_addresses:all.API.vIF_ipv4_addresses - ~ipv4_gateway:all.API.vIF_ipv4_gateway - ~ipv6_configuration_mode:all.API.vIF_ipv6_configuration_mode - ~ipv6_addresses:all.API.vIF_ipv6_addresses - ~ipv6_gateway:all.API.vIF_ipv6_gateway + let all = Db.VIF.get_record ~__context ~self:vif in + create ~__context + ~device:all.API.vIF_device + ~network:all.API.vIF_network + ~currently_attached:all.API.vIF_currently_attached + ~vM:vm + ~mAC:(if preserve_mac_address then all.API.vIF_MAC else "") (* leave blank == 
generate new mac from vm random seed *) + ~mTU:all.API.vIF_MTU + ~other_config:all.API.vIF_other_config + ~qos_algorithm_type:all.API.vIF_qos_algorithm_type + ~qos_algorithm_params:all.API.vIF_qos_algorithm_params + ~locking_mode:all.API.vIF_locking_mode + ~ipv4_allowed:all.API.vIF_ipv4_allowed + ~ipv6_allowed:all.API.vIF_ipv6_allowed + ~ipv4_configuration_mode:all.API.vIF_ipv4_configuration_mode + ~ipv4_addresses:all.API.vIF_ipv4_addresses + ~ipv4_gateway:all.API.vIF_ipv4_gateway + ~ipv6_configuration_mode:all.API.vIF_ipv6_configuration_mode + ~ipv6_addresses:all.API.vIF_ipv6_addresses + ~ipv6_gateway:all.API.vIF_ipv6_gateway diff --git a/ocaml/xapi/xapi_vif_helpers.mli b/ocaml/xapi/xapi_vif_helpers.mli index 94fe30f2d95..01ac0f0698c 100644 --- a/ocaml/xapi/xapi_vif_helpers.mli +++ b/ocaml/xapi/xapi_vif_helpers.mli @@ -13,12 +13,12 @@ *) (** Common code between the fake and real servers for dealing with VIFs. * @group Networking - *) +*) (** Throw error if the given operation is not in the list of allowed operations. *) val assert_operation_valid : __context:Context.t -> self:[ `VIF ] Ref.t -> op:API.vif_operations -> unit - + (** Update the [PIF.allowed_operations] field. *) val update_allowed_operations : __context:Context.t -> self:[ `VIF ] Ref.t -> unit @@ -58,7 +58,7 @@ val create : ipv6_addresses:string list -> ipv6_gateway:string -> API.ref_VIF -(** Destroy a VIF object in the database. *) +(** Destroy a VIF object in the database. *) val destroy : __context:Context.t -> self:[ `VIF ] Ref.t -> unit (** Copy a VIF. *) @@ -67,6 +67,6 @@ val copy : vm:[ `VM ] Ref.t -> preserve_mac_address:bool -> [ `VIF ] Ref.t -> API.ref_VIF -(** Generate a MAC address *) +(** Generate a MAC address *) val gen_mac : int * string -> string diff --git a/ocaml/xapi/xapi_vlan.ml b/ocaml/xapi/xapi_vlan.ml index 26e26baa6dc..f527bf4318e 100644 --- a/ocaml/xapi/xapi_vlan.ml +++ b/ocaml/xapi/xapi_vlan.ml @@ -11,95 +11,95 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. *) -module D = Debug.Make(struct let name="xapi" end) +module D = Debug.Make(struct let name="xapi" end) open D (* Dummy MAC used by the VLAN *) let vlan_mac = "fe:ff:ff:ff:ff:ff" let create_internal ~__context ~host ~tagged_PIF ~tag ~network ~device = - let vlan = Ref.make () and vlan_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let untagged_PIF = Ref.make () in - (* Copy the MTU and metrics from the base PIF *) - let mTU = Db.PIF.get_MTU ~__context ~self:tagged_PIF in - let metrics = Db.PIF.get_metrics ~__context ~self:tagged_PIF in - Db.PIF.create ~__context ~ref:untagged_PIF ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~device ~device_name:device ~network ~host ~mAC:vlan_mac ~mTU ~vLAN:tag ~metrics - ~physical:false ~currently_attached:false - ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null - ~vLAN_master_of:vlan ~management:false ~other_config:[] ~disallow_unplug:false - ~ipv6_configuration_mode:`None ~iPv6:[""] ~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true - ~properties:[] ~capabilities:[]; - - let () = Db.VLAN.create ~__context ~ref:vlan ~uuid:vlan_uuid ~tagged_PIF ~untagged_PIF ~tag ~other_config:[] in - vlan, untagged_PIF + let vlan = Ref.make () and vlan_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let untagged_PIF = Ref.make () in + (* Copy the MTU and metrics from the base PIF *) + let mTU = Db.PIF.get_MTU ~__context ~self:tagged_PIF in + let metrics = Db.PIF.get_metrics ~__context ~self:tagged_PIF in + Db.PIF.create ~__context ~ref:untagged_PIF ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~device ~device_name:device ~network ~host ~mAC:vlan_mac ~mTU ~vLAN:tag ~metrics + ~physical:false ~currently_attached:false + ~ip_configuration_mode:`None ~iP:"" ~netmask:"" ~gateway:"" ~dNS:"" ~bond_slave_of:Ref.null + ~vLAN_master_of:vlan ~management:false ~other_config:[] ~disallow_unplug:false + ~ipv6_configuration_mode:`None ~iPv6:[""] 
~ipv6_gateway:"" ~primary_address_type:`IPv4 ~managed:true + ~properties:[] ~capabilities:[]; + + let () = Db.VLAN.create ~__context ~ref:vlan ~uuid:vlan_uuid ~tagged_PIF ~untagged_PIF ~tag ~other_config:[] in + vlan, untagged_PIF let create ~__context ~tagged_PIF ~tag ~network = - if Db.PIF.get_managed ~__context ~self:tagged_PIF <> true then - raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of tagged_PIF])); + if Db.PIF.get_managed ~__context ~self:tagged_PIF <> true then + raise (Api_errors.Server_error (Api_errors.pif_unmanaged, [Ref.string_of tagged_PIF])); - let host = Db.PIF.get_host ~__context ~self:tagged_PIF in - Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; + let host = Db.PIF.get_host ~__context ~self:tagged_PIF in + Xapi_pif.assert_no_other_local_pifs ~__context ~host ~network; - if Db.PIF.get_bond_slave_of ~__context ~self:tagged_PIF <> Ref.null then - raise (Api_errors.Server_error (Api_errors.cannot_add_vlan_to_bond_slave, [Ref.string_of tagged_PIF])); + if Db.PIF.get_bond_slave_of ~__context ~self:tagged_PIF <> Ref.null then + raise (Api_errors.Server_error (Api_errors.cannot_add_vlan_to_bond_slave, [Ref.string_of tagged_PIF])); - (* Check that the tagged PIF is not a VLAN itself - CA-25160. This check can be skipped using the allow_vlan_on_vlan FIST point. *) - let origtag = Db.PIF.get_VLAN ~__context ~self:tagged_PIF in - if origtag >= 0L && not (Xapi_fist.allow_vlan_on_vlan()) then - raise (Api_errors.Server_error (Api_errors.pif_is_vlan, [Ref.string_of tagged_PIF])); + (* Check that the tagged PIF is not a VLAN itself - CA-25160. This check can be skipped using the allow_vlan_on_vlan FIST point. 
*) + let origtag = Db.PIF.get_VLAN ~__context ~self:tagged_PIF in + if origtag >= 0L && not (Xapi_fist.allow_vlan_on_vlan()) then + raise (Api_errors.Server_error (Api_errors.pif_is_vlan, [Ref.string_of tagged_PIF])); - (* Check the VLAN tag is sensible; 4095 is reserved for implementation use (802.1Q) *) - if tag<0L || tag>4094L then - raise (Api_errors.Server_error (Api_errors.vlan_tag_invalid, [Int64.to_string tag])); + (* Check the VLAN tag is sensible; 4095 is reserved for implementation use (802.1Q) *) + if tag<0L || tag>4094L then + raise (Api_errors.Server_error (Api_errors.vlan_tag_invalid, [Int64.to_string tag])); - let device = Db.PIF.get_device ~__context ~self:tagged_PIF in - let vlans = Db.VLAN.get_records_where ~__context - ~expr:(Db_filter_types.And (Db_filter_types.Eq (Db_filter_types.Field "tagged_PIF", Db_filter_types.Literal (Ref.string_of tagged_PIF)), - Db_filter_types.Eq (Db_filter_types.Field "tag", Db_filter_types.Literal (Int64.to_string tag)))) in - if vlans <> [] then - raise (Api_errors.Server_error (Api_errors.pif_vlan_exists, [device])); + let device = Db.PIF.get_device ~__context ~self:tagged_PIF in + let vlans = Db.VLAN.get_records_where ~__context + ~expr:(Db_filter_types.And (Db_filter_types.Eq (Db_filter_types.Field "tagged_PIF", Db_filter_types.Literal (Ref.string_of tagged_PIF)), + Db_filter_types.Eq (Db_filter_types.Field "tag", Db_filter_types.Literal (Int64.to_string tag)))) in + if vlans <> [] then + raise (Api_errors.Server_error (Api_errors.pif_vlan_exists, [device])); - if Db.PIF.get_tunnel_access_PIF_of ~__context ~self:tagged_PIF <> [] then - raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of tagged_PIF])); + if Db.PIF.get_tunnel_access_PIF_of ~__context ~self:tagged_PIF <> [] then + raise (Api_errors.Server_error (Api_errors.is_tunnel_access_pif, [Ref.string_of tagged_PIF])); - (* Check the VLAN is not in use by the kernel *) - let open Network in - if Net.Interface.has_vlan 
(Context.string_of_task __context) ~name:device ~vlan:(Int64.to_int tag) then - raise (Api_errors.Server_error (Api_errors.vlan_in_use, [device; Int64.to_string tag])); + (* Check the VLAN is not in use by the kernel *) + let open Network in + if Net.Interface.has_vlan (Context.string_of_task __context) ~name:device ~vlan:(Int64.to_int tag) then + raise (Api_errors.Server_error (Api_errors.vlan_in_use, [device; Int64.to_string tag])); - let vlan, untagged_PIF = create_internal ~__context ~host ~tagged_PIF ~tag ~network ~device in - Xapi_pif.plug ~__context ~self:untagged_PIF; - vlan + let vlan, untagged_PIF = create_internal ~__context ~host ~tagged_PIF ~tag ~network ~device in + Xapi_pif.plug ~__context ~self:untagged_PIF; + vlan let destroy ~__context ~self = - debug "VLAN.destroy uuid = %s" (Db.VLAN.get_uuid ~__context ~self); - let untagged_PIF = Db.VLAN.get_untagged_PIF ~__context ~self in - (* Check if the untagged_PIF exists, if not we must be an orphaned record *) - if try ignore(Db.PIF.get_uuid ~__context ~self:untagged_PIF); false with _ -> true then begin - warn "VLAN's untagged PIF doesn't exist -- orphaned record?"; - Db.VLAN.destroy ~__context ~self - end else begin - debug "untagged PIF uuid = %s" (Db.PIF.get_uuid ~__context ~self:untagged_PIF); - (* Side-effect of this is to destroy any VLAN object *) - Xapi_pif.assert_not_in_bond ~__context ~self:untagged_PIF; - Xapi_pif.assert_not_slave_management_pif ~__context ~self:untagged_PIF; - Xapi_pif.assert_no_protection_enabled ~__context ~self:untagged_PIF; - - if Db.PIF.get_VLAN ~__context ~self:untagged_PIF < 0L then - raise (Api_errors.Server_error (Api_errors.pif_is_physical, [])); - (* Because of the precondition in create_VLAN, this will always be the only PIF - connecting this host to the network. Therefore it is safe to detach the network. 
*) - let network = Db.PIF.get_network ~__context ~self:untagged_PIF in - let bridge = Db.Network.get_bridge ~__context ~self:network in - - Xapi_pif.unplug ~__context ~self:untagged_PIF; - - Xapi_network.detach ~__context bridge; - - (try - let vlan = Db.PIF.get_VLAN_master_of ~__context ~self:untagged_PIF in - Db.VLAN.destroy ~__context ~self:vlan with _ -> ()); - Db.PIF.destroy ~__context ~self:untagged_PIF - end + debug "VLAN.destroy uuid = %s" (Db.VLAN.get_uuid ~__context ~self); + let untagged_PIF = Db.VLAN.get_untagged_PIF ~__context ~self in + (* Check if the untagged_PIF exists, if not we must be an orphaned record *) + if try ignore(Db.PIF.get_uuid ~__context ~self:untagged_PIF); false with _ -> true then begin + warn "VLAN's untagged PIF doesn't exist -- orphaned record?"; + Db.VLAN.destroy ~__context ~self + end else begin + debug "untagged PIF uuid = %s" (Db.PIF.get_uuid ~__context ~self:untagged_PIF); + (* Side-effect of this is to destroy any VLAN object *) + Xapi_pif.assert_not_in_bond ~__context ~self:untagged_PIF; + Xapi_pif.assert_not_slave_management_pif ~__context ~self:untagged_PIF; + Xapi_pif.assert_no_protection_enabled ~__context ~self:untagged_PIF; + + if Db.PIF.get_VLAN ~__context ~self:untagged_PIF < 0L then + raise (Api_errors.Server_error (Api_errors.pif_is_physical, [])); + (* Because of the precondition in create_VLAN, this will always be the only PIF + connecting this host to the network. Therefore it is safe to detach the network. 
*) + let network = Db.PIF.get_network ~__context ~self:untagged_PIF in + let bridge = Db.Network.get_bridge ~__context ~self:network in + + Xapi_pif.unplug ~__context ~self:untagged_PIF; + + Xapi_network.detach ~__context bridge; + + (try + let vlan = Db.PIF.get_VLAN_master_of ~__context ~self:untagged_PIF in + Db.VLAN.destroy ~__context ~self:vlan with _ -> ()); + Db.PIF.destroy ~__context ~self:untagged_PIF + end diff --git a/ocaml/xapi/xapi_vlan.mli b/ocaml/xapi/xapi_vlan.mli index 0f1d05d9476..dcb9fdcb945 100644 --- a/ocaml/xapi/xapi_vlan.mli +++ b/ocaml/xapi/xapi_vlan.mli @@ -13,21 +13,21 @@ *) (** Module that defines API functions for VLANs * @group Networking - *) +*) (** -Xapi also supports 802.1Q VLANs, which are used to separate network traffic by inserting a {i tag} in each packet, thereby creating multiple virtual networks. A tag is simply a number. -{ul -{- A VLAN has a {i VLAN} object in the datamodel, which is associated with a {i slave} PIF and a {i master} PIF.} -{- The VLAN slave, or tagged PIF, is used as the base of the VLAN. It can be any existing PIF (including bond masters).} -{- The VLAN master, or untagged PIF, is a higher-level PIF (comparable to a bond master) that is configured with a VLAN tag. Any traffic sent to the VLAN master (via its network) will be tagged.} -{- Both the VLAN slave as well as the master may be used directly. The "pluggedness" of the master and slave is independent: the master may be plugged while the slave is not and vice versa, and both may be plugged or unplugged at the same time.} -{- Multiple VLANs in a pool may share the same tag.} -} -Note: Plugging a VLAN master PIF on top of a VLAN slave that is unplugged, does not set [currently_attached] to [true], while the underlying network device and bridge of the slave {i is} brought up. In this case, [currently_attached] therefore does not always reflect the actual state of the network interface. 
Unplugging a VLAN slave that is up, while its master is also up, actually leaves the slave's bridge up. Should this be made more aligned/consistent? + Xapi also supports 802.1Q VLANs, which are used to separate network traffic by inserting a {i tag} in each packet, thereby creating multiple virtual networks. A tag is simply a number. + {ul + {- A VLAN has a {i VLAN} object in the datamodel, which is associated with a {i slave} PIF and a {i master} PIF.} + {- The VLAN slave, or tagged PIF, is used as the base of the VLAN. It can be any existing PIF (including bond masters).} + {- The VLAN master, or untagged PIF, is a higher-level PIF (comparable to a bond master) that is configured with a VLAN tag. Any traffic sent to the VLAN master (via its network) will be tagged.} + {- Both the VLAN slave as well as the master may be used directly. The "pluggedness" of the master and slave is independent: the master may be plugged while the slave is not and vice versa, and both may be plugged or unplugged at the same time.} + {- Multiple VLANs in a pool may share the same tag.} + } + Note: Plugging a VLAN master PIF on top of a VLAN slave that is unplugged, does not set [currently_attached] to [true], while the underlying network device and bridge of the slave {i is} brought up. In this case, [currently_attached] therefore does not always reflect the actual state of the network interface. Unplugging a VLAN slave that is up, while its master is also up, actually leaves the slave's bridge up. Should this be made more aligned/consistent? *) -(** Create a VLAN with the given [tag] using the [tagged_PIF] as VLAN slave. +(** Create a VLAN with the given [tag] using the [tagged_PIF] as VLAN slave. * Creates a new PIF object as VLAN master (untagged PIF) and connects it to the * given [network]. No other PIFs on the same host may be connected to this network. 
*) val create : diff --git a/ocaml/xapi/xapi_vm.ml b/ocaml/xapi/xapi_vm.ml index 832feada879..6bee653a28a 100644 --- a/ocaml/xapi/xapi_vm.ml +++ b/ocaml/xapi/xapi_vm.ml @@ -28,7 +28,7 @@ open Listext * Until we support pools properly VM.start and VM.start_on both try to boot/resume the VM on this host. * If VM.{start,resume}_on is supplied another host reference, they will fail. - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -38,83 +38,83 @@ exception InvalidOperation of string let assert_operation_valid = Xapi_vm_lifecycle.assert_operation_valid ~strict:true let update_allowed_operations ~__context ~self = - Helpers.log_exn_continue "updating allowed operations of VBDs/VIFs/VDIs in VM.update_allowed_operations" - (fun () -> - List.iter - (fun vbd -> - Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; - try - if not(Db.VBD.get_empty ~__context ~self:vbd) - then Xapi_vdi.update_allowed_operations ~__context ~self:(Db.VBD.get_VDI ~__context ~self:vbd) - with _ -> ()) (Db.VM.get_VBDs ~__context ~self); - List.iter - (fun vif -> - Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif) - (Db.VM.get_VIFs ~__context ~self) - ) (); - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self + Helpers.log_exn_continue "updating allowed operations of VBDs/VIFs/VDIs in VM.update_allowed_operations" + (fun () -> + List.iter + (fun vbd -> + Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; + try + if not(Db.VBD.get_empty ~__context ~self:vbd) + then Xapi_vdi.update_allowed_operations ~__context ~self:(Db.VBD.get_VDI ~__context ~self:vbd) + with _ -> ()) (Db.VM.get_VBDs ~__context ~self); + List.iter + (fun vif -> + Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif) + (Db.VM.get_VIFs ~__context ~self) + ) (); + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self let assert_can_boot_here ~__context ~self ~host = - let snapshot = Db.VM.get_record ~__context ~self in - if 
Helpers.rolling_upgrade_in_progress ~__context then - Helpers.assert_platform_version_is_same_on_master ~__context ~host ~self; - assert_can_boot_here ~__context ~self ~host ~snapshot () + let snapshot = Db.VM.get_record ~__context ~self in + if Helpers.rolling_upgrade_in_progress ~__context then + Helpers.assert_platform_version_is_same_on_master ~__context ~host ~self; + assert_can_boot_here ~__context ~self ~host ~snapshot () let retrieve_wlb_recommendations ~__context ~vm = - let snapshot = Db.VM.get_record ~__context ~self:vm in - retrieve_wlb_recommendations ~__context ~vm ~snapshot + let snapshot = Db.VM.get_record ~__context ~self:vm in + retrieve_wlb_recommendations ~__context ~vm ~snapshot let assert_agile ~__context ~self = Agility.vm_assert_agile ~__context ~self (* helpers *) let immediate_complete ~__context = - Helpers.progress ~__context (0.0 -. 1.0) + Helpers.progress ~__context (0.0 -. 1.0) (* API *) let set_actions_after_shutdown ~__context ~self ~value = - Db.VM.set_actions_after_shutdown ~__context ~self ~value + Db.VM.set_actions_after_shutdown ~__context ~self ~value let set_actions_after_reboot ~__context ~self ~value = - Db.VM.set_actions_after_reboot ~__context ~self ~value + Db.VM.set_actions_after_reboot ~__context ~self ~value let set_actions_after_crash ~__context ~self ~value = - set_actions_after_crash ~__context ~self ~value + set_actions_after_crash ~__context ~self ~value let set_is_a_template ~__context ~self ~value = - set_is_a_template ~__context ~self ~value + set_is_a_template ~__context ~self ~value let validate_restart_priority priority = - if not(List.mem priority Constants.ha_valid_restart_priorities) then - raise (Api_errors.Server_error(Api_errors.invalid_value, ["ha_restart_priority"; priority])) + if not(List.mem priority Constants.ha_valid_restart_priorities) then + raise (Api_errors.Server_error(Api_errors.invalid_value, ["ha_restart_priority"; priority])) let set_ha_restart_priority ~__context ~self ~value = - 
validate_restart_priority value; - let current = Db.VM.get_ha_restart_priority ~__context ~self in - if true - && current <> Constants.ha_restart - && value = Constants.ha_restart then begin - if Db.VM.get_power_state ~__context ~self != `Halted then - Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context self; - let pool = Helpers.get_pool ~__context in - if Db.Pool.get_ha_enabled ~__context ~self:pool then - let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in () - end; - - if current <> value then begin - Db.VM.set_ha_restart_priority ~__context ~self ~value; - (* If the VM is running then immediately turn on or off "protection" - for the VM by setting ha_always_run *) - if Db.VM.get_power_state ~__context ~self = `Running - then begin - Db.VM.set_ha_always_run ~__context ~self ~value:(value = Constants.ha_restart); - debug "Setting ha_always_run on vm=%s as %b during VM.set_ha_restart_priority" (Ref.string_of self) (value = Constants.ha_restart) - end; - end + validate_restart_priority value; + let current = Db.VM.get_ha_restart_priority ~__context ~self in + if true + && current <> Constants.ha_restart + && value = Constants.ha_restart then begin + if Db.VM.get_power_state ~__context ~self != `Halted then + Xapi_ha_vm_failover.assert_new_vm_preserves_ha_plan ~__context self; + let pool = Helpers.get_pool ~__context in + if Db.Pool.get_ha_enabled ~__context ~self:pool then + let (_: bool) = Xapi_ha_vm_failover.update_pool_status ~__context () in () + end; + + if current <> value then begin + Db.VM.set_ha_restart_priority ~__context ~self ~value; + (* If the VM is running then immediately turn on or off "protection" + for the VM by setting ha_always_run *) + if Db.VM.get_power_state ~__context ~self = `Running + then begin + Db.VM.set_ha_always_run ~__context ~self ~value:(value = Constants.ha_restart); + debug "Setting ha_always_run on vm=%s as %b during VM.set_ha_restart_priority" (Ref.string_of self) (value = Constants.ha_restart) 
+ end; + end (* Field deprecated since Boston - attempt to degrade gracefully if anything sets it. *) let set_ha_always_run ~__context ~self ~value = - if value then - set_ha_restart_priority ~__context ~self ~value:Constants.ha_restart - else - set_ha_restart_priority ~__context ~self ~value:"" + if value then + set_ha_restart_priority ~__context ~self ~value:Constants.ha_restart + else + set_ha_restart_priority ~__context ~self ~value:"" let compute_memory_overhead = compute_memory_overhead @@ -124,42 +124,42 @@ open Xapi_vm_memory_constraints we have to modify the Xen command line in order to update dom0's memory allocation. *) let set_dom0_memory ~__context ~self ~bytes = - let arg = Printf.sprintf "dom0_mem=%LdB,max:%LdB" bytes bytes in - let args = ["--set-xen"; arg] in - try - let _ = Helpers.call_script !Xapi_globs.xen_cmdline_script args in - Xapi_host_helpers.Host_requires_reboot.set () - with - | e -> - error "Failed to update dom0 memory: %s" (Printexc.to_string e); - raise Api_errors.(Server_error (internal_error, ["Failed to update dom0 memory"])) + let arg = Printf.sprintf "dom0_mem=%LdB,max:%LdB" bytes bytes in + let args = ["--set-xen"; arg] in + try + let _ = Helpers.call_script !Xapi_globs.xen_cmdline_script args in + Xapi_host_helpers.Host_requires_reboot.set () + with + | e -> + error "Failed to update dom0 memory: %s" (Printexc.to_string e); + raise Api_errors.(Server_error (internal_error, ["Failed to update dom0 memory"])) let set_memory_static_range ~__context ~self ~min ~max = - (* For non-control domains, this function is only called on the master and - * for halted VMs. 
*) - let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in - let power_state = Db.VM.get_power_state ~__context ~self in - if not is_control_domain && power_state <> `Halted - then failwith "assertion_failed: set_memory_static_range should only be \ - called when the VM is Halted"; - (* Check the range constraints *) - let constraints = Vm_memory_constraints.get ~__context ~vm_ref:self in - let constraints = {constraints with Vm_memory_constraints. - static_min = min; - static_max = max; - } in - Vm_memory_constraints.assert_valid_for_current_context - ~__context ~vm:self ~constraints; - Db.VM.set_memory_static_min ~__context ~self ~value:min; - Db.VM.set_memory_static_max ~__context ~self ~value:max; - update_memory_overhead ~__context ~vm:self; - if Helpers.is_domain_zero ~__context self then - set_dom0_memory ~__context ~self ~bytes:max; - (* It is allowed to update the memory settings of a running control domain, - * but it needs to be rebooted for the changes to take effect. We signal - * the client to do so. *) - if is_control_domain && power_state = `Running then - Db.VM.set_requires_reboot ~__context ~self ~value:true + (* For non-control domains, this function is only called on the master and + * for halted VMs. *) + let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in + let power_state = Db.VM.get_power_state ~__context ~self in + if not is_control_domain && power_state <> `Halted + then failwith "assertion_failed: set_memory_static_range should only be \ + called when the VM is Halted"; + (* Check the range constraints *) + let constraints = Vm_memory_constraints.get ~__context ~vm_ref:self in + let constraints = {constraints with Vm_memory_constraints. 
+ static_min = min; + static_max = max; + } in + Vm_memory_constraints.assert_valid_for_current_context + ~__context ~vm:self ~constraints; + Db.VM.set_memory_static_min ~__context ~self ~value:min; + Db.VM.set_memory_static_max ~__context ~self ~value:max; + update_memory_overhead ~__context ~vm:self; + if Helpers.is_domain_zero ~__context self then + set_dom0_memory ~__context ~self ~bytes:max; + (* It is allowed to update the memory settings of a running control domain, + * but it needs to be rebooted for the changes to take effect. We signal + * the client to do so. *) + if is_control_domain && power_state = `Running then + Db.VM.set_requires_reboot ~__context ~self ~value:true (* These are always converted into set_memory_dynamic_range *) (* by the message forwarding layer: *) @@ -171,55 +171,55 @@ let set_memory_static_min ~__context ~self ~value = assert false let set_memory_static_max ~__context ~self ~value = assert false let set_memory_limits ~__context ~self - ~static_min ~static_max ~dynamic_min ~dynamic_max = - (* For non-control domains, this function is only called on the master and - * for halted VMs. *) - let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in - let power_state = Db.VM.get_power_state ~__context ~self in - if not is_control_domain && power_state <> `Halted - then failwith "assertion_failed: set_memory_limits should only be \ - called when the VM is Halted"; - (* Check that the new limits are in the correct order. *) - let constraints = {Vm_memory_constraints. 
- static_min = static_min; - dynamic_min = dynamic_min; - target = dynamic_min; - dynamic_max = dynamic_max; - static_max = static_max; - } in - Vm_memory_constraints.assert_valid_for_current_context - ~__context ~vm:self ~constraints; - Vm_memory_constraints.set ~__context ~vm_ref:self ~constraints; - update_memory_overhead ~__context ~vm:self; - if Helpers.is_domain_zero ~__context self then - set_dom0_memory ~__context ~self ~bytes:static_max; - (* It is allowed to update the memory settings of a running control domain, - * but it needs to be rebooted for the changes to take effect. We signal - * the client to do so. *) - if is_control_domain && power_state = `Running then - Db.VM.set_requires_reboot ~__context ~self ~value:true + ~static_min ~static_max ~dynamic_min ~dynamic_max = + (* For non-control domains, this function is only called on the master and + * for halted VMs. *) + let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in + let power_state = Db.VM.get_power_state ~__context ~self in + if not is_control_domain && power_state <> `Halted + then failwith "assertion_failed: set_memory_limits should only be \ + called when the VM is Halted"; + (* Check that the new limits are in the correct order. *) + let constraints = {Vm_memory_constraints. + static_min = static_min; + dynamic_min = dynamic_min; + target = dynamic_min; + dynamic_max = dynamic_max; + static_max = static_max; + } in + Vm_memory_constraints.assert_valid_for_current_context + ~__context ~vm:self ~constraints; + Vm_memory_constraints.set ~__context ~vm_ref:self ~constraints; + update_memory_overhead ~__context ~vm:self; + if Helpers.is_domain_zero ~__context self then + set_dom0_memory ~__context ~self ~bytes:static_max; + (* It is allowed to update the memory settings of a running control domain, + * but it needs to be rebooted for the changes to take effect. We signal + * the client to do so. 
*) + if is_control_domain && power_state = `Running then + Db.VM.set_requires_reboot ~__context ~self ~value:true let set_memory ~__context ~self ~value = - set_memory_limits ~__context ~self - ~static_min:(Db.VM.get_memory_static_min ~__context ~self) - ~static_max:value ~dynamic_min:value ~dynamic_max:value + set_memory_limits ~__context ~self + ~static_min:(Db.VM.get_memory_static_min ~__context ~self) + ~static_max:value ~dynamic_min:value ~dynamic_max:value (* If HA is enabled on the Pool and the VM is marked as always_run then block the action *) let assert_not_ha_protected ~__context ~vm = - let pool = Helpers.get_pool ~__context in - let always_run = Db.VM.get_ha_always_run ~__context ~self:vm in - let priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in - if Db.Pool.get_ha_enabled ~__context ~self:pool && (Helpers.vm_should_always_run always_run priority) - then raise (Api_errors.Server_error(Api_errors.vm_is_protected, [ Ref.string_of vm ])) + let pool = Helpers.get_pool ~__context in + let always_run = Db.VM.get_ha_always_run ~__context ~self:vm in + let priority = Db.VM.get_ha_restart_priority ~__context ~self:vm in + if Db.Pool.get_ha_enabled ~__context ~self:pool && (Helpers.vm_should_always_run always_run priority) + then raise (Api_errors.Server_error(Api_errors.vm_is_protected, [ Ref.string_of vm ])) let pause ~__context ~vm = - Xapi_xenops.pause ~__context ~self:vm + Xapi_xenops.pause ~__context ~self:vm let unpause ~__context ~vm = - Xapi_xenops.unpause ~__context ~self:vm + Xapi_xenops.unpause ~__context ~self:vm let set_xenstore_data ~__context ~self ~value = - Xapi_xenops.set_xenstore_data ~__context ~self value + Xapi_xenops.set_xenstore_data ~__context ~self value (* Note: it is important that we use the pool-internal API call, VM.atomic_set_resident_on, to set resident_on and clear scheduled_to_be_resident_on atomically. 
This prevents concurrent API calls on the master from accounting for the @@ -227,118 +227,118 @@ let set_xenstore_data ~__context ~self ~value = *) let start ~__context ~vm ~start_paused ~force = - let vmr = Db.VM.get_record ~__context ~self:vm in - if vmr.API.vM_ha_restart_priority = Constants.ha_restart - then begin - Db.VM.set_ha_always_run ~__context ~self:vm ~value:true; - debug "Setting ha_always_run on vm=%s as true during VM.start" (Ref.string_of vm) - end; - - (* Check to see if we're using any restricted platform kvs. This raises - an exception if so *) - Vm_platform.check_restricted_flags ~__context vmr.API.vM_platform; - - (* Clear out any VM guest metrics record. Guest metrics will be updated by - * the running VM and for now they might be wrong, especially network - * addresses inherited by a cloned VM. *) - let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in - Db.VM.set_guest_metrics ~__context ~self:vm ~value:Ref.null; - (try Db.VM_guest_metrics.destroy ~__context ~self:vm_gm with _ -> ()); - - (* This makes sense here while the available versions are 0, 1 and 2. - * If/when we introduce another version, we must reassess this. *) - update_vm_virtual_hardware_platform_version ~__context ~vm; - - (* Reset CPU feature set, which will be passed to xenopsd *) - Cpuid_helpers.reset_cpu_flags ~__context ~vm; - - (* If the VM has any vGPUs, gpumon must remain stopped until the - * VM has started. 
*) - begin - match vmr.API.vM_VGPUs with - | [] -> Xapi_xenops.start ~__context ~self:vm start_paused force - | _ -> - Xapi_gpumon.with_gpumon_stopped ~timeout:!Xapi_globs.gpumon_stop_timeout - (fun () -> Xapi_xenops.start ~__context ~self:vm start_paused force) - end; - Xapi_vm_helpers.start_delay ~__context ~vm + let vmr = Db.VM.get_record ~__context ~self:vm in + if vmr.API.vM_ha_restart_priority = Constants.ha_restart + then begin + Db.VM.set_ha_always_run ~__context ~self:vm ~value:true; + debug "Setting ha_always_run on vm=%s as true during VM.start" (Ref.string_of vm) + end; + + (* Check to see if we're using any restricted platform kvs. This raises + an exception if so *) + Vm_platform.check_restricted_flags ~__context vmr.API.vM_platform; + + (* Clear out any VM guest metrics record. Guest metrics will be updated by + * the running VM and for now they might be wrong, especially network + * addresses inherited by a cloned VM. *) + let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in + Db.VM.set_guest_metrics ~__context ~self:vm ~value:Ref.null; + (try Db.VM_guest_metrics.destroy ~__context ~self:vm_gm with _ -> ()); + + (* This makes sense here while the available versions are 0, 1 and 2. + * If/when we introduce another version, we must reassess this. *) + update_vm_virtual_hardware_platform_version ~__context ~vm; + + (* Reset CPU feature set, which will be passed to xenopsd *) + Cpuid_helpers.reset_cpu_flags ~__context ~vm; + + (* If the VM has any vGPUs, gpumon must remain stopped until the + * VM has started. 
*) + begin + match vmr.API.vM_VGPUs with + | [] -> Xapi_xenops.start ~__context ~self:vm start_paused force + | _ -> + Xapi_gpumon.with_gpumon_stopped ~timeout:!Xapi_globs.gpumon_stop_timeout + (fun () -> Xapi_xenops.start ~__context ~self:vm start_paused force) + end; + Xapi_vm_helpers.start_delay ~__context ~vm (** For VM.start_on and VM.resume_on the message forwarding layer should only forward here if 'host' = localhost *) let assert_host_is_localhost ~__context ~host = - let localhost = Helpers.get_localhost ~__context in - if host <> localhost then - let msg = "Error in message forwarding layer: host parameter was not localhost" in - raise (Api_errors.Server_error (Api_errors.internal_error, [ msg ])) + let localhost = Helpers.get_localhost ~__context in + if host <> localhost then + let msg = "Error in message forwarding layer: host parameter was not localhost" in + raise (Api_errors.Server_error (Api_errors.internal_error, [ msg ])) let start_on ~__context ~vm ~host ~start_paused ~force = - (* If we modify this to support start_on other-than-localhost, - insert a precheck to insure that we're starting on an - appropriately versioned host during an upgrade, as per - PR-1007. See the first lines of resume above *) - assert_host_is_localhost ~__context ~host; - start ~__context ~vm ~start_paused ~force + (* If we modify this to support start_on other-than-localhost, + insert a precheck to insure that we're starting on an + appropriately versioned host during an upgrade, as per + PR-1007. 
See the first lines of resume above *) + assert_host_is_localhost ~__context ~host; + start ~__context ~vm ~start_paused ~force let hard_shutdown ~__context ~vm = - Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; - debug "Setting ha_always_run on vm=%s as false during VM.hard_shutdown" (Ref.string_of vm); - match Db.VM.get_power_state ~__context ~self:vm with - | `Suspended -> begin - debug "hard_shutdown: destroying any suspend VDI"; - let vdi = Db.VM.get_suspend_VDI ~__context ~self:vm in - if vdi <> Ref.null (* avoid spurious but scary messages *) - then Helpers.log_exn_continue - (Printf.sprintf "destroying suspend VDI: %s" (Ref.string_of vdi)) - (Helpers.call_api_functions ~__context) - (fun rpc session_id -> Client.VDI.destroy rpc session_id vdi); - (* Whether or not that worked, forget about the VDI *) - Db.VM.set_suspend_VDI ~__context ~self:vm ~value:Ref.null; - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; - end - | `Running - | `Paused -> - Xapi_xenops.shutdown ~__context ~self:vm None; - Xapi_vm_helpers.shutdown_delay ~__context ~vm - | `Halted -> () + Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; + debug "Setting ha_always_run on vm=%s as false during VM.hard_shutdown" (Ref.string_of vm); + match Db.VM.get_power_state ~__context ~self:vm with + | `Suspended -> begin + debug "hard_shutdown: destroying any suspend VDI"; + let vdi = Db.VM.get_suspend_VDI ~__context ~self:vm in + if vdi <> Ref.null (* avoid spurious but scary messages *) + then Helpers.log_exn_continue + (Printf.sprintf "destroying suspend VDI: %s" (Ref.string_of vdi)) + (Helpers.call_api_functions ~__context) + (fun rpc session_id -> Client.VDI.destroy rpc session_id vdi); + (* Whether or not that worked, forget about the VDI *) + Db.VM.set_suspend_VDI ~__context ~self:vm ~value:Ref.null; + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; + end + | `Running + | `Paused -> + Xapi_xenops.shutdown ~__context ~self:vm 
None; + Xapi_vm_helpers.shutdown_delay ~__context ~vm + | `Halted -> () let hard_reboot ~__context ~vm = - (* Cancelling operations can cause the VM to now be shutdown *) - begin - match Db.VM.get_power_state ~__context ~self:vm with - | `Running - | `Paused -> - Xapi_xenops.reboot ~__context ~self:vm None - | `Halted -> - start ~__context ~vm ~start_paused:false ~force:false - | `Suspended -> - raise (Api_errors.Server_error (Api_errors.vm_bad_power_state, [Ref.string_of vm; Record_util.power_to_string `Running; Record_util.power_to_string `Suspended])) - end + (* Cancelling operations can cause the VM to now be shutdown *) + begin + match Db.VM.get_power_state ~__context ~self:vm with + | `Running + | `Paused -> + Xapi_xenops.reboot ~__context ~self:vm None + | `Halted -> + start ~__context ~vm ~start_paused:false ~force:false + | `Suspended -> + raise (Api_errors.Server_error (Api_errors.vm_bad_power_state, [Ref.string_of vm; Record_util.power_to_string `Running; Record_util.power_to_string `Suspended])) + end let clean_reboot ~__context ~vm = - update_vm_virtual_hardware_platform_version ~__context ~vm; - Xapi_xenops.reboot ~__context ~self:vm (Some !Xapi_globs.domain_shutdown_total_timeout) + update_vm_virtual_hardware_platform_version ~__context ~vm; + Xapi_xenops.reboot ~__context ~self:vm (Some !Xapi_globs.domain_shutdown_total_timeout) let clean_shutdown_with_timeout ~__context ~vm timeout = - Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; - debug "Setting ha_always_run on vm=%s as false during VM.clean_shutdown" (Ref.string_of vm); - Xapi_xenops.shutdown ~__context ~self:vm (Some timeout); - Xapi_vm_helpers.shutdown_delay ~__context ~vm + Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; + debug "Setting ha_always_run on vm=%s as false during VM.clean_shutdown" (Ref.string_of vm); + Xapi_xenops.shutdown ~__context ~self:vm (Some timeout); + Xapi_vm_helpers.shutdown_delay ~__context ~vm let clean_shutdown ~__context ~vm = - 
clean_shutdown_with_timeout ~__context ~vm !Xapi_globs.domain_shutdown_total_timeout + clean_shutdown_with_timeout ~__context ~vm !Xapi_globs.domain_shutdown_total_timeout let shutdown ~__context ~vm = - begin - try - let db_timeout = Db.VM.get_shutdown_delay ~__context ~self:vm in - clean_shutdown_with_timeout ~__context ~vm - (if db_timeout > 0L - then Int64.to_float db_timeout - else !Xapi_globs.domain_shutdown_total_timeout) - with e -> - warn "Failed to perform clean_shutdown on VM:%s due to exception %s. Now attempting hard_shutdown." (Ref.string_of vm) (Printexc.to_string e); - hard_shutdown ~__context ~vm - end + begin + try + let db_timeout = Db.VM.get_shutdown_delay ~__context ~self:vm in + clean_shutdown_with_timeout ~__context ~vm + (if db_timeout > 0L + then Int64.to_float db_timeout + else !Xapi_globs.domain_shutdown_total_timeout) + with e -> + warn "Failed to perform clean_shutdown on VM:%s due to exception %s. Now attempting hard_shutdown." (Ref.string_of vm) (Printexc.to_string e); + hard_shutdown ~__context ~vm + end (***************************************************************************************) @@ -348,359 +348,359 @@ let hard_reboot_internal ~__context ~vm = assert false (***************************************************************************************) let power_state_reset ~__context ~vm = - (* CA-31428: Block if the VM is a control domain *) - if Db.VM.get_is_control_domain ~__context ~self:vm then begin - error "VM.power_state_reset vm=%s blocked because VM is a control domain" (Ref.string_of vm); - raise (Api_errors.Server_error(Api_errors.cannot_reset_control_domain, [ Ref.string_of vm ])); - end; + (* CA-31428: Block if the VM is a control domain *) + if Db.VM.get_is_control_domain ~__context ~self:vm then begin + error "VM.power_state_reset vm=%s blocked because VM is a control domain" (Ref.string_of vm); + raise (Api_errors.Server_error(Api_errors.cannot_reset_control_domain, [ Ref.string_of vm ])); + end; (* Perform 
sanity checks if VM is Running or Paused since we don't want to lose track of running domains. *) - if Xapi_vm_lifecycle.is_live ~__context ~self:vm then begin - debug "VM.power_state_reset vm=%s power state is either running or paused: performing sanity checks" (Ref.string_of vm); - let localhost = Helpers.get_localhost ~__context in - let resident = Db.VM.get_resident_on ~__context ~self:vm in - if resident = localhost then begin - let open Xenops_interface in - let open Xapi_xenops_queue in - let module Client = (val make_client (queue_of_vm ~__context ~self:vm): XENOPS) in - let running = - try - let dbg = Context.string_of_task __context in - let id = Db.VM.get_uuid ~__context ~self:vm in - let _, s = Client.VM.stat dbg id in - if s.Vm.power_state = Running then begin - debug "VM.power_state_reset vm=%s xenopsd reports running;" (Ref.string_of vm); - true - end else begin - (* Delete the metadata from xenopsd *) - Xapi_xenops.Xenopsd_metadata.delete ~__context id; - false - end - with _ -> false in - if running then raise (Api_errors.Server_error(Api_errors.domain_exists, [ Ref.string_of vm ])) - end else begin - (* If resident on another host, check if that host is alive: if so - then refuse to perform the reset, since we have delegated state management - to this host and we trust it -- this call is intended for coping with - host failures and backup restores, not for working around agent bugs. 
- If the host agent software is malfunctioning, then it should be restarted - (via Host.restart_agent or 'service xapi restart') *) - debug "VM.power_state_reset vm=%s resident_on<>localhost; checking liveness of remote host" (Ref.string_of vm); - if Xapi_host.is_host_alive ~__context ~host:resident then begin - error "VM.power_state_reset vm=%s resident_on=%s; host is alive so refusing to reset power-state" - (Ref.string_of vm) (Ref.string_of resident); - raise (Api_errors.Server_error(Api_errors.host_is_live, [ Ref.string_of resident ])) - end - end - end; - - Xapi_vm_lifecycle.force_state_reset ~__context ~value:`Halted ~self:vm + if Xapi_vm_lifecycle.is_live ~__context ~self:vm then begin + debug "VM.power_state_reset vm=%s power state is either running or paused: performing sanity checks" (Ref.string_of vm); + let localhost = Helpers.get_localhost ~__context in + let resident = Db.VM.get_resident_on ~__context ~self:vm in + if resident = localhost then begin + let open Xenops_interface in + let open Xapi_xenops_queue in + let module Client = (val make_client (queue_of_vm ~__context ~self:vm): XENOPS) in + let running = + try + let dbg = Context.string_of_task __context in + let id = Db.VM.get_uuid ~__context ~self:vm in + let _, s = Client.VM.stat dbg id in + if s.Vm.power_state = Running then begin + debug "VM.power_state_reset vm=%s xenopsd reports running;" (Ref.string_of vm); + true + end else begin + (* Delete the metadata from xenopsd *) + Xapi_xenops.Xenopsd_metadata.delete ~__context id; + false + end + with _ -> false in + if running then raise (Api_errors.Server_error(Api_errors.domain_exists, [ Ref.string_of vm ])) + end else begin + (* If resident on another host, check if that host is alive: if so + then refuse to perform the reset, since we have delegated state management + to this host and we trust it -- this call is intended for coping with + host failures and backup restores, not for working around agent bugs. 
+ If the host agent software is malfunctioning, then it should be restarted + (via Host.restart_agent or 'service xapi restart') *) + debug "VM.power_state_reset vm=%s resident_on<>localhost; checking liveness of remote host" (Ref.string_of vm); + if Xapi_host.is_host_alive ~__context ~host:resident then begin + error "VM.power_state_reset vm=%s resident_on=%s; host is alive so refusing to reset power-state" + (Ref.string_of vm) (Ref.string_of resident); + raise (Api_errors.Server_error(Api_errors.host_is_live, [ Ref.string_of resident ])) + end + end + end; + + Xapi_vm_lifecycle.force_state_reset ~__context ~value:`Halted ~self:vm let suspend ~__context ~vm = - Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; - debug "Setting ha_always_run on vm=%s as false during VM.suspend" (Ref.string_of vm); - Xapi_xenops.suspend ~__context ~self:vm; - let vm_uuid = Db.VM.get_uuid ~__context ~self:vm in - log_and_ignore_exn (fun () -> Rrdd.archive_rrd ~vm_uuid ~remote_address:(try Some (Pool_role.get_master_address ()) with _ -> None)) - -let resume ~__context ~vm ~start_paused ~force = - if Db.VM.get_ha_restart_priority ~__context ~self:vm = Constants.ha_restart - then begin - Db.VM.set_ha_always_run ~__context ~self:vm ~value:true; - debug "Setting ha_always_run on vm=%s as true during VM.resume" (Ref.string_of vm) - end; - - let host = Helpers.get_localhost ~__context in - if not force then Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); - - (* Update CPU feature set, which will be passed to xenopsd *) - Cpuid_helpers.update_cpu_flags ~__context ~vm ~host; - - Xapi_xenops.resume ~__context ~self:vm ~start_paused ~force + Db.VM.set_ha_always_run ~__context ~self:vm ~value:false; + debug "Setting ha_always_run on vm=%s as false during VM.suspend" (Ref.string_of vm); + Xapi_xenops.suspend ~__context ~self:vm; + let vm_uuid = Db.VM.get_uuid ~__context ~self:vm in + log_and_ignore_exn (fun () -> Rrdd.archive_rrd ~vm_uuid ~remote_address:(try Some 
(Pool_role.get_master_address ()) with _ -> None)) + +let resume ~__context ~vm ~start_paused ~force = + if Db.VM.get_ha_restart_priority ~__context ~self:vm = Constants.ha_restart + then begin + Db.VM.set_ha_always_run ~__context ~self:vm ~value:true; + debug "Setting ha_always_run on vm=%s as true during VM.resume" (Ref.string_of vm) + end; + + let host = Helpers.get_localhost ~__context in + if not force then Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); + + (* Update CPU feature set, which will be passed to xenopsd *) + Cpuid_helpers.update_cpu_flags ~__context ~vm ~host; + + Xapi_xenops.resume ~__context ~self:vm ~start_paused ~force let resume_on ~__context ~vm ~host ~start_paused ~force = - (* If we modify this to support resume_on other-than-localhost, - insert a precheck to insure that we're starting on an - appropriately versioned host during an upgrade, as per - PR-1007. See the first lines of resume above *) - assert_host_is_localhost ~__context ~host; - resume ~__context ~vm ~start_paused ~force + (* If we modify this to support resume_on other-than-localhost, + insert a precheck to insure that we're starting on an + appropriately versioned host during an upgrade, as per + PR-1007. 
See the first lines of resume above *) + assert_host_is_localhost ~__context ~host; + resume ~__context ~vm ~start_paused ~force let create ~__context ~name_label ~name_description - ~user_version ~is_a_template - ~affinity - ~memory_target - ~memory_static_max - ~memory_dynamic_max - ~memory_dynamic_min - ~memory_static_min - ~vCPUs_params - ~vCPUs_max ~vCPUs_at_startup - ~actions_after_shutdown ~actions_after_reboot - ~actions_after_crash - ~pV_bootloader - ~pV_kernel ~pV_ramdisk ~pV_args ~pV_bootloader_args ~pV_legacy_args - ~hVM_boot_policy ~hVM_boot_params ~hVM_shadow_multiplier - ~platform - ~pCI_bus ~other_config ~recommendations ~xenstore_data - ~ha_always_run ~ha_restart_priority ~tags - ~blocked_operations ~protection_policy - ~is_snapshot_from_vmpp - ~appliance - ~start_delay - ~shutdown_delay - ~order - ~suspend_SR - ~version - ~generation_id - ~hardware_platform_version - ~has_vendor_device - : API.ref_VM = - - if has_vendor_device then - Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; - (* Add random mac_seed if there isn't one specified already *) - let other_config = - let gen_mac_seed () = Uuid.to_string (Uuid.make_uuid ()) in - if not (List.mem_assoc Xapi_globs.mac_seed other_config) - then (Xapi_globs.mac_seed, gen_mac_seed ()) :: other_config - else other_config - in - (* NB apart from the above, parameter validation is delayed until VM.start *) - - let uuid = Uuid.make_uuid () in - let vm_ref = Ref.make () in - let resident_on = Ref.null in - let scheduled_to_be_resident_on = Ref.null in - - let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - let vCPUs_utilisation = [(0L, 0.)] in - Db.VM_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid - ~memory_actual:0L ~vCPUs_number:0L - ~vCPUs_utilisation - ~vCPUs_CPU:[] - ~vCPUs_params:[] - ~vCPUs_flags:[] - ~state:[] - ~start_time:Date.never - ~install_time:Date.never - ~last_updated:Date.never - ~other_config:[] - ~hvm:false - 
~nested_virt:false - ~nomigrate:false - ; - Db.VM.create ~__context ~ref:vm_ref ~uuid:(Uuid.to_string uuid) - ~power_state:(`Halted) ~allowed_operations:[] - ~current_operations:[] - ~blocked_operations:[] - ~name_label ~name_description - ~user_version ~is_a_template - ~transportable_snapshot_id:"" - ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null - ~parent:Ref.null - ~snapshot_info:[] ~snapshot_metadata:"" - ~resident_on ~scheduled_to_be_resident_on ~affinity - ~memory_overhead:0L - ~memory_static_max - ~memory_dynamic_max - ~memory_target - ~memory_dynamic_min - ~memory_static_min - ~vCPUs_params - ~vCPUs_at_startup ~vCPUs_max - ~actions_after_shutdown ~actions_after_reboot - ~actions_after_crash - ~hVM_boot_policy ~hVM_boot_params ~hVM_shadow_multiplier - ~suspend_VDI:Ref.null - ~platform - ~pV_kernel ~pV_ramdisk ~pV_args ~pV_bootloader ~pV_bootloader_args - ~pV_legacy_args - ~pCI_bus ~other_config ~domid:(-1L) ~domarch:"" - ~last_boot_CPU_flags:[] - ~is_control_domain:false - ~metrics ~guest_metrics:Ref.null - ~last_booted_record:"" ~xenstore_data ~recommendations - ~blobs:[] - ~ha_restart_priority - ~ha_always_run ~tags - ~bios_strings:[] - ~protection_policy:Ref.null - ~is_snapshot_from_vmpp:false - ~appliance - ~start_delay - ~shutdown_delay - ~order - ~suspend_SR - ~version - ~generation_id - ~hardware_platform_version - ~has_vendor_device - ~requires_reboot:false - ; - Db.VM.set_power_state ~__context ~self:vm_ref ~value:`Halted; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm_ref; - update_memory_overhead ~__context ~vm:vm_ref; - update_vm_virtual_hardware_platform_version ~__context ~vm:vm_ref; - vm_ref + ~user_version ~is_a_template + ~affinity + ~memory_target + ~memory_static_max + ~memory_dynamic_max + ~memory_dynamic_min + ~memory_static_min + ~vCPUs_params + ~vCPUs_max ~vCPUs_at_startup + ~actions_after_shutdown ~actions_after_reboot + ~actions_after_crash + ~pV_bootloader + ~pV_kernel ~pV_ramdisk ~pV_args 
~pV_bootloader_args ~pV_legacy_args + ~hVM_boot_policy ~hVM_boot_params ~hVM_shadow_multiplier + ~platform + ~pCI_bus ~other_config ~recommendations ~xenstore_data + ~ha_always_run ~ha_restart_priority ~tags + ~blocked_operations ~protection_policy + ~is_snapshot_from_vmpp + ~appliance + ~start_delay + ~shutdown_delay + ~order + ~suspend_SR + ~version + ~generation_id + ~hardware_platform_version + ~has_vendor_device + : API.ref_VM = + + if has_vendor_device then + Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; + (* Add random mac_seed if there isn't one specified already *) + let other_config = + let gen_mac_seed () = Uuid.to_string (Uuid.make_uuid ()) in + if not (List.mem_assoc Xapi_globs.mac_seed other_config) + then (Xapi_globs.mac_seed, gen_mac_seed ()) :: other_config + else other_config + in + (* NB apart from the above, parameter validation is delayed until VM.start *) + + let uuid = Uuid.make_uuid () in + let vm_ref = Ref.make () in + let resident_on = Ref.null in + let scheduled_to_be_resident_on = Ref.null in + + let metrics = Ref.make () and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + let vCPUs_utilisation = [(0L, 0.)] in + Db.VM_metrics.create ~__context ~ref:metrics ~uuid:metrics_uuid + ~memory_actual:0L ~vCPUs_number:0L + ~vCPUs_utilisation + ~vCPUs_CPU:[] + ~vCPUs_params:[] + ~vCPUs_flags:[] + ~state:[] + ~start_time:Date.never + ~install_time:Date.never + ~last_updated:Date.never + ~other_config:[] + ~hvm:false + ~nested_virt:false + ~nomigrate:false + ; + Db.VM.create ~__context ~ref:vm_ref ~uuid:(Uuid.to_string uuid) + ~power_state:(`Halted) ~allowed_operations:[] + ~current_operations:[] + ~blocked_operations:[] + ~name_label ~name_description + ~user_version ~is_a_template + ~transportable_snapshot_id:"" + ~is_a_snapshot:false ~snapshot_time:Date.never ~snapshot_of:Ref.null + ~parent:Ref.null + ~snapshot_info:[] ~snapshot_metadata:"" + ~resident_on ~scheduled_to_be_resident_on ~affinity + 
~memory_overhead:0L + ~memory_static_max + ~memory_dynamic_max + ~memory_target + ~memory_dynamic_min + ~memory_static_min + ~vCPUs_params + ~vCPUs_at_startup ~vCPUs_max + ~actions_after_shutdown ~actions_after_reboot + ~actions_after_crash + ~hVM_boot_policy ~hVM_boot_params ~hVM_shadow_multiplier + ~suspend_VDI:Ref.null + ~platform + ~pV_kernel ~pV_ramdisk ~pV_args ~pV_bootloader ~pV_bootloader_args + ~pV_legacy_args + ~pCI_bus ~other_config ~domid:(-1L) ~domarch:"" + ~last_boot_CPU_flags:[] + ~is_control_domain:false + ~metrics ~guest_metrics:Ref.null + ~last_booted_record:"" ~xenstore_data ~recommendations + ~blobs:[] + ~ha_restart_priority + ~ha_always_run ~tags + ~bios_strings:[] + ~protection_policy:Ref.null + ~is_snapshot_from_vmpp:false + ~appliance + ~start_delay + ~shutdown_delay + ~order + ~suspend_SR + ~version + ~generation_id + ~hardware_platform_version + ~has_vendor_device + ~requires_reboot:false + ; + Db.VM.set_power_state ~__context ~self:vm_ref ~value:`Halted; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self:vm_ref; + update_memory_overhead ~__context ~vm:vm_ref; + update_vm_virtual_hardware_platform_version ~__context ~vm:vm_ref; + vm_ref let destroy ~__context ~self = - let parent = Db.VM.get_parent ~__context ~self in + let parent = Db.VM.get_parent ~__context ~self in - (* rebase the children *) - List.iter - (fun child -> try Db.VM.set_parent ~__context ~self:child ~value:parent with _ -> ()) - (Db.VM.get_children ~__context ~self); + (* rebase the children *) + List.iter + (fun child -> try Db.VM.set_parent ~__context ~self:child ~value:parent with _ -> ()) + (Db.VM.get_children ~__context ~self); - log_and_ignore_exn (fun () -> Rrdd.remove_rrd ~uuid:(Db.VM.get_uuid ~__context ~self)); - destroy ~__context ~self + log_and_ignore_exn (fun () -> Rrdd.remove_rrd ~uuid:(Db.VM.get_uuid ~__context ~self)); + destroy ~__context ~self (* Note: we don't need to call lock_vm around clone or copy. 
The lock_vm just takes the local lock on a specific pool host and is used to manage contention between API threads and the event monitoring thread on live VMs. Since clone does not deal with live VMs we ommit lock_vm. *) let clone ~__context ~vm ~new_name = - TaskHelper.set_cancellable ~__context; - (* !!! Note - please do not be tempted to put this on the "long_running_queue", even though it may be long - running.. XenRT relies on fast clones being parallelizable wrt other long-running ops such as - suspend/resume/migrate etc. *) - (* Now that clones are "fast", there's no need to put this operation in the "normal_vm_queue". Indeed, - putting it in there would mean that clones are serialized on a host-basis whereas they may be able - to proceed in parallel. *) - let new_vm = Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_clone ~__context ~vm ~new_name in - if Db.VM.get_is_a_snapshot ~__context ~self:vm && Db.VM.get_power_state ~__context ~self:new_vm <> `Halted then - hard_shutdown ~__context ~vm:new_vm; - new_vm + TaskHelper.set_cancellable ~__context; + (* !!! Note - please do not be tempted to put this on the "long_running_queue", even though it may be long + running.. XenRT relies on fast clones being parallelizable wrt other long-running ops such as + suspend/resume/migrate etc. *) + (* Now that clones are "fast", there's no need to put this operation in the "normal_vm_queue". Indeed, + putting it in there would mean that clones are serialized on a host-basis whereas they may be able + to proceed in parallel. 
*) + let new_vm = Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_clone ~__context ~vm ~new_name in + if Db.VM.get_is_a_snapshot ~__context ~self:vm && Db.VM.get_power_state ~__context ~self:new_vm <> `Halted then + hard_shutdown ~__context ~vm:new_vm; + new_vm (* We do call wait_in_line for snapshot and snapshot_with_quiesce because the locks are taken at *) (* the VBD level (with pause/unpause mechanism *) let snapshot ~__context ~vm ~new_name = - TaskHelper.set_cancellable ~__context; - Xapi_vm_snapshot.snapshot ~__context ~vm ~new_name + TaskHelper.set_cancellable ~__context; + Xapi_vm_snapshot.snapshot ~__context ~vm ~new_name (* Snapshot_with_quiesce triggers the VSS plugin which will then calls the VM.snapshot API call. *) (* Thus, to avoid dead-locks, do not put snapshot and snapshot_with_quiesce on the same waiting line *) let snapshot_with_quiesce ~__context ~vm ~new_name = - Pool_features.assert_enabled ~__context ~f:Features.VSS; - TaskHelper.set_cancellable ~__context; - Xapi_vm_snapshot.snapshot_with_quiesce ~__context ~vm ~new_name + Pool_features.assert_enabled ~__context ~f:Features.VSS; + TaskHelper.set_cancellable ~__context; + Xapi_vm_snapshot.snapshot_with_quiesce ~__context ~vm ~new_name (* As we will destroy the domain ourself, we grab the vm_lock here in order to tell the event thread to *) (* do not look at this domain. The message forwarding layer already checked that the VM reference we *) (* revert too is still valid. 
*) let revert ~__context ~snapshot = - let vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in - let vm = - if Db.is_valid_ref __context vm - then vm - else Xapi_vm_snapshot.create_vm_from_snapshot ~__context ~snapshot in - ignore (Xapi_vm_helpers.vm_fresh_genid ~__context ~self:vm); - Xapi_vm_snapshot.revert ~__context ~snapshot ~vm + let vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in + let vm = + if Db.is_valid_ref __context vm + then vm + else Xapi_vm_snapshot.create_vm_from_snapshot ~__context ~snapshot in + ignore (Xapi_vm_helpers.vm_fresh_genid ~__context ~self:vm); + Xapi_vm_snapshot.revert ~__context ~snapshot ~vm (* As the checkpoint operation modify the domain state, we take the vm_lock to do not let the event *) (* thread mess around with that. *) let checkpoint ~__context ~vm ~new_name = - Pool_features.assert_enabled ~__context ~f:Features.Checkpoint; - Local_work_queue.wait_in_line Local_work_queue.long_running_queue - (Printf.sprintf "VM.checkpoint %s" (Context.string_of_task __context)) - (fun () -> - TaskHelper.set_cancellable ~__context; - Xapi_vm_snapshot.checkpoint ~__context ~vm ~new_name - ) + Pool_features.assert_enabled ~__context ~f:Features.Checkpoint; + Local_work_queue.wait_in_line Local_work_queue.long_running_queue + (Printf.sprintf "VM.checkpoint %s" (Context.string_of_task __context)) + (fun () -> + TaskHelper.set_cancellable ~__context; + Xapi_vm_snapshot.checkpoint ~__context ~vm ~new_name + ) let copy ~__context ~vm ~new_name ~sr = - (* See if the supplied SR is suitable: it must exist and be a non-ISO SR *) - (* First the existence check. It's not an error to not exist at all. *) - let sr = try ignore(Db.SR.get_uuid ~__context ~self:sr); Some sr with _ -> None in - maybe (fun sr -> debug "Copying disks to SR: %s" (Db.SR.get_uuid ~__context ~self:sr)) sr; - (* Second the non-iso check. 
It is an error to be an iso SR *) - maybe (fun sr -> - if Db.SR.get_content_type ~__context ~self:sr = "iso" - then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, - [ "Cannot copy a VM's disks to an ISO SR" ]))) sr; - let new_vm = Xapi_vm_clone.clone (Xapi_vm_clone.Disk_op_copy sr) ~__context ~vm ~new_name in - if Db.VM.get_is_a_snapshot ~__context ~self:vm && Db.VM.get_power_state ~__context ~self:new_vm <> `Halted then - Helpers.call_api_functions ~__context - (fun rpc session_id -> Client.VM.hard_shutdown ~rpc ~session_id ~vm:new_vm); - new_vm + (* See if the supplied SR is suitable: it must exist and be a non-ISO SR *) + (* First the existence check. It's not an error to not exist at all. *) + let sr = try ignore(Db.SR.get_uuid ~__context ~self:sr); Some sr with _ -> None in + maybe (fun sr -> debug "Copying disks to SR: %s" (Db.SR.get_uuid ~__context ~self:sr)) sr; + (* Second the non-iso check. It is an error to be an iso SR *) + maybe (fun sr -> + if Db.SR.get_content_type ~__context ~self:sr = "iso" + then raise (Api_errors.Server_error(Api_errors.operation_not_allowed, + [ "Cannot copy a VM's disks to an ISO SR" ]))) sr; + let new_vm = Xapi_vm_clone.clone (Xapi_vm_clone.Disk_op_copy sr) ~__context ~vm ~new_name in + if Db.VM.get_is_a_snapshot ~__context ~self:vm && Db.VM.get_power_state ~__context ~self:new_vm <> `Halted then + Helpers.call_api_functions ~__context + (fun rpc session_id -> Client.VM.hard_shutdown ~rpc ~session_id ~vm:new_vm); + new_vm let provision ~__context ~vm = - (* This bit could be done in the guest: *) - debug "start: checking to see whether VM needs 'installing'"; - Helpers.call_api_functions ~__context (fun rpc session_id -> - set_is_a_template ~__context ~self:vm ~value:false; - if Xapi_templates.needs_to_be_installed rpc session_id vm - then begin - TaskHelper.set_progress ~__context 0.1; - debug "install: phase 1/3: creating VBDs and VDIs"; - let script, vbds = Xapi_templates.pre_install rpc session_id vm in - 
(* If an error occurs after this then delete the created VDIs, VBDs... *) - begin - try - debug "install: phase 2/3: running optional script (in domain 0)"; - let dom0 = Helpers.get_domain_zero __context in - Xapi_templates_install.post_install_script rpc session_id __context dom0 vm (script, vbds); - debug "install: phase 3/3: removing install information from VM"; - Xapi_templates.post_install rpc session_id vm; - debug "finished install"; - with e -> - (* On error delete the VBDs and their associated VDIs *) - let vdis = List.map (fun self -> Client.VBD.get_VDI rpc session_id self) vbds in - List.iter (Helpers.log_exn_continue "deleting auto-provisioned VBD" - (fun self -> Client.VBD.destroy rpc session_id self)) vbds; - List.iter (Helpers.log_exn_continue "deleting auto-provisioned VDI" - (fun self -> Client.VDI.destroy rpc session_id self)) vdis; - raise e - end - end) + (* This bit could be done in the guest: *) + debug "start: checking to see whether VM needs 'installing'"; + Helpers.call_api_functions ~__context (fun rpc session_id -> + set_is_a_template ~__context ~self:vm ~value:false; + if Xapi_templates.needs_to_be_installed rpc session_id vm + then begin + TaskHelper.set_progress ~__context 0.1; + debug "install: phase 1/3: creating VBDs and VDIs"; + let script, vbds = Xapi_templates.pre_install rpc session_id vm in + (* If an error occurs after this then delete the created VDIs, VBDs... 
*) + begin + try + debug "install: phase 2/3: running optional script (in domain 0)"; + let dom0 = Helpers.get_domain_zero __context in + Xapi_templates_install.post_install_script rpc session_id __context dom0 vm (script, vbds); + debug "install: phase 3/3: removing install information from VM"; + Xapi_templates.post_install rpc session_id vm; + debug "finished install"; + with e -> + (* On error delete the VBDs and their associated VDIs *) + let vdis = List.map (fun self -> Client.VBD.get_VDI rpc session_id self) vbds in + List.iter (Helpers.log_exn_continue "deleting auto-provisioned VBD" + (fun self -> Client.VBD.destroy rpc session_id self)) vbds; + List.iter (Helpers.log_exn_continue "deleting auto-provisioned VDI" + (fun self -> Client.VDI.destroy rpc session_id self)) vdis; + raise e + end + end) (** Sets the maximum number of VCPUs for a {b Halted} guest - or a running guest that is a control domain other than dom0 *) + or a running guest that is a control domain other than dom0 *) let set_VCPUs_max ~__context ~self ~value = - if Helpers.is_domain_zero ~__context self then - failwith "set_VCPUs_max is not allowed on dom0"; - let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in - let power_state = Db.VM.get_power_state ~__context ~self in - (* allowed power states for this operation have already been checked, - but let's be defensive *) - if (not is_control_domain) && (power_state <> `Halted) - then failwith "assertion_failed: set_VCPUs_max should only be \ - called when the VM is Halted"; - let vcpus_at_startup = Db.VM.get_VCPUs_at_startup ~__context ~self in - if value < 1L || value < vcpus_at_startup then invalid_value - "VCPU values must satisfy: 0 < VCPUs_at_startup ≤ VCPUs_max" - (Int64.to_string value); - Db.VM.set_VCPUs_max ~__context ~self ~value; - update_memory_overhead ~__context ~vm:self; - if is_control_domain && power_state = `Running then - Db.VM.set_requires_reboot ~__context ~self ~value:true + if Helpers.is_domain_zero 
~__context self then + failwith "set_VCPUs_max is not allowed on dom0"; + let is_control_domain = Db.VM.get_is_control_domain ~__context ~self in + let power_state = Db.VM.get_power_state ~__context ~self in + (* allowed power states for this operation have already been checked, + but let's be defensive *) + if (not is_control_domain) && (power_state <> `Halted) + then failwith "assertion_failed: set_VCPUs_max should only be \ + called when the VM is Halted"; + let vcpus_at_startup = Db.VM.get_VCPUs_at_startup ~__context ~self in + if value < 1L || value < vcpus_at_startup then invalid_value + "VCPU values must satisfy: 0 < VCPUs_at_startup ≤ VCPUs_max" + (Int64.to_string value); + Db.VM.set_VCPUs_max ~__context ~self ~value; + update_memory_overhead ~__context ~vm:self; + if is_control_domain && power_state = `Running then + Db.VM.set_requires_reboot ~__context ~self ~value:true (** Sets the number of startup VCPUs for a {b Halted} guest - including control domains other than dom0. *) + including control domains other than dom0. 
*) let set_VCPUs_at_startup ~__context ~self ~value = - if Helpers.is_domain_zero ~__context self then - raise (Api_errors.Server_error (Api_errors.operation_not_allowed, - ["set_VCPUs_at_startup is not allowed on dom0"])); - let vcpus_max = Db.VM.get_VCPUs_max ~__context ~self in - if value < 1L || value > vcpus_max then invalid_value - "VCPU values must satisfy: 0 < VCPUs_at_startup ≤ VCPUs_max" - (Int64.to_string value); - Db.VM.set_VCPUs_at_startup ~__context ~self ~value; - update_memory_overhead ~__context ~vm:self + if Helpers.is_domain_zero ~__context self then + raise (Api_errors.Server_error (Api_errors.operation_not_allowed, + ["set_VCPUs_at_startup is not allowed on dom0"])); + let vcpus_max = Db.VM.get_VCPUs_max ~__context ~self in + if value < 1L || value > vcpus_max then invalid_value + "VCPU values must satisfy: 0 < VCPUs_at_startup ≤ VCPUs_max" + (Int64.to_string value); + Db.VM.set_VCPUs_at_startup ~__context ~self ~value; + update_memory_overhead ~__context ~vm:self (** Sets the number of VCPUs for a {b Running} PV guest. -@raise Api_errors.operation_not_allowed if [self] is an HVM guest. *) + @raise Api_errors.operation_not_allowed if [self] is an HVM guest. *) let set_VCPUs_number_live ~__context ~self ~nvcpu = - Xapi_xenops.set_vcpus ~__context ~self nvcpu; - (* Strictly speaking, PV guest memory overhead depends on the number of *) - (* vCPUs. Although our current overhead calculation uses a conservative *) - (* overestimate that ignores the real number of VCPUs, we still update *) - (* the overhead in case our level of conservativeness changes in future. *) - update_memory_overhead ~__context ~vm:self + Xapi_xenops.set_vcpus ~__context ~self nvcpu; + (* Strictly speaking, PV guest memory overhead depends on the number of *) + (* vCPUs. 
Although our current overhead calculation uses a conservative *) + (* overestimate that ignores the real number of VCPUs, we still update *) + (* the overhead in case our level of conservativeness changes in future. *) + update_memory_overhead ~__context ~vm:self let add_to_VCPUs_params_live ~__context ~self ~key ~value = - raise (Api_errors.Server_error (Api_errors.not_implemented, [ "add_to_VCPUs_params_live" ])) + raise (Api_errors.Server_error (Api_errors.not_implemented, [ "add_to_VCPUs_params_live" ])) (* Use set_memory_dynamic_range instead *) let set_memory_target_live ~__context ~self ~target = () @@ -713,7 +713,7 @@ let wait_memory_target_tolerance_bytes = Int64.(mul 1L (mul 1024L 1024L)) (** Returns true if (and only if) the *) (** specified argument is a power of 2. *) let is_power_of_2 n = - (n > 1) && (n land (0 - n) = n) + (n > 1) && (n land (0 - n) = n) (** Waits for a running VM to reach its current memory target. *) (** This function waits until the following condition is true: *) @@ -724,132 +724,132 @@ let is_power_of_2 n = (** if the time-out counter exceeds its limit, this function *) (** raises a server error and terminates. 
*) let wait_memory_target_live ~__context ~self = - let timeout_seconds = int_of_float !Xapi_globs.wait_memory_target_timeout in - let tolerance_bytes = wait_memory_target_tolerance_bytes in - let raise_error error = - raise (Api_errors.Server_error (error, [Ref.string_of (Context.get_task_id __context)])) in - let open Xapi_xenops_queue in - let module Client = (val make_client (queue_of_vm ~__context ~self): XENOPS) in - let id = Xapi_xenops.id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - let rec wait accumulated_wait_time_seconds = - if accumulated_wait_time_seconds > timeout_seconds - then raise_error Api_errors.vm_memory_target_wait_timeout; - if TaskHelper.is_cancelling ~__context - then raise_error Api_errors.task_cancelled; - - (* Fetch up-to-date value of memory_actual and memory_target *) - let _, s = Client.VM.stat dbg id in - let memory_target_bytes = s.Xenops_interface.Vm.memory_target in - let memory_actual_bytes = s.Xenops_interface.Vm.memory_actual in - - let difference_bytes = Int64.abs (Int64.sub memory_actual_bytes memory_target_bytes) in - debug "memory_actual = %Ld; memory_target = %Ld; difference = %Ld %s tolerance (%Ld)" memory_actual_bytes memory_target_bytes difference_bytes (if difference_bytes <= tolerance_bytes then "<=" else ">") tolerance_bytes; - if difference_bytes <= tolerance_bytes then - (* The memory target has been reached: use the most *) - (* recent value of memory_actual to update the same *) - (* field within the VM's metrics record, presenting *) - (* a consistent view to the world. 
*) - let vm_metrics_ref = Db.VM.get_metrics ~__context ~self in - Db.VM_metrics.set_memory_actual ~__context ~self:vm_metrics_ref ~value:memory_actual_bytes - else begin - (* At exponentially increasing intervals, write *) - (* a debug message saying how long we've waited: *) - if is_power_of_2 accumulated_wait_time_seconds then debug - "Waited %i second(s) for VM %s to reach \ - its target = %Ld bytes; actual = %Ld bytes." - accumulated_wait_time_seconds id - memory_target_bytes memory_actual_bytes; - (* The memory target has not yet been reached: *) - (* wait for a while before repeating the test. *) - Thread.delay 1.0; - wait (accumulated_wait_time_seconds + 1) - end - in - wait 0 + let timeout_seconds = int_of_float !Xapi_globs.wait_memory_target_timeout in + let tolerance_bytes = wait_memory_target_tolerance_bytes in + let raise_error error = + raise (Api_errors.Server_error (error, [Ref.string_of (Context.get_task_id __context)])) in + let open Xapi_xenops_queue in + let module Client = (val make_client (queue_of_vm ~__context ~self): XENOPS) in + let id = Xapi_xenops.id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + let rec wait accumulated_wait_time_seconds = + if accumulated_wait_time_seconds > timeout_seconds + then raise_error Api_errors.vm_memory_target_wait_timeout; + if TaskHelper.is_cancelling ~__context + then raise_error Api_errors.task_cancelled; + + (* Fetch up-to-date value of memory_actual and memory_target *) + let _, s = Client.VM.stat dbg id in + let memory_target_bytes = s.Xenops_interface.Vm.memory_target in + let memory_actual_bytes = s.Xenops_interface.Vm.memory_actual in + + let difference_bytes = Int64.abs (Int64.sub memory_actual_bytes memory_target_bytes) in + debug "memory_actual = %Ld; memory_target = %Ld; difference = %Ld %s tolerance (%Ld)" memory_actual_bytes memory_target_bytes difference_bytes (if difference_bytes <= tolerance_bytes then "<=" else ">") tolerance_bytes; + if difference_bytes <= 
tolerance_bytes then + (* The memory target has been reached: use the most *) + (* recent value of memory_actual to update the same *) + (* field within the VM's metrics record, presenting *) + (* a consistent view to the world. *) + let vm_metrics_ref = Db.VM.get_metrics ~__context ~self in + Db.VM_metrics.set_memory_actual ~__context ~self:vm_metrics_ref ~value:memory_actual_bytes + else begin + (* At exponentially increasing intervals, write *) + (* a debug message saying how long we've waited: *) + if is_power_of_2 accumulated_wait_time_seconds then debug + "Waited %i second(s) for VM %s to reach \ + its target = %Ld bytes; actual = %Ld bytes." + accumulated_wait_time_seconds id + memory_target_bytes memory_actual_bytes; + (* The memory target has not yet been reached: *) + (* wait for a while before repeating the test. *) + Thread.delay 1.0; + wait (accumulated_wait_time_seconds + 1) + end + in + wait 0 (* Dummy implementation for a deprecated API method. *) let get_cooperative ~__context ~self = true let set_HVM_shadow_multiplier ~__context ~self ~value = - set_HVM_shadow_multiplier ~__context ~self ~value + set_HVM_shadow_multiplier ~__context ~self ~value (** Sets the HVM shadow multiplier for a {b Running} VM. Runs on the slave. 
*) let set_shadow_multiplier_live ~__context ~self ~multiplier = - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running; - - validate_HVM_shadow_multiplier multiplier; - - Xapi_xenops.set_shadow_multiplier ~__context ~self multiplier; - update_memory_overhead ~__context ~vm:self - -let set_memory_dynamic_range ~__context ~self ~min ~max = - (* NB called in either `Halted or `Running states *) - let power_state = Db.VM.get_power_state ~__context ~self in - (* Check the range constraints *) - let constraints = - if power_state = `Running - then Vm_memory_constraints.get_live ~__context ~vm_ref:self - else Vm_memory_constraints.get ~__context ~vm_ref:self in - let constraints = { constraints with Vm_memory_constraints. - dynamic_min = min; - target = min; - dynamic_max = max } in - Vm_memory_constraints.assert_valid_for_current_context - ~__context ~vm:self ~constraints; - - (* memory_target is now unused but setting it equal *) - (* to dynamic_min avoids tripping validation code. *) - Db.VM.set_memory_target ~__context ~self ~value:min; - Db.VM.set_memory_dynamic_min ~__context ~self ~value:min; - Db.VM.set_memory_dynamic_max ~__context ~self ~value:max; - - if power_state = `Running - then Xapi_xenops.set_memory_dynamic_range ~__context ~self min max + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running; + + validate_HVM_shadow_multiplier multiplier; + + Xapi_xenops.set_shadow_multiplier ~__context ~self multiplier; + update_memory_overhead ~__context ~vm:self + +let set_memory_dynamic_range ~__context ~self ~min ~max = + (* NB called in either `Halted or `Running states *) + let power_state = Db.VM.get_power_state ~__context ~self in + (* Check the range constraints *) + let constraints = + if power_state = `Running + then Vm_memory_constraints.get_live ~__context ~vm_ref:self + else Vm_memory_constraints.get ~__context ~vm_ref:self in + let constraints = { constraints with Vm_memory_constraints. 
+ dynamic_min = min; + target = min; + dynamic_max = max } in + Vm_memory_constraints.assert_valid_for_current_context + ~__context ~vm:self ~constraints; + + (* memory_target is now unused but setting it equal *) + (* to dynamic_min avoids tripping validation code. *) + Db.VM.set_memory_target ~__context ~self ~value:min; + Db.VM.set_memory_dynamic_min ~__context ~self ~value:min; + Db.VM.set_memory_dynamic_max ~__context ~self ~value:max; + + if power_state = `Running + then Xapi_xenops.set_memory_dynamic_range ~__context ~self min max let request_rdp ~__context ~vm ~enabled = - let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in - let vm_gmr = try Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:vm_gm) with _ -> None in - let is_feature_ts2_on = - match vm_gmr with - | None -> false - | Some vm_gmr -> - let other = vm_gmr.Db_actions.vM_guest_metrics_other in - try - match List.assoc "feature-ts2" other with - | "" - | "0" -> false - | _ -> true - with Not_found -> false - in - if is_feature_ts2_on - then - Xapi_xenops.request_rdp ~__context ~self:vm enabled - else raise Not_found + let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in + let vm_gmr = try Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:vm_gm) with _ -> None in + let is_feature_ts2_on = + match vm_gmr with + | None -> false + | Some vm_gmr -> + let other = vm_gmr.Db_actions.vM_guest_metrics_other in + try + match List.assoc "feature-ts2" other with + | "" + | "0" -> false + | _ -> true + with Not_found -> false + in + if is_feature_ts2_on + then + Xapi_xenops.request_rdp ~__context ~self:vm enabled + else raise Not_found let request_rdp_on ~__context ~vm = - request_rdp ~__context ~vm ~enabled:true + request_rdp ~__context ~vm ~enabled:true let request_rdp_off ~__context ~vm = - request_rdp ~__context ~vm ~enabled:false + request_rdp ~__context ~vm ~enabled:false let run_script ~__context ~vm ~args = - (* Args can be any key value pair, which include 
"username", "password", "script", "interpreter" (optional), and "arguments" (optional). *) - if not (Helpers.guest_agent_run_script_enabled ~__context) - then raise (Api_errors.Server_error(Api_errors.feature_restricted, [])); - let required = [ "username"; "password"; "script" ] in - (* let optional = [ "interpreter"; "arguments" ] in *) - List.iter (fun a -> if not (List.mem_assoc a args) then raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, ["missing argument"; ""; Printf.sprintf "Argument %s is required." a]))) required; - (* Ensure the caller has the VM memory-access level permission i.e. vm-power-admin or higher. - As all the plugin calls share the same role/perms setting, we must do ad-hoc checking here by ourselves. *) - let session_id = Xapi_session.get_top ~__context ~self:(Context.get_session_id __context) in - if not (Rbac.is_access_allowed ~__context ~session_id ~permission:Rbac_static.permission_VM_checkpoint.Db_actions.role_name_label) - then raise (Api_errors.Server_error(Api_errors.rbac_permission_denied, ["vm.call_plugin"; "No permission to run script, must have VM power admin role or higher."])); - (* For the moment, we only make use of "script". *) - let script = List.assoc "script" args in - if String.length script > 1024 then raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, ["length restriction"; ""; "The script length must not exceed 1024 bytes"])); - try Xapi_xenops.run_script ~__context ~self:vm script - with Xenops_interface.Failed_to_run_script reason -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ reason ])) + (* Args can be any key value pair, which include "username", "password", "script", "interpreter" (optional), and "arguments" (optional). 
*) + if not (Helpers.guest_agent_run_script_enabled ~__context) + then raise (Api_errors.Server_error(Api_errors.feature_restricted, [])); + let required = [ "username"; "password"; "script" ] in + (* let optional = [ "interpreter"; "arguments" ] in *) + List.iter (fun a -> if not (List.mem_assoc a args) then raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, ["missing argument"; ""; Printf.sprintf "Argument %s is required." a]))) required; + (* Ensure the caller has the VM memory-access level permission i.e. vm-power-admin or higher. + As all the plugin calls share the same role/perms setting, we must do ad-hoc checking here by ourselves. *) + let session_id = Xapi_session.get_top ~__context ~self:(Context.get_session_id __context) in + if not (Rbac.is_access_allowed ~__context ~session_id ~permission:Rbac_static.permission_VM_checkpoint.Db_actions.role_name_label) + then raise (Api_errors.Server_error(Api_errors.rbac_permission_denied, ["vm.call_plugin"; "No permission to run script, must have VM power admin role or higher."])); + (* For the moment, we only make use of "script". *) + let script = List.assoc "script" args in + if String.length script > 1024 then raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, ["length restriction"; ""; "The script length must not exceed 1024 bytes"])); + try Xapi_xenops.run_script ~__context ~self:vm script + with Xenops_interface.Failed_to_run_script reason -> raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ reason ])) (* A temporal database holding the latest calling log for each VM. It's fine for it to be host local as a VM won't be resident on two hosts at the same time, nor does it migrate that frequently *) @@ -858,84 +858,84 @@ let call_plugin_latest = Hashtbl.create 37 let call_plugin_latest_m = Mutex.create () let record_call_plugin_latest vm = - let interval = Int64.of_float (!Xapi_globs.vm_call_plugin_interval *. 
1e9) in - Mutex.execute call_plugin_latest_m (fun () -> - let now = Oclock.gettime Oclock.monotonic in - (* First do a round of GC *) - let to_gc = ref [] in - Hashtbl.iter - (fun v t -> - if Int64.sub now t > interval - then to_gc := v :: !to_gc) - call_plugin_latest; - List.iter (Hashtbl.remove call_plugin_latest) !to_gc; - (* Then calculate the schedule *) - let to_wait = - if Hashtbl.mem call_plugin_latest vm then - let t = Hashtbl.find call_plugin_latest vm in - Int64.sub (Int64.add t interval) now - else 0L in - if to_wait > 0L then - raise (Api_errors.Server_error (Api_errors.vm_call_plugin_rate_limit, [ Ref.string_of vm; string_of_float !Xapi_globs.vm_call_plugin_interval; string_of_float (Int64.to_float to_wait /. 1e9) ])) - else - Hashtbl.replace call_plugin_latest vm now - ) + let interval = Int64.of_float (!Xapi_globs.vm_call_plugin_interval *. 1e9) in + Mutex.execute call_plugin_latest_m (fun () -> + let now = Oclock.gettime Oclock.monotonic in + (* First do a round of GC *) + let to_gc = ref [] in + Hashtbl.iter + (fun v t -> + if Int64.sub now t > interval + then to_gc := v :: !to_gc) + call_plugin_latest; + List.iter (Hashtbl.remove call_plugin_latest) !to_gc; + (* Then calculate the schedule *) + let to_wait = + if Hashtbl.mem call_plugin_latest vm then + let t = Hashtbl.find call_plugin_latest vm in + Int64.sub (Int64.add t interval) now + else 0L in + if to_wait > 0L then + raise (Api_errors.Server_error (Api_errors.vm_call_plugin_rate_limit, [ Ref.string_of vm; string_of_float !Xapi_globs.vm_call_plugin_interval; string_of_float (Int64.to_float to_wait /. 
1e9) ])) + else + Hashtbl.replace call_plugin_latest vm now + ) (* this is the generic plugin call available to xapi users *) let call_plugin ~__context ~vm ~plugin ~fn ~args = - if plugin <> "guest-agent-operation" then - raise (Api_errors.Server_error(Api_errors.xenapi_missing_plugin, [ plugin ])); - (* throttle plugin calls, hold a call if there are frequent attempts *) - record_call_plugin_latest vm; - try - match fn with - | "request-rdp-on" -> - request_rdp_on ~__context ~vm; - "" - | "request-rdp-off" -> - request_rdp_off ~__context ~vm; - "" - | "run-script" -> - run_script ~__context ~vm ~args - | _ -> - let msg = Printf.sprintf "The requested fn \"%s\" could not be found in plugin \"%s\"." fn plugin in - raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to find fn"; msg; msg ])) - with Not_found -> - let msg = Printf.sprintf "The requested fn \"%s\" of plugin \"%s\" could not be executed for lack of guest agent control feature." fn plugin in - raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to execute fn"; msg; msg ])) + if plugin <> "guest-agent-operation" then + raise (Api_errors.Server_error(Api_errors.xenapi_missing_plugin, [ plugin ])); + (* throttle plugin calls, hold a call if there are frequent attempts *) + record_call_plugin_latest vm; + try + match fn with + | "request-rdp-on" -> + request_rdp_on ~__context ~vm; + "" + | "request-rdp-off" -> + request_rdp_off ~__context ~vm; + "" + | "run-script" -> + run_script ~__context ~vm ~args + | _ -> + let msg = Printf.sprintf "The requested fn \"%s\" could not be found in plugin \"%s\"." fn plugin in + raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to find fn"; msg; msg ])) + with Not_found -> + let msg = Printf.sprintf "The requested fn \"%s\" of plugin \"%s\" could not be executed for lack of guest agent control feature." 
fn plugin in + raise (Api_errors.Server_error(Api_errors.xenapi_plugin_failure, [ "failed to execute fn"; msg; msg ])) let send_sysrq ~__context ~vm ~key = - raise (Api_errors.Server_error (Api_errors.not_implemented, [ "send_sysrq" ])) + raise (Api_errors.Server_error (Api_errors.not_implemented, [ "send_sysrq" ])) let send_trigger ~__context ~vm ~trigger = - raise (Api_errors.Server_error (Api_errors.not_implemented, [ "send_trigger" ])) + raise (Api_errors.Server_error (Api_errors.not_implemented, [ "send_trigger" ])) let get_boot_record ~__context ~self = - Helpers.get_boot_record ~__context ~self + Helpers.get_boot_record ~__context ~self let get_data_sources ~__context ~self = - List.map Rrdd_helper.to_API_data_source (Rrdd.query_possible_vm_dss ~vm_uuid:(Db.VM.get_uuid ~__context ~self)) + List.map Rrdd_helper.to_API_data_source (Rrdd.query_possible_vm_dss ~vm_uuid:(Db.VM.get_uuid ~__context ~self)) let record_data_source ~__context ~self ~data_source = - Rrdd.add_vm_ds ~vm_uuid:(Db.VM.get_uuid ~__context ~self) - ~domid:(Int64.to_int (Db.VM.get_domid ~__context ~self)) - ~ds_name:data_source + Rrdd.add_vm_ds ~vm_uuid:(Db.VM.get_uuid ~__context ~self) + ~domid:(Int64.to_int (Db.VM.get_domid ~__context ~self)) + ~ds_name:data_source let query_data_source ~__context ~self ~data_source = Rrdd.query_vm_ds ~vm_uuid:(Db.VM.get_uuid ~__context ~self) ~ds_name:data_source let forget_data_source_archives ~__context ~self ~data_source = Rrdd.forget_vm_ds ~vm_uuid:(Db.VM.get_uuid ~__context ~self) ~ds_name:data_source let get_possible_hosts ~__context ~vm = - let snapshot = Db.VM.get_record ~__context ~self:vm in - get_possible_hosts_for_vm ~__context ~vm ~snapshot + let snapshot = Db.VM.get_record ~__context ~self:vm in + get_possible_hosts_for_vm ~__context ~vm ~snapshot let get_allowed_VBD_devices ~__context ~vm = List.map (fun d -> string_of_int (Device_number.to_disk_number d)) (allowed_VBD_devices ~__context ~vm ~_type:`Disk) let get_allowed_VIF_devices = 
allowed_VIF_devices (* Undocumented Rio message, deprecated in favour of standard VM.clone *) let csvm ~__context ~vm = - Xapi_vm_clone.clone ~__context Xapi_vm_clone.Disk_op_clone ~vm - ~new_name:(Db.VM.get_name_label ~__context ~self:vm ^ "-cloned-suspended") + Xapi_vm_clone.clone ~__context Xapi_vm_clone.Disk_op_clone ~vm + ~new_name:(Db.VM.get_name_label ~__context ~self:vm ^ "-cloned-suspended") (* XXX: NOT IN RIO *) (** Return the largest possible static-max setting which will fit in a given amount of @@ -944,29 +944,29 @@ let csvm ~__context ~vm = NB function is related to Vmops.check_enough_memory. *) let maximise_memory ~__context ~self ~total ~approximate = - let r = Db.VM.get_record ~__context ~self in - let r = { r with API.vM_VCPUs_max = if approximate then 64L else r.API.vM_VCPUs_max } in + let r = Db.VM.get_record ~__context ~self in + let r = { r with API.vM_VCPUs_max = if approximate then 64L else r.API.vM_VCPUs_max } in - (* Need to find the maximum input value to this function so that it still evaluates - to true *) - let will_fit static_max = - let r = { r with API.vM_memory_static_max = static_max } in - let normal, shadow = Memory_check.vm_compute_start_memory ~__context ~policy:Memory_check.Static_max r in - Int64.add normal shadow <= total in + (* Need to find the maximum input value to this function so that it still evaluates + to true *) + let will_fit static_max = + let r = { r with API.vM_memory_static_max = static_max } in + let normal, shadow = Memory_check.vm_compute_start_memory ~__context ~policy:Memory_check.Static_max r in + Int64.add normal shadow <= total in - let max = Helpers.bisect will_fit 0L total in - (* Round down to the nearest MiB boundary... 
there's a slight mismatch between the - boot_free_mem - sum(static_max) value and the results of querying the free pages in Xen.*) - Int64.(mul (mul (div (div max 1024L) 1024L) 1024L) 1024L) + let max = Helpers.bisect will_fit 0L total in + (* Round down to the nearest MiB boundary... there's a slight mismatch between the + boot_free_mem - sum(static_max) value and the results of querying the free pages in Xen.*) + Int64.(mul (mul (div (div max 1024L) 1024L) 1024L) 1024L) (* In the master's forwarding layer with the global forwarding lock *) let atomic_set_resident_on ~__context ~vm ~host = assert false let update_snapshot_metadata ~__context ~vm ~snapshot_of ~snapshot_time = assert false let create_new_blob ~__context ~vm ~name ~mime_type ~public = - let blob = Xapi_blob.create ~__context ~mime_type ~public in - Db.VM.add_to_blobs ~__context ~self:vm ~key:name ~value:blob; - blob + let blob = Xapi_blob.create ~__context ~mime_type ~public in + Db.VM.add_to_blobs ~__context ~self:vm ~key:name ~value:blob; + blob let s3_suspend ~__context ~vm = Xapi_xenops.s3suspend ~__context ~self:vm @@ -975,195 +975,195 @@ let s3_resume ~__context ~vm = Xapi_xenops.s3resume ~__context ~self:vm let copy_bios_strings = Xapi_vm_helpers.copy_bios_strings let set_protection_policy ~__context ~self ~value = - raise (Api_errors.Server_error (Api_errors.message_removed, [])) + raise (Api_errors.Server_error (Api_errors.message_removed, [])) let set_start_delay ~__context ~self ~value = - if value < 0L then invalid_value - "start_delay must be non-negative" - (Int64.to_string value); - Db.VM.set_start_delay ~__context ~self ~value + if value < 0L then invalid_value + "start_delay must be non-negative" + (Int64.to_string value); + Db.VM.set_start_delay ~__context ~self ~value let set_shutdown_delay ~__context ~self ~value = - if value < 0L then invalid_value - "shutdown_delay must be non-negative" - (Int64.to_string value); - Db.VM.set_shutdown_delay ~__context ~self ~value + if value < 0L 
then invalid_value + "shutdown_delay must be non-negative" + (Int64.to_string value); + Db.VM.set_shutdown_delay ~__context ~self ~value let set_order ~__context ~self ~value = - if value < 0L then invalid_value - "order must be non-negative" - (Int64.to_string value); - Db.VM.set_order ~__context ~self ~value + if value < 0L then invalid_value + "order must be non-negative" + (Int64.to_string value); + Db.VM.set_order ~__context ~self ~value let assert_can_be_recovered ~__context ~self ~session_to = - Xapi_vm_helpers.assert_can_be_recovered ~__context ~self ~session_to + Xapi_vm_helpers.assert_can_be_recovered ~__context ~self ~session_to let get_SRs_required_for_recovery ~__context ~self ~session_to = - Xapi_vm_helpers.get_SRs_required_for_recovery ~__context ~self ~session_to + Xapi_vm_helpers.get_SRs_required_for_recovery ~__context ~self ~session_to let recover ~__context ~self ~session_to ~force = - Xapi_dr.assert_session_allows_dr ~session_id:session_to ~action:"VM.recover"; - (* Check the VM SRs are available. *) - assert_can_be_recovered ~__context ~self ~session_to; - (* Attempt to recover the VM. *) - ignore (Xapi_dr.recover_vms ~__context ~vms:[self] ~session_to ~force) + Xapi_dr.assert_session_allows_dr ~session_id:session_to ~action:"VM.recover"; + (* Check the VM SRs are available. *) + assert_can_be_recovered ~__context ~self ~session_to; + (* Attempt to recover the VM. 
*) + ignore (Xapi_dr.recover_vms ~__context ~vms:[self] ~session_to ~force) let set_suspend_VDI ~__context ~self ~value = - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Suspended; - let src_vdi = Db.VM.get_suspend_VDI ~__context ~self in - let dst_vdi = value in - if src_vdi <> dst_vdi then - (* + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Suspended; + let src_vdi = Db.VM.get_suspend_VDI ~__context ~self in + let dst_vdi = value in + if src_vdi <> dst_vdi then + (* * We don't care if the future host can see current suspend VDI or not, but * we want to make sure there's at least a host can see all the VDIs of the * VM + the new suspend VDI. We raise an exception if there's no suitable * host. *) - let vbds = Db.VM.get_VBDs ~__context ~self in - let vbds = List.filter (fun self -> not (Db.VBD.get_empty ~__context ~self)) vbds in - let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) vbds in - let vdis = value :: vdis in - let reqd_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self) vdis in - let choose_fn = Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs in - let _ = Xapi_vm_helpers.choose_host ~__context ~choose_fn () in - let do_checksum vdi result = - try - let r = Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.VDI.checksum ~rpc ~session_id ~self:vdi) in - result := `Succ r - with e -> - result := `Fail e in - let src_result = ref `Pending in - let src_thread = Thread.create (do_checksum src_vdi) src_result in - let dst_result = ref `Pending in - let dst_thread = Thread.create (do_checksum dst_vdi) dst_result in - let get_result t r = - Thread.join(t); - match !r with - | `Succ cs -> cs - | `Fail e -> raise e - | `Pending -> assert false in - let src_checksum = get_result src_thread src_result in - let dst_checksum = get_result dst_thread dst_result in - debug "source suspend_VDI checksum: %s" src_checksum; - debug "destination suspend VDI checksum: %s" 
dst_checksum; - if src_checksum = dst_checksum then - Db.VM.set_suspend_VDI ~__context ~self ~value - else - raise - (Api_errors.Server_error - (Api_errors.suspend_vdi_replacement_is_not_identical, - [(Db.VDI.get_uuid ~__context ~self:src_vdi ^ " : " ^ src_checksum); - (Db.VDI.get_uuid ~__context ~self:dst_vdi ^ " : " ^ dst_checksum)])) + let vbds = Db.VM.get_VBDs ~__context ~self in + let vbds = List.filter (fun self -> not (Db.VBD.get_empty ~__context ~self)) vbds in + let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) vbds in + let vdis = value :: vdis in + let reqd_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self) vdis in + let choose_fn = Xapi_vm_helpers.assert_can_see_specified_SRs ~__context ~reqd_srs in + let _ = Xapi_vm_helpers.choose_host ~__context ~choose_fn () in + let do_checksum vdi result = + try + let r = Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.VDI.checksum ~rpc ~session_id ~self:vdi) in + result := `Succ r + with e -> + result := `Fail e in + let src_result = ref `Pending in + let src_thread = Thread.create (do_checksum src_vdi) src_result in + let dst_result = ref `Pending in + let dst_thread = Thread.create (do_checksum dst_vdi) dst_result in + let get_result t r = + Thread.join(t); + match !r with + | `Succ cs -> cs + | `Fail e -> raise e + | `Pending -> assert false in + let src_checksum = get_result src_thread src_result in + let dst_checksum = get_result dst_thread dst_result in + debug "source suspend_VDI checksum: %s" src_checksum; + debug "destination suspend VDI checksum: %s" dst_checksum; + if src_checksum = dst_checksum then + Db.VM.set_suspend_VDI ~__context ~self ~value + else + raise + (Api_errors.Server_error + (Api_errors.suspend_vdi_replacement_is_not_identical, + [(Db.VDI.get_uuid ~__context ~self:src_vdi ^ " : " ^ src_checksum); + (Db.VDI.get_uuid ~__context ~self:dst_vdi ^ " : " ^ dst_checksum)])) let set_appliance ~__context ~self ~value = - if - 
Db.VM.get_is_control_domain ~__context ~self || - Db.VM.get_is_a_template ~__context ~self || - Db.VM.get_is_a_snapshot ~__context ~self - then - raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Control domains, templates and snapshots cannot be assigned to appliances."])); - let previous_value = Db.VM.get_appliance ~__context ~self in - Db.VM.set_appliance ~__context ~self ~value; - (* Update allowed operations of the old appliance, if valid. *) - if Db.is_valid_ref __context previous_value then - Xapi_vm_appliance.update_allowed_operations ~__context ~self:previous_value; - (* Update the VM's allowed operations - this will update the new appliance's operations, if valid. *) - update_allowed_operations __context self + if + Db.VM.get_is_control_domain ~__context ~self || + Db.VM.get_is_a_template ~__context ~self || + Db.VM.get_is_a_snapshot ~__context ~self + then + raise (Api_errors.Server_error(Api_errors.operation_not_allowed, ["Control domains, templates and snapshots cannot be assigned to appliances."])); + let previous_value = Db.VM.get_appliance ~__context ~self in + Db.VM.set_appliance ~__context ~self ~value; + (* Update allowed operations of the old appliance, if valid. *) + if Db.is_valid_ref __context previous_value then + Xapi_vm_appliance.update_allowed_operations ~__context ~self:previous_value; + (* Update the VM's allowed operations - this will update the new appliance's operations, if valid. 
*) + update_allowed_operations __context self let import_convert ~__context ~_type ~username ~password ~sr ~remote_config = - let open Vpx in - let print_jobInstance (j : Vpx.jobInstance) = - debug "import_convert %Ld%% %s -> %s!\n" j.percentComplete (string_of_jobState j.state) (j.stateDesc) in - let rec loop call vpx_ip = - let response = vpxrpc vpx_ip call in - let jobInstance = Vpx.jobInstance_of_rpc response.Rpc.contents in - print_jobInstance jobInstance; - (match jobInstance.state with - | Created - | Queued - | Running -> Thread.delay 1.; loop call vpx_ip - | Completed - | Aborted - | UserAborted -> ()) in - debug "import_convert %s" (String.concat "; " (List.map (fun (k,v) -> (k ^ "," ^ v)) remote_config)); - let vpx_ip = Xapi_plugins.call_plugin (Context.get_session_id __context) "conversion" "main" [] in - debug "import_convert %s" vpx_ip; - let xen_servicecred = { username = username; password = password } in - let r_cred = rpc_of_serviceCred xen_servicecred in - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - debug "import_convert sr-uuid:%s" sr_uuid; - let importInfo = { Vpx.sRuuid = sr_uuid } in - let vmware_serverinfo = { - serverType = serverType_of_string _type; - hostname = (List.assoc "hostname" remote_config); - cred = {username = (List.assoc "username" remote_config); password = (List.assoc "password" remote_config)}} in - let jobInfo = {source = vmware_serverinfo; sourceVmUUID = ""; - sourceVmName = (List.assoc "vm-name" remote_config); importInfo = importInfo } in - let r_jobInfo = rpc_of_jobInfo jobInfo in - let call = Rpc.call "job.create" [ r_cred; r_jobInfo ] in - let response = vpxrpc vpx_ip call in - let jobInstance = jobInstance_of_rpc response.Rpc.contents in - let r_jobId = Rpc.rpc_of_string jobInstance.id in - let call = Rpc.call "job.get" [ r_cred; r_jobId ] in - loop call vpx_ip + let open Vpx in + let print_jobInstance (j : Vpx.jobInstance) = + debug "import_convert %Ld%% %s -> %s!\n" j.percentComplete (string_of_jobState 
j.state) (j.stateDesc) in + let rec loop call vpx_ip = + let response = vpxrpc vpx_ip call in + let jobInstance = Vpx.jobInstance_of_rpc response.Rpc.contents in + print_jobInstance jobInstance; + (match jobInstance.state with + | Created + | Queued + | Running -> Thread.delay 1.; loop call vpx_ip + | Completed + | Aborted + | UserAborted -> ()) in + debug "import_convert %s" (String.concat "; " (List.map (fun (k,v) -> (k ^ "," ^ v)) remote_config)); + let vpx_ip = Xapi_plugins.call_plugin (Context.get_session_id __context) "conversion" "main" [] in + debug "import_convert %s" vpx_ip; + let xen_servicecred = { username = username; password = password } in + let r_cred = rpc_of_serviceCred xen_servicecred in + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + debug "import_convert sr-uuid:%s" sr_uuid; + let importInfo = { Vpx.sRuuid = sr_uuid } in + let vmware_serverinfo = { + serverType = serverType_of_string _type; + hostname = (List.assoc "hostname" remote_config); + cred = {username = (List.assoc "username" remote_config); password = (List.assoc "password" remote_config)}} in + let jobInfo = {source = vmware_serverinfo; sourceVmUUID = ""; + sourceVmName = (List.assoc "vm-name" remote_config); importInfo = importInfo } in + let r_jobInfo = rpc_of_jobInfo jobInfo in + let call = Rpc.call "job.create" [ r_cred; r_jobInfo ] in + let response = vpxrpc vpx_ip call in + let jobInstance = jobInstance_of_rpc response.Rpc.contents in + let r_jobId = Rpc.rpc_of_string jobInstance.id in + let call = Rpc.call "job.get" [ r_cred; r_jobId ] in + loop call vpx_ip exception Retry of string (* Redirect *) let max_redirects = 5 let rec import_inner n ~__context ~url ~sr ~full_restore ~force = - if n > max_redirects - then raise (Api_errors.Server_error(Api_errors.import_error_generic, ["Maximum redirect limit reached"])) - else begin - let uri = Uri.of_string url in - try - Open_uri.with_open_uri uri (fun fd -> - let module Request = 
Cohttp.Request.Make(Cohttp_posix_io.Unbuffered_IO) in - let module Response = Cohttp.Response.Make(Cohttp_posix_io.Unbuffered_IO) in - let request = Cohttp.Request.make ~meth:`GET uri in - let ic = {Cohttp_posix_io.Unbuffered_IO.header_buffer=None; header_buffer_idx=0; fd} in - Request.write (fun _ -> ()) request fd; - match Response.read ic with - | `Eof -> raise (Api_errors.Server_error(Api_errors.import_error_premature_eof, [])) - | `Invalid x -> raise (Api_errors.Server_error(Api_errors.import_error_generic, [x])) - | `Ok r -> - match r.Cohttp.Response.status with - | `OK -> - let rpc = Helpers.make_rpc ~__context in - let session_id = Context.get_session_id __context in - Import.stream_import __context rpc session_id fd None (fun () -> ()) - Import.({import_type = Full_import sr; - full_restore; - force}) - | e when Cohttp.Code.is_redirection (Cohttp.Code.code_of_status e) -> - begin match Cohttp.Header.get (Cohttp.Response.headers r) "Location" with - | Some l -> raise (Retry l) - | None -> raise (Api_errors.Server_error(Api_errors.import_error_generic, ["Redirect with no new location"])) - end - | e -> - raise (Api_errors.Server_error(Api_errors.import_error_generic, [Cohttp.Code.string_of_status e])) - ) - with - | Retry redirect -> import_inner (n+1) ~__context ~url:redirect ~sr ~full_restore ~force - | e -> raise e - end + if n > max_redirects + then raise (Api_errors.Server_error(Api_errors.import_error_generic, ["Maximum redirect limit reached"])) + else begin + let uri = Uri.of_string url in + try + Open_uri.with_open_uri uri (fun fd -> + let module Request = Cohttp.Request.Make(Cohttp_posix_io.Unbuffered_IO) in + let module Response = Cohttp.Response.Make(Cohttp_posix_io.Unbuffered_IO) in + let request = Cohttp.Request.make ~meth:`GET uri in + let ic = {Cohttp_posix_io.Unbuffered_IO.header_buffer=None; header_buffer_idx=0; fd} in + Request.write (fun _ -> ()) request fd; + match Response.read ic with + | `Eof -> raise 
(Api_errors.Server_error(Api_errors.import_error_premature_eof, [])) + | `Invalid x -> raise (Api_errors.Server_error(Api_errors.import_error_generic, [x])) + | `Ok r -> + match r.Cohttp.Response.status with + | `OK -> + let rpc = Helpers.make_rpc ~__context in + let session_id = Context.get_session_id __context in + Import.stream_import __context rpc session_id fd None (fun () -> ()) + Import.({import_type = Full_import sr; + full_restore; + force}) + | e when Cohttp.Code.is_redirection (Cohttp.Code.code_of_status e) -> + begin match Cohttp.Header.get (Cohttp.Response.headers r) "Location" with + | Some l -> raise (Retry l) + | None -> raise (Api_errors.Server_error(Api_errors.import_error_generic, ["Redirect with no new location"])) + end + | e -> + raise (Api_errors.Server_error(Api_errors.import_error_generic, [Cohttp.Code.string_of_status e])) + ) + with + | Retry redirect -> import_inner (n+1) ~__context ~url:redirect ~sr ~full_restore ~force + | e -> raise e + end let import ~__context ~url ~sr ~full_restore ~force = - import_inner 0 ~__context ~url ~sr ~full_restore ~force + import_inner 0 ~__context ~url ~sr ~full_restore ~force let query_services ~__context ~self = - raise (Api_errors.Server_error(Api_errors.not_implemented, [ "query_services" ])) + raise (Api_errors.Server_error(Api_errors.not_implemented, [ "query_services" ])) let assert_can_set_has_vendor_device ~__context ~self ~value = - if value - (* Do the check even for templates, because snapshots are templates and - * we allow restoration of a VM from a snapshot. *) - then Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; + if value + (* Do the check even for templates, because snapshots are templates and + * we allow restoration of a VM from a snapshot. 
*) + then Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Halted + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Halted let set_has_vendor_device ~__context ~self ~value = - assert_can_set_has_vendor_device ~__context ~self ~value; - Db.VM.set_has_vendor_device ~__context ~self ~value; - update_vm_virtual_hardware_platform_version ~__context ~vm:self + assert_can_set_has_vendor_device ~__context ~self ~value; + Db.VM.set_has_vendor_device ~__context ~self ~value; + update_vm_virtual_hardware_platform_version ~__context ~vm:self diff --git a/ocaml/xapi/xapi_vm.mli b/ocaml/xapi/xapi_vm.mli index 82a436da2ff..345f78cd3cd 100644 --- a/ocaml/xapi/xapi_vm.mli +++ b/ocaml/xapi/xapi_vm.mli @@ -13,7 +13,7 @@ *) (** Module that defines API functions for VM objects * @group XenAPI functions - *) +*) (** {2 (Fill in Title!)} *) @@ -39,11 +39,11 @@ val set_actions_after_crash : __context:Context.t -> self:[ `VM ] Ref.t -> value:[< `coredump_and_destroy - | `coredump_and_restart - | `destroy - | `preserve - | `rename_restart - | `restart ] -> + | `coredump_and_restart + | `destroy + | `preserve + | `rename_restart + | `restart ] -> unit val set_is_a_template : __context:Context.t -> self:[ `VM ] Ref.t -> value:bool -> unit @@ -110,11 +110,11 @@ val create : actions_after_shutdown:[< `destroy | `restart ] -> actions_after_reboot:[< `destroy | `restart ] -> actions_after_crash:[< `coredump_and_destroy - | `coredump_and_restart - | `destroy - | `preserve - | `rename_restart - | `restart ] -> + | `coredump_and_restart + | `destroy + | `preserve + | `rename_restart + | `restart ] -> pV_bootloader:string -> pV_kernel:string -> pV_ramdisk:string -> @@ -143,7 +143,7 @@ val create : generation_id:string -> hardware_platform_version:int64 -> has_vendor_device:bool --> API.ref_VM + -> API.ref_VM val destroy : __context:Context.t -> self:[ `VM ] Ref.t -> unit val clone 
: __context:Context.t -> vm:API.ref_VM -> new_name:string -> [ `VM ] Ref.t @@ -226,15 +226,15 @@ val set_order : __context:Context.t -> self:API.ref_VM -> value:int64 -> unit val assert_can_be_recovered : __context:Context.t -> self:API.ref_VM -> session_to:API.ref_session -> unit val get_SRs_required_for_recovery : __context:Context.t -> self:API.ref_VM -> session_to:API.ref_session ->API.ref_SR list val recover : __context:Context.t -> self:API.ref_VM -> - session_to:API.ref_session -> force:bool -> unit + session_to:API.ref_session -> force:bool -> unit val set_suspend_VDI : __context:Context.t -> self:API.ref_VM -> - value:API.ref_VDI -> unit + value:API.ref_VDI -> unit val set_appliance : __context:Context.t -> self:API.ref_VM -> value:API.ref_VM_appliance -> unit val import_convert : __context:Context.t -> _type:string -> username:string -> password:string -> - sr:API.ref_SR -> remote_config:(string * string) list -> unit + sr:API.ref_SR -> remote_config:(string * string) list -> unit (** [query_services __context self] returns a Map of service type -> name label provided - by the specific VM. *) + by the specific VM. 
*) val query_services : __context:Context.t -> self:API.ref_VM -> (string * string) list val request_rdp_on : __context:Context.t -> vm:API.ref_VM -> unit diff --git a/ocaml/xapi/xapi_vm_appliance.ml b/ocaml/xapi/xapi_vm_appliance.ml index 3240186b10a..fd90db2b0d1 100644 --- a/ocaml/xapi/xapi_vm_appliance.ml +++ b/ocaml/xapi/xapi_vm_appliance.ml @@ -24,9 +24,9 @@ open D module Int64Map = Map.Make(struct type t = int64 let compare = compare end) type appliance_operation = { - name : string; - vm_operation : (API.ref_VM -> (Rpc.call -> Rpc.response) -> API.ref_session -> API.ref_task); - required_state : API.vm_power_state; + name : string; + vm_operation : (API.ref_VM -> (Rpc.call -> Rpc.response) -> API.ref_session -> API.ref_task); + required_state : API.vm_power_state; } let assert_operation_valid = Xapi_vm_appliance_lifecycle.assert_operation_valid @@ -34,142 +34,142 @@ let assert_operation_valid = Xapi_vm_appliance_lifecycle.assert_operation_valid let update_allowed_operations = Xapi_vm_appliance_lifecycle.update_allowed_operations let create ~__context ~name_label ~name_description = - let uuid = Uuid.make_uuid () in - let ref = Ref.make() in - Db.VM_appliance.create ~__context ~ref ~uuid:(Uuid.to_string uuid) ~name_label ~name_description ~allowed_operations:[] ~current_operations:[]; - update_allowed_operations ~__context ~self:ref; - ref + let uuid = Uuid.make_uuid () in + let ref = Ref.make() in + Db.VM_appliance.create ~__context ~ref ~uuid:(Uuid.to_string uuid) ~name_label ~name_description ~allowed_operations:[] ~current_operations:[]; + update_allowed_operations ~__context ~self:ref; + ref let destroy ~__context ~self = - Db.VM_appliance.destroy ~__context ~self + Db.VM_appliance.destroy ~__context ~self (* Takes a list of VMs and returns a map binding each boot order *) (* found in the list to a list of VMs with that boot order. 
*) let group_vms_by_order ~__context vms = - List.fold_left (fun map vm -> - let order = Db.VM.get_order ~__context ~self:vm in - let existing = if Int64Map.mem order map then Int64Map.find order map else [] in - Int64Map.add order (vm::existing) map) Int64Map.empty vms + List.fold_left (fun map vm -> + let order = Db.VM.get_order ~__context ~self:vm in + let existing = if Int64Map.mem order map then Int64Map.find order map else [] in + Int64Map.add order (vm::existing) map) Int64Map.empty vms (* Return a list of lists of VMs where each list contains *) (* VMs with the same boot order. *) let create_action_list ~__context start vms = - let order_map = group_vms_by_order ~__context vms in - (if start then List.rev else (fun x -> x)) - (Int64Map.fold (fun _ vms groups -> vms::groups) order_map []) + let order_map = group_vms_by_order ~__context vms in + (if start then List.rev else (fun x -> x)) + (Int64Map.fold (fun _ vms groups -> vms::groups) order_map []) (* Run the given operation on all VMs in the list, and record the tasks created. *) (* Return once all the tasks have completed, with a list of VMs which threw an exception. 
*) let run_operation_on_vms ~__context operation vms = - Helpers.call_api_functions ~__context (fun rpc session_id -> - let (tasks, failed_vms) = List.fold_left (fun (tasks, failed_vms) vm -> - try - let task = operation vm rpc session_id in - (task::tasks, failed_vms) - with e -> - (tasks, vm::failed_vms)) ([], []) vms in - Tasks.wait_for_all ~rpc ~session_id ~tasks) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let (tasks, failed_vms) = List.fold_left (fun (tasks, failed_vms) vm -> + try + let task = operation vm rpc session_id in + (task::tasks, failed_vms) + with e -> + (tasks, vm::failed_vms)) ([], []) vms in + Tasks.wait_for_all ~rpc ~session_id ~tasks) let perform_operation ~__context ~self ~operation ~ascending_priority = - let appliance_uuid = (Db.VM_appliance.get_uuid ~__context ~self) in - let contained_vms = Db.VM_appliance.get_VMs ~__context ~self in - (* Obtain a list of VMs which are not already in the required power state. *) - let target_vms = List.filter (fun vm -> Db.VM.get_power_state ~__context ~self:vm <> operation.required_state) contained_vms in - let action_list = create_action_list ~__context ascending_priority target_vms in - debug "Beginning operation %s on appliance %s" operation.name appliance_uuid; - List.iter (fun vm_list -> run_operation_on_vms ~__context operation.vm_operation vm_list) action_list; - (* Check whether all the VMs have transitioned to the required power state. 
*) - let failed_vms = List.filter (fun vm -> Db.VM.get_power_state ~__context ~self:vm <> operation.required_state) target_vms in - match failed_vms with - | [] -> debug "Operation %s on appliance with uuid %s completed successfully" operation.name appliance_uuid - | _ -> - debug "Operation %s on appliance with uuid %s partially failed" operation.name appliance_uuid; - raise (Api_errors.Server_error(Api_errors.operation_partially_failed, - operation.name::(List.map Ref.string_of failed_vms))) + let appliance_uuid = (Db.VM_appliance.get_uuid ~__context ~self) in + let contained_vms = Db.VM_appliance.get_VMs ~__context ~self in + (* Obtain a list of VMs which are not already in the required power state. *) + let target_vms = List.filter (fun vm -> Db.VM.get_power_state ~__context ~self:vm <> operation.required_state) contained_vms in + let action_list = create_action_list ~__context ascending_priority target_vms in + debug "Beginning operation %s on appliance %s" operation.name appliance_uuid; + List.iter (fun vm_list -> run_operation_on_vms ~__context operation.vm_operation vm_list) action_list; + (* Check whether all the VMs have transitioned to the required power state. 
*) + let failed_vms = List.filter (fun vm -> Db.VM.get_power_state ~__context ~self:vm <> operation.required_state) target_vms in + match failed_vms with + | [] -> debug "Operation %s on appliance with uuid %s completed successfully" operation.name appliance_uuid + | _ -> + debug "Operation %s on appliance with uuid %s partially failed" operation.name appliance_uuid; + raise (Api_errors.Server_error(Api_errors.operation_partially_failed, + operation.name::(List.map Ref.string_of failed_vms))) let start ~__context ~self ~paused = - let operation = { - name = "VM_appliance.start"; - vm_operation = (fun vm rpc session_id -> Client.Async.VM.start ~rpc ~session_id ~vm ~start_paused:paused ~force:false); - required_state = if paused then `Paused else `Running; - } in - perform_operation ~__context ~self ~operation ~ascending_priority:true + let operation = { + name = "VM_appliance.start"; + vm_operation = (fun vm rpc session_id -> Client.Async.VM.start ~rpc ~session_id ~vm ~start_paused:paused ~force:false); + required_state = if paused then `Paused else `Running; + } in + perform_operation ~__context ~self ~operation ~ascending_priority:true let clean_shutdown ~__context ~self = - let operation = { - name = "VM_appliance.clean_shutdown"; - vm_operation = (fun vm rpc session_id -> Client.Async.VM.clean_shutdown ~rpc ~session_id ~vm); - required_state = `Halted; - } in - perform_operation ~__context ~self ~operation ~ascending_priority:false + let operation = { + name = "VM_appliance.clean_shutdown"; + vm_operation = (fun vm rpc session_id -> Client.Async.VM.clean_shutdown ~rpc ~session_id ~vm); + required_state = `Halted; + } in + perform_operation ~__context ~self ~operation ~ascending_priority:false let hard_shutdown ~__context ~self = - let operation = { - name = "VM_appliance.hard_shutdown"; - vm_operation = (fun vm rpc session_id -> Client.Async.VM.hard_shutdown ~rpc ~session_id ~vm); - required_state = `Halted; - } in - perform_operation ~__context ~self ~operation 
~ascending_priority:false - -let shutdown ~__context ~self = - let operation = { - name = "VM_appliance.shutdown"; - vm_operation = (fun vm rpc session_id -> Client.Async.VM.shutdown ~rpc ~session_id ~vm); - required_state = `Halted; - } in - perform_operation ~__context ~self ~operation ~ascending_priority:false + let operation = { + name = "VM_appliance.hard_shutdown"; + vm_operation = (fun vm rpc session_id -> Client.Async.VM.hard_shutdown ~rpc ~session_id ~vm); + required_state = `Halted; + } in + perform_operation ~__context ~self ~operation ~ascending_priority:false + +let shutdown ~__context ~self = + let operation = { + name = "VM_appliance.shutdown"; + vm_operation = (fun vm rpc session_id -> Client.Async.VM.shutdown ~rpc ~session_id ~vm); + required_state = `Halted; + } in + perform_operation ~__context ~self ~operation ~ascending_priority:false (* Check that VDI SRs are present for each VM in the appliance. *) let assert_can_be_recovered ~__context ~self ~session_to = - let vms = Db.VM_appliance.get_VMs ~__context ~self in - List.iter - (fun vm -> Xapi_vm_helpers.assert_can_be_recovered ~__context ~self:vm ~session_to) - vms - + let vms = Db.VM_appliance.get_VMs ~__context ~self in + List.iter + (fun vm -> Xapi_vm_helpers.assert_can_be_recovered ~__context ~self:vm ~session_to) + vms + let get_SRs_required_for_recovery ~__context ~self ~session_to = - let vms = Db.VM_appliance.get_VMs ~__context ~self in - let sr_list = List.map - (fun vm -> Xapi_vm_helpers.get_SRs_required_for_recovery ~__context ~self:vm ~session_to) - vms in - List.setify(List.flatten sr_list) + let vms = Db.VM_appliance.get_VMs ~__context ~self in + let sr_list = List.map + (fun vm -> Xapi_vm_helpers.get_SRs_required_for_recovery ~__context ~self:vm ~session_to) + vms in + List.setify(List.flatten sr_list) let recover ~__context ~self ~session_to ~force = - Xapi_dr.assert_session_allows_dr ~session_id:session_to ~action:"VM_appliance.recover"; - assert_can_be_recovered ~__context 
~self ~session_to; - let vms = Db.VM_appliance.get_VMs ~__context ~self in - let recovered_vms = Xapi_dr.recover_vms ~__context ~vms ~session_to ~force in - (* Deal with the VM appliance object. *) - let old_appliance = Db.VM_appliance.get_record ~__context ~self in - Server_helpers.exec_with_new_task ~session_id:session_to "Recreating VM appliance object" - (fun __context_to -> - let recovered_appliance = try - (* If an appliance with the same UUID exists, remove all VMs from the appliance and update its name_label/name_description. *) - let existing_appliance = Db.VM_appliance.get_by_uuid ~__context:__context_to ~uuid:old_appliance.API.vM_appliance_uuid in - debug "An appliance with UUID %s already exists - reusing it." old_appliance.API.vM_appliance_uuid; - let vms = Db.VM_appliance.get_VMs ~__context:__context_to ~self:existing_appliance in - List.iter - (fun vm -> Db.VM.set_appliance ~__context:__context_to ~self:vm ~value:Ref.null) - vms; - Db.VM_appliance.set_name_label ~__context:__context_to ~self:existing_appliance ~value:old_appliance.API.vM_appliance_name_label; - Db.VM_appliance.set_name_description ~__context:__context_to ~self:existing_appliance ~value:old_appliance.API.vM_appliance_name_description; - existing_appliance - with Db_exn.Read_missing_uuid("VM_appliance", _, _) -> - (* If no appliance with the same UUID exists, create a new one from the old appliance's data. *) - debug "No appliance with UUID %s exists - creating a new one." old_appliance.API.vM_appliance_uuid; - begin - let new_appliance = create ~__context:__context_to - ~name_label:old_appliance.API.vM_appliance_name_label - ~name_description:old_appliance.API.vM_appliance_name_description in - Db.VM_appliance.set_uuid ~__context:__context_to - ~self:new_appliance - ~value:old_appliance.API.vM_appliance_uuid; - new_appliance - end - in - (* Add all the non-template VMs to the appliance. 
*) - List.iter - (fun vm -> - if not (Db.VM.get_is_a_template ~__context:__context_to ~self:vm) then - Db.VM.set_appliance ~__context:__context_to ~self:vm ~value:recovered_appliance) - recovered_vms; - update_allowed_operations ~__context:__context_to ~self:recovered_appliance) + Xapi_dr.assert_session_allows_dr ~session_id:session_to ~action:"VM_appliance.recover"; + assert_can_be_recovered ~__context ~self ~session_to; + let vms = Db.VM_appliance.get_VMs ~__context ~self in + let recovered_vms = Xapi_dr.recover_vms ~__context ~vms ~session_to ~force in + (* Deal with the VM appliance object. *) + let old_appliance = Db.VM_appliance.get_record ~__context ~self in + Server_helpers.exec_with_new_task ~session_id:session_to "Recreating VM appliance object" + (fun __context_to -> + let recovered_appliance = try + (* If an appliance with the same UUID exists, remove all VMs from the appliance and update its name_label/name_description. *) + let existing_appliance = Db.VM_appliance.get_by_uuid ~__context:__context_to ~uuid:old_appliance.API.vM_appliance_uuid in + debug "An appliance with UUID %s already exists - reusing it." old_appliance.API.vM_appliance_uuid; + let vms = Db.VM_appliance.get_VMs ~__context:__context_to ~self:existing_appliance in + List.iter + (fun vm -> Db.VM.set_appliance ~__context:__context_to ~self:vm ~value:Ref.null) + vms; + Db.VM_appliance.set_name_label ~__context:__context_to ~self:existing_appliance ~value:old_appliance.API.vM_appliance_name_label; + Db.VM_appliance.set_name_description ~__context:__context_to ~self:existing_appliance ~value:old_appliance.API.vM_appliance_name_description; + existing_appliance + with Db_exn.Read_missing_uuid("VM_appliance", _, _) -> + (* If no appliance with the same UUID exists, create a new one from the old appliance's data. *) + debug "No appliance with UUID %s exists - creating a new one." 
old_appliance.API.vM_appliance_uuid; + begin + let new_appliance = create ~__context:__context_to + ~name_label:old_appliance.API.vM_appliance_name_label + ~name_description:old_appliance.API.vM_appliance_name_description in + Db.VM_appliance.set_uuid ~__context:__context_to + ~self:new_appliance + ~value:old_appliance.API.vM_appliance_uuid; + new_appliance + end + in + (* Add all the non-template VMs to the appliance. *) + List.iter + (fun vm -> + if not (Db.VM.get_is_a_template ~__context:__context_to ~self:vm) then + Db.VM.set_appliance ~__context:__context_to ~self:vm ~value:recovered_appliance) + recovered_vms; + update_allowed_operations ~__context:__context_to ~self:recovered_appliance) diff --git a/ocaml/xapi/xapi_vm_appliance.mli b/ocaml/xapi/xapi_vm_appliance.mli index eec5fe3472a..01a89ba4398 100644 --- a/ocaml/xapi/xapi_vm_appliance.mli +++ b/ocaml/xapi/xapi_vm_appliance.mli @@ -13,27 +13,27 @@ *) val create : - __context:Context.t -> name_label:string -> name_description:string -> [ `VM_appliance ] Ref.t + __context:Context.t -> name_label:string -> name_description:string -> [ `VM_appliance ] Ref.t val destroy : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit val assert_operation_valid : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> op:API.vm_appliance_operation -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> op:API.vm_appliance_operation -> unit val update_allowed_operations : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit val start : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> paused:bool -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> paused:bool -> unit val clean_shutdown : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit val hard_shutdown : - __context:Context.t -> 
self:[ `VM_appliance ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit val shutdown : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> unit val assert_can_be_recovered : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> session_to:[ `session ] Ref.t -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> session_to:[ `session ] Ref.t -> unit val get_SRs_required_for_recovery : - __context:Context.t -> self:[ `VM_appliance] Ref.t -> session_to:[ `session ] Ref.t -> API.ref_SR list + __context:Context.t -> self:[ `VM_appliance] Ref.t -> session_to:[ `session ] Ref.t -> API.ref_SR list val recover : - __context:Context.t -> self:[ `VM_appliance ] Ref.t -> session_to:[ `session ] Ref.t -> force:bool -> unit + __context:Context.t -> self:[ `VM_appliance ] Ref.t -> session_to:[ `session ] Ref.t -> force:bool -> unit diff --git a/ocaml/xapi/xapi_vm_appliance_lifecycle.ml b/ocaml/xapi/xapi_vm_appliance_lifecycle.ml index 44b4cda2f63..fdad0546fac 100644 --- a/ocaml/xapi/xapi_vm_appliance_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_appliance_lifecycle.ml @@ -15,44 +15,44 @@ (* Checks to see if an operation is valid in this state. Returns Some exception *) (* if not and None if everything is ok. *) let check_operation_error ~__context record self op = - let _ref = Ref.string_of self in - let current_ops = record.Db_actions.vM_appliance_current_operations in - (* Only allow one operation of [`start | `clean_shutdown | `hard_shutdown | `shutdown ] at a time. *) - if List.length current_ops > 0 then - Some (Api_errors.other_operation_in_progress, ["VM_appliance"; _ref]) - else - let vms = Db.VM_appliance.get_VMs ~__context ~self in - if List.length vms = 0 then - Some (Api_errors.operation_not_allowed, ["Appliance has no VMs."]) - else begin - (* Allow the op if any VMs are in a state where the op makes sense. 
*) - let power_states = List.map (fun vm -> Db.VM.get_power_state ~__context ~self:vm) vms in - let predicate, error = match op with - (* Can start if any are halted. *) - | `start -> - (fun power_state -> power_state = `Halted), "There are no halted VMs in this appliance." - (* Can clean_shutdown if any are running. *) - | `clean_shutdown -> - (fun power_state -> power_state = `Running), "There are no running VMs in this appliance." - (* Can hard_shutdown/shutdown if any are not halted. *) - | `hard_shutdown | `shutdown -> - (fun power_state -> power_state <> `Halted), "All VMs in this appliance are halted." - in - if List.exists predicate power_states then - None - else - Some (Api_errors.operation_not_allowed, [error]) - end + let _ref = Ref.string_of self in + let current_ops = record.Db_actions.vM_appliance_current_operations in + (* Only allow one operation of [`start | `clean_shutdown | `hard_shutdown | `shutdown ] at a time. *) + if List.length current_ops > 0 then + Some (Api_errors.other_operation_in_progress, ["VM_appliance"; _ref]) + else + let vms = Db.VM_appliance.get_VMs ~__context ~self in + if List.length vms = 0 then + Some (Api_errors.operation_not_allowed, ["Appliance has no VMs."]) + else begin + (* Allow the op if any VMs are in a state where the op makes sense. *) + let power_states = List.map (fun vm -> Db.VM.get_power_state ~__context ~self:vm) vms in + let predicate, error = match op with + (* Can start if any are halted. *) + | `start -> + (fun power_state -> power_state = `Halted), "There are no halted VMs in this appliance." + (* Can clean_shutdown if any are running. *) + | `clean_shutdown -> + (fun power_state -> power_state = `Running), "There are no running VMs in this appliance." + (* Can hard_shutdown/shutdown if any are not halted. *) + | `hard_shutdown | `shutdown -> + (fun power_state -> power_state <> `Halted), "All VMs in this appliance are halted." 
+ in + if List.exists predicate power_states then + None + else + Some (Api_errors.operation_not_allowed, [error]) + end let assert_operation_valid ~__context ~self ~(op:API.vm_appliance_operation) = - let all = Db.VM_appliance.get_record_internal ~__context ~self in - match check_operation_error ~__context all self op with - | None -> () - | Some (a,b) -> raise (Api_errors.Server_error (a,b)) + let all = Db.VM_appliance.get_record_internal ~__context ~self in + match check_operation_error ~__context all self op with + | None -> () + | Some (a,b) -> raise (Api_errors.Server_error (a,b)) let update_allowed_operations ~__context ~self = - let all = Db.VM_appliance.get_record_internal ~__context ~self in - let allowed_ops = - let allowed x = match check_operation_error ~__context all self x with None -> true | _ -> false in - List.filter allowed [`start; `clean_shutdown; `hard_shutdown; `shutdown] in - Db.VM_appliance.set_allowed_operations ~__context ~self ~value:allowed_ops + let all = Db.VM_appliance.get_record_internal ~__context ~self in + let allowed_ops = + let allowed x = match check_operation_error ~__context all self x with None -> true | _ -> false in + List.filter allowed [`start; `clean_shutdown; `hard_shutdown; `shutdown] in + Db.VM_appliance.set_allowed_operations ~__context ~self ~value:allowed_ops diff --git a/ocaml/xapi/xapi_vm_clone.ml b/ocaml/xapi/xapi_vm_clone.ml index 9cdcd67fbae..7d7be15b98e 100644 --- a/ocaml/xapi/xapi_vm_clone.ml +++ b/ocaml/xapi/xapi_vm_clone.ml @@ -13,7 +13,7 @@ *) (** * @group Virtual-Machine Management - *) +*) open Stdext open Client @@ -25,146 +25,146 @@ module D = Debug.Make(struct let name="xapi" end) open D let delete_disks rpc session_id disks = - List.iter (fun (vbd,vdi,on_error_delete) -> - if on_error_delete - then try Client.VDI.destroy rpc session_id vdi with _ -> () - else debug "Not destroying CD VDI: %s" (Ref.string_of vdi) - ) disks + List.iter (fun (vbd,vdi,on_error_delete) -> + if on_error_delete + then 
try Client.VDI.destroy rpc session_id vdi with _ -> () + else debug "Not destroying CD VDI: %s" (Ref.string_of vdi) + ) disks let wait_for_subtask ?progress_minmax ~__context task = - Helpers.call_api_functions ~__context (fun rpc session -> - let refresh_session = Xapi_session.consider_touching_session rpc session in - let main_task = Context.get_task_id __context in - - let cancel_task () = - (* Signal the VDI copy sub-task to cancel *) - Db_actions.DB_Action.Task.set_current_operations ~__context ~self:task ~value:[(Ref.string_of main_task, `cancel)]; - in - - (* Listen for status and progress events on the task *) - let finished = ref false in - let process_copy_task task_rec = - (* Update progress *) - let myprogress = may (fun (min, max) -> min +. (max -. min) *. task_rec.API.task_progress) progress_minmax in - maybe (fun value -> Db_actions.DB_Action.Task.set_progress ~__context ~self:main_task ~value) myprogress; - - (* See if it has finished *) - match task_rec.API.task_status with - | `success -> finished := true - | `cancelled -> begin - let task_id = Db.Task.get_by_uuid ~__context ~uuid:task_rec.API.task_uuid in - raise Api_errors.(Server_error (task_cancelled,[Ref.string_of task_id])) - end - | `failure -> - begin match task_rec.API.task_error_info with - | code :: params -> raise (Api_errors.Server_error(code, params)) - | _ -> failwith "xapi_vm_clone: task_info has no error_info" - end - | _ -> () - in - - (* Listen for the over-arching task being cancelled *) - let process_main_task task_rec = - let current_ops = task_rec.API.task_current_operations in - if List.exists (fun (_,x) -> x = `cancel) current_ops then cancel_task() - in - - (* Check for the initial state before entering the event-wait loop - in case the task has already finished *) - process_copy_task (Client.Task.get_record rpc session task); - process_main_task (Client.Task.get_record rpc session main_task); - - let token = ref "" in - - (* Watch for events relating to the VDI copy 
sub-task and the over-arching task *) - while not !finished do - let events = Client.Event.from rpc session - [Printf.sprintf "task/%s" (Ref.string_of task); - Printf.sprintf "task/%s" (Ref.string_of main_task)] - !token 30. |> Event_types.event_from_of_rpc in - token := events.token; - refresh_session (); - let checkevent ev = - match Event_helper.record_of_event ev with - | Event_helper.Task (r, Some x) -> - if r=task then process_copy_task x - else if r=main_task then process_main_task x - | _ -> () (* received an irrelevant event *) - in - List.iter checkevent events.events - done; - debug "Finished listening for events relating to tasks %s and %s" (Ref.string_of task) (Ref.string_of main_task); - - Db_actions.DB_Action.Task.get_result ~__context ~self:task) + Helpers.call_api_functions ~__context (fun rpc session -> + let refresh_session = Xapi_session.consider_touching_session rpc session in + let main_task = Context.get_task_id __context in + + let cancel_task () = + (* Signal the VDI copy sub-task to cancel *) + Db_actions.DB_Action.Task.set_current_operations ~__context ~self:task ~value:[(Ref.string_of main_task, `cancel)]; + in + + (* Listen for status and progress events on the task *) + let finished = ref false in + let process_copy_task task_rec = + (* Update progress *) + let myprogress = may (fun (min, max) -> min +. (max -. min) *. 
task_rec.API.task_progress) progress_minmax in + maybe (fun value -> Db_actions.DB_Action.Task.set_progress ~__context ~self:main_task ~value) myprogress; + + (* See if it has finished *) + match task_rec.API.task_status with + | `success -> finished := true + | `cancelled -> begin + let task_id = Db.Task.get_by_uuid ~__context ~uuid:task_rec.API.task_uuid in + raise Api_errors.(Server_error (task_cancelled,[Ref.string_of task_id])) + end + | `failure -> + begin match task_rec.API.task_error_info with + | code :: params -> raise (Api_errors.Server_error(code, params)) + | _ -> failwith "xapi_vm_clone: task_info has no error_info" + end + | _ -> () + in + + (* Listen for the over-arching task being cancelled *) + let process_main_task task_rec = + let current_ops = task_rec.API.task_current_operations in + if List.exists (fun (_,x) -> x = `cancel) current_ops then cancel_task() + in + + (* Check for the initial state before entering the event-wait loop + in case the task has already finished *) + process_copy_task (Client.Task.get_record rpc session task); + process_main_task (Client.Task.get_record rpc session main_task); + + let token = ref "" in + + (* Watch for events relating to the VDI copy sub-task and the over-arching task *) + while not !finished do + let events = Client.Event.from rpc session + [Printf.sprintf "task/%s" (Ref.string_of task); + Printf.sprintf "task/%s" (Ref.string_of main_task)] + !token 30. 
|> Event_types.event_from_of_rpc in + token := events.token; + refresh_session (); + let checkevent ev = + match Event_helper.record_of_event ev with + | Event_helper.Task (r, Some x) -> + if r=task then process_copy_task x + else if r=main_task then process_main_task x + | _ -> () (* received an irrelevant event *) + in + List.iter checkevent events.events + done; + debug "Finished listening for events relating to tasks %s and %s" (Ref.string_of task) (Ref.string_of main_task); + + Db_actions.DB_Action.Task.get_result ~__context ~self:task) let wait_for_clone ?progress_minmax ~__context task = - let result = wait_for_subtask ?progress_minmax ~__context task in - let result = Xml.parse_string result in - let vdiref = API.Legacy.From.ref_VDI "" result in - vdiref + let result = wait_for_subtask ?progress_minmax ~__context task in + let result = Xml.parse_string result in + let vdiref = API.Legacy.From.ref_VDI "" result in + vdiref (* Clone code is parameterised over this so it can be shared with copy *) type disk_op_t = - | Disk_op_clone - | Disk_op_copy of API.ref_SR option - | Disk_op_snapshot - | Disk_op_checkpoint - -let clone_single_vdi ?(progress) rpc session_id disk_op ~__context vdi driver_params = - let task = - match disk_op with - | Disk_op_clone -> - Client.Async.VDI.clone rpc session_id vdi driver_params - | Disk_op_copy None -> - let sr = Client.VDI.get_SR rpc session_id vdi in - Client.Async.VDI.copy rpc session_id vdi sr Ref.null Ref.null - | Disk_op_copy (Some other_sr) -> - Client.Async.VDI.copy rpc session_id vdi other_sr Ref.null Ref.null - | Disk_op_snapshot | Disk_op_checkpoint -> - Client.Async.VDI.snapshot rpc session_id vdi driver_params - in - (* This particular clone takes overall progress from startprogress to endprogress *) - let progress_minmax = may - (fun (done_so_far, size, total) -> - let startprogress = (Int64.to_float done_so_far) /. total in - let endprogress = (Int64.to_float (Int64.add done_so_far size)) /. 
total in - startprogress, endprogress) progress in - let vdi_ref = wait_for_clone ?progress_minmax ~__context task in - Client.Task.destroy rpc session_id task; - vdi_ref + | Disk_op_clone + | Disk_op_copy of API.ref_SR option + | Disk_op_snapshot + | Disk_op_checkpoint + +let clone_single_vdi ?(progress) rpc session_id disk_op ~__context vdi driver_params = + let task = + match disk_op with + | Disk_op_clone -> + Client.Async.VDI.clone rpc session_id vdi driver_params + | Disk_op_copy None -> + let sr = Client.VDI.get_SR rpc session_id vdi in + Client.Async.VDI.copy rpc session_id vdi sr Ref.null Ref.null + | Disk_op_copy (Some other_sr) -> + Client.Async.VDI.copy rpc session_id vdi other_sr Ref.null Ref.null + | Disk_op_snapshot | Disk_op_checkpoint -> + Client.Async.VDI.snapshot rpc session_id vdi driver_params + in + (* This particular clone takes overall progress from startprogress to endprogress *) + let progress_minmax = may + (fun (done_so_far, size, total) -> + let startprogress = (Int64.to_float done_so_far) /. total in + let endprogress = (Int64.to_float (Int64.add done_so_far size)) /. total in + startprogress, endprogress) progress in + let vdi_ref = wait_for_clone ?progress_minmax ~__context task in + Client.Task.destroy rpc session_id task; + vdi_ref (* Clone a list of disks, if any error occurs then delete all the ones we've * got. Reverse the list at the end, so that the disks are returned in the * same order as the [vbds] parameter. 
*) let safe_clone_disks rpc session_id disk_op ~__context vbds driver_params = - (* Find the sizes of the disks, and the total size in order to do progress *) - let sizes = List.map - (fun vbd -> try (vbd,Db.VDI.get_virtual_size ~__context - ~self:(Db.VBD.get_VDI ~__context ~self:vbd)) with _ -> (vbd,0L)) vbds in - let total = Int64.to_float (List.fold_left (fun tot (_,size) -> Int64.add tot size) 0L sizes) in - - let fold_function (acc,done_so_far) (vbd,size) = - try - TaskHelper.exn_if_cancelling ~__context; - let vbd_r = Client.VBD.get_record rpc session_id vbd in - (* If the VBD is empty there is no VDI to copy. *) - (* If the VBD is a CD then eject it (we cannot make copies of ISOs: they're identified *) - (* by their filename unlike other VDIs) *) - let newvdi, on_error_delete = - if vbd_r.API.vBD_empty - then Ref.null, false - else if vbd_r.API.vBD_type = `CD - then vbd_r.API.vBD_VDI, false (* don't delete the original CD *) - else clone_single_vdi ~progress:(done_so_far, size, total) rpc session_id disk_op ~__context vbd_r.API.vBD_VDI driver_params, true (* do delete newly created VDI *) - in - ((vbd,newvdi,on_error_delete)::acc, (Int64.add done_so_far size)) - with e -> - debug "Error in safe_clone_disks: %s" (Printexc.to_string e); - delete_disks rpc session_id acc; (* Delete those cloned so far *) - raise e - in - List.rev (fst (List.fold_left fold_function ([],0L) sizes)) + (* Find the sizes of the disks, and the total size in order to do progress *) + let sizes = List.map + (fun vbd -> try (vbd,Db.VDI.get_virtual_size ~__context + ~self:(Db.VBD.get_VDI ~__context ~self:vbd)) with _ -> (vbd,0L)) vbds in + let total = Int64.to_float (List.fold_left (fun tot (_,size) -> Int64.add tot size) 0L sizes) in + + let fold_function (acc,done_so_far) (vbd,size) = + try + TaskHelper.exn_if_cancelling ~__context; + let vbd_r = Client.VBD.get_record rpc session_id vbd in + (* If the VBD is empty there is no VDI to copy. 
*) + (* If the VBD is a CD then eject it (we cannot make copies of ISOs: they're identified *) + (* by their filename unlike other VDIs) *) + let newvdi, on_error_delete = + if vbd_r.API.vBD_empty + then Ref.null, false + else if vbd_r.API.vBD_type = `CD + then vbd_r.API.vBD_VDI, false (* don't delete the original CD *) + else clone_single_vdi ~progress:(done_so_far, size, total) rpc session_id disk_op ~__context vbd_r.API.vBD_VDI driver_params, true (* do delete newly created VDI *) + in + ((vbd,newvdi,on_error_delete)::acc, (Int64.add done_so_far size)) + with e -> + debug "Error in safe_clone_disks: %s" (Printexc.to_string e); + delete_disks rpc session_id acc; (* Delete those cloned so far *) + raise e + in + List.rev (fst (List.fold_left fold_function ([],0L) sizes)) let power_state_at_snapshot = "power-state-at-snapshot" let disk_snapshot_type = "disk-snapshot-type" @@ -172,279 +172,279 @@ let crash_consistent = "crash_consistent" let quiesced = "quiesced" let snapshot_info ~power_state ~is_a_snapshot = - let power_state_info = [power_state_at_snapshot, Record_util.power_state_to_string power_state] in - if is_a_snapshot then - (disk_snapshot_type, crash_consistent) :: power_state_info - else - [] + let power_state_info = [power_state_at_snapshot, Record_util.power_state_to_string power_state] in + if is_a_snapshot then + (disk_snapshot_type, crash_consistent) :: power_state_info + else + [] let snapshot_metadata ~__context ~vm ~is_a_snapshot = - if is_a_snapshot then - Helpers.vm_to_string __context vm - else - "" + if is_a_snapshot then + Helpers.vm_to_string __context vm + else + "" (* return a new VM record, in appropriate power state and having the good metrics. *) (* N.B. always check VM.has_vendor_device and Features.PCI_device_for_auto_update before calling this, * as is done before the single existing call to this function. * If ever we need to expose this function in the .mli file then we should do the check in the function. 
*) let copy_vm_record ?(snapshot_info_record) ~__context ~vm ~disk_op ~new_name ~new_power_state = - let all = Db.VM.get_record_internal ~__context ~self:vm in - let is_a_snapshot = disk_op = Disk_op_snapshot || disk_op = Disk_op_checkpoint in - let task_id = Ref.string_of (Context.get_task_id __context) in - let uuid = Uuid.make_uuid () in - let ref = Ref.make () in - let power_state = Db.VM.get_power_state ~__context ~self:vm in - let current_op = - match disk_op with - | Disk_op_clone -> `clone - | Disk_op_copy _-> `copy - | Disk_op_snapshot -> `snapshot - | Disk_op_checkpoint -> `checkpoint - in - (* replace VM mac seed on clone *) - let rec replace_seed l = - match l with - | [] -> [] - | (x,y)::xs -> - if x=Xapi_globs.mac_seed - then (x,Uuid.to_string (Uuid.make_uuid()))::xs - else (x,y)::(replace_seed xs) - in - (* rewrite mac_seed in other_config *) - let other_config = all.Db_actions.vM_other_config in - let other_config = - if is_a_snapshot - then other_config - else if (List.mem_assoc Xapi_globs.mac_seed other_config) - then replace_seed other_config - else (Xapi_globs.mac_seed, Uuid.to_string (Uuid.make_uuid()))::other_config - in - (* remove "default_template" and "xensource_internal" from other_config if it's there *) - let other_config = - List.filter - (fun (k,v) -> k <> Xapi_globs.default_template_key && k <> Xapi_globs.xensource_internal) - other_config - in - (* Preserve the name_label of the base template in other_config. 
*) - let other_config = - if all.Db_actions.vM_is_a_template && not(List.mem_assoc Xapi_globs.base_template_name_key other_config) - then (Xapi_globs.base_template_name_key, all.Db_actions.vM_name_label) :: other_config - else other_config - in - (* Copy the old metrics if available, otherwise generate a fresh one *) - let m = - if Db.is_valid_ref __context all.Db_actions.vM_metrics - then Some (Db.VM_metrics.get_record_internal ~__context ~self:all.Db_actions.vM_metrics) - else None - in - let metrics = Ref.make () - and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in - Db.VM_metrics.create ~__context - ~ref:metrics - ~uuid:metrics_uuid - ~memory_actual:(default 0L (may (fun x -> x.Db_actions.vM_metrics_memory_actual) m)) - ~vCPUs_number:(default 0L (may (fun x -> x.Db_actions.vM_metrics_VCPUs_number) m)) - ~vCPUs_utilisation:(default [(0L, 0.)] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_utilisation) m)) - ~vCPUs_CPU:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_CPU) m)) - ~vCPUs_params:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_params) m)) - ~vCPUs_flags:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_flags) m)) - ~start_time:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_start_time) m)) - ~install_time:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_install_time) m)) - ~state:(default [] (may (fun x -> x.Db_actions.vM_metrics_state) m)) - ~last_updated:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_last_updated) m)) - ~other_config:(default [] (may (fun x -> x.Db_actions.vM_metrics_other_config) m)) - ~nomigrate:(default false (may (fun x -> x.Db_actions.vM_metrics_nomigrate) m)) - ~hvm:(default false (may (fun x -> x.Db_actions.vM_metrics_hvm) m)) - ~nested_virt:(default false (may (fun x -> x.Db_actions.vM_metrics_nested_virt) m)) - ; - - let guest_metrics = Xapi_vm_helpers.copy_guest_metrics ~__context ~vm in - - (* compute the parent VM *) - let parent = - match disk_op with - (* 
CA-52668: clone or copy result in new top-level VMs *) - | Disk_op_clone | Disk_op_copy _-> Ref.null - | Disk_op_snapshot | Disk_op_checkpoint -> all.Db_actions.vM_parent in - - (* We always reset an existing generation ID on VM.clone *) - let generation_id = Xapi_vm_helpers.fresh_genid - ~current_genid:all.Db_actions.vM_generation_id () in - - (* create a new VM *) - Db.VM.create ~__context - ~ref - ~uuid:(Uuid.to_string uuid) - ~power_state:new_power_state - ~allowed_operations:[] - ~blocked_operations:[] - ~name_label:new_name - ~current_operations:[ task_id, current_op ] - ~name_description:all.Db_actions.vM_name_description - ~user_version:all.Db_actions.vM_user_version - ~is_a_template: (is_a_snapshot || all.Db_actions.vM_is_a_template) - ~is_a_snapshot: is_a_snapshot - ~snapshot_of:(if is_a_snapshot then vm else Ref.null) - ~snapshot_time:(if is_a_snapshot then Date.of_float (Unix.gettimeofday ()) else Date.never) - ~snapshot_info:(match snapshot_info_record with - None -> (snapshot_info ~power_state ~is_a_snapshot) - | Some s -> s) - ~snapshot_metadata:(snapshot_metadata ~__context ~vm ~is_a_snapshot) - ~transportable_snapshot_id:"" - ~parent - ~resident_on:Ref.null - ~scheduled_to_be_resident_on:Ref.null - ~affinity:all.Db_actions.vM_affinity - ~memory_overhead:all.Db_actions.vM_memory_overhead - ~memory_target:all.Db_actions.vM_memory_target - ~memory_static_max:all.Db_actions.vM_memory_static_max - ~memory_dynamic_max:all.Db_actions.vM_memory_dynamic_max - ~memory_dynamic_min:all.Db_actions.vM_memory_dynamic_min - ~memory_static_min:all.Db_actions.vM_memory_static_min - ~vCPUs_max:all.Db_actions.vM_VCPUs_max - ~vCPUs_at_startup:all.Db_actions.vM_VCPUs_at_startup - ~vCPUs_params:all.Db_actions.vM_VCPUs_params - ~actions_after_shutdown:all.Db_actions.vM_actions_after_shutdown - ~actions_after_reboot:all.Db_actions.vM_actions_after_reboot - ~actions_after_crash:all.Db_actions.vM_actions_after_crash - ~hVM_boot_policy:all.Db_actions.vM_HVM_boot_policy - 
~hVM_boot_params:all.Db_actions.vM_HVM_boot_params - ~hVM_shadow_multiplier:all.Db_actions.vM_HVM_shadow_multiplier - ~suspend_VDI:Ref.null - ~platform:all.Db_actions.vM_platform - ~pV_kernel:all.Db_actions.vM_PV_kernel - ~pV_ramdisk:all.Db_actions.vM_PV_ramdisk - ~pV_args:all.Db_actions.vM_PV_args - ~pV_bootloader:all.Db_actions.vM_PV_bootloader - ~pV_bootloader_args:all.Db_actions.vM_PV_bootloader_args - ~pV_legacy_args:all.Db_actions.vM_PV_legacy_args - ~pCI_bus:all.Db_actions.vM_PCI_bus - ~other_config - ~domid:(-1L) - ~domarch:"" - ~last_boot_CPU_flags:all.Db_actions.vM_last_boot_CPU_flags - ~is_control_domain:all.Db_actions.vM_is_control_domain - ~metrics - ~blobs:[] - ~guest_metrics:guest_metrics - ~last_booted_record:all.Db_actions.vM_last_booted_record - ~recommendations:all.Db_actions.vM_recommendations - ~xenstore_data:all.Db_actions.vM_xenstore_data - ~ha_restart_priority:all.Db_actions.vM_ha_restart_priority - ~ha_always_run:false - ~tags:all.Db_actions.vM_tags - ~bios_strings:all.Db_actions.vM_bios_strings - ~protection_policy:Ref.null - ~is_snapshot_from_vmpp:false - ~appliance:Ref.null - ~start_delay:0L - ~shutdown_delay:0L - ~order:0L - ~suspend_SR:Ref.null - ~version:0L - ~generation_id - ~hardware_platform_version:all.Db_actions.vM_hardware_platform_version - ~has_vendor_device:all.Db_actions.vM_has_vendor_device - ~requires_reboot:false - ; - - (* update the VM's parent field in case of snapshot. 
Note this must be done after "ref" - has been created, so that its "children" field can be updated by the database layer *) - begin match disk_op with - | Disk_op_clone | Disk_op_copy _-> () - | Disk_op_snapshot | Disk_op_checkpoint -> Db.VM.set_parent ~__context ~self:vm ~value:ref - end; - - ref, uuid + let all = Db.VM.get_record_internal ~__context ~self:vm in + let is_a_snapshot = disk_op = Disk_op_snapshot || disk_op = Disk_op_checkpoint in + let task_id = Ref.string_of (Context.get_task_id __context) in + let uuid = Uuid.make_uuid () in + let ref = Ref.make () in + let power_state = Db.VM.get_power_state ~__context ~self:vm in + let current_op = + match disk_op with + | Disk_op_clone -> `clone + | Disk_op_copy _-> `copy + | Disk_op_snapshot -> `snapshot + | Disk_op_checkpoint -> `checkpoint + in + (* replace VM mac seed on clone *) + let rec replace_seed l = + match l with + | [] -> [] + | (x,y)::xs -> + if x=Xapi_globs.mac_seed + then (x,Uuid.to_string (Uuid.make_uuid()))::xs + else (x,y)::(replace_seed xs) + in + (* rewrite mac_seed in other_config *) + let other_config = all.Db_actions.vM_other_config in + let other_config = + if is_a_snapshot + then other_config + else if (List.mem_assoc Xapi_globs.mac_seed other_config) + then replace_seed other_config + else (Xapi_globs.mac_seed, Uuid.to_string (Uuid.make_uuid()))::other_config + in + (* remove "default_template" and "xensource_internal" from other_config if it's there *) + let other_config = + List.filter + (fun (k,v) -> k <> Xapi_globs.default_template_key && k <> Xapi_globs.xensource_internal) + other_config + in + (* Preserve the name_label of the base template in other_config. 
*) + let other_config = + if all.Db_actions.vM_is_a_template && not(List.mem_assoc Xapi_globs.base_template_name_key other_config) + then (Xapi_globs.base_template_name_key, all.Db_actions.vM_name_label) :: other_config + else other_config + in + (* Copy the old metrics if available, otherwise generate a fresh one *) + let m = + if Db.is_valid_ref __context all.Db_actions.vM_metrics + then Some (Db.VM_metrics.get_record_internal ~__context ~self:all.Db_actions.vM_metrics) + else None + in + let metrics = Ref.make () + and metrics_uuid = Uuid.to_string (Uuid.make_uuid ()) in + Db.VM_metrics.create ~__context + ~ref:metrics + ~uuid:metrics_uuid + ~memory_actual:(default 0L (may (fun x -> x.Db_actions.vM_metrics_memory_actual) m)) + ~vCPUs_number:(default 0L (may (fun x -> x.Db_actions.vM_metrics_VCPUs_number) m)) + ~vCPUs_utilisation:(default [(0L, 0.)] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_utilisation) m)) + ~vCPUs_CPU:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_CPU) m)) + ~vCPUs_params:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_params) m)) + ~vCPUs_flags:(default [] (may (fun x -> x.Db_actions.vM_metrics_VCPUs_flags) m)) + ~start_time:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_start_time) m)) + ~install_time:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_install_time) m)) + ~state:(default [] (may (fun x -> x.Db_actions.vM_metrics_state) m)) + ~last_updated:(default Date.never (may (fun x -> x.Db_actions.vM_metrics_last_updated) m)) + ~other_config:(default [] (may (fun x -> x.Db_actions.vM_metrics_other_config) m)) + ~nomigrate:(default false (may (fun x -> x.Db_actions.vM_metrics_nomigrate) m)) + ~hvm:(default false (may (fun x -> x.Db_actions.vM_metrics_hvm) m)) + ~nested_virt:(default false (may (fun x -> x.Db_actions.vM_metrics_nested_virt) m)) + ; + + let guest_metrics = Xapi_vm_helpers.copy_guest_metrics ~__context ~vm in + + (* compute the parent VM *) + let parent = + match disk_op with + (* 
CA-52668: clone or copy result in new top-level VMs *) + | Disk_op_clone | Disk_op_copy _-> Ref.null + | Disk_op_snapshot | Disk_op_checkpoint -> all.Db_actions.vM_parent in + + (* We always reset an existing generation ID on VM.clone *) + let generation_id = Xapi_vm_helpers.fresh_genid + ~current_genid:all.Db_actions.vM_generation_id () in + + (* create a new VM *) + Db.VM.create ~__context + ~ref + ~uuid:(Uuid.to_string uuid) + ~power_state:new_power_state + ~allowed_operations:[] + ~blocked_operations:[] + ~name_label:new_name + ~current_operations:[ task_id, current_op ] + ~name_description:all.Db_actions.vM_name_description + ~user_version:all.Db_actions.vM_user_version + ~is_a_template: (is_a_snapshot || all.Db_actions.vM_is_a_template) + ~is_a_snapshot: is_a_snapshot + ~snapshot_of:(if is_a_snapshot then vm else Ref.null) + ~snapshot_time:(if is_a_snapshot then Date.of_float (Unix.gettimeofday ()) else Date.never) + ~snapshot_info:(match snapshot_info_record with + None -> (snapshot_info ~power_state ~is_a_snapshot) + | Some s -> s) + ~snapshot_metadata:(snapshot_metadata ~__context ~vm ~is_a_snapshot) + ~transportable_snapshot_id:"" + ~parent + ~resident_on:Ref.null + ~scheduled_to_be_resident_on:Ref.null + ~affinity:all.Db_actions.vM_affinity + ~memory_overhead:all.Db_actions.vM_memory_overhead + ~memory_target:all.Db_actions.vM_memory_target + ~memory_static_max:all.Db_actions.vM_memory_static_max + ~memory_dynamic_max:all.Db_actions.vM_memory_dynamic_max + ~memory_dynamic_min:all.Db_actions.vM_memory_dynamic_min + ~memory_static_min:all.Db_actions.vM_memory_static_min + ~vCPUs_max:all.Db_actions.vM_VCPUs_max + ~vCPUs_at_startup:all.Db_actions.vM_VCPUs_at_startup + ~vCPUs_params:all.Db_actions.vM_VCPUs_params + ~actions_after_shutdown:all.Db_actions.vM_actions_after_shutdown + ~actions_after_reboot:all.Db_actions.vM_actions_after_reboot + ~actions_after_crash:all.Db_actions.vM_actions_after_crash + ~hVM_boot_policy:all.Db_actions.vM_HVM_boot_policy + 
~hVM_boot_params:all.Db_actions.vM_HVM_boot_params + ~hVM_shadow_multiplier:all.Db_actions.vM_HVM_shadow_multiplier + ~suspend_VDI:Ref.null + ~platform:all.Db_actions.vM_platform + ~pV_kernel:all.Db_actions.vM_PV_kernel + ~pV_ramdisk:all.Db_actions.vM_PV_ramdisk + ~pV_args:all.Db_actions.vM_PV_args + ~pV_bootloader:all.Db_actions.vM_PV_bootloader + ~pV_bootloader_args:all.Db_actions.vM_PV_bootloader_args + ~pV_legacy_args:all.Db_actions.vM_PV_legacy_args + ~pCI_bus:all.Db_actions.vM_PCI_bus + ~other_config + ~domid:(-1L) + ~domarch:"" + ~last_boot_CPU_flags:all.Db_actions.vM_last_boot_CPU_flags + ~is_control_domain:all.Db_actions.vM_is_control_domain + ~metrics + ~blobs:[] + ~guest_metrics:guest_metrics + ~last_booted_record:all.Db_actions.vM_last_booted_record + ~recommendations:all.Db_actions.vM_recommendations + ~xenstore_data:all.Db_actions.vM_xenstore_data + ~ha_restart_priority:all.Db_actions.vM_ha_restart_priority + ~ha_always_run:false + ~tags:all.Db_actions.vM_tags + ~bios_strings:all.Db_actions.vM_bios_strings + ~protection_policy:Ref.null + ~is_snapshot_from_vmpp:false + ~appliance:Ref.null + ~start_delay:0L + ~shutdown_delay:0L + ~order:0L + ~suspend_SR:Ref.null + ~version:0L + ~generation_id + ~hardware_platform_version:all.Db_actions.vM_hardware_platform_version + ~has_vendor_device:all.Db_actions.vM_has_vendor_device + ~requires_reboot:false + ; + + (* update the VM's parent field in case of snapshot. 
Note this must be done after "ref" + has been created, so that its "children" field can be updated by the database layer *) + begin match disk_op with + | Disk_op_clone | Disk_op_copy _-> () + | Disk_op_snapshot | Disk_op_checkpoint -> Db.VM.set_parent ~__context ~self:vm ~value:ref + end; + + ref, uuid (* epoch hint for netapp backend *) let make_driver_params () = - [Xapi_globs._sm_epoch_hint, Uuid.to_string (Uuid.make_uuid())] + [Xapi_globs._sm_epoch_hint, Uuid.to_string (Uuid.make_uuid())] (* NB this function may be called when the VM is suspended for copy/clone operations. Snapshot can be done in live.*) let clone ?(snapshot_info_record) disk_op ~__context ~vm ~new_name = - Helpers.call_api_functions ~__context (fun rpc session_id -> - let task_id = Ref.string_of (Context.get_task_id __context) in - let vbds = Db.VM.get_VBDs ~__context ~self:vm in - let vifs = Db.VM.get_VIFs ~__context ~self:vm in - let vgpus = Db.VM.get_VGPUs ~__context ~self:vm in - let power_state = Db.VM.get_power_state ~__context ~self:vm in - - (* if we do a snaphshot on a VM, then the new VM must remain halted. *) - (* Otherwise, we keep the same power-state as the initial VM *) - let new_power_state = - match disk_op, power_state with - | Disk_op_checkpoint, (`Running | `Suspended) -> `Suspended - | (Disk_op_clone|Disk_op_copy _), `Suspended -> `Suspended - | _ -> `Halted - in - - let is_a_snapshot = disk_op = Disk_op_snapshot || disk_op = Disk_op_checkpoint in - - (* Check licence permission before copying disks, since the copy can take a long time. - * We always allow snapshotting a VM, but check before clone/copy of an existing snapshot or template. *) - if (Db.VM.get_has_vendor_device ~__context ~self:vm && not is_a_snapshot) then - Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; - - (* driver params to be passed to storage backend clone operations. 
*) - let driver_params = make_driver_params () in - - (* backend cloning operations first *) - let cloned_disks = safe_clone_disks rpc session_id disk_op ~__context vbds driver_params in - - begin try - - (* create the VM record *) - let ref, uuid = copy_vm_record ?snapshot_info_record ~__context ~vm ~disk_op ~new_name ~new_power_state in - - (* copy every VBD using the new VDI as backend *) - (* if this fails halfway through, delete the VM and the VDIs, but don't worry *) - (* about any VBDs left hanging around, as these will be GC'd later *) - begin try - - (* copy VBDs *) - List.iter (fun (vbd, newvdi, _) -> - let vbd = Xapi_vbd_helpers.copy ~__context ~vm:ref ~vdi:newvdi vbd in - (* CA-58405: when we make a clone/snapshot/checkpoint we consider the clone/snapshot/checkpoint VM - to "own" all the clone/snapshot/checkpoint *disks* irrespective of the ownership of the original - disks. We wish the clone/snapshot/checkpoint disks to be cleaned up with the VM. *) - if Db.VBD.get_type ~__context ~self:vbd = `Disk then begin - let other_config = Db.VBD.get_other_config ~__context ~self:vbd in - if not(List.mem_assoc Xapi_globs.owner_key other_config) - then Db.VBD.add_to_other_config ~__context ~self:vbd ~key:Xapi_globs.owner_key ~value:""; - end - ) cloned_disks; - (* copy VIFs *) - let (_ : [`VIF] Ref.t list) = - List.map (fun vif -> Xapi_vif_helpers.copy ~__context ~vm:ref ~preserve_mac_address:is_a_snapshot vif) vifs in - (* copy VGPUs *) - let (_ : [`VGPU] Ref.t list) = - List.map (fun vgpu -> Xapi_vgpu.copy ~__context ~vm:ref vgpu) vgpus in - - (* copy the suspended VDI if needed *) - let suspend_VDI = - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let original = Db.VM.get_suspend_VDI ~__context ~self:vm in - if original = Ref.null || disk_op = Disk_op_snapshot - then Ref.null - else if disk_op = Disk_op_checkpoint && power_state = `Runnning - then original - else clone_single_vdi rpc session_id disk_op ~__context original driver_params) in - 
- Db.VM.set_suspend_VDI ~__context ~self:ref ~value:suspend_VDI; - Db.VM.remove_from_current_operations ~__context ~self:ref ~key:task_id; - Xapi_vm_lifecycle.force_state_reset ~__context ~self:ref ~value:new_power_state; - - ref - - with e -> - Db.VM.destroy ~__context ~self:ref; - raise e - end - - with e -> - delete_disks rpc session_id cloned_disks; - raise e - end) + Helpers.call_api_functions ~__context (fun rpc session_id -> + let task_id = Ref.string_of (Context.get_task_id __context) in + let vbds = Db.VM.get_VBDs ~__context ~self:vm in + let vifs = Db.VM.get_VIFs ~__context ~self:vm in + let vgpus = Db.VM.get_VGPUs ~__context ~self:vm in + let power_state = Db.VM.get_power_state ~__context ~self:vm in + + (* if we do a snapshot on a VM, then the new VM must remain halted. *) + (* Otherwise, we keep the same power-state as the initial VM *) + let new_power_state = + match disk_op, power_state with + | Disk_op_checkpoint, (`Running | `Suspended) -> `Suspended + | (Disk_op_clone|Disk_op_copy _), `Suspended -> `Suspended + | _ -> `Halted + in + + let is_a_snapshot = disk_op = Disk_op_snapshot || disk_op = Disk_op_checkpoint in + + (* Check licence permission before copying disks, since the copy can take a long time. + * We always allow snapshotting a VM, but check before clone/copy of an existing snapshot or template. *) + if (Db.VM.get_has_vendor_device ~__context ~self:vm && not is_a_snapshot) then + Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; + + (* driver params to be passed to storage backend clone operations. 
*) + let driver_params = make_driver_params () in + + (* backend cloning operations first *) + let cloned_disks = safe_clone_disks rpc session_id disk_op ~__context vbds driver_params in + + begin try + + (* create the VM record *) + let ref, uuid = copy_vm_record ?snapshot_info_record ~__context ~vm ~disk_op ~new_name ~new_power_state in + + (* copy every VBD using the new VDI as backend *) + (* if this fails halfway through, delete the VM and the VDIs, but don't worry *) + (* about any VBDs left hanging around, as these will be GC'd later *) + begin try + + (* copy VBDs *) + List.iter (fun (vbd, newvdi, _) -> + let vbd = Xapi_vbd_helpers.copy ~__context ~vm:ref ~vdi:newvdi vbd in + (* CA-58405: when we make a clone/snapshot/checkpoint we consider the clone/snapshot/checkpoint VM + to "own" all the clone/snapshot/checkpoint *disks* irrespective of the ownership of the original + disks. We wish the clone/snapshot/checkpoint disks to be cleaned up with the VM. *) + if Db.VBD.get_type ~__context ~self:vbd = `Disk then begin + let other_config = Db.VBD.get_other_config ~__context ~self:vbd in + if not(List.mem_assoc Xapi_globs.owner_key other_config) + then Db.VBD.add_to_other_config ~__context ~self:vbd ~key:Xapi_globs.owner_key ~value:""; + end + ) cloned_disks; + (* copy VIFs *) + let (_ : [`VIF] Ref.t list) = + List.map (fun vif -> Xapi_vif_helpers.copy ~__context ~vm:ref ~preserve_mac_address:is_a_snapshot vif) vifs in + (* copy VGPUs *) + let (_ : [`VGPU] Ref.t list) = + List.map (fun vgpu -> Xapi_vgpu.copy ~__context ~vm:ref vgpu) vgpus in + + (* copy the suspended VDI if needed *) + let suspend_VDI = + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let original = Db.VM.get_suspend_VDI ~__context ~self:vm in + if original = Ref.null || disk_op = Disk_op_snapshot + then Ref.null + else if disk_op = Disk_op_checkpoint && power_state = `Running + then original + else clone_single_vdi rpc session_id disk_op ~__context original driver_params) in + 
+ Db.VM.set_suspend_VDI ~__context ~self:ref ~value:suspend_VDI; + Db.VM.remove_from_current_operations ~__context ~self:ref ~key:task_id; + Xapi_vm_lifecycle.force_state_reset ~__context ~self:ref ~value:new_power_state; + + ref + + with e -> + Db.VM.destroy ~__context ~self:ref; + raise e + end + + with e -> + delete_disks rpc session_id cloned_disks; + raise e + end) diff --git a/ocaml/xapi/xapi_vm_clone.mli b/ocaml/xapi/xapi_vm_clone.mli index b962888604d..72d87efa8bf 100644 --- a/ocaml/xapi/xapi_vm_clone.mli +++ b/ocaml/xapi/xapi_vm_clone.mli @@ -14,54 +14,54 @@ (* Clone code is parameterised over this so it can be shared with copy *) type disk_op_t = - | Disk_op_clone - | Disk_op_copy of API.ref_SR option - | Disk_op_snapshot - | Disk_op_checkpoint + | Disk_op_clone + | Disk_op_copy of API.ref_SR option + | Disk_op_snapshot + | Disk_op_checkpoint val disk_snapshot_type: string val quiesced: string val snapshot_info: - power_state:[< `Halted - | `Migrating - | `Paused - | `Running - | `ShuttingDown - | `Suspended ] -> - is_a_snapshot:bool -> - (string * string) list + power_state:[< `Halted + | `Migrating + | `Paused + | `Running + | `ShuttingDown + | `Suspended ] -> + is_a_snapshot:bool -> + (string * string) list (* epoch hint for netapp backend *) val make_driver_params: - unit -> - (string * string) list + unit -> + (string * string) list (* Clone a list of disks, if any error occurs then delete all the ones we've * got. Reverse the list at the end, so that the disks are returned in the * same order as the [vbds] parameter. 
*) val safe_clone_disks: - (Rpc.call -> Rpc.response Client.Id.t) -> - 'a Ref.t -> - disk_op_t -> - __context:Context.t -> - [ `VBD ] API.Ref.t list -> - (string * string) list -> - ([ `VBD ] API.Ref.t * API.ref_VDI * bool) list + (Rpc.call -> Rpc.response Client.Id.t) -> + 'a Ref.t -> + disk_op_t -> + __context:Context.t -> + [ `VBD ] API.Ref.t list -> + (string * string) list -> + ([ `VBD ] API.Ref.t * API.ref_VDI * bool) list val clone_single_vdi: - ?progress:int64 * int64 * float -> - (Rpc.call -> Rpc.response Client.Id.t) -> - 'a Ref.t -> - disk_op_t -> - __context:Context.t -> - 'b Ref.t -> (string * string) list -> - API.ref_VDI + ?progress:int64 * int64 * float -> + (Rpc.call -> Rpc.response Client.Id.t) -> + 'a Ref.t -> + disk_op_t -> + __context:Context.t -> + 'b Ref.t -> (string * string) list -> + API.ref_VDI (* NB this function may be called when the VM is suspended for copy/clone operations. Snapshot can be done in live.*) val clone : - ?snapshot_info_record:(string * string) list -> - disk_op_t -> - __context:Context.t -> - vm:[ `VM ] API.Ref.t -> new_name:string -> - [ `VM ] Ref.t + ?snapshot_info_record:(string * string) list -> + disk_op_t -> + __context:Context.t -> + vm:[ `VM ] API.Ref.t -> new_name:string -> + [ `VM ] Ref.t diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index e0afe92350d..0091fc4f992 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -13,7 +13,7 @@ *) (** Common code between the fake and real servers for dealing with VMs. * @group Virtual-Machine Management - *) +*) open Stdext open Xstringext @@ -30,348 +30,348 @@ open Workload_balancing (* Convenience function. Not thread-safe. 
*) let db_set_in_other_config ~__context ~self ~key ~value = - if List.mem_assoc key (Db.VM.get_other_config ~__context ~self) then - Db.VM.remove_from_other_config ~__context ~self ~key; - Db.VM.add_to_other_config ~__context ~self ~key ~value + if List.mem_assoc key (Db.VM.get_other_config ~__context ~self) then + Db.VM.remove_from_other_config ~__context ~self ~key; + Db.VM.add_to_other_config ~__context ~self ~key ~value let compute_memory_overhead ~__context ~vm = - let snapshot = match Db.VM.get_power_state ~__context ~self:vm with - | `Paused | `Running | `Suspended -> Helpers.get_boot_record ~__context ~self:vm - | `Halted | _ -> Db.VM.get_record ~__context ~self:vm in - Memory_check.vm_compute_memory_overhead snapshot + let snapshot = match Db.VM.get_power_state ~__context ~self:vm with + | `Paused | `Running | `Suspended -> Helpers.get_boot_record ~__context ~self:vm + | `Halted | _ -> Db.VM.get_record ~__context ~self:vm in + Memory_check.vm_compute_memory_overhead snapshot let update_memory_overhead ~__context ~vm = Db.VM.set_memory_overhead ~__context ~self:vm ~value:(compute_memory_overhead ~__context ~vm) (* Overrides for database set functions: ************************************************) let set_actions_after_crash ~__context ~self ~value = - Db.VM.set_actions_after_crash ~__context ~self ~value + Db.VM.set_actions_after_crash ~__context ~self ~value let set_is_a_template ~__context ~self ~value = - (* We define a 'set_is_a_template false' as 'install time' *) - info "VM.set_is_a_template('%b')" value; - if (Db.VM.get_has_vendor_device ~__context ~self) - then Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; - let m = Db.VM.get_metrics ~__context ~self in - if not value then begin - try Db.VM_metrics.set_install_time ~__context ~self:m ~value:(Date.of_float (Unix.gettimeofday ())) - with _ -> warn "Could not update VM install time because metrics object was missing" - end else begin - (* VM must be halted, or we 
couldn't have got this far. - * If we have a halted VM with ha_always_run = true, ha_restart_priority = "restart" - * and HA is enabled on the pool, then HA is about to restart the VM and we should - * block converting it into a template. - * - * This logic can't live in the allowed_operations code, or we'd have to update VM.allowed_operations - * across the pool when enabling or disabling HA. *) - let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) in - if ha_enabled && (Helpers.is_xha_protected ~__context ~self) - then raise - (Api_errors.Server_error - (Api_errors.vm_is_protected, [Ref.string_of self])) - (* If the VM is not protected then we can convert the VM to a template, - * but we should clear the ha_always_run flag - * (which will be true if the VM has ha_restart_priority = "restart" and was shut down from inside). - * - * We don't want templates to have this flag, or HA will try to start them. *) - else Db.VM.set_ha_always_run ~__context ~self ~value:false; - (* delete the vm metrics associated with the vm if it exists, when we templat'ize it *) - try Db.VM_metrics.destroy ~__context ~self:m with _ -> () - end; - Db.VM.set_is_a_template ~__context ~self ~value + (* We define a 'set_is_a_template false' as 'install time' *) + info "VM.set_is_a_template('%b')" value; + if (Db.VM.get_has_vendor_device ~__context ~self) + then Pool_features.assert_enabled ~__context ~f:Features.PCI_device_for_auto_update; + let m = Db.VM.get_metrics ~__context ~self in + if not value then begin + try Db.VM_metrics.set_install_time ~__context ~self:m ~value:(Date.of_float (Unix.gettimeofday ())) + with _ -> warn "Could not update VM install time because metrics object was missing" + end else begin + (* VM must be halted, or we couldn't have got this far. 
+ * If we have a halted VM with ha_always_run = true, ha_restart_priority = "restart" + * and HA is enabled on the pool, then HA is about to restart the VM and we should + * block converting it into a template. + * + * This logic can't live in the allowed_operations code, or we'd have to update VM.allowed_operations + * across the pool when enabling or disabling HA. *) + let ha_enabled = Db.Pool.get_ha_enabled ~__context ~self:(Helpers.get_pool ~__context) in + if ha_enabled && (Helpers.is_xha_protected ~__context ~self) + then raise + (Api_errors.Server_error + (Api_errors.vm_is_protected, [Ref.string_of self])) + (* If the VM is not protected then we can convert the VM to a template, + * but we should clear the ha_always_run flag + * (which will be true if the VM has ha_restart_priority = "restart" and was shut down from inside). + * + * We don't want templates to have this flag, or HA will try to start them. *) + else Db.VM.set_ha_always_run ~__context ~self ~value:false; + (* delete the vm metrics associated with the vm if it exists, when we templat'ize it *) + try Db.VM_metrics.destroy ~__context ~self:m with _ -> () + end; + Db.VM.set_is_a_template ~__context ~self ~value let update_vm_virtual_hardware_platform_version ~__context ~vm = - let vm_record = Db.VM.get_record ~__context ~self:vm in - (* Deduce what we can, but the guest VM might need a higher version. *) - let visibly_required_version = - if vm_record.API.vM_has_vendor_device then - Xapi_globs.has_vendor_device - else - 0L - in - let current_version = vm_record.API.vM_hardware_platform_version in - if visibly_required_version > current_version then - Db.VM.set_hardware_platform_version ~__context ~self:vm ~value:visibly_required_version + let vm_record = Db.VM.get_record ~__context ~self:vm in + (* Deduce what we can, but the guest VM might need a higher version. 
*) + let visibly_required_version = + if vm_record.API.vM_has_vendor_device then + Xapi_globs.has_vendor_device + else + 0L + in + let current_version = vm_record.API.vM_hardware_platform_version in + if visibly_required_version > current_version then + Db.VM.set_hardware_platform_version ~__context ~self:vm ~value:visibly_required_version let create_from_record_without_checking_licence_feature_for_vendor_device ~__context rpc session_id vm_record = - let mk_vm r = Client.Client.VM.create_from_record rpc session_id r in - let has_vendor_device = vm_record.API.vM_has_vendor_device in - if has_vendor_device && not (Pool_features.is_enabled ~__context Features.PCI_device_for_auto_update) - then ( - (* Avoid the licence feature check which is enforced in VM.create (and create_from_record). *) - let vm = mk_vm {vm_record with API.vM_has_vendor_device = false} in - Db.VM.set_has_vendor_device ~__context ~self:vm ~value:true; - update_vm_virtual_hardware_platform_version ~__context ~vm; - vm - ) else mk_vm vm_record + let mk_vm r = Client.Client.VM.create_from_record rpc session_id r in + let has_vendor_device = vm_record.API.vM_has_vendor_device in + if has_vendor_device && not (Pool_features.is_enabled ~__context Features.PCI_device_for_auto_update) + then ( + (* Avoid the licence feature check which is enforced in VM.create (and create_from_record). *) + let vm = mk_vm {vm_record with API.vM_has_vendor_device = false} in + Db.VM.set_has_vendor_device ~__context ~self:vm ~value:true; + update_vm_virtual_hardware_platform_version ~__context ~vm; + vm + ) else mk_vm vm_record let destroy ~__context ~self = - (* Used to be a call to hard shutdown here, but this will be redundant *) - (* given the call to 'assert_operation_valid' *) - debug "VM.destroy: deleting DB records"; - - (* Should we be destroying blobs? It's possible to create a blob and then - add its reference to multiple objects. Perhaps we want to just leave the - blob? 
Or only delete it if there is no other reference to it? Is that - even possible to know? *) - let blobs = Db.VM.get_blobs ~__context ~self in - List.iter (fun (_,self) -> try Xapi_blob.destroy ~__context ~self with _ -> ()) blobs; - - let other_config = Db.VM.get_other_config ~__context ~self in - if ((List.mem_assoc Xapi_globs.default_template_key other_config) && - (List.assoc Xapi_globs.default_template_key other_config)="true") then - raise (Api_errors.Server_error (Api_errors.vm_cannot_delete_default_template, [])); - let appliance = Db.VM.get_appliance ~__context ~self in - if Db.is_valid_ref __context appliance then begin - Db.VM.set_appliance ~__context ~self ~value:Ref.null; - Xapi_vm_appliance_lifecycle.update_allowed_operations ~__context ~self:appliance - end; - let vbds = Db.VM.get_VBDs ~__context ~self in - List.iter (fun vbd -> - (try - let metrics = Db.VBD.get_metrics ~__context ~self:vbd in - Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); - (try Db.VBD.destroy ~__context ~self:vbd with _ -> ())) vbds; - let vifs = Db.VM.get_VIFs ~__context ~self in - List.iter (fun vif -> - (try - let metrics = Db.VIF.get_metrics ~__context ~self:vif in - Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); - (try Db.VIF.destroy ~__context ~self:vif with _ -> ())) vifs; - let vgpus = Db.VM.get_VGPUs ~__context ~self in - List.iter (fun vgpu -> try Db.VGPU.destroy ~__context ~self:vgpu with _ -> ()) vgpus; - let pcis = Db.VM.get_attached_PCIs ~__context ~self in - List.iter (fun pci -> try Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self with _ -> ()) pcis; - let vm_metrics = Db.VM.get_metrics ~__context ~self in - (try Db.VM_metrics.destroy ~__context ~self:vm_metrics with _ -> ()); - let vm_guest_metrics = Db.VM.get_guest_metrics ~__context ~self in - (try Db.VM_guest_metrics.destroy ~__context ~self:vm_guest_metrics with _ -> ()); - - Db.VM.destroy ~__context ~self + (* Used to be a call to hard shutdown here, but this will be 
redundant *) + (* given the call to 'assert_operation_valid' *) + debug "VM.destroy: deleting DB records"; + + (* Should we be destroying blobs? It's possible to create a blob and then + add its reference to multiple objects. Perhaps we want to just leave the + blob? Or only delete it if there is no other reference to it? Is that + even possible to know? *) + let blobs = Db.VM.get_blobs ~__context ~self in + List.iter (fun (_,self) -> try Xapi_blob.destroy ~__context ~self with _ -> ()) blobs; + + let other_config = Db.VM.get_other_config ~__context ~self in + if ((List.mem_assoc Xapi_globs.default_template_key other_config) && + (List.assoc Xapi_globs.default_template_key other_config)="true") then + raise (Api_errors.Server_error (Api_errors.vm_cannot_delete_default_template, [])); + let appliance = Db.VM.get_appliance ~__context ~self in + if Db.is_valid_ref __context appliance then begin + Db.VM.set_appliance ~__context ~self ~value:Ref.null; + Xapi_vm_appliance_lifecycle.update_allowed_operations ~__context ~self:appliance + end; + let vbds = Db.VM.get_VBDs ~__context ~self in + List.iter (fun vbd -> + (try + let metrics = Db.VBD.get_metrics ~__context ~self:vbd in + Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); + (try Db.VBD.destroy ~__context ~self:vbd with _ -> ())) vbds; + let vifs = Db.VM.get_VIFs ~__context ~self in + List.iter (fun vif -> + (try + let metrics = Db.VIF.get_metrics ~__context ~self:vif in + Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); + (try Db.VIF.destroy ~__context ~self:vif with _ -> ())) vifs; + let vgpus = Db.VM.get_VGPUs ~__context ~self in + List.iter (fun vgpu -> try Db.VGPU.destroy ~__context ~self:vgpu with _ -> ()) vgpus; + let pcis = Db.VM.get_attached_PCIs ~__context ~self in + List.iter (fun pci -> try Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self with _ -> ()) pcis; + let vm_metrics = Db.VM.get_metrics ~__context ~self in + (try Db.VM_metrics.destroy ~__context 
~self:vm_metrics with _ -> ()); + let vm_guest_metrics = Db.VM.get_guest_metrics ~__context ~self in + (try Db.VM_guest_metrics.destroy ~__context ~self:vm_guest_metrics with _ -> ()); + + Db.VM.destroy ~__context ~self (* Validation and assertion functions *) let invalid_value x y = raise (Api_errors.Server_error (Api_errors.invalid_value, [ x; y ])) let value_not_supported fld v reason = - raise (Api_errors.Server_error (Api_errors.value_not_supported, [ fld; v; reason ])) + raise (Api_errors.Server_error (Api_errors.value_not_supported, [ fld; v; reason ])) let validate_vcpus ~__context ~vCPUs_max ~vCPUs_at_startup = - if vCPUs_max < 1L then invalid_value "VCPUs_max" (Int64.to_string vCPUs_max); - if vCPUs_at_startup < 1L - then invalid_value "VCPUs-at-startup" (Int64.to_string vCPUs_at_startup); - if vCPUs_at_startup > vCPUs_max - then value_not_supported "VCPUs-at-startup" (Int64.to_string vCPUs_at_startup) "value greater than VCPUs-max" + if vCPUs_max < 1L then invalid_value "VCPUs_max" (Int64.to_string vCPUs_max); + if vCPUs_at_startup < 1L + then invalid_value "VCPUs-at-startup" (Int64.to_string vCPUs_at_startup); + if vCPUs_at_startup > vCPUs_max + then value_not_supported "VCPUs-at-startup" (Int64.to_string vCPUs_at_startup) "value greater than VCPUs-max" let validate_memory ~__context ~snapshot:vm_record = - let constraints = Vm_memory_constraints.extract ~vm_record in - (* For now, we simply check that the given snapshot record has *) - (* memory constraints that can be coerced to valid constraints. *) - (* In future, we can be more rigorous and require the snapshot *) - (* to have valid constraints without allowing coercion. *) - match Vm_memory_constraints.transform constraints with - | Some constraints -> () - (* Do nothing. *) - | None -> - (* The constraints could not be coerced. 
*) - raise (Api_errors.Server_error (Api_errors.memory_constraint_violation, [])) + let constraints = Vm_memory_constraints.extract ~vm_record in + (* For now, we simply check that the given snapshot record has *) + (* memory constraints that can be coerced to valid constraints. *) + (* In future, we can be more rigorous and require the snapshot *) + (* to have valid constraints without allowing coercion. *) + match Vm_memory_constraints.transform constraints with + | Some constraints -> () + (* Do nothing. *) + | None -> + (* The constraints could not be coerced. *) + raise (Api_errors.Server_error (Api_errors.memory_constraint_violation, [])) let validate_shadow_multiplier ~hVM_shadow_multiplier = - if hVM_shadow_multiplier < 1. - then invalid_value "HVM_shadow_multiplier" (string_of_float hVM_shadow_multiplier) + if hVM_shadow_multiplier < 1. + then invalid_value "HVM_shadow_multiplier" (string_of_float hVM_shadow_multiplier) let validate_actions_after_crash ~__context ~self ~value = - let fld = "VM.actions_after_crash" in - let hvm_cannot_coredump v = - if Helpers.will_boot_hvm ~__context ~self - then value_not_supported fld v "cannot invoke a coredump of an HVM domain" in - match value with - | `rename_restart -> value_not_supported fld "rename_restart" - "option would leak a domain; VMs and not domains are managed by this API" - | `coredump_and_destroy -> hvm_cannot_coredump "coredump_and_destroy" - | `coredump_and_restart -> hvm_cannot_coredump "coredump_and_restart" - | `destroy | `restart | `preserve -> () + let fld = "VM.actions_after_crash" in + let hvm_cannot_coredump v = + if Helpers.will_boot_hvm ~__context ~self + then value_not_supported fld v "cannot invoke a coredump of an HVM domain" in + match value with + | `rename_restart -> value_not_supported fld "rename_restart" + "option would leak a domain; VMs and not domains are managed by this API" + | `coredump_and_destroy -> hvm_cannot_coredump "coredump_and_destroy" + | `coredump_and_restart -> 
hvm_cannot_coredump "coredump_and_restart" + | `destroy | `restart | `preserve -> () (* Used to sanity-check parameters before VM start *) let validate_basic_parameters ~__context ~self ~snapshot:x = - validate_vcpus ~__context - ~vCPUs_max:x.API.vM_VCPUs_max - ~vCPUs_at_startup:x.API.vM_VCPUs_at_startup; - validate_memory ~__context ~snapshot:x; - validate_shadow_multiplier - ~hVM_shadow_multiplier:x.API.vM_HVM_shadow_multiplier; - validate_actions_after_crash ~__context ~self ~value:x.API.vM_actions_after_crash + validate_vcpus ~__context + ~vCPUs_max:x.API.vM_VCPUs_max + ~vCPUs_at_startup:x.API.vM_VCPUs_at_startup; + validate_memory ~__context ~snapshot:x; + validate_shadow_multiplier + ~hVM_shadow_multiplier:x.API.vM_HVM_shadow_multiplier; + validate_actions_after_crash ~__context ~self ~value:x.API.vM_actions_after_crash let assert_hardware_platform_support ~__context ~vm ~host = - let vm_hardware_platform_version = Db.VM.get_hardware_platform_version ~__context ~self:vm in - let host_virtual_hardware_platform_versions = - try - match host with - | Helpers.LocalObject host_ref -> - Db.Host.get_virtual_hardware_platform_versions ~__context ~self:host_ref - | Helpers.RemoteObject (rpc, session_id, host_ref) -> - XenAPI.Host.get_virtual_hardware_platform_versions ~rpc ~session_id ~self:host_ref - with Not_found -> - (* An old host that does not understand the concept - * has implicit support for version 0 *) - [0L] - in - if not (List.mem vm_hardware_platform_version host_virtual_hardware_platform_versions) then - let host_r = match host with - | Helpers.LocalObject host_ref -> host_ref - | Helpers.RemoteObject (rpc, session_id, host_ref) -> host_ref - in - raise (Api_errors.Server_error ( - Api_errors.vm_host_incompatible_virtual_hardware_platform_version, [ - Ref.string_of host_r; - "["^(String.concat "; " (List.map Int64.to_string host_virtual_hardware_platform_versions))^"]"; - Ref.string_of vm; - Int64.to_string vm_hardware_platform_version])) + let 
vm_hardware_platform_version = Db.VM.get_hardware_platform_version ~__context ~self:vm in + let host_virtual_hardware_platform_versions = + try + match host with + | Helpers.LocalObject host_ref -> + Db.Host.get_virtual_hardware_platform_versions ~__context ~self:host_ref + | Helpers.RemoteObject (rpc, session_id, host_ref) -> + XenAPI.Host.get_virtual_hardware_platform_versions ~rpc ~session_id ~self:host_ref + with Not_found -> + (* An old host that does not understand the concept + * has implicit support for version 0 *) + [0L] + in + if not (List.mem vm_hardware_platform_version host_virtual_hardware_platform_versions) then + let host_r = match host with + | Helpers.LocalObject host_ref -> host_ref + | Helpers.RemoteObject (rpc, session_id, host_ref) -> host_ref + in + raise (Api_errors.Server_error ( + Api_errors.vm_host_incompatible_virtual_hardware_platform_version, [ + Ref.string_of host_r; + "["^(String.concat "; " (List.map Int64.to_string host_virtual_hardware_platform_versions))^"]"; + Ref.string_of vm; + Int64.to_string vm_hardware_platform_version])) let assert_host_is_enabled ~__context ~host = - (* Check the host is enabled first *) - if not (Db.Host.get_enabled ~__context ~self:host) then - raise (Api_errors.Server_error ( - Api_errors.host_disabled, [Ref.string_of host])) + (* Check the host is enabled first *) + if not (Db.Host.get_enabled ~__context ~self:host) then + raise (Api_errors.Server_error ( + Api_errors.host_disabled, [Ref.string_of host])) let is_host_live ~__context host = - try - Db.Host_metrics.get_live - ~__context ~self:(Db.Host.get_metrics ~__context ~self:host) - with _ -> false + try + Db.Host_metrics.get_live + ~__context ~self:(Db.Host.get_metrics ~__context ~self:host) + with _ -> false let assert_host_is_live ~__context ~host = - let host_is_live = is_host_live ~__context host in - if not host_is_live then - raise (Api_errors.Server_error (Api_errors.host_not_live, [])) + let host_is_live = is_host_live ~__context host in 
+ if not host_is_live then + raise (Api_errors.Server_error (Api_errors.host_not_live, [])) let which_specified_SRs_not_available_on_host ~__context ~reqd_srs ~host = - let pbds = Db.Host.get_PBDs ~__context ~self:host in - (* filter for those currently_attached *) - let pbds = List.filter (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds in - let avail_srs = List.map (fun self -> Db.PBD.get_SR ~__context ~self) pbds in - let not_available = List.set_difference reqd_srs avail_srs in - List.iter (fun sr -> warn "Host %s cannot see SR %s" - (Helpers.checknull (fun () -> Db.Host.get_name_label ~__context ~self:host)) - (Helpers.checknull (fun () -> Db.SR.get_name_label ~__context ~self:sr))) - not_available; - not_available + let pbds = Db.Host.get_PBDs ~__context ~self:host in + (* filter for those currently_attached *) + let pbds = List.filter (fun self -> Db.PBD.get_currently_attached ~__context ~self) pbds in + let avail_srs = List.map (fun self -> Db.PBD.get_SR ~__context ~self) pbds in + let not_available = List.set_difference reqd_srs avail_srs in + List.iter (fun sr -> warn "Host %s cannot see SR %s" + (Helpers.checknull (fun () -> Db.Host.get_name_label ~__context ~self:host)) + (Helpers.checknull (fun () -> Db.SR.get_name_label ~__context ~self:sr))) + not_available; + not_available exception Host_cannot_see_all_SRs let assert_can_see_specified_SRs ~__context ~reqd_srs ~host = - let not_available = which_specified_SRs_not_available_on_host ~__context ~reqd_srs ~host in - if not_available <> [] - then raise Host_cannot_see_all_SRs + let not_available = which_specified_SRs_not_available_on_host ~__context ~reqd_srs ~host in + if not_available <> [] + then raise Host_cannot_see_all_SRs let assert_can_see_SRs ~__context ~self ~host = - let vbds = Db.VM.get_VBDs ~__context ~self in - (* Skip empty VBDs *) - let vbds = List.filter (fun self -> not(Db.VBD.get_empty ~__context ~self)) vbds in - let vdis = List.map (fun self -> Db.VBD.get_VDI 
~__context ~self) vbds in - (* If VM is currently suspended then consider the suspend_VDI. Note both power_state and the suspend VDI - are stored in R/O fields, not the last_boot_record *) - let suspend_vdi = if Db.VM.get_power_state ~__context ~self =`Suspended then [ Db.VM.get_suspend_VDI ~__context ~self ] else [] in - let reqd_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self) (vdis @ suspend_vdi) in - let not_available = which_specified_SRs_not_available_on_host ~__context ~reqd_srs ~host in - if not_available <> [] - then raise (Api_errors.Server_error (Api_errors.vm_requires_sr, [ Ref.string_of self; Ref.string_of (List.hd not_available) ])) + let vbds = Db.VM.get_VBDs ~__context ~self in + (* Skip empty VBDs *) + let vbds = List.filter (fun self -> not(Db.VBD.get_empty ~__context ~self)) vbds in + let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) vbds in + (* If VM is currently suspended then consider the suspend_VDI. Note both power_state and the suspend VDI + are stored in R/O fields, not the last_boot_record *) + let suspend_vdi = if Db.VM.get_power_state ~__context ~self =`Suspended then [ Db.VM.get_suspend_VDI ~__context ~self ] else [] in + let reqd_srs = List.map (fun self -> Db.VDI.get_SR ~__context ~self) (vdis @ suspend_vdi) in + let not_available = which_specified_SRs_not_available_on_host ~__context ~reqd_srs ~host in + if not_available <> [] + then raise (Api_errors.Server_error (Api_errors.vm_requires_sr, [ Ref.string_of self; Ref.string_of (List.hd not_available) ])) let assert_can_see_networks ~__context ~self ~host = - let vifs = Db.VM.get_VIFs ~__context ~self in - let reqd_nets = - List.map (fun self -> Db.VIF.get_network ~__context ~self) vifs in - assert_can_see_named_networks ~__context ~vm:self ~host reqd_nets + let vifs = Db.VM.get_VIFs ~__context ~self in + let reqd_nets = + List.map (fun self -> Db.VIF.get_network ~__context ~self) vifs in + assert_can_see_named_networks ~__context ~vm:self ~host reqd_nets 
(* IOMMU (VT-d) is required iff the VM has any vGPUs which require PCI * passthrough. *) let vm_needs_iommu ~__context ~self = - List.exists - (fun vgpu -> Xapi_vgpu.requires_passthrough ~__context ~self:vgpu) - (Db.VM.get_VGPUs ~__context ~self) + List.exists + (fun vgpu -> Xapi_vgpu.requires_passthrough ~__context ~self:vgpu) + (Db.VM.get_VGPUs ~__context ~self) let assert_host_has_iommu ~__context ~host = - let chipset_info = Db.Host.get_chipset_info ~__context ~self:host in - if List.assoc "iommu" chipset_info <> "true" then - raise (Api_errors.Server_error (Api_errors.vm_requires_iommu, [Ref.string_of host])) + let chipset_info = Db.Host.get_chipset_info ~__context ~self:host in + if List.assoc "iommu" chipset_info <> "true" then + raise (Api_errors.Server_error (Api_errors.vm_requires_iommu, [Ref.string_of host])) let assert_gpus_available ~__context ~self ~host = - let vgpus = Db.VM.get_VGPUs ~__context ~self in - let reqd_groups = - List.map (fun self -> Db.VGPU.get_GPU_group ~__context ~self) vgpus in - let is_pgpu_available pgpu vgpu = - try Xapi_pgpu.assert_can_run_VGPU ~__context ~self:pgpu ~vgpu; true - with _ -> false - in - let can_run_vgpu_on host vgpu = - let group = Db.VGPU.get_GPU_group ~__context ~self:vgpu in - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in - let avail_pgpus = - List.filter - (fun pgpu -> is_pgpu_available pgpu vgpu) - pgpus - in - let hosts = List.map (fun self -> Db.PGPU.get_host ~__context ~self) avail_pgpus in - List.mem host hosts - in - let runnable_vgpus = List.filter (can_run_vgpu_on host) vgpus in - let avail_groups = - List.map - (fun self -> Db.VGPU.get_GPU_group ~__context ~self) - runnable_vgpus - in - let not_available = List.set_difference reqd_groups avail_groups in - - List.iter - (fun group -> warn "Host %s does not have a pGPU from group %s available" - (Helpers.checknull - (fun () -> Db.Host.get_name_label ~__context ~self:host)) - (Helpers.checknull - (fun () -> Db.GPU_group.get_name_label 
~__context ~self:group))) - not_available; - if not_available <> [] then - raise (Api_errors.Server_error (Api_errors.vm_requires_gpu, [ - Ref.string_of self; - Ref.string_of (List.hd not_available) - ])) + let vgpus = Db.VM.get_VGPUs ~__context ~self in + let reqd_groups = + List.map (fun self -> Db.VGPU.get_GPU_group ~__context ~self) vgpus in + let is_pgpu_available pgpu vgpu = + try Xapi_pgpu.assert_can_run_VGPU ~__context ~self:pgpu ~vgpu; true + with _ -> false + in + let can_run_vgpu_on host vgpu = + let group = Db.VGPU.get_GPU_group ~__context ~self:vgpu in + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:group in + let avail_pgpus = + List.filter + (fun pgpu -> is_pgpu_available pgpu vgpu) + pgpus + in + let hosts = List.map (fun self -> Db.PGPU.get_host ~__context ~self) avail_pgpus in + List.mem host hosts + in + let runnable_vgpus = List.filter (can_run_vgpu_on host) vgpus in + let avail_groups = + List.map + (fun self -> Db.VGPU.get_GPU_group ~__context ~self) + runnable_vgpus + in + let not_available = List.set_difference reqd_groups avail_groups in + + List.iter + (fun group -> warn "Host %s does not have a pGPU from group %s available" + (Helpers.checknull + (fun () -> Db.Host.get_name_label ~__context ~self:host)) + (Helpers.checknull + (fun () -> Db.GPU_group.get_name_label ~__context ~self:group))) + not_available; + if not_available <> [] then + raise (Api_errors.Server_error (Api_errors.vm_requires_gpu, [ + Ref.string_of self; + Ref.string_of (List.hd not_available) + ])) let assert_host_supports_hvm ~__context ~self ~host = - (* For now we say that a host supports HVM if any of *) - (* the capability strings contains the substring "hvm". 
*) - let capabilities = Db.Host.get_capabilities ~__context ~self:host in - let host_supports_hvm = List.fold_left (||) false - (List.map (fun x -> String.has_substr x "hvm") capabilities) in - if not(host_supports_hvm) then - raise (Api_errors.Server_error (Api_errors.vm_hvm_required, [Ref.string_of self])) + (* For now we say that a host supports HVM if any of *) + (* the capability strings contains the substring "hvm". *) + let capabilities = Db.Host.get_capabilities ~__context ~self:host in + let host_supports_hvm = List.fold_left (||) false + (List.map (fun x -> String.has_substr x "hvm") capabilities) in + if not(host_supports_hvm) then + raise (Api_errors.Server_error (Api_errors.vm_hvm_required, [Ref.string_of self])) let assert_enough_memory_available ~__context ~self ~host ~snapshot = - let host_mem_available = - Memory_check.host_compute_free_memory_with_maximum_compression - ~__context ~host (Some self) in - let main, shadow = - Memory_check.vm_compute_start_memory ~__context snapshot in - let mem_reqd_for_vm = Int64.add main shadow in - debug "host %s; available_memory = %Ld; memory_required = %Ld" - (Db.Host.get_name_label ~self:host ~__context) - host_mem_available - mem_reqd_for_vm; - if host_mem_available < mem_reqd_for_vm then - raise (Api_errors.Server_error ( - Api_errors.host_not_enough_free_memory, - [ - Int64.to_string mem_reqd_for_vm; - Int64.to_string host_mem_available; - ])) + let host_mem_available = + Memory_check.host_compute_free_memory_with_maximum_compression + ~__context ~host (Some self) in + let main, shadow = + Memory_check.vm_compute_start_memory ~__context snapshot in + let mem_reqd_for_vm = Int64.add main shadow in + debug "host %s; available_memory = %Ld; memory_required = %Ld" + (Db.Host.get_name_label ~self:host ~__context) + host_mem_available + mem_reqd_for_vm; + if host_mem_available < mem_reqd_for_vm then + raise (Api_errors.Server_error ( + Api_errors.host_not_enough_free_memory, + [ + Int64.to_string mem_reqd_for_vm; 
+ Int64.to_string host_mem_available; + ])) (** Checks to see if a VM can boot on a particular host, throws an error if not. * Criteria: - - The host must support the VM's required Virtual Hardware Platform version. - - The vCPU, memory, shadow multiplier, and actions-after-crash values must be valid. - - For each VBD, corresponding VDI's SR must be attached on the target host. - - For each VIF, either the Network has a PIF connecting to the target host, - OR if no PIF is connected to the Network then the host must be the same one - all running VMs with VIFs on the Network are running on. - - If the VM need PCI passthrough, check the host supports IOMMU/VT-d. - - For each vGPU, check whether a pGPU from the required GPU group is available. - - If the VM would boot HVM, check the host supports it. - - If the VM would boot PV, check the bootloader is supported. + - The host must support the VM's required Virtual Hardware Platform version. + - The vCPU, memory, shadow multiplier, and actions-after-crash values must be valid. + - For each VBD, corresponding VDI's SR must be attached on the target host. + - For each VIF, either the Network has a PIF connecting to the target host, + OR if no PIF is connected to the Network then the host must be the same one + all running VMs with VIFs on the Network are running on. + - If the VM need PCI passthrough, check the host supports IOMMU/VT-d. + - For each vGPU, check whether a pGPU from the required GPU group is available. + - If the VM would boot HVM, check the host supports it. + - If the VM would boot PV, check the bootloader is supported. * I.e. we share storage but not (internal/PIF-less) networks: the first VIF on a * network pins it to the host the VM is running on. @@ -381,340 +381,340 @@ let assert_enough_memory_available ~__context ~self ~host ~snapshot = * We must use the snapshot to boot the VM. 
* XXX: we ought to lock this otherwise we may violate our constraints under load - *) +*) let assert_can_boot_here ~__context ~self ~host ~snapshot ?(do_sr_check=true) ?(do_memory_check=true) () = - debug "Checking whether VM %s can run on host %s" (Ref.string_of self) (Ref.string_of host); - validate_basic_parameters ~__context ~self ~snapshot; - assert_host_is_live ~__context ~host; - assert_host_is_enabled ~__context ~host; - (* Check the host can support the VM's required version of virtual hardware platform *) - assert_hardware_platform_support ~__context ~vm:self ~host:(Helpers.LocalObject host); - if do_sr_check then - assert_can_see_SRs ~__context ~self ~host; - assert_can_see_networks ~__context ~self ~host; - if vm_needs_iommu ~__context ~self then - assert_host_has_iommu ~__context ~host; - assert_gpus_available ~__context ~self ~host; - if Helpers.will_boot_hvm ~__context ~self then - assert_host_supports_hvm ~__context ~self ~host; - if do_memory_check then - assert_enough_memory_available ~__context ~self ~host ~snapshot; - debug "All fine, VM %s can run on host %s!" 
(Ref.string_of self) (Ref.string_of host) + debug "Checking whether VM %s can run on host %s" (Ref.string_of self) (Ref.string_of host); + validate_basic_parameters ~__context ~self ~snapshot; + assert_host_is_live ~__context ~host; + assert_host_is_enabled ~__context ~host; + (* Check the host can support the VM's required version of virtual hardware platform *) + assert_hardware_platform_support ~__context ~vm:self ~host:(Helpers.LocalObject host); + if do_sr_check then + assert_can_see_SRs ~__context ~self ~host; + assert_can_see_networks ~__context ~self ~host; + if vm_needs_iommu ~__context ~self then + assert_host_has_iommu ~__context ~host; + assert_gpus_available ~__context ~self ~host; + if Helpers.will_boot_hvm ~__context ~self then + assert_host_supports_hvm ~__context ~self ~host; + if do_memory_check then + assert_enough_memory_available ~__context ~self ~host ~snapshot; + debug "All fine, VM %s can run on host %s!" (Ref.string_of self) (Ref.string_of host) let retrieve_wlb_recommendations ~__context ~vm ~snapshot = - (* we have already checked the number of returned entries is correct in retrieve_vm_recommendations - But checking that there are no duplicates is also quite cheap, put them in a hash and overwrite duplicates *) - let recs = Hashtbl.create 12 in - List.iter - (fun (h, r) -> - try - assert_can_boot_here ~__context ~self:vm ~host:h ~snapshot (); - Hashtbl.replace recs h r; - with - | Api_errors.Server_error(x, y) -> Hashtbl.replace recs h (x :: y)) - (retrieve_vm_recommendations ~__context ~vm); - if ((Hashtbl.length recs) <> (List.length (Helpers.get_live_hosts ~__context))) - then - raise_malformed_response' "VMGetRecommendations" - "Number of unique recommendations does not match number of potential hosts" "Unknown" - else - Hashtbl.fold (fun k v tl -> (k,v) :: tl) recs [] + (* we have already checked the number of returned entries is correct in retrieve_vm_recommendations + But checking that there are no duplicates is also quite cheap, 
put them in a hash and overwrite duplicates *) + let recs = Hashtbl.create 12 in + List.iter + (fun (h, r) -> + try + assert_can_boot_here ~__context ~self:vm ~host:h ~snapshot (); + Hashtbl.replace recs h r; + with + | Api_errors.Server_error(x, y) -> Hashtbl.replace recs h (x :: y)) + (retrieve_vm_recommendations ~__context ~vm); + if ((Hashtbl.length recs) <> (List.length (Helpers.get_live_hosts ~__context))) + then + raise_malformed_response' "VMGetRecommendations" + "Number of unique recommendations does not match number of potential hosts" "Unknown" + else + Hashtbl.fold (fun k v tl -> (k,v) :: tl) recs [] (** Returns the subset of all hosts to which the given function [choose_fn] -can be applied without raising an exception. If the optional [vm] argument is -present, this function additionally prints a debug message that includes the -names of the given VM and each of the possible hosts. *) + can be applied without raising an exception. If the optional [vm] argument is + present, this function additionally prints a debug message that includes the + names of the given VM and each of the possible hosts. *) let possible_hosts ~__context ?vm ~choose_fn () = - (* XXXX: This function uses exceptions to control the flow of execution. *) - (* XXXX: This function mixes business logic with debugging functionality. *) - let all_hosts = Db.Host.get_all ~__context in - let choices = List.filter - (fun host -> - try (choose_fn ~host : unit); assert_host_is_live ~__context ~host; true - with _ -> false - ) - all_hosts in - begin - match vm with - | Some vm -> - warn "VM %s could run on any of these hosts: [ %s ]" - (Helpers.checknull - (fun () -> Db.VM.get_name_label ~__context ~self:vm)) - (String.concat "; " - (List.map - (fun self -> - Helpers.checknull - (fun () -> - Db.Host.get_name_label ~__context ~self) - ) - choices - ) - ); - | None -> () - end; - choices + (* XXXX: This function uses exceptions to control the flow of execution. 
*) + (* XXXX: This function mixes business logic with debugging functionality. *) + let all_hosts = Db.Host.get_all ~__context in + let choices = List.filter + (fun host -> + try (choose_fn ~host : unit); assert_host_is_live ~__context ~host; true + with _ -> false + ) + all_hosts in + begin + match vm with + | Some vm -> + warn "VM %s could run on any of these hosts: [ %s ]" + (Helpers.checknull + (fun () -> Db.VM.get_name_label ~__context ~self:vm)) + (String.concat "; " + (List.map + (fun self -> + Helpers.checknull + (fun () -> + Db.Host.get_name_label ~__context ~self) + ) + choices + ) + ); + | None -> () + end; + choices (** Returns a single host (from the set of all hosts) to which the given -function [choose_fn] can be applied without raising an exception. Raises -[Api_errors.no_hosts_available] if no such host exists. If the optional [vm] -argument is present, then this function additionally prints a debug message -that includes the names of the given VM and the subset of all hosts that -satisfy the given function [choose_fn]. *) + function [choose_fn] can be applied without raising an exception. Raises + [Api_errors.no_hosts_available] if no such host exists. If the optional [vm] + argument is present, then this function additionally prints a debug message + that includes the names of the given VM and the subset of all hosts that + satisfy the given function [choose_fn]. 
*) let choose_host ~__context ?vm ~choose_fn ?(prefer_slaves=false) () = - let choices = possible_hosts ~__context ?vm ~choose_fn () in - match choices with - | [] -> raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) - | [h] -> h - | _ -> - let choices = - if prefer_slaves then - let master = Helpers.get_master ~__context in - List.filter ((<>) master) choices - else choices in - List.nth choices (Random.int (List.length choices)) + let choices = possible_hosts ~__context ?vm ~choose_fn () in + match choices with + | [] -> raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) + | [h] -> h + | _ -> + let choices = + if prefer_slaves then + let master = Helpers.get_master ~__context in + List.filter ((<>) master) choices + else choices in + List.nth choices (Random.int (List.length choices)) (* Compute all SRs required for shutting down suspended domains *) let compute_required_SRs_for_shutting_down_suspended_domains ~__context ~vm = - let all_vm_vdis = - List.map - (fun vbd-> - if Db.VBD.get_empty ~__context ~self:vbd then - None - else - Some (Db.VBD.get_VDI ~__context ~self:vbd)) - (Db.VM.get_VBDs ~__context ~self:vm) in - let all_vm_vdis = List.unbox_list all_vm_vdis in - List.map (fun vdi -> Db.VDI.get_SR ~self:vdi ~__context) all_vm_vdis + let all_vm_vdis = + List.map + (fun vbd-> + if Db.VBD.get_empty ~__context ~self:vbd then + None + else + Some (Db.VBD.get_VDI ~__context ~self:vbd)) + (Db.VM.get_VBDs ~__context ~self:vm) in + let all_vm_vdis = List.unbox_list all_vm_vdis in + List.map (fun vdi -> Db.VDI.get_SR ~self:vdi ~__context) all_vm_vdis (** Returns the subset of all hosts on which the given [vm] can boot. This -function also prints a debug message identifying the given [vm] and hosts. *) + function also prints a debug message identifying the given [vm] and hosts. 
*) let get_possible_hosts_for_vm ~__context ~vm ~snapshot = - let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in - if host <> Ref.null then [ host ] else - possible_hosts ~__context ~vm - ~choose_fn:(assert_can_boot_here ~__context ~self:vm ~snapshot ()) () + let host = Db.VM.get_scheduled_to_be_resident_on ~__context ~self:vm in + if host <> Ref.null then [ host ] else + possible_hosts ~__context ~vm + ~choose_fn:(assert_can_boot_here ~__context ~self:vm ~snapshot ()) () (** Performs an expensive and comprehensive check to determine whether the -given [guest] can run on the given [host]. Returns true if and only if the -guest can run on the host. *) + given [guest] can run on the given [host]. Returns true if and only if the + guest can run on the host. *) let vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check host = - let host_has_proper_version () = - if Helpers.rolling_upgrade_in_progress ~__context - then - Helpers.host_has_highest_version_in_pool - ~__context ~host:(Helpers.LocalObject host) - else true in - let host_enabled () = Db.Host.get_enabled ~__context ~self:host in - let host_live () = - let host_metrics = Db.Host.get_metrics ~__context ~self:host in - Db.Host_metrics.get_live ~__context ~self:host_metrics in - let host_can_run_vm () = - Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); - assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check (); - true in - let host_evacuate_in_progress = - try let _ = List.find (fun s -> snd s = `evacuate) (Db.Host.get_current_operations ~__context ~self:host) in false with _ -> true - in - try host_has_proper_version () && host_enabled () && host_live () && host_can_run_vm () && host_evacuate_in_progress - with _ -> false + let host_has_proper_version () = + if Helpers.rolling_upgrade_in_progress ~__context + then + Helpers.host_has_highest_version_in_pool + ~__context ~host:(Helpers.LocalObject host) + else true in + let host_enabled () = 
Db.Host.get_enabled ~__context ~self:host in + let host_live () = + let host_metrics = Db.Host.get_metrics ~__context ~self:host in + Db.Host_metrics.get_live ~__context ~self:host_metrics in + let host_can_run_vm () = + Cpuid_helpers.assert_vm_is_compatible ~__context ~vm ~host (); + assert_can_boot_here ~__context ~self:vm ~host ~snapshot ~do_memory_check (); + true in + let host_evacuate_in_progress = + try let _ = List.find (fun s -> snd s = `evacuate) (Db.Host.get_current_operations ~__context ~self:host) in false with _ -> true + in + try host_has_proper_version () && host_enabled () && host_live () && host_can_run_vm () && host_evacuate_in_progress + with _ -> false (* Group the hosts into lists of hosts with equal best capacity *) let group_hosts_by_best_pgpu_in_group ~__context gpu_group vgpu_type = - let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:gpu_group in - let can_accomodate_vgpu pgpu = - Xapi_pgpu_helpers.get_remaining_capacity ~__context ~self:pgpu - ~vgpu_type > 0L - in - let viable_pgpus = List.filter can_accomodate_vgpu pgpus in - let viable_hosts = List.setify - (List.map (fun pgpu -> Db.PGPU.get_host ~__context ~self:pgpu) - viable_pgpus) - in - let ordering = - match Db.GPU_group.get_allocation_algorithm ~__context ~self:gpu_group with - | `depth_first -> `ascending | `breadth_first -> `descending - in - Helpers.group_by ~ordering - (fun host -> - let group_by_capacity pgpus = Helpers.group_by ~ordering - (fun pgpu -> Xapi_pgpu_helpers.get_remaining_capacity ~__context ~self:pgpu ~vgpu_type) - pgpus - in - let viable_resident_pgpus = List.filter - (fun self -> Db.PGPU.get_host ~__context ~self = host) - viable_pgpus - in - snd (List.hd (List.hd (group_by_capacity viable_resident_pgpus))) - ) viable_hosts + let pgpus = Db.GPU_group.get_PGPUs ~__context ~self:gpu_group in + let can_accomodate_vgpu pgpu = + Xapi_pgpu_helpers.get_remaining_capacity ~__context ~self:pgpu + ~vgpu_type > 0L + in + let viable_pgpus = List.filter 
can_accomodate_vgpu pgpus in + let viable_hosts = List.setify + (List.map (fun pgpu -> Db.PGPU.get_host ~__context ~self:pgpu) + viable_pgpus) + in + let ordering = + match Db.GPU_group.get_allocation_algorithm ~__context ~self:gpu_group with + | `depth_first -> `ascending | `breadth_first -> `descending + in + Helpers.group_by ~ordering + (fun host -> + let group_by_capacity pgpus = Helpers.group_by ~ordering + (fun pgpu -> Xapi_pgpu_helpers.get_remaining_capacity ~__context ~self:pgpu ~vgpu_type) + pgpus + in + let viable_resident_pgpus = List.filter + (fun self -> Db.PGPU.get_host ~__context ~self = host) + viable_pgpus + in + snd (List.hd (List.hd (group_by_capacity viable_resident_pgpus))) + ) viable_hosts (** Selects a single host from the set of all hosts on which the given [vm] -can boot. Raises [Api_errors.no_hosts_available] if no such host exists. *) + can boot. Raises [Api_errors.no_hosts_available] if no such host exists. *) let choose_host_for_vm_no_wlb ~__context ~vm ~snapshot = - let validate_host = vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check:true in - let all_hosts = Db.Host.get_all ~__context in - try - match Db.VM.get_VGPUs ~__context ~self:vm with - | [] -> Xapi_vm_placement.select_host __context vm validate_host all_hosts - | vgpu :: _ -> (* just considering first vgpu *) - let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in - let gpu_group = Db.VGPU.get_GPU_group ~__context ~self:vgpu in - match - Xapi_gpu_group.get_remaining_capacity_internal ~__context - ~self:gpu_group ~vgpu_type - with - | Either.Left e -> raise e - | Either.Right _ -> (); - let host_lists = - group_hosts_by_best_pgpu_in_group ~__context gpu_group vgpu_type in - let rec select_host_from = function - | [] -> raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) - | (hosts :: less_optimal_groups_of_hosts) -> - let hosts = List.map (fun (h, c) -> h) hosts in - debug "Attempting to start VM (%s) on one of equally optimal hosts [ %s ]" - 
(Ref.string_of vm) (String.concat ";" (List.map Ref.string_of hosts)); - try Xapi_vm_placement.select_host __context vm validate_host hosts - with _ -> - info "Failed to start VM (%s) on any of [ %s ]" - (Ref.string_of vm) (String.concat ";" (List.map Ref.string_of hosts)); - select_host_from less_optimal_groups_of_hosts - in - select_host_from host_lists - with Api_errors.Server_error(x,[]) when x=Api_errors.no_hosts_available -> - debug "No hosts guaranteed to satisfy VM constraints. Trying again ignoring memory checks"; - let validate_host = vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check:false in - Xapi_vm_placement.select_host __context vm validate_host all_hosts + let validate_host = vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check:true in + let all_hosts = Db.Host.get_all ~__context in + try + match Db.VM.get_VGPUs ~__context ~self:vm with + | [] -> Xapi_vm_placement.select_host __context vm validate_host all_hosts + | vgpu :: _ -> (* just considering first vgpu *) + let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in + let gpu_group = Db.VGPU.get_GPU_group ~__context ~self:vgpu in + match + Xapi_gpu_group.get_remaining_capacity_internal ~__context + ~self:gpu_group ~vgpu_type + with + | Either.Left e -> raise e + | Either.Right _ -> (); + let host_lists = + group_hosts_by_best_pgpu_in_group ~__context gpu_group vgpu_type in + let rec select_host_from = function + | [] -> raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) + | (hosts :: less_optimal_groups_of_hosts) -> + let hosts = List.map (fun (h, c) -> h) hosts in + debug "Attempting to start VM (%s) on one of equally optimal hosts [ %s ]" + (Ref.string_of vm) (String.concat ";" (List.map Ref.string_of hosts)); + try Xapi_vm_placement.select_host __context vm validate_host hosts + with _ -> + info "Failed to start VM (%s) on any of [ %s ]" + (Ref.string_of vm) (String.concat ";" (List.map Ref.string_of hosts)); + select_host_from less_optimal_groups_of_hosts 
+ in + select_host_from host_lists + with Api_errors.Server_error(x,[]) when x=Api_errors.no_hosts_available -> + debug "No hosts guaranteed to satisfy VM constraints. Trying again ignoring memory checks"; + let validate_host = vm_can_run_on_host ~__context ~vm ~snapshot ~do_memory_check:false in + Xapi_vm_placement.select_host __context vm validate_host all_hosts (* choose_host_for_vm will use WLB as long as it is enabled and there *) (* is no pool.other_config["wlb_choose_host_disable"] = "true". *) let choose_host_uses_wlb ~__context = - Workload_balancing.check_wlb_enabled ~__context && - not ( - List.exists - (fun (k,v) -> - k = "wlb_choose_host_disable" - && (String.lowercase v = "true")) - (Db.Pool.get_other_config ~__context - ~self:(Helpers.get_pool ~__context))) + Workload_balancing.check_wlb_enabled ~__context && + not ( + List.exists + (fun (k,v) -> + k = "wlb_choose_host_disable" + && (String.lowercase v = "true")) + (Db.Pool.get_other_config ~__context + ~self:(Helpers.get_pool ~__context))) (** Given a virtual machine, returns a host it can boot on, giving *) (** priority to an affinity host if one is present. WARNING: called *) (** while holding the global lock from the message forwarding layer. 
*) let choose_host_for_vm ~__context ~vm ~snapshot = - if choose_host_uses_wlb ~__context then - try - let rec filter_and_convert recs = - match recs with - | (h, recom) :: tl -> - begin - debug "\n%s\n" (String.concat ";" recom); - match recom with - | ["WLB"; "0.0"; rec_id; zero_reason] -> - filter_and_convert tl - | ["WLB"; stars; rec_id] -> - (h, float_of_string stars, rec_id) - :: filter_and_convert tl - | _ -> filter_and_convert tl - end - | [] -> [] - in - begin - let all_hosts = - (List.sort - (fun (h, s, r) (h', s', r') -> - if s < s' then 1 else if s > s' then -1 else 0) - (filter_and_convert (retrieve_wlb_recommendations - ~__context ~vm ~snapshot)) - ) - in - debug "Hosts sorted in priority: %s" - (List.fold_left - (fun a (h,s,r) -> - a ^ (Printf.sprintf "%s %f," - (Db.Host.get_name_label ~__context ~self:h) s) - ) "" all_hosts - ); - match all_hosts with - | (h,s,r)::_ -> - debug "Wlb has recommended host %s" - (Db.Host.get_name_label ~__context ~self:h); - let action = Db.Task.get_name_label ~__context - ~self:(Context.get_task_id __context) in - let oc = Db.Pool.get_other_config ~__context - ~self:(Helpers.get_pool ~__context) in - Db.Task.set_other_config ~__context - ~self:(Context.get_task_id __context) - ~value:([ - ("wlb_advised", r); - ("wlb_action", action); - ("wlb_action_obj_type", "VM"); - ("wlb_action_obj_ref", (Ref.string_of vm)) - ] @ oc); - h - | _ -> - debug "Wlb has no recommendations. \ - Using original algorithm"; - choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - end - with - | Api_errors.Server_error(error_type, error_detail) -> - debug "Encountered error when using wlb for choosing host \ - \"%s: %s\". 
Using original algorithm" - error_type - (String.concat "" error_detail); - begin - try - let uuid = Db.VM.get_uuid ~__context ~self:vm in - let message_body = - Printf.sprintf - "Wlb consultation for VM '%s' failed (pool uuid: %s)" - (Db.VM.get_name_label ~__context ~self:vm) - (Db.Pool.get_uuid ~__context - ~self:(Helpers.get_pool ~__context)) - in - let (name, priority) = Api_messages.wlb_failed in - ignore (Xapi_message.create ~__context ~name ~priority - ~cls:`VM ~obj_uuid:uuid ~body:message_body) - with _ -> () - end; - choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - | Failure "float_of_string" -> - debug "Star ratings from wlb could not be parsed to floats. \ - Using original algorithm"; - choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - | _ -> - debug "Encountered an unknown error when using wlb for \ - choosing host. Using original algorithm"; - choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - else - begin - debug "Using wlb recommendations for choosing a host has been \ - disabled or wlb is not available. 
Using original algorithm"; - choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - end + if choose_host_uses_wlb ~__context then + try + let rec filter_and_convert recs = + match recs with + | (h, recom) :: tl -> + begin + debug "\n%s\n" (String.concat ";" recom); + match recom with + | ["WLB"; "0.0"; rec_id; zero_reason] -> + filter_and_convert tl + | ["WLB"; stars; rec_id] -> + (h, float_of_string stars, rec_id) + :: filter_and_convert tl + | _ -> filter_and_convert tl + end + | [] -> [] + in + begin + let all_hosts = + (List.sort + (fun (h, s, r) (h', s', r') -> + if s < s' then 1 else if s > s' then -1 else 0) + (filter_and_convert (retrieve_wlb_recommendations + ~__context ~vm ~snapshot)) + ) + in + debug "Hosts sorted in priority: %s" + (List.fold_left + (fun a (h,s,r) -> + a ^ (Printf.sprintf "%s %f," + (Db.Host.get_name_label ~__context ~self:h) s) + ) "" all_hosts + ); + match all_hosts with + | (h,s,r)::_ -> + debug "Wlb has recommended host %s" + (Db.Host.get_name_label ~__context ~self:h); + let action = Db.Task.get_name_label ~__context + ~self:(Context.get_task_id __context) in + let oc = Db.Pool.get_other_config ~__context + ~self:(Helpers.get_pool ~__context) in + Db.Task.set_other_config ~__context + ~self:(Context.get_task_id __context) + ~value:([ + ("wlb_advised", r); + ("wlb_action", action); + ("wlb_action_obj_type", "VM"); + ("wlb_action_obj_ref", (Ref.string_of vm)) + ] @ oc); + h + | _ -> + debug "Wlb has no recommendations. \ + Using original algorithm"; + choose_host_for_vm_no_wlb ~__context ~vm ~snapshot + end + with + | Api_errors.Server_error(error_type, error_detail) -> + debug "Encountered error when using wlb for choosing host \ + \"%s: %s\". 
Using original algorithm" + error_type + (String.concat "" error_detail); + begin + try + let uuid = Db.VM.get_uuid ~__context ~self:vm in + let message_body = + Printf.sprintf + "Wlb consultation for VM '%s' failed (pool uuid: %s)" + (Db.VM.get_name_label ~__context ~self:vm) + (Db.Pool.get_uuid ~__context + ~self:(Helpers.get_pool ~__context)) + in + let (name, priority) = Api_messages.wlb_failed in + ignore (Xapi_message.create ~__context ~name ~priority + ~cls:`VM ~obj_uuid:uuid ~body:message_body) + with _ -> () + end; + choose_host_for_vm_no_wlb ~__context ~vm ~snapshot + | Failure "float_of_string" -> + debug "Star ratings from wlb could not be parsed to floats. \ + Using original algorithm"; + choose_host_for_vm_no_wlb ~__context ~vm ~snapshot + | _ -> + debug "Encountered an unknown error when using wlb for \ + choosing host. Using original algorithm"; + choose_host_for_vm_no_wlb ~__context ~vm ~snapshot + else + begin + debug "Using wlb recommendations for choosing a host has been \ + disabled or wlb is not available. Using original algorithm"; + choose_host_for_vm_no_wlb ~__context ~vm ~snapshot + end type set_cpus_number_fn = __context:Context.t -> self:API.ref_VM -> int -> API.vM_t -> int64 -> unit let validate_HVM_shadow_multiplier multiplier = - if multiplier < 1. - then invalid_value "multiplier" (string_of_float multiplier) + if multiplier < 1. + then invalid_value "multiplier" (string_of_float multiplier) (** Sets the HVM shadow multiplier for a {b Halted} VM. Runs on the master. 
*) let set_HVM_shadow_multiplier ~__context ~self ~value = - if Db.VM.get_power_state ~__context ~self <> `Halted - then failwith "assertion_failed: set_HVM_shadow_multiplier should only be \ - called when the VM is Halted"; - validate_HVM_shadow_multiplier value; - Db.VM.set_HVM_shadow_multiplier ~__context ~self ~value; - update_memory_overhead ~__context ~vm:self + if Db.VM.get_power_state ~__context ~self <> `Halted + then failwith "assertion_failed: set_HVM_shadow_multiplier should only be \ + called when the VM is Halted"; + validate_HVM_shadow_multiplier value; + Db.VM.set_HVM_shadow_multiplier ~__context ~self ~value; + update_memory_overhead ~__context ~vm:self let inclusive_range a b = Range.to_list (Range.make a (b + 1)) let vbd_inclusive_range hvm a b = - List.map (Device_number.of_disk_number hvm) (inclusive_range a b) + List.map (Device_number.of_disk_number hvm) (inclusive_range a b) let vif_inclusive_range a b = - List.map string_of_int (inclusive_range a b) + List.map string_of_int (inclusive_range a b) (* These are high-watermark limits as documented in CA-6525. Individual guest types may be further restricted. *) @@ -732,206 +732,206 @@ let allowed_VIF_devices_HVM = vif_inclusive_range 0 6 let allowed_VIF_devices_PV = vif_inclusive_range 0 6 (** [possible_VBD_devices_of_string s] returns a list of Device_number.t which - represent possible interpretations of [s]. *) + represent possible interpretations of [s]. 
*) let possible_VBD_devices_of_string s = - (* NB userdevice fields are arbitrary strings and device fields may be "" *) - let parse hvm x = try Some (Device_number.of_string hvm x) with _ -> None in - Listext.List.unbox_list [ parse true s; parse false s ] + (* NB userdevice fields are arbitrary strings and device fields may be "" *) + let parse hvm x = try Some (Device_number.of_string hvm x) with _ -> None in + Listext.List.unbox_list [ parse true s; parse false s ] (** [all_used_VBD_devices __context self] returns a list of Device_number.t - which are considered to be already in-use in the VM *) + which are considered to be already in-use in the VM *) let all_used_VBD_devices ~__context ~self = - let all = Db.VM.get_VBDs ~__context ~self in + let all = Db.VM.get_VBDs ~__context ~self in - let existing_devices = - let all_devices = List.map (fun self -> Db.VBD.get_device ~__context ~self) all in - let all_devices2 = List.map (fun self -> Db.VBD.get_userdevice ~__context ~self) all in - all_devices @ all_devices2 in + let existing_devices = + let all_devices = List.map (fun self -> Db.VBD.get_device ~__context ~self) all in + let all_devices2 = List.map (fun self -> Db.VBD.get_userdevice ~__context ~self) all in + all_devices @ all_devices2 in - List.concat (List.map possible_VBD_devices_of_string existing_devices) + List.concat (List.map possible_VBD_devices_of_string existing_devices) let allowed_VBD_devices ~__context ~vm ~_type = - let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in - let is_control_domain = Db.VM.get_is_control_domain ~__context ~self:vm in - let all_devices = match is_hvm,is_control_domain,_type with - | true, _, `Floppy -> allowed_VBD_devices_HVM_floppy - | false, _, `Floppy -> [] (* floppy is not supported on PV *) - | false, true, _ -> allowed_VBD_devices_control_domain - | false, false, _ -> allowed_VBD_devices_PV - | true, _, _ -> allowed_VBD_devices_HVM - in - (* Filter out those we've already got VBDs for *) - let used_devices 
= all_used_VBD_devices ~__context ~self:vm in - List.filter (fun dev -> not (List.mem dev used_devices)) all_devices + let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in + let is_control_domain = Db.VM.get_is_control_domain ~__context ~self:vm in + let all_devices = match is_hvm,is_control_domain,_type with + | true, _, `Floppy -> allowed_VBD_devices_HVM_floppy + | false, _, `Floppy -> [] (* floppy is not supported on PV *) + | false, true, _ -> allowed_VBD_devices_control_domain + | false, false, _ -> allowed_VBD_devices_PV + | true, _, _ -> allowed_VBD_devices_HVM + in + (* Filter out those we've already got VBDs for *) + let used_devices = all_used_VBD_devices ~__context ~self:vm in + List.filter (fun dev -> not (List.mem dev used_devices)) all_devices let allowed_VIF_devices ~__context ~vm = - let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in - let all_devices = if is_hvm then allowed_VIF_devices_HVM else allowed_VIF_devices_PV in - (* Filter out those we've already got VIFs for *) - let all_vifs = Db.VM.get_VIFs ~__context ~self:vm in - let used_devices = List.map (fun vif -> Db.VIF.get_device ~__context ~self:vif) all_vifs in - List.filter (fun dev -> not (List.mem dev used_devices)) all_devices + let is_hvm = Helpers.will_boot_hvm ~__context ~self:vm in + let all_devices = if is_hvm then allowed_VIF_devices_HVM else allowed_VIF_devices_PV in + (* Filter out those we've already got VIFs for *) + let all_vifs = Db.VM.get_VIFs ~__context ~self:vm in + let used_devices = List.map (fun vif -> Db.VIF.get_device ~__context ~self:vif) all_vifs in + List.filter (fun dev -> not (List.mem dev used_devices)) all_devices let delete_guest_metrics ~__context ~self:vm = - (* Delete potentially stale guest metrics object *) - let guest_metrics = Db.VM.get_guest_metrics ~__context ~self:vm in - Db.VM.set_guest_metrics ~__context ~self:vm ~value:Ref.null; - (try Db.VM_guest_metrics.destroy ~__context ~self:guest_metrics with _ -> ()) + (* Delete potentially 
stale guest metrics object *) + let guest_metrics = Db.VM.get_guest_metrics ~__context ~self:vm in + Db.VM.set_guest_metrics ~__context ~self:vm ~value:Ref.null; + (try Db.VM_guest_metrics.destroy ~__context ~self:guest_metrics with _ -> ()) let copy_guest_metrics ~__context ~vm = - try - let gm = Db.VM.get_guest_metrics ~__context ~self:vm in - let all = Db.VM_guest_metrics.get_record ~__context ~self:gm in - let ref = Ref.make () in - Db.VM_guest_metrics.create ~__context - ~ref - ~uuid:(Uuid.to_string (Uuid.make_uuid ())) - ~os_version:all.API.vM_guest_metrics_os_version - ~pV_drivers_version:all.API.vM_guest_metrics_PV_drivers_version - ~pV_drivers_up_to_date:all.API.vM_guest_metrics_PV_drivers_up_to_date - ~memory:all.API.vM_guest_metrics_memory - ~disks:all.API.vM_guest_metrics_disks - ~networks:all.API.vM_guest_metrics_networks - ~pV_drivers_detected:all.API.vM_guest_metrics_PV_drivers_detected - ~other:all.API.vM_guest_metrics_other - ~last_updated:all.API.vM_guest_metrics_last_updated - ~other_config:all.API.vM_guest_metrics_other_config - ~live:all.API.vM_guest_metrics_live - ~can_use_hotplug_vbd:all.API.vM_guest_metrics_can_use_hotplug_vbd - ~can_use_hotplug_vif:all.API.vM_guest_metrics_can_use_hotplug_vif - ; - ref - with _ -> - Ref.null + try + let gm = Db.VM.get_guest_metrics ~__context ~self:vm in + let all = Db.VM_guest_metrics.get_record ~__context ~self:gm in + let ref = Ref.make () in + Db.VM_guest_metrics.create ~__context + ~ref + ~uuid:(Uuid.to_string (Uuid.make_uuid ())) + ~os_version:all.API.vM_guest_metrics_os_version + ~pV_drivers_version:all.API.vM_guest_metrics_PV_drivers_version + ~pV_drivers_up_to_date:all.API.vM_guest_metrics_PV_drivers_up_to_date + ~memory:all.API.vM_guest_metrics_memory + ~disks:all.API.vM_guest_metrics_disks + ~networks:all.API.vM_guest_metrics_networks + ~pV_drivers_detected:all.API.vM_guest_metrics_PV_drivers_detected + ~other:all.API.vM_guest_metrics_other + ~last_updated:all.API.vM_guest_metrics_last_updated + 
~other_config:all.API.vM_guest_metrics_other_config + ~live:all.API.vM_guest_metrics_live + ~can_use_hotplug_vbd:all.API.vM_guest_metrics_can_use_hotplug_vbd + ~can_use_hotplug_vif:all.API.vM_guest_metrics_can_use_hotplug_vif + ; + ref + with _ -> + Ref.null let start_delay ~__context ~vm = - let start_delay = Db.VM.get_start_delay ~__context ~self:vm in - Thread.delay (Int64.to_float start_delay) + let start_delay = Db.VM.get_start_delay ~__context ~self:vm in + Thread.delay (Int64.to_float start_delay) let shutdown_delay ~__context ~vm = - let shutdown_delay = Db.VM.get_shutdown_delay ~__context ~self:vm in - Thread.delay (Int64.to_float shutdown_delay) + let shutdown_delay = Db.VM.get_shutdown_delay ~__context ~self:vm in + Thread.delay (Int64.to_float shutdown_delay) let list_required_vdis ~__context ~self = - let vbds = Db.VM.get_VBDs ~__context ~self in - let vbds_excluding_cd = - List.filter (fun vbd -> Db.VBD.get_type ~__context ~self:vbd <> `CD) vbds - in - List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) vbds_excluding_cd + let vbds = Db.VM.get_VBDs ~__context ~self in + let vbds_excluding_cd = + List.filter (fun vbd -> Db.VBD.get_type ~__context ~self:vbd <> `CD) vbds + in + List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) vbds_excluding_cd (* Find the SRs of all VDIs which have VBDs attached to the VM. *) let list_required_SRs ~__context ~self = - let vdis = list_required_vdis ~__context ~self in - let srs = List.map (fun vdi -> Db.VDI.get_SR ~__context ~self:vdi) vdis in - let srs = List.filter (fun sr -> Db.SR.get_content_type ~__context ~self:sr <> "iso") srs in - List.setify srs + let vdis = list_required_vdis ~__context ~self in + let srs = List.map (fun vdi -> Db.VDI.get_SR ~__context ~self:vdi) vdis in + let srs = List.filter (fun sr -> Db.SR.get_content_type ~__context ~self:sr <> "iso") srs in + List.setify srs (* Check if the database referenced by session_to *) (* contains the SRs required to recover the VM. 
*) let assert_can_be_recovered ~__context ~self ~session_to = - (* Get the required SR uuids from the foreign database. *) - let required_SRs = list_required_SRs ~__context ~self in - let required_SR_uuids = List.map (fun sr -> Db.SR.get_uuid ~__context ~self:sr) - required_SRs - in - (* Try to look up the SRs by uuid in the local database. *) - try - Server_helpers.exec_with_new_task ~session_id:session_to - "Looking for required SRs" - (fun __context_to -> List.iter - (fun sr_uuid -> - let sr = Db.SR.get_by_uuid ~__context:__context_to ~uuid:sr_uuid in - (* Check if SR has any attached PBDs. *) - let pbds = Db.SR.get_PBDs ~__context:__context_to ~self:sr in - let attached_pbds = List.filter - (fun pbd -> Db.PBD.get_currently_attached ~__context:__context_to ~self:pbd) - pbds - in - if attached_pbds = [] then - raise (Api_errors.Server_error(Api_errors.vm_requires_sr, - [Ref.string_of self; Ref.string_of sr])) - ) - required_SR_uuids) - with Db_exn.Read_missing_uuid(_, _, sr_uuid) -> - (* Throw exception containing the ref of the first SR which wasn't found. *) - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr_uuid in - raise (Api_errors.Server_error(Api_errors.vm_requires_sr, - [Ref.string_of self; Ref.string_of sr])) + (* Get the required SR uuids from the foreign database. *) + let required_SRs = list_required_SRs ~__context ~self in + let required_SR_uuids = List.map (fun sr -> Db.SR.get_uuid ~__context ~self:sr) + required_SRs + in + (* Try to look up the SRs by uuid in the local database. *) + try + Server_helpers.exec_with_new_task ~session_id:session_to + "Looking for required SRs" + (fun __context_to -> List.iter + (fun sr_uuid -> + let sr = Db.SR.get_by_uuid ~__context:__context_to ~uuid:sr_uuid in + (* Check if SR has any attached PBDs. 
*) + let pbds = Db.SR.get_PBDs ~__context:__context_to ~self:sr in + let attached_pbds = List.filter + (fun pbd -> Db.PBD.get_currently_attached ~__context:__context_to ~self:pbd) + pbds + in + if attached_pbds = [] then + raise (Api_errors.Server_error(Api_errors.vm_requires_sr, + [Ref.string_of self; Ref.string_of sr])) + ) + required_SR_uuids) + with Db_exn.Read_missing_uuid(_, _, sr_uuid) -> + (* Throw exception containing the ref of the first SR which wasn't found. *) + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr_uuid in + raise (Api_errors.Server_error(Api_errors.vm_requires_sr, + [Ref.string_of self; Ref.string_of sr])) let get_SRs_required_for_recovery ~__context ~self ~session_to = - let required_SR_list = list_required_SRs ~__context ~self in - Server_helpers.exec_with_new_task ~session_id:session_to - "Looking for the required SRs" - (fun __context_to -> List.filter - ( fun sr_ref -> - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr_ref in - try - let sr = Db.SR.get_by_uuid ~__context:__context_to ~uuid:sr_uuid in - let pbds = Db.SR.get_PBDs ~__context:__context_to ~self:sr in - let attached_pbds = List.filter - (fun pbd -> Db.PBD.get_currently_attached ~__context:__context_to ~self:pbd) - pbds - in - if attached_pbds = [] then true else false - with Db_exn.Read_missing_uuid(_ , _ , sr_uuid) -> true - ) - required_SR_list) + let required_SR_list = list_required_SRs ~__context ~self in + Server_helpers.exec_with_new_task ~session_id:session_to + "Looking for the required SRs" + (fun __context_to -> List.filter + ( fun sr_ref -> + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr_ref in + try + let sr = Db.SR.get_by_uuid ~__context:__context_to ~uuid:sr_uuid in + let pbds = Db.SR.get_PBDs ~__context:__context_to ~self:sr in + let attached_pbds = List.filter + (fun pbd -> Db.PBD.get_currently_attached ~__context:__context_to ~self:pbd) + pbds + in + if attached_pbds = [] then true else false + with Db_exn.Read_missing_uuid(_ , _ , sr_uuid) -> true + ) + 
required_SR_list) (* BIOS strings *) let copy_bios_strings ~__context ~vm ~host = - (* only allow to fill in BIOS strings if they are not yet set *) - let current_strings = Db.VM.get_bios_strings ~__context ~self:vm in - if List.length current_strings > 0 then - raise (Api_errors.Server_error(Api_errors.vm_bios_strings_already_set, [])) - else begin - let bios_strings = Db.Host.get_bios_strings ~__context ~self:host in - Db.VM.set_bios_strings ~__context ~self:vm ~value:bios_strings; - (* also set the affinity field to push the VM to start on this host *) - Db.VM.set_affinity ~__context ~self:vm ~value:host - end + (* only allow to fill in BIOS strings if they are not yet set *) + let current_strings = Db.VM.get_bios_strings ~__context ~self:vm in + if List.length current_strings > 0 then + raise (Api_errors.Server_error(Api_errors.vm_bios_strings_already_set, [])) + else begin + let bios_strings = Db.Host.get_bios_strings ~__context ~self:host in + Db.VM.set_bios_strings ~__context ~self:vm ~value:bios_strings; + (* also set the affinity field to push the VM to start on this host *) + Db.VM.set_affinity ~__context ~self:vm ~value:host + end let consider_generic_bios_strings ~__context ~vm = - (* check BIOS strings: set to generic values if empty *) - let bios_strings = Db.VM.get_bios_strings ~__context ~self:vm in - if bios_strings = [] then begin - info "The VM's BIOS strings were not yet filled in. The VM is now made BIOS-generic."; - Db.VM.set_bios_strings ~__context ~self:vm ~value:Xapi_globs.generic_bios_strings - end + (* check BIOS strings: set to generic values if empty *) + let bios_strings = Db.VM.get_bios_strings ~__context ~self:vm in + if bios_strings = [] then begin + info "The VM's BIOS strings were not yet filled in. 
The VM is now made BIOS-generic."; + Db.VM.set_bios_strings ~__context ~self:vm ~value:Xapi_globs.generic_bios_strings + end (* Windows VM Generation ID *) let fresh_genid ?(current_genid="0:0") () = - if current_genid = "" then "" else - Printf.sprintf "%Ld:%Ld" - (Random.int64 Int64.max_int) - (Random.int64 Int64.max_int) + if current_genid = "" then "" else + Printf.sprintf "%Ld:%Ld" + (Random.int64 Int64.max_int) + (Random.int64 Int64.max_int) let vm_fresh_genid ~__context ~self = - let current_genid = Db.VM.get_generation_id ~__context ~self in - let new_genid = fresh_genid ~current_genid () - and uuid = Db.VM.get_uuid ~__context ~self in - debug "Refreshing GenID for VM %s to %s" uuid new_genid; - Db.VM.set_generation_id ~__context ~self ~value:new_genid ; - new_genid + let current_genid = Db.VM.get_generation_id ~__context ~self in + let new_genid = fresh_genid ~current_genid () + and uuid = Db.VM.get_uuid ~__context ~self in + debug "Refreshing GenID for VM %s to %s" uuid new_genid; + Db.VM.set_generation_id ~__context ~self ~value:new_genid ; + new_genid (** Add to the VM's current operations, call a function and then remove from the - current operations. Ensure the allowed_operations are kept up to date. *) + current operations. Ensure the allowed_operations are kept up to date. 
*) let with_vm_operation ~__context ~self ~doc ~op ?(strict=true) ?policy f = - let task_id = Ref.string_of (Context.get_task_id __context) in - Helpers.retry_with_global_lock ~__context ~doc ?policy - (fun () -> - Xapi_vm_lifecycle.assert_operation_valid ~__context ~self ~op ~strict; - Db.VM.add_to_current_operations ~__context ~self ~key:task_id ~value:op; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self); - (* Then do the action with the lock released *) - Pervasiveext.finally f - (* Make sure to clean up at the end *) - (fun () -> - try - Db.VM.remove_from_current_operations ~__context ~self ~key:task_id; - Xapi_vm_lifecycle.update_allowed_operations ~__context ~self; - Helpers.Early_wakeup.broadcast (Datamodel._vm, Ref.string_of self); - with - _ -> ()) + let task_id = Ref.string_of (Context.get_task_id __context) in + Helpers.retry_with_global_lock ~__context ~doc ?policy + (fun () -> + Xapi_vm_lifecycle.assert_operation_valid ~__context ~self ~op ~strict; + Db.VM.add_to_current_operations ~__context ~self ~key:task_id ~value:op; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self); + (* Then do the action with the lock released *) + Pervasiveext.finally f + (* Make sure to clean up at the end *) + (fun () -> + try + Db.VM.remove_from_current_operations ~__context ~self ~key:task_id; + Xapi_vm_lifecycle.update_allowed_operations ~__context ~self; + Helpers.Early_wakeup.broadcast (Datamodel._vm, Ref.string_of self); + with + _ -> ()) diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml index 8f53f361602..51ce388e7c0 100644 --- a/ocaml/xapi/xapi_vm_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_lifecycle.ml @@ -13,7 +13,7 @@ *) (** Helper functions relating to VM lifecycle operations. 
* @group Virtual-Machine Management - *) +*) open Xapi_pv_driver_version open Stdext @@ -26,121 +26,121 @@ module Rrdd = Rrd_client.Client let assoc_opt key assocs = Opt.of_exception (fun () -> List.assoc key assocs) let bool_of_assoc key assocs = match assoc_opt key assocs with - | Some v -> v = "1" || String.lowercase v = "true" - | _ -> false + | Some v -> v = "1" || String.lowercase v = "true" + | _ -> false (** Given an operation, [allowed_power_states] returns all the possible power state for - wich this operation can be performed. *) + wich this operation can be performed. *) let allowed_power_states ~__context ~vmr ~(op:API.vm_operations) = - let all_power_states = - [`Halted; `Paused; `Suspended; `Running] in - match op with - (* a VM.import is done on file and not on VMs, so there is not power-state there! *) - | `import - -> [] - - | `changing_VCPUs - | `changing_static_range - | `changing_memory_limits -> `Halted :: (if vmr.Db_actions.vM_is_control_domain then [`Running] else []) - | `changing_shadow_memory - | `make_into_template - | `provision - | `start - | `start_on - -> [`Halted] - | `unpause - -> [`Paused] - | `csvm - | `resume - | `resume_on - -> [`Suspended] - | `awaiting_memory_live - | `call_plugin - | `clean_reboot - | `clean_shutdown - | `changing_memory_live - | `changing_shadow_memory_live - | `changing_VCPUs_live - | `data_source_op - | `pause - | `pool_migrate - | `send_sysrq - | `send_trigger - | `snapshot_with_quiesce - | `suspend - -> [`Running] - | `changing_dynamic_range - -> [`Halted; `Running] - | `clone - | `copy - -> `Halted :: (if vmr.Db_actions.vM_is_a_snapshot || Helpers.clone_suspended_vm_enabled ~__context then [`Suspended] else []) - | `create_template (* Don't touch until XMLRPC unmarshal code is able to pre-blank fields on input. 
*) - | `destroy - | `export - -> [`Halted; `Suspended] - | `hard_reboot - -> [`Paused; `Running] - | `checkpoint - | `get_boot_record - | `shutdown - | `hard_shutdown - -> [`Paused; `Suspended; `Running] - | `migrate_send - -> [`Halted; `Suspended; `Running] - | `assert_operation_valid - | `metadata_export - | `power_state_reset - | `revert - | `reverting - | `snapshot - | `update_allowed_operations - | `query_services - -> all_power_states + let all_power_states = + [`Halted; `Paused; `Suspended; `Running] in + match op with + (* a VM.import is done on file and not on VMs, so there is not power-state there! *) + | `import + -> [] + + | `changing_VCPUs + | `changing_static_range + | `changing_memory_limits -> `Halted :: (if vmr.Db_actions.vM_is_control_domain then [`Running] else []) + | `changing_shadow_memory + | `make_into_template + | `provision + | `start + | `start_on + -> [`Halted] + | `unpause + -> [`Paused] + | `csvm + | `resume + | `resume_on + -> [`Suspended] + | `awaiting_memory_live + | `call_plugin + | `clean_reboot + | `clean_shutdown + | `changing_memory_live + | `changing_shadow_memory_live + | `changing_VCPUs_live + | `data_source_op + | `pause + | `pool_migrate + | `send_sysrq + | `send_trigger + | `snapshot_with_quiesce + | `suspend + -> [`Running] + | `changing_dynamic_range + -> [`Halted; `Running] + | `clone + | `copy + -> `Halted :: (if vmr.Db_actions.vM_is_a_snapshot || Helpers.clone_suspended_vm_enabled ~__context then [`Suspended] else []) + | `create_template (* Don't touch until XMLRPC unmarshal code is able to pre-blank fields on input. 
*) + | `destroy + | `export + -> [`Halted; `Suspended] + | `hard_reboot + -> [`Paused; `Running] + | `checkpoint + | `get_boot_record + | `shutdown + | `hard_shutdown + -> [`Paused; `Suspended; `Running] + | `migrate_send + -> [`Halted; `Suspended; `Running] + | `assert_operation_valid + | `metadata_export + | `power_state_reset + | `revert + | `reverting + | `snapshot + | `update_allowed_operations + | `query_services + -> all_power_states (** check if [op] can be done when [vmr] is in [power_state], when no other operation is in progress *) let is_allowed_sequentially ~__context ~vmr ~power_state ~op = - List.mem power_state (allowed_power_states ~__context ~vmr ~op) + List.mem power_state (allowed_power_states ~__context ~vmr ~op) (** check if [op] can be done while [current_ops] are already in progress. - Remark: we do not test whether the power-state is valid. *) + Remark: we do not test whether the power-state is valid. *) let is_allowed_concurrently ~(op:API.vm_operations) ~current_ops = - (* declare below the non-conflicting concurrent sets. 
*) - let long_copies = [`clone; `copy; `export ] - and boot_record = [`get_boot_record] - and snapshot = [`snapshot; `checkpoint] - and allowed_operations = (* a list of valid state -> operation *) - [ [`snapshot_with_quiesce], `snapshot; - [`migrate_send], `metadata_export; - [`migrate_send], `clean_shutdown; - [`migrate_send], `clean_reboot; - [`migrate_send], `start; - [`migrate_send], `start_on; - ] in - let state_machine () = - let current_state = List.map snd current_ops in - match op with - | `hard_shutdown - -> not (List.mem op current_state) - | `hard_reboot -> not (List.exists - (fun o -> List.mem o [`hard_shutdown; `hard_reboot]) current_state) - | _ -> List.exists (fun (state, transition) -> - state = current_state && transition = op) allowed_operations - in - let aux ops = - List.mem op ops && List.for_all (fun (_,o) -> List.mem o ops) current_ops - in - aux long_copies || aux snapshot || aux boot_record || state_machine () + (* declare below the non-conflicting concurrent sets. 
*) + let long_copies = [`clone; `copy; `export ] + and boot_record = [`get_boot_record] + and snapshot = [`snapshot; `checkpoint] + and allowed_operations = (* a list of valid state -> operation *) + [ [`snapshot_with_quiesce], `snapshot; + [`migrate_send], `metadata_export; + [`migrate_send], `clean_shutdown; + [`migrate_send], `clean_reboot; + [`migrate_send], `start; + [`migrate_send], `start_on; + ] in + let state_machine () = + let current_state = List.map snd current_ops in + match op with + | `hard_shutdown + -> not (List.mem op current_state) + | `hard_reboot -> not (List.exists + (fun o -> List.mem o [`hard_shutdown; `hard_reboot]) current_state) + | _ -> List.exists (fun (state, transition) -> + state = current_state && transition = op) allowed_operations + in + let aux ops = + List.mem op ops && List.for_all (fun (_,o) -> List.mem o ops) current_ops + in + aux long_copies || aux snapshot || aux boot_record || state_machine () (** True iff the vm guest metrics "other" field includes (feature, "1") - as a key-value pair. *) + as a key-value pair. *) let has_feature ~vmgmr ~feature = - match vmgmr with - | None -> false - | Some gmr -> - let other = gmr.Db_actions.vM_guest_metrics_other in - try - List.assoc feature other = "1" - with Not_found -> false + match vmgmr with + | None -> false + | Some gmr -> + let other = gmr.Db_actions.vM_guest_metrics_other in + try + List.assoc feature other = "1" + with Not_found -> false (** Return an error iff vmr is an HVM guest and lacks a needed feature. * Note: it turned out that the Windows guest agent does not write "feature-suspend" @@ -152,118 +152,118 @@ let has_feature ~vmgmr ~feature = * to perform an operation. This makes a difference for ops that require the guest to * react helpfully. 
*) let check_op_for_feature ~__context ~vmr ~vmgmr ~power_state ~op ~ref ~strict = - if power_state <> `Running || - (* PV guests offer support implicitly *) - not (Helpers.has_booted_hvm_of_record ~__context vmr) || - has_pv_drivers (of_guest_metrics vmgmr) (* Full PV drivers imply all features *) - then None - else - let some_err e = - Some (e, [ Ref.string_of ref ]) - in - let lack_feature feature = - not (has_feature ~vmgmr ~feature) - in - match op with - | `clean_shutdown - when strict && lack_feature "feature-shutdown" && lack_feature "feature-poweroff" - -> some_err Api_errors.vm_lacks_feature - | `clean_reboot - when strict && lack_feature "feature-shutdown" && lack_feature "feature-reboot" - -> some_err Api_errors.vm_lacks_feature - | `changing_VCPUs_live - when lack_feature "feature-vcpu-hotplug" - -> some_err Api_errors.vm_lacks_feature - | `suspend | `checkpoint | `pool_migrate | `migrate_send - when strict && lack_feature "feature-suspend" - -> some_err Api_errors.vm_lacks_feature - | _ -> None - (* N.B. In the pattern matching above, "pat1 | pat2 | pat3" counts as - * one pattern, and the whole thing can be guarded by a "when" clause. 
*) + if power_state <> `Running || + (* PV guests offer support implicitly *) + not (Helpers.has_booted_hvm_of_record ~__context vmr) || + has_pv_drivers (of_guest_metrics vmgmr) (* Full PV drivers imply all features *) + then None + else + let some_err e = + Some (e, [ Ref.string_of ref ]) + in + let lack_feature feature = + not (has_feature ~vmgmr ~feature) + in + match op with + | `clean_shutdown + when strict && lack_feature "feature-shutdown" && lack_feature "feature-poweroff" + -> some_err Api_errors.vm_lacks_feature + | `clean_reboot + when strict && lack_feature "feature-shutdown" && lack_feature "feature-reboot" + -> some_err Api_errors.vm_lacks_feature + | `changing_VCPUs_live + when lack_feature "feature-vcpu-hotplug" + -> some_err Api_errors.vm_lacks_feature + | `suspend | `checkpoint | `pool_migrate | `migrate_send + when strict && lack_feature "feature-suspend" + -> some_err Api_errors.vm_lacks_feature + | _ -> None +(* N.B. In the pattern matching above, "pat1 | pat2 | pat3" counts as + * one pattern, and the whole thing can be guarded by a "when" clause. 
*) (* templates support clone operations, destroy and cross-pool migrate (if not default), export, provision, and memory settings change *) let check_template ~vmr ~op ~ref_str = - let default_template = - bool_of_assoc Xapi_globs.default_template_key vmr.Db_actions.vM_other_config in - let allowed_operations = [ - `changing_dynamic_range; - `changing_static_range; - `changing_memory_limits; - `changing_shadow_memory; - `changing_VCPUs; - `clone; - `copy; - `export; - `metadata_export; - `provision; - ] in - if false - || List.mem op allowed_operations - || ((op = `destroy || op = `migrate_send) && not default_template) - then None - else Some (Api_errors.vm_is_template, [ref_str; Record_util.vm_operation_to_string op]) + let default_template = + bool_of_assoc Xapi_globs.default_template_key vmr.Db_actions.vM_other_config in + let allowed_operations = [ + `changing_dynamic_range; + `changing_static_range; + `changing_memory_limits; + `changing_shadow_memory; + `changing_VCPUs; + `clone; + `copy; + `export; + `metadata_export; + `provision; + ] in + if false + || List.mem op allowed_operations + || ((op = `destroy || op = `migrate_send) && not default_template) + then None + else Some (Api_errors.vm_is_template, [ref_str; Record_util.vm_operation_to_string op]) let check_snapshot ~vmr ~op ~ref_str = - let allowed = [`revert; `clone; `copy; `export; `destroy; `hard_shutdown; `metadata_export] in - if List.mem op allowed - then None - else Some (Api_errors.vm_is_snapshot, [ref_str; Record_util.vm_operation_to_string op]) + let allowed = [`revert; `clone; `copy; `export; `destroy; `hard_shutdown; `metadata_export] in + if List.mem op allowed + then None + else Some (Api_errors.vm_is_snapshot, [ref_str; Record_util.vm_operation_to_string op]) (* report a power_state/operation error *) let report_power_state_error ~__context ~vmr ~power_state ~op ~ref_str = - let expected = allowed_power_states ~__context ~vmr ~op in - let expected = String.concat ", " (List.map 
Record_util.power_to_string expected) in - let actual = Record_util.power_to_string power_state in - Some (Api_errors.vm_bad_power_state, [ref_str; expected; actual]) + let expected = allowed_power_states ~__context ~vmr ~op in + let expected = String.concat ", " (List.map Record_util.power_to_string expected) in + let actual = Record_util.power_to_string power_state in + Some (Api_errors.vm_bad_power_state, [ref_str; expected; actual]) let report_concurrent_operations_error ~current_ops ~ref_str = - let current_ops_str = - match current_ops with - | [] -> failwith "No concurrent operation to report" - | [_,cop] -> Record_util.vm_operation_to_string cop - | l -> "{" ^ (String.concat "," (List.map Record_util.vm_operation_to_string (List.map snd l))) ^ "}" - in - Some (Api_errors.other_operation_in_progress,["VM." ^ current_ops_str; ref_str]) + let current_ops_str = + match current_ops with + | [] -> failwith "No concurrent operation to report" + | [_,cop] -> Record_util.vm_operation_to_string cop + | l -> "{" ^ (String.concat "," (List.map Record_util.vm_operation_to_string (List.map snd l))) ^ "}" + in + Some (Api_errors.other_operation_in_progress,["VM." 
^ current_ops_str; ref_str]) (* Suspending, checkpointing and live-migrating are not (yet) allowed if a PCI device is passed through *) let check_pci ~op ~ref_str = - match op with - | `suspend | `checkpoint | `pool_migrate | `migrate_send -> Some (Api_errors.vm_has_pci_attached, [ref_str]) - | _ -> None + match op with + | `suspend | `checkpoint | `pool_migrate | `migrate_send -> Some (Api_errors.vm_has_pci_attached, [ref_str]) + | _ -> None let check_vgpu ~__context ~op ~ref_str ~vgpus = - match op with - | `suspend | `pool_migrate -> begin - let all_nvidia_vgpus = - List.fold_left - (fun acc vgpu -> - let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in - let implementation = - Db.VGPU_type.get_implementation ~__context ~self:vgpu_type in - acc && (implementation = `nvidia)) - true vgpus - in - if all_nvidia_vgpus && (Xapi_fist.allow_nvidia_vgpu_migration ()) - then None - else Some (Api_errors.vm_has_vgpu, [ref_str]) - end - | `checkpoint | `migrate_send -> Some (Api_errors.vm_has_vgpu, [ref_str]) - | _ -> None + match op with + | `suspend | `pool_migrate -> begin + let all_nvidia_vgpus = + List.fold_left + (fun acc vgpu -> + let vgpu_type = Db.VGPU.get_type ~__context ~self:vgpu in + let implementation = + Db.VGPU_type.get_implementation ~__context ~self:vgpu_type in + acc && (implementation = `nvidia)) + true vgpus + in + if all_nvidia_vgpus && (Xapi_fist.allow_nvidia_vgpu_migration ()) + then None + else Some (Api_errors.vm_has_vgpu, [ref_str]) + end + | `checkpoint | `migrate_send -> Some (Api_errors.vm_has_vgpu, [ref_str]) + | _ -> None (* VM cannot be converted into a template while it is a member of an appliance. 
*) let check_appliance ~vmr ~op ~ref_str = - match op with - | `make_into_template -> Some (Api_errors.vm_is_part_of_an_appliance, - [ref_str; Ref.string_of vmr.Db_actions.vM_appliance]) - | _ -> None + match op with + | `make_into_template -> Some (Api_errors.vm_is_part_of_an_appliance, + [ref_str; Ref.string_of vmr.Db_actions.vM_appliance]) + | _ -> None (* VM cannot be converted into a template while it is assigned to a protection policy. *) let check_protection_policy ~vmr ~op ~ref_str = - match op with - | `make_into_template -> Some (Api_errors.vm_assigned_to_protection_policy, - [ref_str; Ref.string_of vmr.Db_actions.vM_protection_policy]) - | _ -> None + match op with + | `make_into_template -> Some (Api_errors.vm_assigned_to_protection_policy, + [ref_str; Ref.string_of vmr.Db_actions.vM_protection_policy]) + | _ -> None (** Some VMs can't migrate. The predicate [is_mobile] is true, if and * only if a VM is mobile. @@ -280,17 +280,17 @@ let check_protection_policy ~vmr ~op ~ref_str = * running - in which case we use the current values from the database. 
**) let is_mobile ~__context vm strict = - let metrics = Db.VM.get_metrics ~__context ~self:vm in - try - let nomigrate = Db.VM_metrics.get_nomigrate ~__context ~self:metrics in - let nested_virt = Db.VM_metrics.get_nested_virt ~__context ~self:metrics in - (not nomigrate && not nested_virt) || not strict - with _ -> - (* No VM_metrics *) - let not_true platformdata key = - not @@ Vm_platform.is_true ~key ~platformdata ~default:false in - let platform = Db.VM.get_platform ~__context ~self:vm in - (not_true platform "nomigrate" && not_true platform "nested-virt") || not strict + let metrics = Db.VM.get_metrics ~__context ~self:vm in + try + let nomigrate = Db.VM_metrics.get_nomigrate ~__context ~self:metrics in + let nested_virt = Db.VM_metrics.get_nested_virt ~__context ~self:metrics in + (not nomigrate && not nested_virt) || not strict + with _ -> + (* No VM_metrics *) + let not_true platformdata key = + not @@ Vm_platform.is_true ~key ~platformdata ~default:false in + let platform = Db.VM.get_platform ~__context ~self:vm in + (not_true platform "nomigrate" && not_true platform "nested-virt") || not strict (** Take an internal VM record and a proposed operation. Return None iff the operation @@ -299,298 +299,298 @@ let is_mobile ~__context vm strict = The "strict" param sets whether we require feature-flags for ops that need guest support: ops in the suspend-like and shutdown-like categories. 
*) let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enabled ~vdis_reset_and_caching ~op ~strict = - let ref_str = Ref.string_of ref in - let power_state = vmr.Db_actions.vM_power_state in - let current_ops = vmr.Db_actions.vM_current_operations in - let is_template = vmr.Db_actions.vM_is_a_template in - let is_snapshot = vmr.Db_actions.vM_is_a_snapshot in - - (* Check if the operation has been explicitly blocked by the/a user *) - let current_error = None in - - let check c f = match c with | Some e -> Some e | None -> f () in - - let current_error = check current_error (fun () -> - Opt.map (fun v -> Api_errors.operation_blocked, [ref_str; v]) - (assoc_opt op vmr.Db_actions.vM_blocked_operations)) in - - (* Always check the power state constraint of the operation first *) - let current_error = check current_error (fun () -> - if not (is_allowed_sequentially ~__context ~vmr ~power_state ~op) - then report_power_state_error ~__context ~vmr ~power_state ~op ~ref_str - else None) in - - (* if other operations are in progress, check that the new operation is allowed concurrently with them. *) - let current_error = check current_error (fun () -> - if List.length current_ops <> 0 && not (is_allowed_concurrently ~op ~current_ops) - then report_concurrent_operations_error ~current_ops ~ref_str - else None) in - - (* if the VM is a template, check the template behavior exceptions. *) - let current_error = check current_error (fun () -> - if is_template && not is_snapshot - then check_template ~vmr ~op ~ref_str - else None) in - - (* if the VM is a snapshot, check the snapshot behavior exceptions. *) - let current_error = check current_error (fun () -> - if is_snapshot - then check_snapshot ~vmr ~op ~ref_str - else None) in - - (* if the VM is neither a template nor a snapshot, do not allow provision and revert. 
*) - let current_error = check current_error (fun () -> - if op = `provision && (not is_template) - then Some (Api_errors.only_provision_template, []) - else None) in - - let current_error = check current_error (fun () -> - if op = `revert && (not is_snapshot) - then Some (Api_errors.only_revert_snapshot, []) - else None) in - - (* Migration must be blocked if VM is not mobile *) - let current_error = check current_error (fun () -> - match op with - | `suspend - | `checkpoint - | `pool_migrate - | `migrate_send - when not (is_mobile ~__context ref strict) -> - Some (Api_errors.vm_is_immobile, [ref_str]) - | _ -> None - ) in - - (* Check if the VM is a control domain (eg domain 0). *) - (* FIXME: Instead of special-casing for the control domain here, *) - (* make use of the Helpers.ballooning_enabled_for_vm function. *) - let current_error = check current_error (fun () -> - let vm_ref = Db.VM.get_by_uuid ~__context ~uuid:vmr.Db_actions.vM_uuid in - if Helpers.is_domain_zero ~__context vm_ref - && (op = `changing_VCPUs - || op = `destroy) - then Some (Api_errors.operation_not_allowed, ["This operation is not allowed on dom0"]) - else if vmr.Db_actions.vM_is_control_domain - && op <> `data_source_op - && op <> `changing_memory_live - && op <> `awaiting_memory_live - && op <> `metadata_export - && op <> `changing_dynamic_range - && op <> `changing_memory_limits - && op <> `changing_static_range - && op <> `start - && op <> `changing_VCPUs - && op <> `destroy - then Some (Api_errors.operation_not_allowed, ["This operation is not allowed on a control domain"]) - else None) in - - (* check for any HVM guest feature needed by the op *) - let current_error = check current_error (fun () -> - check_op_for_feature ~__context ~vmr ~vmgmr ~power_state ~op ~ref ~strict - ) in - - (* check if the dynamic changeable operations are still valid *) - let current_error = check current_error (fun () -> - if op = `snapshot_with_quiesce && - (Pervasiveext.maybe_with_default true - (fun gm 
-> let other = gm.Db_actions.vM_guest_metrics_other in - not (List.mem_assoc "feature-quiesce" other || List.mem_assoc "feature-snapshot" other)) - vmgmr) - then Some (Api_errors.vm_snapshot_with_quiesce_not_supported, [ ref_str ]) - else None) in - - (* Check for an error due to VDI caching/reset behaviour *) - let current_error = check current_error (fun () -> - if op = `checkpoint || op = `snapshot || op = `suspend || op = `snapshot_with_quiesce - then (* If any vdi exists with on_boot=reset, then disallow checkpoint, snapshot, suspend *) - if List.exists fst vdis_reset_and_caching - then Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation,[]) - else None - else if op = `pool_migrate then - (* If any vdi exists with on_boot=reset and caching is enabled, disallow migrate *) - if List.exists (fun (reset,caching) -> reset && caching) vdis_reset_and_caching - then Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation,[]) - else None - else None) in - - (* If a PCI device is passed-through, check if the operation is allowed *) - let current_error = check current_error (fun () -> - if vmr.Db_actions.vM_attached_PCIs <> [] - then check_pci ~op ~ref_str - else None) in - - (* The VM has a VGPU, check if the operation is allowed*) - let current_error = check current_error (fun () -> - if vmr.Db_actions.vM_VGPUs <> [] - then check_vgpu ~__context ~op ~ref_str ~vgpus:vmr.Db_actions.vM_VGPUs - else None) in - - (* Check for errors caused by VM being in an appliance. *) - let current_error = check current_error (fun () -> - if Db.is_valid_ref __context vmr.Db_actions.vM_appliance - then check_appliance ~vmr ~op ~ref_str - else None) in - - (* Check for errors caused by VM being assigned to a protection policy. *) - let current_error = check current_error (fun () -> - if Db.is_valid_ref __context vmr.Db_actions.vM_protection_policy - then check_protection_policy ~vmr ~op ~ref_str - else None) in - - (* Check whether this VM needs to be a system domain. 
*) - let current_error = check current_error (fun () -> - if op = `query_services && not (bool_of_assoc "is_system_domain" vmr.Db_actions.vM_other_config) - then Some (Api_errors.not_system_domain, [ ref_str ]) - else None) in - - current_error + let ref_str = Ref.string_of ref in + let power_state = vmr.Db_actions.vM_power_state in + let current_ops = vmr.Db_actions.vM_current_operations in + let is_template = vmr.Db_actions.vM_is_a_template in + let is_snapshot = vmr.Db_actions.vM_is_a_snapshot in + + (* Check if the operation has been explicitly blocked by the/a user *) + let current_error = None in + + let check c f = match c with | Some e -> Some e | None -> f () in + + let current_error = check current_error (fun () -> + Opt.map (fun v -> Api_errors.operation_blocked, [ref_str; v]) + (assoc_opt op vmr.Db_actions.vM_blocked_operations)) in + + (* Always check the power state constraint of the operation first *) + let current_error = check current_error (fun () -> + if not (is_allowed_sequentially ~__context ~vmr ~power_state ~op) + then report_power_state_error ~__context ~vmr ~power_state ~op ~ref_str + else None) in + + (* if other operations are in progress, check that the new operation is allowed concurrently with them. *) + let current_error = check current_error (fun () -> + if List.length current_ops <> 0 && not (is_allowed_concurrently ~op ~current_ops) + then report_concurrent_operations_error ~current_ops ~ref_str + else None) in + + (* if the VM is a template, check the template behavior exceptions. *) + let current_error = check current_error (fun () -> + if is_template && not is_snapshot + then check_template ~vmr ~op ~ref_str + else None) in + + (* if the VM is a snapshot, check the snapshot behavior exceptions. *) + let current_error = check current_error (fun () -> + if is_snapshot + then check_snapshot ~vmr ~op ~ref_str + else None) in + + (* if the VM is neither a template nor a snapshot, do not allow provision and revert. 
*) + let current_error = check current_error (fun () -> + if op = `provision && (not is_template) + then Some (Api_errors.only_provision_template, []) + else None) in + + let current_error = check current_error (fun () -> + if op = `revert && (not is_snapshot) + then Some (Api_errors.only_revert_snapshot, []) + else None) in + + (* Migration must be blocked if VM is not mobile *) + let current_error = check current_error (fun () -> + match op with + | `suspend + | `checkpoint + | `pool_migrate + | `migrate_send + when not (is_mobile ~__context ref strict) -> + Some (Api_errors.vm_is_immobile, [ref_str]) + | _ -> None + ) in + + (* Check if the VM is a control domain (eg domain 0). *) + (* FIXME: Instead of special-casing for the control domain here, *) + (* make use of the Helpers.ballooning_enabled_for_vm function. *) + let current_error = check current_error (fun () -> + let vm_ref = Db.VM.get_by_uuid ~__context ~uuid:vmr.Db_actions.vM_uuid in + if Helpers.is_domain_zero ~__context vm_ref + && (op = `changing_VCPUs + || op = `destroy) + then Some (Api_errors.operation_not_allowed, ["This operation is not allowed on dom0"]) + else if vmr.Db_actions.vM_is_control_domain + && op <> `data_source_op + && op <> `changing_memory_live + && op <> `awaiting_memory_live + && op <> `metadata_export + && op <> `changing_dynamic_range + && op <> `changing_memory_limits + && op <> `changing_static_range + && op <> `start + && op <> `changing_VCPUs + && op <> `destroy + then Some (Api_errors.operation_not_allowed, ["This operation is not allowed on a control domain"]) + else None) in + + (* check for any HVM guest feature needed by the op *) + let current_error = check current_error (fun () -> + check_op_for_feature ~__context ~vmr ~vmgmr ~power_state ~op ~ref ~strict + ) in + + (* check if the dynamic changeable operations are still valid *) + let current_error = check current_error (fun () -> + if op = `snapshot_with_quiesce && + (Pervasiveext.maybe_with_default true + (fun gm 
-> let other = gm.Db_actions.vM_guest_metrics_other in + not (List.mem_assoc "feature-quiesce" other || List.mem_assoc "feature-snapshot" other)) + vmgmr) + then Some (Api_errors.vm_snapshot_with_quiesce_not_supported, [ ref_str ]) + else None) in + + (* Check for an error due to VDI caching/reset behaviour *) + let current_error = check current_error (fun () -> + if op = `checkpoint || op = `snapshot || op = `suspend || op = `snapshot_with_quiesce + then (* If any vdi exists with on_boot=reset, then disallow checkpoint, snapshot, suspend *) + if List.exists fst vdis_reset_and_caching + then Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation,[]) + else None + else if op = `pool_migrate then + (* If any vdi exists with on_boot=reset and caching is enabled, disallow migrate *) + if List.exists (fun (reset,caching) -> reset && caching) vdis_reset_and_caching + then Some (Api_errors.vdi_on_boot_mode_incompatible_with_operation,[]) + else None + else None) in + + (* If a PCI device is passed-through, check if the operation is allowed *) + let current_error = check current_error (fun () -> + if vmr.Db_actions.vM_attached_PCIs <> [] + then check_pci ~op ~ref_str + else None) in + + (* The VM has a VGPU, check if the operation is allowed*) + let current_error = check current_error (fun () -> + if vmr.Db_actions.vM_VGPUs <> [] + then check_vgpu ~__context ~op ~ref_str ~vgpus:vmr.Db_actions.vM_VGPUs + else None) in + + (* Check for errors caused by VM being in an appliance. *) + let current_error = check current_error (fun () -> + if Db.is_valid_ref __context vmr.Db_actions.vM_appliance + then check_appliance ~vmr ~op ~ref_str + else None) in + + (* Check for errors caused by VM being assigned to a protection policy. *) + let current_error = check current_error (fun () -> + if Db.is_valid_ref __context vmr.Db_actions.vM_protection_policy + then check_protection_policy ~vmr ~op ~ref_str + else None) in + + (* Check whether this VM needs to be a system domain. 
*) + let current_error = check current_error (fun () -> + if op = `query_services && not (bool_of_assoc "is_system_domain" vmr.Db_actions.vM_other_config) + then Some (Api_errors.not_system_domain, [ ref_str ]) + else None) in + + current_error let maybe_get_guest_metrics ~__context ~ref = - if Db.is_valid_ref __context ref - then Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:ref) - else None + if Db.is_valid_ref __context ref + then Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:ref) + else None let get_info ~__context ~self = - let all = Db.VM.get_record_internal ~__context ~self in - let gm = maybe_get_guest_metrics ~__context ~ref:(all.Db_actions.vM_guest_metrics) in - let clone_suspended_vm_enabled = Helpers.clone_suspended_vm_enabled ~__context in - let vdis_reset_and_caching = List.filter_map (fun vbd -> - try - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in - Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) - with _ -> None) all.Db_actions.vM_VBDs in - all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching + let all = Db.VM.get_record_internal ~__context ~self in + let gm = maybe_get_guest_metrics ~__context ~ref:(all.Db_actions.vM_guest_metrics) in + let clone_suspended_vm_enabled = Helpers.clone_suspended_vm_enabled ~__context in + let vdis_reset_and_caching = List.filter_map (fun vbd -> + try + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in + Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) + with _ -> None) all.Db_actions.vM_VBDs in + all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching let get_operation_error ~__context ~self ~op ~strict = - let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in - check_operation_error __context all gm self 
clone_suspended_vm_enabled vdis_reset_and_caching op strict + let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in + check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op strict let assert_operation_valid ~__context ~self ~op ~strict = - match get_operation_error ~__context ~self ~op ~strict with - | None -> () - | Some (a,b) -> raise (Api_errors.Server_error (a,b)) + match get_operation_error ~__context ~self ~op ~strict with + | None -> () + | Some (a,b) -> raise (Api_errors.Server_error (a,b)) let update_allowed_operations ~__context ~self = - let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in - let check accu op = - match check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op true with - | None -> op :: accu - | _ -> accu - in - let allowed = - List.fold_left check [] - [`snapshot; `copy; `clone; `revert; `checkpoint; `snapshot_with_quiesce; - `start; `start_on; `pause; `unpause; `clean_shutdown; `clean_reboot; - `hard_shutdown; `hard_reboot; `suspend; `resume; `resume_on; `export; `destroy; - `provision; `changing_VCPUs_live; `pool_migrate; `migrate_send; `make_into_template; `changing_static_range; - `changing_shadow_memory; `changing_dynamic_range] - in - (* FIXME: need to be able to deal with rolling-upgrade for orlando as well *) - let allowed = - if Helpers.rolling_upgrade_in_progress ~__context - then Listext.List.intersect allowed Xapi_globs.vm_operations_miami - else allowed - in - Db.VM.set_allowed_operations ~__context ~self ~value:allowed; - (* Update the appliance's allowed operations. 
*) - let appliance = Db.VM.get_appliance ~__context ~self in - if Db.is_valid_ref __context appliance then - Xapi_vm_appliance_lifecycle.update_allowed_operations ~__context ~self:appliance + let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in + let check accu op = + match check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op true with + | None -> op :: accu + | _ -> accu + in + let allowed = + List.fold_left check [] + [`snapshot; `copy; `clone; `revert; `checkpoint; `snapshot_with_quiesce; + `start; `start_on; `pause; `unpause; `clean_shutdown; `clean_reboot; + `hard_shutdown; `hard_reboot; `suspend; `resume; `resume_on; `export; `destroy; + `provision; `changing_VCPUs_live; `pool_migrate; `migrate_send; `make_into_template; `changing_static_range; + `changing_shadow_memory; `changing_dynamic_range] + in + (* FIXME: need to be able to deal with rolling-upgrade for orlando as well *) + let allowed = + if Helpers.rolling_upgrade_in_progress ~__context + then Listext.List.intersect allowed Xapi_globs.vm_operations_miami + else allowed + in + Db.VM.set_allowed_operations ~__context ~self ~value:allowed; + (* Update the appliance's allowed operations. *) + let appliance = Db.VM.get_appliance ~__context ~self in + if Db.is_valid_ref __context appliance then + Xapi_vm_appliance_lifecycle.update_allowed_operations ~__context ~self:appliance (** Called on new VMs (clones, imports) and on server start to manually refresh the power state, allowed_operations field etc. 
Current-operations won't be cleaned *) let force_state_reset_keep_current_operations ~__context ~self ~value:state = - if state = `Halted then begin - (* mark all devices as disconnected *) - List.iter - (fun vbd -> - Db.VBD.set_currently_attached ~__context ~self:vbd ~value:false; - Db.VBD.set_reserved ~__context ~self:vbd ~value:false; - Xapi_vbd_helpers.clear_current_operations ~__context ~self:vbd) - (Db.VM.get_VBDs ~__context ~self); - List.iter - (fun vif -> - Db.VIF.set_currently_attached ~__context ~self:vif ~value:false; - Db.VIF.set_reserved ~__context ~self:vif ~value:false; - Xapi_vif_helpers.clear_current_operations ~__context ~self:vif) - (Db.VM.get_VIFs ~__context ~self); - List.iter - (fun vgpu -> - Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:false; - Db.VGPU.set_resident_on ~__context ~self:vgpu ~value:Ref.null; - Db.VGPU.set_scheduled_to_be_resident_on - ~__context ~self:vgpu ~value:Ref.null) - (Db.VM.get_VGPUs ~__context ~self); - List.iter - (fun pci -> - Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self) - (Db.VM.get_attached_PCIs ~__context ~self); - (* The following should not be necessary if many-to-many relations in the DB - * work properly. People have reported issues that may indicate that this is - * not the case, but we have not yet found the root cause. Therefore, the - * following code is there "just to be sure". 
- *) - List.iter - (fun pci -> - if List.mem self (Db.PCI.get_attached_VMs ~__context ~self:pci) then - Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self - ) - (Db.PCI.get_all ~__context); - (* Blank the requires_reboot flag *) - Db.VM.set_requires_reboot ~__context ~self ~value:false - end; - - if state = `Halted || state = `Suspended then begin - Db.VM.set_resident_on ~__context ~self ~value:Ref.null; - (* make sure we aren't reserving any memory for this VM *) - Db.VM.set_scheduled_to_be_resident_on ~__context ~self ~value:Ref.null; - Db.VM.set_domid ~__context ~self ~value:(-1L) - end; - - if state = `Halted then begin - (* archive the rrd for this vm *) - let vm_uuid = Db.VM.get_uuid ~__context ~self in - log_and_ignore_exn (fun () -> Rrdd.archive_rrd ~vm_uuid ~remote_address:(try Some (Pool_role.get_master_address ()) with _ -> None)) - end; - - Db.VM.set_power_state ~__context ~self ~value:state; - update_allowed_operations ~__context ~self + if state = `Halted then begin + (* mark all devices as disconnected *) + List.iter + (fun vbd -> + Db.VBD.set_currently_attached ~__context ~self:vbd ~value:false; + Db.VBD.set_reserved ~__context ~self:vbd ~value:false; + Xapi_vbd_helpers.clear_current_operations ~__context ~self:vbd) + (Db.VM.get_VBDs ~__context ~self); + List.iter + (fun vif -> + Db.VIF.set_currently_attached ~__context ~self:vif ~value:false; + Db.VIF.set_reserved ~__context ~self:vif ~value:false; + Xapi_vif_helpers.clear_current_operations ~__context ~self:vif) + (Db.VM.get_VIFs ~__context ~self); + List.iter + (fun vgpu -> + Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:false; + Db.VGPU.set_resident_on ~__context ~self:vgpu ~value:Ref.null; + Db.VGPU.set_scheduled_to_be_resident_on + ~__context ~self:vgpu ~value:Ref.null) + (Db.VM.get_VGPUs ~__context ~self); + List.iter + (fun pci -> + Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self) + (Db.VM.get_attached_PCIs ~__context ~self); + (* The following should not 
be necessary if many-to-many relations in the DB + * work properly. People have reported issues that may indicate that this is + * not the case, but we have not yet found the root cause. Therefore, the + * following code is there "just to be sure". + *) + List.iter + (fun pci -> + if List.mem self (Db.PCI.get_attached_VMs ~__context ~self:pci) then + Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:self + ) + (Db.PCI.get_all ~__context); + (* Blank the requires_reboot flag *) + Db.VM.set_requires_reboot ~__context ~self ~value:false + end; + + if state = `Halted || state = `Suspended then begin + Db.VM.set_resident_on ~__context ~self ~value:Ref.null; + (* make sure we aren't reserving any memory for this VM *) + Db.VM.set_scheduled_to_be_resident_on ~__context ~self ~value:Ref.null; + Db.VM.set_domid ~__context ~self ~value:(-1L) + end; + + if state = `Halted then begin + (* archive the rrd for this vm *) + let vm_uuid = Db.VM.get_uuid ~__context ~self in + log_and_ignore_exn (fun () -> Rrdd.archive_rrd ~vm_uuid ~remote_address:(try Some (Pool_role.get_master_address ()) with _ -> None)) + end; + + Db.VM.set_power_state ~__context ~self ~value:state; + update_allowed_operations ~__context ~self (** Called on new VMs (clones, imports) and on server start to manually refresh the power state, allowed_operations field etc. 
Clean current-operations as well *) let force_state_reset ~__context ~self ~value:state = - if (Db.VM.get_current_operations ~__context ~self) <> [] then - Db.VM.set_current_operations ~__context ~self ~value:[]; - force_state_reset_keep_current_operations ~__context ~self ~value:state + if (Db.VM.get_current_operations ~__context ~self) <> [] then + Db.VM.set_current_operations ~__context ~self ~value:[]; + force_state_reset_keep_current_operations ~__context ~self ~value:state (** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = - let all = List.map fst (Db.VM.get_current_operations ~__context ~self) in - if List.mem task_id all - then begin - Db.VM.remove_from_current_operations ~__context ~self ~key:task_id; - update_allowed_operations ~__context ~self - end +let cancel_task ~__context ~self ~task_id = + let all = List.map fst (Db.VM.get_current_operations ~__context ~self) in + if List.mem task_id all + then begin + Db.VM.remove_from_current_operations ~__context ~self ~key:task_id; + update_allowed_operations ~__context ~self + end let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = - let ops = Db.VM.get_current_operations ~__context ~self in - let set = (fun value -> Db.VM.set_current_operations ~__context ~self ~value) in - Helpers.cancel_tasks ~__context ~ops ~all_tasks_in_db ~task_ids ~set + let ops = Db.VM.get_current_operations ~__context ~self in + let set = (fun value -> Db.VM.set_current_operations ~__context ~self ~value) in + Helpers.cancel_tasks ~__context ~ops ~all_tasks_in_db ~task_ids ~set (* VM is considered as "live" when it's either Running or Paused, i.e. 
with a live domain *) let is_live ~__context ~self = - let power_state = Db.VM.get_power_state ~__context ~self in - power_state = `Running || power_state = `Paused + let power_state = Db.VM.get_power_state ~__context ~self in + power_state = `Running || power_state = `Paused let assert_power_state_in ~__context ~self ~allowed = - let actual = Db.VM.get_power_state ~__context ~self in - if not (List.mem actual allowed) - then raise (Api_errors.Server_error(Api_errors.vm_bad_power_state, [ - Ref.string_of self; - List.map Record_util.power_to_string allowed |> String.concat ";"; - Record_util.power_to_string actual ])) + let actual = Db.VM.get_power_state ~__context ~self in + if not (List.mem actual allowed) + then raise (Api_errors.Server_error(Api_errors.vm_bad_power_state, [ + Ref.string_of self; + List.map Record_util.power_to_string allowed |> String.concat ";"; + Record_util.power_to_string actual ])) let assert_power_state_is ~expected = assert_power_state_in ~allowed:[expected] diff --git a/ocaml/xapi/xapi_vm_memory_constraints.ml b/ocaml/xapi/xapi_vm_memory_constraints.ml index ea13b5951be..89d1d7e1a1c 100644 --- a/ocaml/xapi/xapi_vm_memory_constraints.ml +++ b/ocaml/xapi/xapi_vm_memory_constraints.ml @@ -13,94 +13,94 @@ *) (** * @group Virtual-Machine Management - *) - +*) + (** An extension of Vm_memory_constraints that provides additional database and API operations. *) module type T = sig - include Vm_memory_constraints.T + include Vm_memory_constraints.T - (** Asserts for the given set of constraints [c], that - [c.static_min] ≤ [c.dynamic_min] ≤ [c.dynamic_max] ≤ [c.static_max]. *) - val assert_valid : constraints:t -> unit + (** Asserts for the given set of constraints [c], that + [c.static_min] ≤ [c.dynamic_min] ≤ [c.dynamic_max] ≤ [c.static_max]. *) + val assert_valid : constraints:t -> unit - (** Asserts for the given set of constraints [c], that - [c.static_min] ≤ [c.dynamic_min] = [c.dynamic_max] = [c.static_max]. 
*) - val assert_valid_and_pinned_at_static_max : constraints:t -> unit + (** Asserts for the given set of constraints [c], that + [c.static_min] ≤ [c.dynamic_min] = [c.dynamic_max] = [c.static_max]. *) + val assert_valid_and_pinned_at_static_max : constraints:t -> unit - (** Asserts that the given set of constraints [c] is valid for the current - context. *) - val assert_valid_for_current_context : - __context:Context.t -> vm:[`VM] Ref.t -> constraints:t -> unit + (** Asserts that the given set of constraints [c] is valid for the current + context. *) + val assert_valid_for_current_context : + __context:Context.t -> vm:[`VM] Ref.t -> constraints:t -> unit - (** Extracts memory constraints from the given VM record. *) - val extract : vm_record:API.vM_t -> t + (** Extracts memory constraints from the given VM record. *) + val extract : vm_record:API.vM_t -> t - (** Reads memory constraints for the given VM, from the database. *) - val get : __context:Context.t -> vm_ref:[`VM] Ref.t -> t + (** Reads memory constraints for the given VM, from the database. *) + val get : __context:Context.t -> vm_ref:[`VM] Ref.t -> t - (** Reads memory constraints effective for the given running VM, from the database. *) - val get_live : __context:Context.t -> vm_ref:[`VM] Ref.t -> t + (** Reads memory constraints effective for the given running VM, from the database. *) + val get_live : __context:Context.t -> vm_ref:[`VM] Ref.t -> t - (** Writes memory constraints for the given VM, to the database. *) - val set : __context:Context.t -> vm_ref:[`VM] Ref.t -> constraints:t -> unit + (** Writes memory constraints for the given VM, to the database. 
*) + val set : __context:Context.t -> vm_ref:[`VM] Ref.t -> constraints:t -> unit end module Vm_memory_constraints : T = struct - include Vm_memory_constraints.Vm_memory_constraints - - let assert_valid ~constraints = - if not (are_valid ~constraints) - then raise (Api_errors.Server_error ( - Api_errors.memory_constraint_violation, - ["Memory limits must satisfy: \ - static_min ≤ dynamic_min ≤ dynamic_max ≤ static_max"])) - - let assert_valid_and_pinned_at_static_max ~constraints = - if not (are_valid_and_pinned_at_static_max ~constraints) - then raise (Api_errors.Server_error ( - Api_errors.memory_constraint_violation, - ["Memory limits must satisfy: \ - static_min ≤ dynamic_min = dynamic_max = static_max"])) - - let assert_valid_for_current_context ~__context ~vm ~constraints = - let is_control_domain = Db.VM.get_is_control_domain ~__context ~self:vm in - (if Pool_features.is_enabled ~__context Features.DMC && not is_control_domain - then assert_valid - else assert_valid_and_pinned_at_static_max) - ~constraints - - let extract ~vm_record = - { - static_min = vm_record.API.vM_memory_static_min; - dynamic_min = vm_record.API.vM_memory_dynamic_min; - target = vm_record.API.vM_memory_target; - dynamic_max = vm_record.API.vM_memory_dynamic_max; - static_max = vm_record.API.vM_memory_static_max; - } - - let get ~__context ~vm_ref = - let vm_record = Db.VM.get_record ~__context ~self:vm_ref in - extract vm_record - - let get_live ~__context ~vm_ref = - let live_record = Db.VM.get_record ~__context ~self:vm_ref in - let boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in - { - static_min = boot_record.API.vM_memory_static_min; - dynamic_min = live_record.API.vM_memory_dynamic_min; - target = live_record.API.vM_memory_target; - dynamic_max = live_record.API.vM_memory_dynamic_max; - static_max = boot_record.API.vM_memory_static_max; - } - - let set ~__context ~vm_ref ~constraints = - Db.VM.set_memory_static_min ~__context ~self:vm_ref 
~value:constraints.static_min; - Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:constraints.dynamic_min; - Db.VM.set_memory_target ~__context ~self:vm_ref ~value:constraints.target; - Db.VM.set_memory_dynamic_max ~__context ~self:vm_ref ~value:constraints.dynamic_max; - Db.VM.set_memory_static_max ~__context ~self:vm_ref ~value:constraints.static_max + include Vm_memory_constraints.Vm_memory_constraints + + let assert_valid ~constraints = + if not (are_valid ~constraints) + then raise (Api_errors.Server_error ( + Api_errors.memory_constraint_violation, + ["Memory limits must satisfy: \ + static_min ≤ dynamic_min ≤ dynamic_max ≤ static_max"])) + + let assert_valid_and_pinned_at_static_max ~constraints = + if not (are_valid_and_pinned_at_static_max ~constraints) + then raise (Api_errors.Server_error ( + Api_errors.memory_constraint_violation, + ["Memory limits must satisfy: \ + static_min ≤ dynamic_min = dynamic_max = static_max"])) + + let assert_valid_for_current_context ~__context ~vm ~constraints = + let is_control_domain = Db.VM.get_is_control_domain ~__context ~self:vm in + (if Pool_features.is_enabled ~__context Features.DMC && not is_control_domain + then assert_valid + else assert_valid_and_pinned_at_static_max) + ~constraints + + let extract ~vm_record = + { + static_min = vm_record.API.vM_memory_static_min; + dynamic_min = vm_record.API.vM_memory_dynamic_min; + target = vm_record.API.vM_memory_target; + dynamic_max = vm_record.API.vM_memory_dynamic_max; + static_max = vm_record.API.vM_memory_static_max; + } + + let get ~__context ~vm_ref = + let vm_record = Db.VM.get_record ~__context ~self:vm_ref in + extract vm_record + + let get_live ~__context ~vm_ref = + let live_record = Db.VM.get_record ~__context ~self:vm_ref in + let boot_record = Helpers.get_boot_record ~__context ~self:vm_ref in + { + static_min = boot_record.API.vM_memory_static_min; + dynamic_min = live_record.API.vM_memory_dynamic_min; + target = live_record.API.vM_memory_target; 
+ dynamic_max = live_record.API.vM_memory_dynamic_max; + static_max = boot_record.API.vM_memory_static_max; + } + + let set ~__context ~vm_ref ~constraints = + Db.VM.set_memory_static_min ~__context ~self:vm_ref ~value:constraints.static_min; + Db.VM.set_memory_dynamic_min ~__context ~self:vm_ref ~value:constraints.dynamic_min; + Db.VM.set_memory_target ~__context ~self:vm_ref ~value:constraints.target; + Db.VM.set_memory_dynamic_max ~__context ~self:vm_ref ~value:constraints.dynamic_max; + Db.VM.set_memory_static_max ~__context ~self:vm_ref ~value:constraints.static_max end diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index 69bb018bba0..057dc5d9dab 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -41,7 +41,7 @@ let _master = "master" type remote = { rpc : Rpc.call -> Rpc.response; - session : API.ref_session; + session : API.ref_session; sm_url : string; xenops_url : string; master_url : string; @@ -53,7 +53,7 @@ type remote = { let get_ip_from_url url = match Http.Url.of_string url with | Http.Url.Http { Http.Url.host = host }, _ -> host - | _, _ -> failwith (Printf.sprintf "Cannot extract foreign IP address from: %s" url) + | _, _ -> failwith (Printf.sprintf "Cannot extract foreign IP address from: %s" url) let remote_of_dest dest = let master_url = List.assoc _master dest in @@ -82,8 +82,8 @@ let with_migrate f = Mutex.execute nmutex (fun () -> if !number = 3 then raise (Api_errors.Server_error (Api_errors.too_many_storage_migrates,["3"])); incr number); - finally f (fun () -> - Mutex.execute nmutex (fun () -> + finally f (fun () -> + Mutex.execute nmutex (fun () -> decr number)) module XenAPI = Client @@ -132,7 +132,7 @@ let assert_sr_support_operations ~__context ~vdi_map ~remote ~ops = |> List.iter (fun (vdi, sr) -> op_supported_on_source_sr vdi ops; op_supported_on_dest_sr sr ops sm_record remote; - ) + ) let assert_licensed_storage_motion ~__context = Pool_features.assert_enabled ~__context 
~f:Features.Storage_motion @@ -154,7 +154,7 @@ let rec migrate_with_retries ~__context queue_name max try_no dbg vm_uuid xenops (* CA-86347 Handle the excn if the VM happens to reboot during migration. * Such a reboot causes Xenops_interface.Cancelled the first try, then * Xenops_interface.Internal_error("End_of_file") the second, then success. *) - with + with (* User cancelled migration *) | Xenops_interface.Cancelled _ as e when TaskHelper.is_cancelling ~__context -> debug "xenops: Migration cancelled by user."; @@ -168,7 +168,7 @@ let rec migrate_with_retries ~__context queue_name max try_no dbg vm_uuid xenops migrate_with_retries ~__context queue_name max (try_no + 1) dbg vm_uuid xenops_vdi_map xenops_vif_map xenops (* Something else went wrong *) - | e -> + | e -> debug "xenops: not retrying migration: caught %s from %s in attempt %d of %d." (Printexc.to_string e) !progress try_no max; raise e @@ -298,9 +298,9 @@ let inter_pool_metadata_transfer ~__context ~remote ~vm ~vdi_map ~vif_map ~dry_r Db.VDI.remove_from_other_config ~__context ~self:vdi ~key:Constants.storage_migrate_vdi_map_key; Opt.iter (fun remote_vdi_reference -> - Db.VDI.add_to_other_config ~__context ~self:vdi - ~key:Constants.storage_migrate_vdi_map_key - ~value:(Ref.string_of remote_vdi_reference)) + Db.VDI.add_to_other_config ~__context ~self:vdi + ~key:Constants.storage_migrate_vdi_map_key + ~value:(Ref.string_of remote_vdi_reference)) vdi_record.remote_vdi_reference) vdi_map; List.iter (fun vif_record -> @@ -322,15 +322,15 @@ let inter_pool_metadata_transfer ~__context ~remote ~vm ~vdi_map ~vif_map ~dry_r (* Make sure we clean up the remote VDI and VIF mapping keys. 
*) List.iter (fun vdi_record -> - Db.VDI.remove_from_other_config ~__context - ~self:vdi_record.local_vdi_reference - ~key:Constants.storage_migrate_vdi_map_key) + Db.VDI.remove_from_other_config ~__context + ~self:vdi_record.local_vdi_reference + ~key:Constants.storage_migrate_vdi_map_key) vdi_map; List.iter (fun vif_record -> - Db.VIF.remove_from_other_config ~__context - ~self:vif_record.local_vif_reference - ~key:Constants.storage_migrate_vif_map_key) + Db.VIF.remove_from_other_config ~__context + ~self:vif_record.local_vif_reference + ~key:Constants.storage_migrate_vif_map_key) vif_map) module VDIMap = Map.Make(struct type t = API.ref_VDI let compare = compare end) @@ -526,9 +526,9 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t let query = Printf.sprintf "(field \"location\"=\"%s\") and (field \"SR\"=\"%s\")" remote_vdi (Ref.string_of dest_sr_ref) in let vdis = XenAPI.VDI.get_all_records_where remote.rpc remote.session query in let remote_vdi_ref = match vdis with - | [] -> raise (Api_errors.Server_error(Api_errors.vdi_location_missing, [Ref.string_of dest_sr_ref; remote_vdi])) - | h :: [] -> debug "Found remote vdi reference: %s" (Ref.string_of (fst h)); fst h - | _ -> raise (Api_errors.Server_error(Api_errors.location_not_unique, [Ref.string_of dest_sr_ref; remote_vdi])) in + | [] -> raise (Api_errors.Server_error(Api_errors.vdi_location_missing, [Ref.string_of dest_sr_ref; remote_vdi])) + | h :: [] -> debug "Found remote vdi reference: %s" (Ref.string_of (fst h)); fst h + | _ -> raise (Api_errors.Server_error(Api_errors.location_not_unique, [Ref.string_of dest_sr_ref; remote_vdi])) in try cont remote_vdi_ref with e -> (try XenAPI.VDI.destroy remote.rpc remote.session remote_vdi_ref with _ -> error "Failed to destroy remote VDI"); @@ -573,11 +573,11 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t let task_result = task |> register_task __context - |> add_to_progress_map mapfn - |> 
wait_for_task dbg - |> remove_from_progress_map - |> unregister_task __context - |> success_task dbg in + |> add_to_progress_map mapfn + |> wait_for_task dbg + |> remove_from_progress_map + |> unregister_task __context + |> success_task dbg in let mirror_id, remote_vdi = if not vconf.do_mirror then @@ -597,17 +597,17 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t try let result = continuation mirror_record in (match mirror_id with - | Some mid -> ignore(Storage_access.unregister_mirror mid); - | None -> ()); + | Some mid -> ignore(Storage_access.unregister_mirror mid); + | None -> ()); if mirror && not (Xapi_fist.storage_motion_keep_vdi () || copy) then Helpers.call_api_functions ~__context (fun rpc session_id -> - XenAPI.VDI.destroy rpc session_id vconf.vdi); + XenAPI.VDI.destroy rpc session_id vconf.vdi); result with e -> let mirror_failed = match mirror_id with | Some mid -> - ignore(Storage_access.unregister_mirror mid); + ignore(Storage_access.unregister_mirror mid); (try SMAPI.DATA.MIRROR.stop ~dbg ~id:mid with _ -> ()); let m = SMAPI.DATA.MIRROR.stat ~dbg ~id:mid in m.Mirror.failed @@ -617,10 +617,10 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t if mirror then with_new_dp (fun new_dp -> - let mirror_id, remote_vdi = mirror_to_remote new_dp in - with_remote_vdi remote_vdi (fun remote_vdi_ref -> - let mirror_record = get_mirror_record ~new_dp remote_vdi remote_vdi_ref in - post_mirror mirror_id mirror_record)) + let mirror_id, remote_vdi = mirror_to_remote new_dp in + with_remote_vdi remote_vdi (fun remote_vdi_ref -> + let mirror_record = get_mirror_record ~new_dp remote_vdi remote_vdi_ref in + post_mirror mirror_id mirror_record)) else let mirror_record = get_mirror_record vconf.location (XenAPI.VDI.get_by_uuid remote.rpc remote.session vdi_uuid) in continuation mirror_record @@ -687,16 +687,16 @@ let migrate_send' ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = let open 
Xapi_xenops in let remote = remote_of_dest dest in - + (* Copy mode means we don't destroy the VM on the source host. We also don't copy over the RRDs/messages *) let copy = try bool_of_string (List.assoc "copy" options) with _ -> false in - (* The first thing to do is to create mirrors of all the disks on the remote. + (* The first thing to do is to create mirrors of all the disks on the remote. We look through the VM's VBDs and all of those of the snapshots. We then compile a list of all of the associated VDIs, whether we mirror them or not - (mirroring means we believe the VDI to be active and new writes should be - mirrored to the destination - otherwise we just copy it) + (mirroring means we believe the VDI to be active and new writes should be + mirrored to the destination - otherwise we just copy it) We look at the VDIs of the VM, the VDIs of all of the snapshots, and any suspend-image VDIs. *) @@ -832,16 +832,16 @@ let migrate_send' ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = let vms = let vdi_map = List.map (fun mirror_record -> { - local_vdi_reference = mirror_record.mr_local_vdi_reference; - remote_vdi_reference = Some mirror_record.mr_remote_vdi_reference; - }) + local_vdi_reference = mirror_record.mr_local_vdi_reference; + remote_vdi_reference = Some mirror_record.mr_remote_vdi_reference; + }) (suspends_map @ snapshots_map @ vdi_map) in let vif_map = List.map (fun (vif, network) -> { - local_vif_reference = vif; - remote_network_reference = network; - }) + local_vif_reference = vif; + remote_network_reference = network; + }) vif_map in inter_pool_metadata_transfer ~__context ~remote ~vm ~vdi_map @@ -877,7 +877,7 @@ let migrate_send' ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = (* It's acceptable for the VM not to exist at this point; shutdown commutes with storage migrate *) begin try - Xapi_xenops.Events_from_xenopsd.with_suppressed queue_name dbg vm_uuid + Xapi_xenops.Events_from_xenopsd.with_suppressed queue_name dbg vm_uuid 
(fun () -> migrate_with_retry ~__context queue_name dbg vm_uuid xenops_vdi_map xenops_vif_map remote.xenops_url; Xapi_xenops.Xenopsd_metadata.delete ~__context vm_uuid) @@ -931,7 +931,7 @@ let migrate_send' ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = (* Signal the remote pool that we're done *) end; - Helpers.call_api_functions ~__context (fun rpc session_id -> + Helpers.call_api_functions ~__context (fun rpc session_id -> if not is_intra_pool && not copy then begin info "Destroying VM ref=%s uuid=%s" (Ref.string_of vm) vm_uuid; Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; @@ -994,7 +994,7 @@ let assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = debug "This is a cross-pool migration"; `cross_pool in - + (* Check VDIs are not migrating to or from an SR which doesn't have required_sr_operations *) let required_sr_operations = [Smint.Vdi_mirror; Smint.Vdi_snapshot] in assert_sr_support_operations ~__context ~vdi_map ~remote ~ops:required_sr_operations; @@ -1035,21 +1035,21 @@ let assert_can_migrate ~__context ~vm ~dest ~live ~vdi_map ~vif_map ~options = let vifs = Db.VM.get_VIFs ~__context ~self:vm in let snapshots = Db.VM.get_snapshots ~__context ~self:vm in let snapshot_vifs = List.flatten - (List.map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots) in + (List.map (fun self -> Db.VM.get_VIFs ~__context ~self) snapshots) in let vif_map = infer_vif_map ~__context (vifs @ snapshot_vifs) vif_map in try let vdi_map = List.map (fun (vdi, sr) -> { - local_vdi_reference = vdi; - remote_vdi_reference = None; - }) + local_vdi_reference = vdi; + remote_vdi_reference = None; + }) vdi_map in let vif_map = List.map (fun (vif, network) -> { - local_vif_reference = vif; - remote_network_reference = network; - }) + local_vif_reference = vif; + remote_network_reference = network; + }) vif_map in assert (inter_pool_metadata_transfer ~__context ~remote ~vm ~vdi_map ~vif_map ~dry_run:true ~live:true ~copy = []) with 
Xmlrpc_client.Connection_reset -> @@ -1091,7 +1091,7 @@ let handler req fd _ = let localhost = Helpers.get_localhost ~__context in (* NB this parameter will be present except when we're doing a rolling upgrade. *) - let memory_required_kib = + let memory_required_kib = if List.mem_assoc _memory_required_kib req.Http.Request.query then Int64.of_string (List.assoc _memory_required_kib req.Http.Request.query) else diff --git a/ocaml/xapi/xapi_vm_placement.ml b/ocaml/xapi/xapi_vm_placement.ml index 2a556ba0bd4..cb5f434a221 100644 --- a/ocaml/xapi/xapi_vm_placement.ml +++ b/ocaml/xapi/xapi_vm_placement.ml @@ -13,85 +13,85 @@ *) (** * @group Virtual-Machine Management - *) - +*) + open Db_filter_types open Vm_placement (* === Snapshot constructors ================================================ *) let create_guest_snapshot __context guest = - let r = Db.VM.get_record ~__context ~self:guest in - { GS.id = r.API.vM_uuid - ; GS.memory_overhead = r.API.vM_memory_overhead - ; GS.memory_static_max = r.API.vM_memory_static_max - ; GS.memory_static_min = r.API.vM_memory_static_min - ; GS.memory_dynamic_max = r.API.vM_memory_dynamic_max - ; GS.memory_dynamic_min = r.API.vM_memory_dynamic_min - } + let r = Db.VM.get_record ~__context ~self:guest in + { GS.id = r.API.vM_uuid + ; GS.memory_overhead = r.API.vM_memory_overhead + ; GS.memory_static_max = r.API.vM_memory_static_max + ; GS.memory_static_min = r.API.vM_memory_static_min + ; GS.memory_dynamic_max = r.API.vM_memory_dynamic_max + ; GS.memory_dynamic_min = r.API.vM_memory_dynamic_min + } let create_host_snapshot __context host = - let host_id = Db.Host.get_uuid __context host in - let memory_overhead = Db.Host.get_memory_overhead ~__context ~self:host in - let metrics = Db.Host.get_metrics ~__context ~self:host in - let memory_total = Db.Host_metrics.get_memory_total ~__context ~self:metrics in - let guest_snapshots guest_type = List.map - (create_guest_snapshot __context) - (Db.VM.get_refs_where ~__context - ~expr:(Eq 
(Field guest_type, Literal (Ref.string_of host)))) - in - { HS.id = host_id - ; HS.is_pool_master = Helpers.is_pool_master ~__context ~host - ; HS.guests_resident = guest_snapshots "resident_on" - ; HS.guests_scheduled = guest_snapshots "scheduled_to_be_resident_on" - ; HS.memory_overhead = memory_overhead - ; HS.memory_total = memory_total - } + let host_id = Db.Host.get_uuid __context host in + let memory_overhead = Db.Host.get_memory_overhead ~__context ~self:host in + let metrics = Db.Host.get_metrics ~__context ~self:host in + let memory_total = Db.Host_metrics.get_memory_total ~__context ~self:metrics in + let guest_snapshots guest_type = List.map + (create_guest_snapshot __context) + (Db.VM.get_refs_where ~__context + ~expr:(Eq (Field guest_type, Literal (Ref.string_of host)))) + in + { HS.id = host_id + ; HS.is_pool_master = Helpers.is_pool_master ~__context ~host + ; HS.guests_resident = guest_snapshots "resident_on" + ; HS.guests_scheduled = guest_snapshots "scheduled_to_be_resident_on" + ; HS.memory_overhead = memory_overhead + ; HS.memory_total = memory_total + } let create_pool_subset_snapshot __context pool hosts = - let host_snapshots = List.map (create_host_snapshot __context) hosts in - { PS.id = Ref.string_of pool - ; PS.hosts = host_snapshots - } + let host_snapshots = List.map (create_host_snapshot __context) hosts in + { PS.id = Ref.string_of pool + ; PS.hosts = host_snapshots + } (* === Snapshot summary constructors ======================================== *) let create_host_snapshot_summary __context extra_guests host = - summarise_host_snapshot - (List.map (create_guest_snapshot __context) extra_guests) - (create_host_snapshot __context host) + summarise_host_snapshot + (List.map (create_guest_snapshot __context) extra_guests) + (create_host_snapshot __context host) let create_pool_subset_snapshot_summary __context extra_guests pool hosts = - summarise_pool_snapshot - (List.map (create_guest_snapshot __context) extra_guests) - 
(create_pool_subset_snapshot __context pool hosts) + summarise_pool_snapshot + (List.map (create_guest_snapshot __context) extra_guests) + (create_pool_subset_snapshot __context pool hosts) (* === Plumbing code ======================================================== *) (** Returns a list of affinity host identifiers for the given [guest]. *) let affinity_host_ids_of_guest __context guest = - let affinity_host = Db.VM.get_affinity ~__context ~self:guest in - let affinity_host_is_valid = Db.is_valid_ref __context affinity_host in - if affinity_host_is_valid - then [Db.Host.get_uuid __context affinity_host] - else [] + let affinity_host = Db.VM.get_affinity ~__context ~self:guest in + let affinity_host_is_valid = Db.is_valid_ref __context affinity_host in + if affinity_host_is_valid + then [Db.Host.get_uuid __context affinity_host] + else [] (** Returns a single host (from the given list of hosts) on which the given [vm] -can boot. @raise Api_errors.no_hosts_available if no such host exists. *) + can boot. @raise Api_errors.no_hosts_available if no such host exists. 
*) let select_host __context guest validate_host hosts = - let pool_summary = create_pool_subset_snapshot_summary __context [guest] - (Helpers.get_pool ~__context) hosts in - let affinity_host_ids = affinity_host_ids_of_guest __context guest in - let random_fn = - if Xapi_fist.deterministic_host_selection () - then zero_fn - else biased_random_fn in - let validate_host = - (fun uuid -> validate_host (Db.Host.get_by_uuid ~__context ~uuid)) in - let host = select_host_from_summary pool_summary affinity_host_ids - validate_host random_fn in - match host with - | Some (host) -> - Db.Host.get_by_uuid ~__context ~uuid:host - | None -> - raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) + let pool_summary = create_pool_subset_snapshot_summary __context [guest] + (Helpers.get_pool ~__context) hosts in + let affinity_host_ids = affinity_host_ids_of_guest __context guest in + let random_fn = + if Xapi_fist.deterministic_host_selection () + then zero_fn + else biased_random_fn in + let validate_host = + (fun uuid -> validate_host (Db.Host.get_by_uuid ~__context ~uuid)) in + let host = select_host_from_summary pool_summary affinity_host_ids + validate_host random_fn in + match host with + | Some (host) -> + Db.Host.get_by_uuid ~__context ~uuid:host + | None -> + raise (Api_errors.Server_error (Api_errors.no_hosts_available, [])) diff --git a/ocaml/xapi/xapi_vm_snapshot.ml b/ocaml/xapi/xapi_vm_snapshot.ml index 1b76a5813df..8a83b23d960 100644 --- a/ocaml/xapi/xapi_vm_snapshot.ml +++ b/ocaml/xapi/xapi_vm_snapshot.ml @@ -13,7 +13,7 @@ *) (** * @group Virtual-Machine Management - *) +*) open Client @@ -25,208 +25,208 @@ open D (* Crash-consistant snapshot *) (*************************************************************************************************) let snapshot ~__context ~vm ~new_name = - debug "Snapshot: begin"; - TaskHelper.set_cancellable ~__context; - let res = Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_snapshot ~__context ~vm ~new_name in - debug 
"Snapshot: end"; - res + debug "Snapshot: begin"; + TaskHelper.set_cancellable ~__context; + let res = Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_snapshot ~__context ~vm ~new_name in + debug "Snapshot: end"; + res (*************************************************************************************************) (* Quiesced snapshot *) (*************************************************************************************************) (* xenstore paths *) let control_path ~xs ~domid x = - xs.Xenstore.Xs.getdomainpath domid ^ "/control/" ^ x + xs.Xenstore.Xs.getdomainpath domid ^ "/control/" ^ x let snapshot_path ~xs ~domid x = - xs.Xenstore.Xs.getdomainpath domid ^ "/control/snapshot/" ^ x + xs.Xenstore.Xs.getdomainpath domid ^ "/control/snapshot/" ^ x let snapshot_cleanup_path ~xs ~domid = - xs.Xenstore.Xs.getdomainpath domid ^ "/control/snapshot" + xs.Xenstore.Xs.getdomainpath domid ^ "/control/snapshot" (* check if [flag] is set in the control_path of the VM [vm]. This looks like this code is a kind *) (* of duplicate of the one in {!xal.ml}, {!events.ml} and {!xapi_guest_agent.ml} which are looking *) (* dynamically if there is a change in this part of the VM's xenstore tree. However, at the moment *) (* always allowing the operation and checking if it is enabled when it is triggered is sufficient. 
*) let is_flag_set ~xs ~flag ~domid ~vm = - try - xs.Xenstore.Xs.read (control_path ~xs ~domid flag) = "1" - with e -> - debug "Exception while reading %s flag of VM %s (domain %i): %s" - flag (Ref.string_of vm) domid (Printexc.to_string e); - false + try + xs.Xenstore.Xs.read (control_path ~xs ~domid flag) = "1" + with e -> + debug "Exception while reading %s flag of VM %s (domain %i): %s" + flag (Ref.string_of vm) domid (Printexc.to_string e); + false let quiesce_enabled ~xs ~domid ~vm = - let aux x = is_flag_set ~xs ~domid ~vm ~flag:x in - aux "feature-snapshot" || aux "feature-quiesce" + let aux x = is_flag_set ~xs ~domid ~vm ~flag:x in + aux "feature-snapshot" || aux "feature-quiesce" (* we want to compare the integer at the end of a common string, ie. strings as x="/local/..../3" *) (* and y="/local/.../12". The result should be x < y. *) let compare_snapid_chunks s1 s2 = - if String.length s1 <> String.length s2 - then String.length s1 - String.length s2 - else compare s1 s2 + if String.length s1 <> String.length s2 + then String.length s1 - String.length s2 + else compare s1 s2 (* wait for the VSS provider (or similar piece of software running inside the guest) to quiesce *) (* the applications of the VM and to call VM.snapshot. After that, the VSS provider is supposed *) (* to tell us if everything happened nicely. 
*) let wait_for_snapshot ~__context ~vm ~xs ~domid ~new_name = - let value = Watch.value_to_appear (snapshot_path ~xs ~domid "status") in - match Watch.wait_for ~xs ~timeout:!Xapi_globs.snapshot_with_quiesce_timeout value with - | "snapshot-created" -> - (* Get the transportable snap ID *) - debug "wait_for_snapshot: getting the transportable ID"; - let snapid = xs.Xenstore.Xs.directory (snapshot_path ~xs ~domid "snapid") in - let snapid = List.sort compare_snapid_chunks snapid in - let read_chunk x = xs.Xenstore.Xs.read (snapshot_path ~xs ~domid ("snapid/" ^ x)) in - let snapid = String.concat "" (List.map read_chunk snapid) in - - (* Get the uuid of the snapshot VM *) - debug "wait_for_snapshot: getting uuid of the snapshot VM"; - let snapshot_uuid = - try xs.Xenstore.Xs.read (snapshot_path ~xs ~domid "snapuuid") - with _ -> - error "The snapshot has not been correctly created; did snapwatchd create a full VM snapshot?"; - raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm ])) in - let snapshot_ref = - try Db.VM.get_by_uuid ~__context ~uuid:snapshot_uuid - with _ -> - error "The snapshot UUID provided by snapwatchd is not a valid UUID."; - raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm ])) in - - Db.VM.set_transportable_snapshot_id ~__context ~self:snapshot_ref ~value:snapid; - Db.VM.set_name_label ~__context ~self:snapshot_ref ~value:new_name; - - (* update the snapshot-info field *) - Db.VM.remove_from_snapshot_info ~__context ~self:snapshot_ref ~key:Xapi_vm_clone.disk_snapshot_type; - Db.VM.add_to_snapshot_info ~__context ~self:snapshot_ref ~key:Xapi_vm_clone.disk_snapshot_type ~value:Xapi_vm_clone.quiesced; - - snapshot_ref - - | "snapshot-error" -> - (* If an error was occured we get the error type and return *) - let error_str = xs.Xenstore.Xs.read (snapshot_path ~xs ~domid "error") in - let error_code () = try xs.Xenstore.Xs.read (snapshot_path ~xs ~domid 
"error/code") with _ -> "0" in - error "wait_for_snapshot: %s" error_str; - if List.mem error_str [ - Api_errors.xen_vss_req_error_init_failed; - Api_errors.xen_vss_req_error_prov_not_loaded; - Api_errors.xen_vss_req_error_no_volumes_supported; - Api_errors.xen_vss_req_error_start_snapshot_set_failed; - Api_errors.xen_vss_req_error_adding_volume_to_snapset_failed; - Api_errors.xen_vss_req_error_preparing_writers; - Api_errors.xen_vss_req_error_creating_snapshot; - Api_errors.xen_vss_req_error_creating_snapshot_xml_string ] - then raise (Api_errors.Server_error (error_str, [ Ref.string_of vm; error_code () ])) - else raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm; error_str ])) - - | e -> - failwith (Printf.sprintf "wait_for_snapshot: unexpected result (%s)" e) + let value = Watch.value_to_appear (snapshot_path ~xs ~domid "status") in + match Watch.wait_for ~xs ~timeout:!Xapi_globs.snapshot_with_quiesce_timeout value with + | "snapshot-created" -> + (* Get the transportable snap ID *) + debug "wait_for_snapshot: getting the transportable ID"; + let snapid = xs.Xenstore.Xs.directory (snapshot_path ~xs ~domid "snapid") in + let snapid = List.sort compare_snapid_chunks snapid in + let read_chunk x = xs.Xenstore.Xs.read (snapshot_path ~xs ~domid ("snapid/" ^ x)) in + let snapid = String.concat "" (List.map read_chunk snapid) in + + (* Get the uuid of the snapshot VM *) + debug "wait_for_snapshot: getting uuid of the snapshot VM"; + let snapshot_uuid = + try xs.Xenstore.Xs.read (snapshot_path ~xs ~domid "snapuuid") + with _ -> + error "The snapshot has not been correctly created; did snapwatchd create a full VM snapshot?"; + raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm ])) in + let snapshot_ref = + try Db.VM.get_by_uuid ~__context ~uuid:snapshot_uuid + with _ -> + error "The snapshot UUID provided by snapwatchd is not a valid UUID."; + raise (Api_errors.Server_error 
(Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm ])) in + + Db.VM.set_transportable_snapshot_id ~__context ~self:snapshot_ref ~value:snapid; + Db.VM.set_name_label ~__context ~self:snapshot_ref ~value:new_name; + + (* update the snapshot-info field *) + Db.VM.remove_from_snapshot_info ~__context ~self:snapshot_ref ~key:Xapi_vm_clone.disk_snapshot_type; + Db.VM.add_to_snapshot_info ~__context ~self:snapshot_ref ~key:Xapi_vm_clone.disk_snapshot_type ~value:Xapi_vm_clone.quiesced; + + snapshot_ref + + | "snapshot-error" -> + (* If an error was occured we get the error type and return *) + let error_str = xs.Xenstore.Xs.read (snapshot_path ~xs ~domid "error") in + let error_code () = try xs.Xenstore.Xs.read (snapshot_path ~xs ~domid "error/code") with _ -> "0" in + error "wait_for_snapshot: %s" error_str; + if List.mem error_str [ + Api_errors.xen_vss_req_error_init_failed; + Api_errors.xen_vss_req_error_prov_not_loaded; + Api_errors.xen_vss_req_error_no_volumes_supported; + Api_errors.xen_vss_req_error_start_snapshot_set_failed; + Api_errors.xen_vss_req_error_adding_volume_to_snapset_failed; + Api_errors.xen_vss_req_error_preparing_writers; + Api_errors.xen_vss_req_error_creating_snapshot; + Api_errors.xen_vss_req_error_creating_snapshot_xml_string ] + then raise (Api_errors.Server_error (error_str, [ Ref.string_of vm; error_code () ])) + else raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_failed, [ Ref.string_of vm; error_str ])) + + | e -> + failwith (Printf.sprintf "wait_for_snapshot: unexpected result (%s)" e) (* We fail if the guest does not support quiesce mode. Normally, that should be detected *) (* dynamically by the xapi_vm_lifecycle.update_allowed_operations call. *) let snapshot_with_quiesce ~__context ~vm ~new_name = - debug "snapshot_with_quiesce: begin"; - let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in - let result = Xenstore.with_xs (fun xs -> - (* 1. 
We first check if the VM supports quiesce-mode *) - if quiesce_enabled ~xs ~domid ~vm - then begin Stdext.Pervasiveext.finally - (fun () -> - (* 2. if it the case, we can trigger a VSS snapshot *) - xs.Xenstore.Xs.rm (snapshot_cleanup_path ~xs ~domid); - xs.Xenstore.Xs.write (snapshot_path ~xs ~domid "action") "create-snapshot"; - - try - debug "Snapshot_with_quiesce: waiting for the VSS agent to proceed"; - let value = Watch.key_to_disappear (snapshot_path ~xs ~domid "action") in - Watch.wait_for ~xs ~timeout:(60.) value; - debug "Snapshot_with_quiesce: waiting for the VSS agent to take a snapshot"; - try wait_for_snapshot ~__context ~vm ~xs ~domid ~new_name - with Watch.Timeout _ -> - error "time-out while waiting for VSS snapshot"; - raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_timeout, [ Ref.string_of vm ])) - - with Watch.Timeout _ -> - error "VSS plugin does not respond"; - raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_plugin_does_not_respond, [ Ref.string_of vm ]))) - - (fun () -> - xs.Xenstore.Xs.rm (snapshot_cleanup_path ~xs ~domid)) - - end else begin - error "Quiesce snapshot not supported"; - raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_not_supported, [ Ref.string_of vm ])) - end) in - debug "snapshot_with_quiesce: end"; - result + debug "snapshot_with_quiesce: begin"; + let domid = Int64.to_int (Db.VM.get_domid ~__context ~self:vm) in + let result = Xenstore.with_xs (fun xs -> + (* 1. We first check if the VM supports quiesce-mode *) + if quiesce_enabled ~xs ~domid ~vm + then begin Stdext.Pervasiveext.finally + (fun () -> + (* 2. 
if it the case, we can trigger a VSS snapshot *) + xs.Xenstore.Xs.rm (snapshot_cleanup_path ~xs ~domid); + xs.Xenstore.Xs.write (snapshot_path ~xs ~domid "action") "create-snapshot"; + + try + debug "Snapshot_with_quiesce: waiting for the VSS agent to proceed"; + let value = Watch.key_to_disappear (snapshot_path ~xs ~domid "action") in + Watch.wait_for ~xs ~timeout:(60.) value; + debug "Snapshot_with_quiesce: waiting for the VSS agent to take a snapshot"; + try wait_for_snapshot ~__context ~vm ~xs ~domid ~new_name + with Watch.Timeout _ -> + error "time-out while waiting for VSS snapshot"; + raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_timeout, [ Ref.string_of vm ])) + + with Watch.Timeout _ -> + error "VSS plugin does not respond"; + raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_plugin_does_not_respond, [ Ref.string_of vm ]))) + + (fun () -> + xs.Xenstore.Xs.rm (snapshot_cleanup_path ~xs ~domid)) + + end else begin + error "Quiesce snapshot not supported"; + raise (Api_errors.Server_error (Api_errors.vm_snapshot_with_quiesce_not_supported, [ Ref.string_of vm ])) + end) in + debug "snapshot_with_quiesce: end"; + result (*************************************************************************************************) (* Checkpoint *) (*************************************************************************************************) let checkpoint ~__context ~vm ~new_name = - let power_state = Db.VM.get_power_state ~__context ~self:vm in - let snapshot_info = ref [] in - (* live-suspend the VM if the VM is running *) - if power_state = `Running - then begin - try - (* Save the state of the vm *) - snapshot_info := Xapi_vm_clone.snapshot_info ~power_state ~is_a_snapshot:true; - - (* Get all the VM's VDI's except CD's *) - let vbds = Db.VM.get_VBDs ~__context ~self:vm in - let vbds = List.filter (fun x -> Db.VBD.get_type ~__context ~self:x <> `CD) vbds in - let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) 
vbds in - - (* Get SR of each VDI *) - let vdi_sr = List.filter_map (fun vdi -> try Some (Db.VDI.get_SR ~__context ~self:vdi) with _ -> None) vdis in - let vdi_sr = List.setify vdi_sr in - let sr_records = List.map (fun self -> Db.SR.get_record_internal ~__context ~self) vdi_sr in - - (* Check if SR has snapshot feature *) - let sr_has_snapshot_feature sr = - if not Smint.(has_capability Vdi_snapshot (Xapi_sr_operations.features_of_sr ~__context sr)) then false - else true - in - - List.iter - (fun sr -> - if not (sr_has_snapshot_feature sr) - then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of vm])) ) - sr_records ; - (* suspend the VM *) - Xapi_xenops.suspend ~__context ~self:vm; - with - | Api_errors.Server_error(_, _) as e -> raise e - (* | _ -> raise (Api_errors.Server_error (Api_errors.vm_checkpoint_suspend_failed, [Ref.string_of vm])) *) - end; - - (* snapshot the disks and the suspend VDI *) - let snap, err = - if not (TaskHelper.is_cancelling ~__context) then begin - try Some (Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_checkpoint ~__context ~vm ~new_name ~snapshot_info_record:!snapshot_info), None - with e -> None, Some e - end - else None, None - in - - (* restore the power state of the VM *) - if power_state = `Running - then begin - let localhost = Helpers.get_localhost ~__context in - Db.VM.set_resident_on ~__context ~self:vm ~value:localhost; - debug "Performing a slow resume"; - Xapi_xenops.resume ~__context ~self:vm ~start_paused:false ~force:false; - end; - match snap with - | None -> begin - match err with - | None -> TaskHelper.raise_cancelled ~__context - | Some Api_errors.Server_error (x, _) when x=Api_errors.task_cancelled -> TaskHelper.raise_cancelled ~__context - | Some e -> raise e - end - | Some snap -> snap + let power_state = Db.VM.get_power_state ~__context ~self:vm in + let snapshot_info = ref [] in + (* live-suspend the VM if the VM is running *) + if power_state = `Running + then begin + try + (* 
Save the state of the vm *) + snapshot_info := Xapi_vm_clone.snapshot_info ~power_state ~is_a_snapshot:true; + + (* Get all the VM's VDI's except CD's *) + let vbds = Db.VM.get_VBDs ~__context ~self:vm in + let vbds = List.filter (fun x -> Db.VBD.get_type ~__context ~self:x <> `CD) vbds in + let vdis = List.map (fun self -> Db.VBD.get_VDI ~__context ~self) vbds in + + (* Get SR of each VDI *) + let vdi_sr = List.filter_map (fun vdi -> try Some (Db.VDI.get_SR ~__context ~self:vdi) with _ -> None) vdis in + let vdi_sr = List.setify vdi_sr in + let sr_records = List.map (fun self -> Db.SR.get_record_internal ~__context ~self) vdi_sr in + + (* Check if SR has snapshot feature *) + let sr_has_snapshot_feature sr = + if not Smint.(has_capability Vdi_snapshot (Xapi_sr_operations.features_of_sr ~__context sr)) then false + else true + in + + List.iter + (fun sr -> + if not (sr_has_snapshot_feature sr) + then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of vm])) ) + sr_records ; + (* suspend the VM *) + Xapi_xenops.suspend ~__context ~self:vm; + with + | Api_errors.Server_error(_, _) as e -> raise e + (* | _ -> raise (Api_errors.Server_error (Api_errors.vm_checkpoint_suspend_failed, [Ref.string_of vm])) *) + end; + + (* snapshot the disks and the suspend VDI *) + let snap, err = + if not (TaskHelper.is_cancelling ~__context) then begin + try Some (Xapi_vm_clone.clone Xapi_vm_clone.Disk_op_checkpoint ~__context ~vm ~new_name ~snapshot_info_record:!snapshot_info), None + with e -> None, Some e + end + else None, None + in + + (* restore the power state of the VM *) + if power_state = `Running + then begin + let localhost = Helpers.get_localhost ~__context in + Db.VM.set_resident_on ~__context ~self:vm ~value:localhost; + debug "Performing a slow resume"; + Xapi_xenops.resume ~__context ~self:vm ~start_paused:false ~force:false; + end; + match snap with + | None -> begin + match err with + | None -> TaskHelper.raise_cancelled ~__context + 
| Some Api_errors.Server_error (x, _) when x=Api_errors.task_cancelled -> TaskHelper.raise_cancelled ~__context + | Some e -> raise e + end + | Some snap -> snap (********************************************************************************) @@ -235,266 +235,266 @@ let checkpoint ~__context ~vm ~new_name = (* The following code have to run on the master as it manipulates the DB cache directly. *) let copy_vm_fields ~__context ~metadata ~dst ~do_not_copy ~default_values = - assert (Pool_role.is_master ()); - debug "copying metadata into %s" (Ref.string_of dst); - let db = Context.database_of __context in - let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in - List.iter - (fun (key,value) -> - let value = - if List.mem_assoc key default_values - then List.assoc key default_values - else value in - if not (List.mem key do_not_copy) - then DB.write_field db Db_names.vm (Ref.string_of dst) key value) - metadata - + assert (Pool_role.is_master ()); + debug "copying metadata into %s" (Ref.string_of dst); + let db = Context.database_of __context in + let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in + List.iter + (fun (key,value) -> + let value = + if List.mem_assoc key default_values + then List.assoc key default_values + else value in + if not (List.mem key do_not_copy) + then DB.write_field db Db_names.vm (Ref.string_of dst) key value) + metadata + let safe_destroy_vbd ~__context ~rpc ~session_id vbd = - if Db.is_valid_ref __context vbd then begin - Client.VBD.destroy rpc session_id vbd - end + if Db.is_valid_ref __context vbd then begin + Client.VBD.destroy rpc session_id vbd + end let safe_destroy_vif ~__context ~rpc ~session_id vif = - if Db.is_valid_ref __context vif then begin - Client.VIF.destroy rpc session_id vif - end + if Db.is_valid_ref __context vif then begin + Client.VIF.destroy rpc session_id vif + end let safe_destroy_vgpu ~__context ~rpc ~session_id vgpu = - if Db.is_valid_ref __context vgpu then begin - 
Client.VGPU.destroy rpc session_id vgpu - end + if Db.is_valid_ref __context vgpu then begin + Client.VGPU.destroy rpc session_id vgpu + end let safe_destroy_vdi ~__context ~rpc ~session_id vdi = - if Db.is_valid_ref __context vdi then begin - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if not (Db.SR.get_content_type ~__context ~self:sr = "iso") then - Client.VDI.destroy rpc session_id vdi - end - + if Db.is_valid_ref __context vdi then begin + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if not (Db.SR.get_content_type ~__context ~self:sr = "iso") then + Client.VDI.destroy rpc session_id vdi + end + (* Copy the VBDs and VIFs from a source VM to a dest VM and then delete the old disks. *) (* This operation destroys the data of the dest VM. *) let update_vifs_vbds_and_vgpus ~__context ~snapshot ~vm = - let snap_VBDs = Db.VM.get_VBDs ~__context ~self:snapshot in - let snap_VBDs_disk, snap_VBDs_CD = - List.partition - (fun vbd -> Db.VBD.get_type ~__context ~self:vbd = `Disk) - snap_VBDs - in - let snap_disks = List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) snap_VBDs_disk in - let snap_disks_snapshot_of = List.map (fun vdi -> Db.VDI.get_snapshot_of ~__context ~self:vdi) snap_disks in - let snap_VIFs = Db.VM.get_VIFs ~__context ~self:snapshot in - let snap_VGPUs = Db.VM.get_VGPUs ~__context ~self:snapshot in - let snap_suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:snapshot in - - let vm_VBDs = Db.VM.get_VBDs ~__context ~self:vm in - (* Filter VBDs to ensure that we don't read empty CDROMs *) - let vm_VBDs_disk = List.filter (fun vbd -> Db.VBD.get_type ~__context ~self:vbd = `Disk) vm_VBDs in - let vm_disks = List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) vm_VBDs_disk in - (* Filter out VM disks for which the snapshot does not have a corresponding - * disk - these disks will be left unattached after the revert is complete. 
*) - let vm_disks_with_snapshot = List.filter (fun vdi -> List.mem vdi snap_disks_snapshot_of) vm_disks in - let vm_VIFs = Db.VM.get_VIFs ~__context ~self:vm in - let vm_VGPUs = Db.VM.get_VGPUs ~__context ~self:vm in - let vm_suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:vm in - - (* clone all the disks of the snapshot *) - Helpers.call_api_functions ~__context (fun rpc session_id -> - - debug "Cleaning up the old VBDs and VDIs to have more free space"; - List.iter (safe_destroy_vbd ~__context ~rpc ~session_id) vm_VBDs; - List.iter (safe_destroy_vdi ~__context ~rpc ~session_id) (vm_suspend_VDI :: vm_disks_with_snapshot); - TaskHelper.set_progress ~__context 0.2; - - debug "Cloning the snapshotted disks"; - let driver_params = Xapi_vm_clone.make_driver_params () in - let cloned_disks = Xapi_vm_clone.safe_clone_disks rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_VBDs_disk driver_params in - let cloned_CDs = Xapi_vm_clone.safe_clone_disks rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_VBDs_CD driver_params in - TaskHelper.set_progress ~__context 0.5; - - debug "Updating the snapshot_of fields for relevant VDIs"; - List.iter2 - (fun snap_disk (_, cloned_disk, _) -> - (* For each snapshot disk which was just cloned: - * 1) Find the value of snapshot_of - * 2) Find all snapshots with the same snapshot_of - * 3) Update each of these snapshots so that their snapshot_of points - * to the new cloned disk. 
*) - let open Db_filter_types in - let snapshot_of = Db.VDI.get_snapshot_of ~__context ~self:snap_disk in - let all_snaps_in_tree = Db.VDI.get_refs_where ~__context - ~expr:(Eq (Field "snapshot_of", Literal (Ref.string_of snapshot_of))) - in - List.iter - (fun snapshot -> - Db.VDI.set_snapshot_of ~__context ~self:snapshot ~value:cloned_disk) - all_snaps_in_tree) - snap_disks - cloned_disks; - - debug "Cloning the suspend VDI if needed"; - let cloned_suspend_VDI = - if snap_suspend_VDI = Ref.null - then Ref.null - else Xapi_vm_clone.clone_single_vdi rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_suspend_VDI driver_params in - TaskHelper.set_progress ~__context 0.6; - - try - debug "Copying the VBDs"; - let (_ : [`VBD] Ref.t list) = - List.map (fun (vbd, vdi, _) -> Xapi_vbd_helpers.copy ~__context ~vm ~vdi vbd) (cloned_disks @ cloned_CDs) in - (* XXX: no VBDs stored in the LBR now *) - (* + let snap_VBDs = Db.VM.get_VBDs ~__context ~self:snapshot in + let snap_VBDs_disk, snap_VBDs_CD = + List.partition + (fun vbd -> Db.VBD.get_type ~__context ~self:vbd = `Disk) + snap_VBDs + in + let snap_disks = List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) snap_VBDs_disk in + let snap_disks_snapshot_of = List.map (fun vdi -> Db.VDI.get_snapshot_of ~__context ~self:vdi) snap_disks in + let snap_VIFs = Db.VM.get_VIFs ~__context ~self:snapshot in + let snap_VGPUs = Db.VM.get_VGPUs ~__context ~self:snapshot in + let snap_suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:snapshot in + + let vm_VBDs = Db.VM.get_VBDs ~__context ~self:vm in + (* Filter VBDs to ensure that we don't read empty CDROMs *) + let vm_VBDs_disk = List.filter (fun vbd -> Db.VBD.get_type ~__context ~self:vbd = `Disk) vm_VBDs in + let vm_disks = List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) vm_VBDs_disk in + (* Filter out VM disks for which the snapshot does not have a corresponding + * disk - these disks will be left unattached after the revert is complete. 
*) + let vm_disks_with_snapshot = List.filter (fun vdi -> List.mem vdi snap_disks_snapshot_of) vm_disks in + let vm_VIFs = Db.VM.get_VIFs ~__context ~self:vm in + let vm_VGPUs = Db.VM.get_VGPUs ~__context ~self:vm in + let vm_suspend_VDI = Db.VM.get_suspend_VDI ~__context ~self:vm in + + (* clone all the disks of the snapshot *) + Helpers.call_api_functions ~__context (fun rpc session_id -> + + debug "Cleaning up the old VBDs and VDIs to have more free space"; + List.iter (safe_destroy_vbd ~__context ~rpc ~session_id) vm_VBDs; + List.iter (safe_destroy_vdi ~__context ~rpc ~session_id) (vm_suspend_VDI :: vm_disks_with_snapshot); + TaskHelper.set_progress ~__context 0.2; + + debug "Cloning the snapshotted disks"; + let driver_params = Xapi_vm_clone.make_driver_params () in + let cloned_disks = Xapi_vm_clone.safe_clone_disks rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_VBDs_disk driver_params in + let cloned_CDs = Xapi_vm_clone.safe_clone_disks rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_VBDs_CD driver_params in + TaskHelper.set_progress ~__context 0.5; + + debug "Updating the snapshot_of fields for relevant VDIs"; + List.iter2 + (fun snap_disk (_, cloned_disk, _) -> + (* For each snapshot disk which was just cloned: + * 1) Find the value of snapshot_of + * 2) Find all snapshots with the same snapshot_of + * 3) Update each of these snapshots so that their snapshot_of points + * to the new cloned disk. 
*) + let open Db_filter_types in + let snapshot_of = Db.VDI.get_snapshot_of ~__context ~self:snap_disk in + let all_snaps_in_tree = Db.VDI.get_refs_where ~__context + ~expr:(Eq (Field "snapshot_of", Literal (Ref.string_of snapshot_of))) + in + List.iter + (fun snapshot -> + Db.VDI.set_snapshot_of ~__context ~self:snapshot ~value:cloned_disk) + all_snaps_in_tree) + snap_disks + cloned_disks; + + debug "Cloning the suspend VDI if needed"; + let cloned_suspend_VDI = + if snap_suspend_VDI = Ref.null + then Ref.null + else Xapi_vm_clone.clone_single_vdi rpc session_id Xapi_vm_clone.Disk_op_clone ~__context snap_suspend_VDI driver_params in + TaskHelper.set_progress ~__context 0.6; + + try + debug "Copying the VBDs"; + let (_ : [`VBD] Ref.t list) = + List.map (fun (vbd, vdi, _) -> Xapi_vbd_helpers.copy ~__context ~vm ~vdi vbd) (cloned_disks @ cloned_CDs) in + (* XXX: no VBDs stored in the LBR now *) + (* (* To include the case of checkpoints we must also update the VBD references in the LBR *) let snapshot = Helpers.get_boot_record ~__context ~self:vm in Helpers.set_boot_record ~__context ~self:vm { snapshot with API.vM_VBDs = vbds }; *) - TaskHelper.set_progress ~__context 0.7; + TaskHelper.set_progress ~__context 0.7; - debug "Update the suspend_VDI"; - Db.VM.set_suspend_VDI ~__context ~self:vm ~value:cloned_suspend_VDI; + debug "Update the suspend_VDI"; + Db.VM.set_suspend_VDI ~__context ~self:vm ~value:cloned_suspend_VDI; - debug "Cleaning up the old VIFs"; - List.iter (safe_destroy_vif ~__context ~rpc ~session_id) vm_VIFs; + debug "Cleaning up the old VIFs"; + List.iter (safe_destroy_vif ~__context ~rpc ~session_id) vm_VIFs; - debug "Setting up the new VIFs"; - let (_ : [`VIF] Ref.t list) = - List.map (fun vif -> Xapi_vif_helpers.copy ~__context ~vm ~preserve_mac_address:true vif) snap_VIFs in - TaskHelper.set_progress ~__context 0.8; + debug "Setting up the new VIFs"; + let (_ : [`VIF] Ref.t list) = + List.map (fun vif -> Xapi_vif_helpers.copy ~__context ~vm 
~preserve_mac_address:true vif) snap_VIFs in + TaskHelper.set_progress ~__context 0.8; - debug "Cleaning up the old VGPUs"; - List.iter (safe_destroy_vgpu ~__context ~rpc ~session_id) vm_VGPUs; + debug "Cleaning up the old VGPUs"; + List.iter (safe_destroy_vgpu ~__context ~rpc ~session_id) vm_VGPUs; - debug "Setting up the new VGPUs"; - let (_ : [`VGPU] Ref.t list) = - List.map (fun vgpu -> Xapi_vgpu.copy ~__context ~vm vgpu) snap_VGPUs in - TaskHelper.set_progress ~__context 0.9; - with e -> - error "Error while updating the new VBD, VDI, VIF and VGPU records. Cleaning up the cloned VDIs."; - let vdis = cloned_suspend_VDI :: (List.fold_left (fun acc (_, vdi, on_error_delete) -> if on_error_delete then vdi::acc else acc) [] cloned_disks) in - List.iter (safe_destroy_vdi ~__context ~rpc ~session_id) vdis; - raise e) + debug "Setting up the new VGPUs"; + let (_ : [`VGPU] Ref.t list) = + List.map (fun vgpu -> Xapi_vgpu.copy ~__context ~vm vgpu) snap_VGPUs in + TaskHelper.set_progress ~__context 0.9; + with e -> + error "Error while updating the new VBD, VDI, VIF and VGPU records. 
Cleaning up the cloned VDIs."; + let vdis = cloned_suspend_VDI :: (List.fold_left (fun acc (_, vdi, on_error_delete) -> if on_error_delete then vdi::acc else acc) [] cloned_disks) in + List.iter (safe_destroy_vdi ~__context ~rpc ~session_id) vdis; + raise e) let update_guest_metrics ~__context ~vm ~snapshot = - let snap_gm = Db.VM.get_guest_metrics ~__context ~self:snapshot in - let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in + let snap_gm = Db.VM.get_guest_metrics ~__context ~self:snapshot in + let vm_gm = Db.VM.get_guest_metrics ~__context ~self:vm in - debug "Reverting the guest metrics"; - if Db.is_valid_ref __context vm_gm then Db.VM_guest_metrics.destroy ~__context ~self:vm_gm; - if Db.is_valid_ref __context snap_gm then begin - let new_gm = Xapi_vm_helpers.copy_guest_metrics ~__context ~vm:snapshot in - Db.VM.set_guest_metrics ~__context ~self:vm ~value:new_gm - end + debug "Reverting the guest metrics"; + if Db.is_valid_ref __context vm_gm then Db.VM_guest_metrics.destroy ~__context ~self:vm_gm; + if Db.is_valid_ref __context snap_gm then begin + let new_gm = Xapi_vm_helpers.copy_guest_metrics ~__context ~vm:snapshot in + Db.VM.set_guest_metrics ~__context ~self:vm ~value:new_gm + end let update_parent ~__context ~vm ~snapshot = - Db.VM.set_parent ~__context ~self:vm ~value:snapshot + Db.VM.set_parent ~__context ~self:vm ~value:snapshot let do_not_copy = [ - Db_names.uuid; - Db_names.ref; - Db_names.suspend_VDI; - Db_names.power_state; - Db_names.parent; - Db_names.current_operations; - Db_names.allowed_operations; - Db_names.guest_metrics; - Db_names.resident_on; - Db_names.domid; - Db_names.protection_policy; - Db_names.scheduled_to_be_resident_on; - (* Global persistent fields should keep *) - "snapshots"; "tags"; "affinity"; - (* Current fields should remain to get destroyed during revert process *) - "consoles"; "VBDs"; "VIFs"; "VGPUs"; - (* Stateful fields that will be reset anyway *) - "power_state"; + Db_names.uuid; + Db_names.ref; + 
Db_names.suspend_VDI; + Db_names.power_state; + Db_names.parent; + Db_names.current_operations; + Db_names.allowed_operations; + Db_names.guest_metrics; + Db_names.resident_on; + Db_names.domid; + Db_names.protection_policy; + Db_names.scheduled_to_be_resident_on; + (* Global persistent fields should keep *) + "snapshots"; "tags"; "affinity"; + (* Current fields should remain to get destroyed during revert process *) + "consoles"; "VBDs"; "VIFs"; "VGPUs"; + (* Stateful fields that will be reset anyway *) + "power_state"; ] -let default_values = [ - Db_names.ha_always_run, "false"; +let default_values = [ + Db_names.ha_always_run, "false"; ] let extended_do_not_copy = [ - Db_names.name_label; - Db_names.is_a_snapshot; - Db_names.is_a_template; - Db_names.snapshot_of; - Db_names.snapshot_time; - Db_names.transportable_snapshot_id; - "children"; + Db_names.name_label; + Db_names.is_a_snapshot; + Db_names.is_a_template; + Db_names.snapshot_of; + Db_names.snapshot_time; + Db_names.transportable_snapshot_id; + "children"; ] @ do_not_copy (* This function has to be done on the master *) let revert_vm_fields ~__context ~snapshot ~vm = - let snap_metadata = Db.VM.get_snapshot_metadata ~__context ~self:snapshot in - let post_MNR = snap_metadata <> "" in - debug "Reverting the fields of %s to the ones of %s (%s)" (Ref.string_of vm) (Ref.string_of snapshot) (if post_MNR then "post-MNR" else "pre-MNR"); - let snap_metadata = - if post_MNR - then Helpers.vm_string_to_assoc snap_metadata - else Helpers.vm_string_to_assoc (Helpers.vm_to_string __context snapshot) in - let do_not_copy = - if post_MNR - then do_not_copy - else extended_do_not_copy in - copy_vm_fields ~__context ~metadata:snap_metadata ~dst:vm ~do_not_copy ~default_values; - TaskHelper.set_progress ~__context 0.1 + let snap_metadata = Db.VM.get_snapshot_metadata ~__context ~self:snapshot in + let post_MNR = snap_metadata <> "" in + debug "Reverting the fields of %s to the ones of %s (%s)" (Ref.string_of vm) 
(Ref.string_of snapshot) (if post_MNR then "post-MNR" else "pre-MNR"); + let snap_metadata = + if post_MNR + then Helpers.vm_string_to_assoc snap_metadata + else Helpers.vm_string_to_assoc (Helpers.vm_to_string __context snapshot) in + let do_not_copy = + if post_MNR + then do_not_copy + else extended_do_not_copy in + copy_vm_fields ~__context ~metadata:snap_metadata ~dst:vm ~do_not_copy ~default_values; + TaskHelper.set_progress ~__context 0.1 let revert ~__context ~snapshot ~vm = - debug "Reverting %s to %s" (Ref.string_of vm) (Ref.string_of snapshot); - - (* This is destructive and relatively fast. There's no point advertising cancel since it - will result in a broken VM. *) - TaskHelper.set_not_cancellable ~__context; - try - let power_state = Db.VM.get_power_state ~__context ~self:snapshot in - - update_vifs_vbds_and_vgpus ~__context ~snapshot ~vm; - update_guest_metrics ~__context ~snapshot ~vm; - update_parent ~__context ~snapshot ~vm; - TaskHelper.set_progress ~__context 1.; - - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:power_state; - debug "VM.revert done" - - with e -> - error "revert failed: %s" (Printexc.to_string e); - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; - match e with - | Api_errors.Server_error("SR_BACKEND_FAILURE_44", _) as e -> - error "Not enough space to create the new disk images"; - raise e - | Api_errors.Server_error("SR_BACKEND_FAILURE_109", _) as e -> - error "Snapshot chain too long"; - raise e - | _ -> raise (Api_errors.Server_error (Api_errors.vm_revert_failed, [Ref.string_of snapshot; Ref.string_of vm])) + debug "Reverting %s to %s" (Ref.string_of vm) (Ref.string_of snapshot); + + (* This is destructive and relatively fast. There's no point advertising cancel since it + will result in a broken VM. 
*) + TaskHelper.set_not_cancellable ~__context; + try + let power_state = Db.VM.get_power_state ~__context ~self:snapshot in + + update_vifs_vbds_and_vgpus ~__context ~snapshot ~vm; + update_guest_metrics ~__context ~snapshot ~vm; + update_parent ~__context ~snapshot ~vm; + TaskHelper.set_progress ~__context 1.; + + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:power_state; + debug "VM.revert done" + + with e -> + error "revert failed: %s" (Printexc.to_string e); + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; + match e with + | Api_errors.Server_error("SR_BACKEND_FAILURE_44", _) as e -> + error "Not enough space to create the new disk images"; + raise e + | Api_errors.Server_error("SR_BACKEND_FAILURE_109", _) as e -> + error "Snapshot chain too long"; + raise e + | _ -> raise (Api_errors.Server_error (Api_errors.vm_revert_failed, [Ref.string_of snapshot; Ref.string_of vm])) let create_vm_from_snapshot ~__context ~snapshot = - let old_vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in - try - let snapshots = - Db.VM.get_records_where __context - (Db_filter_types.Eq (Db_filter_types.Field "snapshot_of", Db_filter_types.Literal (Ref.string_of old_vm))) in - - let snap_metadata = Db.VM.get_snapshot_metadata ~__context ~self:snapshot in - let snap_metadata = Helpers.vm_string_to_assoc snap_metadata in - let vm_uuid = List.assoc Db_names.uuid snap_metadata in - let snap_record = Db.VM.get_record ~__context ~self:snapshot in - - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let new_vm = Xapi_vm_helpers.create_from_record_without_checking_licence_feature_for_vendor_device - ~__context rpc session_id snap_record in - begin try - Db.VM.set_uuid ~__context ~self:new_vm ~value:vm_uuid; - copy_vm_fields ~__context ~metadata:snap_metadata ~dst:new_vm ~do_not_copy:do_not_copy ~default_values; - List.iter (fun (snap,_) -> Db.VM.set_snapshot_of ~__context ~self:snap ~value:new_vm) snapshots; - new_vm - with e -> - 
debug "cleaning-up by deleting the VM %s" (Ref.string_of new_vm); - Client.VM.destroy rpc session_id new_vm; - raise e; - end) - with e -> - error "create_vm_from_snapshot failed: %s" (Printexc.to_string e); - raise (Api_errors.Server_error (Api_errors.vm_revert_failed, [Ref.string_of snapshot; Ref.string_of old_vm])) - + let old_vm = Db.VM.get_snapshot_of ~__context ~self:snapshot in + try + let snapshots = + Db.VM.get_records_where __context + (Db_filter_types.Eq (Db_filter_types.Field "snapshot_of", Db_filter_types.Literal (Ref.string_of old_vm))) in + + let snap_metadata = Db.VM.get_snapshot_metadata ~__context ~self:snapshot in + let snap_metadata = Helpers.vm_string_to_assoc snap_metadata in + let vm_uuid = List.assoc Db_names.uuid snap_metadata in + let snap_record = Db.VM.get_record ~__context ~self:snapshot in + + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let new_vm = Xapi_vm_helpers.create_from_record_without_checking_licence_feature_for_vendor_device + ~__context rpc session_id snap_record in + begin try + Db.VM.set_uuid ~__context ~self:new_vm ~value:vm_uuid; + copy_vm_fields ~__context ~metadata:snap_metadata ~dst:new_vm ~do_not_copy:do_not_copy ~default_values; + List.iter (fun (snap,_) -> Db.VM.set_snapshot_of ~__context ~self:snap ~value:new_vm) snapshots; + new_vm + with e -> + debug "cleaning-up by deleting the VM %s" (Ref.string_of new_vm); + Client.VM.destroy rpc session_id new_vm; + raise e; + end) + with e -> + error "create_vm_from_snapshot failed: %s" (Printexc.to_string e); + raise (Api_errors.Server_error (Api_errors.vm_revert_failed, [Ref.string_of snapshot; Ref.string_of old_vm])) + diff --git a/ocaml/xapi/xapi_vmpp.ml b/ocaml/xapi/xapi_vmpp.ml index b78b2455842..e6c49433c0d 100644 --- a/ocaml/xapi/xapi_vmpp.ml +++ b/ocaml/xapi/xapi_vmpp.ml @@ -15,93 +15,93 @@ module D = Debug.Make(struct let name="xapi" end) open D let raise_removed () = - raise (Api_errors.Server_error (Api_errors.message_removed, [])) + raise 
(Api_errors.Server_error (Api_errors.message_removed, [])) let protect_now ~__context ~vmpp = - raise_removed () + raise_removed () -let archive_now ~__context ~snapshot = - raise_removed () +let archive_now ~__context ~snapshot = + raise_removed () let add_to_recent_alerts ~__context ~vmpp ~value = - raise_removed () + raise_removed () let create_alert ~__context ~vmpp ~name ~priority ~body ~data = - raise_removed () + raise_removed () let get_alerts ~__context ~vmpp ~hours_from_now = - raise_removed () + raise_removed () let set_is_backup_running ~__context ~self ~value = - raise_removed () + raise_removed () let set_is_archive_running ~__context ~self ~value = - raise_removed () + raise_removed () let set_backup_frequency ~__context ~self ~value = - raise_removed () + raise_removed () let set_archive_frequency ~__context ~self ~value = - raise_removed () + raise_removed () let set_archive_target_type ~__context ~self ~value = - raise_removed () + raise_removed () let set_is_alarm_enabled ~__context ~self ~value = - raise_removed () + raise_removed () let set_backup_schedule ~__context ~self ~value = - raise_removed () + raise_removed () let add_to_backup_schedule ~__context ~self ~key ~value = - raise_removed () + raise_removed () let set_archive_target_config ~__context ~self ~value = - raise_removed () + raise_removed () let add_to_archive_target_config ~__context ~self ~key ~value = - raise_removed () + raise_removed () let set_archive_schedule ~__context ~self ~value = - raise_removed () + raise_removed () let add_to_archive_schedule ~__context ~self ~key ~value = - raise_removed () + raise_removed () let set_alarm_config ~__context ~self ~value = - raise_removed () + raise_removed () let add_to_alarm_config ~__context ~self ~key ~value = - raise_removed () + raise_removed () let remove_from_backup_schedule ~__context ~self ~key = - raise_removed () + raise_removed () let remove_from_archive_target_config ~__context ~self ~key = - raise_removed () + 
raise_removed () let remove_from_archive_schedule ~__context ~self ~key = - raise_removed () + raise_removed () let remove_from_alarm_config ~__context ~self ~key = - raise_removed () + raise_removed () let set_backup_last_run_time ~__context ~self ~value = - raise_removed () + raise_removed () let set_archive_last_run_time ~__context ~self ~value = - raise_removed () + raise_removed () let set_backup_retention_value ~__context ~self ~value = - raise_removed () + raise_removed () let create ~__context ~name_label ~name_description ~is_policy_enabled - ~backup_type ~backup_retention_value ~backup_frequency ~backup_schedule - ~archive_target_type ~archive_target_config ~archive_frequency ~archive_schedule - ~is_alarm_enabled ~alarm_config -: API.ref_VMPP = - raise_removed () - -let destroy ~__context ~self = - raise_removed () + ~backup_type ~backup_retention_value ~backup_frequency ~backup_schedule + ~archive_target_type ~archive_target_config ~archive_frequency ~archive_schedule + ~is_alarm_enabled ~alarm_config + : API.ref_VMPP = + raise_removed () + +let destroy ~__context ~self = + raise_removed () diff --git a/ocaml/xapi/xapi_vncsnapshot.ml b/ocaml/xapi/xapi_vncsnapshot.ml index 558ca071683..3b4064e3495 100644 --- a/ocaml/xapi/xapi_vncsnapshot.ml +++ b/ocaml/xapi/xapi_vncsnapshot.ml @@ -26,22 +26,22 @@ let vncsnapshot_handler (req: Request.t) s _ = Xapi_http.with_context "Taking snapshot of VM console" req s (fun __context -> try - let console = Console.console_of_request __context req in - Console.rbac_check_for_control_domain __context req console - Rbac_static.permission_http_get_vncsnapshot_host_console.Db_actions.role_name_label; - let tmp = Filename.temp_file "snapshot" "jpg" in - Stdext.Pervasiveext.finally - (fun () -> - let vnc_port = Int64.to_int (Db.Console.get_port ~__context ~self:console) in - - let pid = safe_close_and_exec None None None [] vncsnapshot - [ "-quiet"; "-allowblank" ; "-encodings"; "\"raw\""; - Printf.sprintf "%s:%d" "127.0.0.1" 
(vnc_port-5900); tmp ] in - waitpid_fail_if_bad_exit pid; - Http_svr.response_file s tmp - ) - (fun () -> try Unix.unlink tmp with _ -> ()) + let console = Console.console_of_request __context req in + Console.rbac_check_for_control_domain __context req console + Rbac_static.permission_http_get_vncsnapshot_host_console.Db_actions.role_name_label; + let tmp = Filename.temp_file "snapshot" "jpg" in + Stdext.Pervasiveext.finally + (fun () -> + let vnc_port = Int64.to_int (Db.Console.get_port ~__context ~self:console) in + + let pid = safe_close_and_exec None None None [] vncsnapshot + [ "-quiet"; "-allowblank" ; "-encodings"; "\"raw\""; + Printf.sprintf "%s:%d" "127.0.0.1" (vnc_port-5900); tmp ] in + waitpid_fail_if_bad_exit pid; + Http_svr.response_file s tmp + ) + (fun () -> try Unix.unlink tmp with _ -> ()) with e -> - req.Request.close <- true; - raise e + req.Request.close <- true; + raise e ) diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index d4c7392c699..f4a3db388dd 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -29,708 +29,708 @@ open Xenops_interface open Xapi_xenops_queue let event_wait queue_name dbg ?from p = - let finished = ref false in - let event_id = ref from in - let module Client = (val make_client queue_name : XENOPS) in - while not !finished do - let _, deltas, next_id = Client.UPDATES.get dbg !event_id (Some 30) in - event_id := Some next_id; - List.iter (fun d -> if p d then finished := true) deltas; - done + let finished = ref false in + let event_id = ref from in + let module Client = (val make_client queue_name : XENOPS) in + while not !finished do + let _, deltas, next_id = Client.UPDATES.get dbg !event_id (Some 30) in + event_id := Some next_id; + List.iter (fun d -> if p d then finished := true) deltas; + done let task_ended queue_name dbg id = - let module Client = (val make_client queue_name : XENOPS) in - match (Client.TASK.stat dbg id).Task.state with - | Task.Completed _ - | Task.Failed 
_ -> true - | Task.Pending _ -> false + let module Client = (val make_client queue_name : XENOPS) in + match (Client.TASK.stat dbg id).Task.state with + | Task.Completed _ + | Task.Failed _ -> true + | Task.Pending _ -> false let assume_task_succeeded queue_name dbg id = - let module Client = (val make_client queue_name : XENOPS) in - let t = Client.TASK.stat dbg id in - Client.TASK.destroy dbg id; - match t.Task.state with - | Task.Completed _ -> t - | Task.Failed x -> - let exn = exn_of_exnty (Exception.exnty_of_rpc x) in - let bt = Backtrace.t_of_sexp (Sexplib.Sexp.of_string t.Task.backtrace) in - Backtrace.add exn bt; - raise exn - | Task.Pending _ -> failwith "task pending" + let module Client = (val make_client queue_name : XENOPS) in + let t = Client.TASK.stat dbg id in + Client.TASK.destroy dbg id; + match t.Task.state with + | Task.Completed _ -> t + | Task.Failed x -> + let exn = exn_of_exnty (Exception.exnty_of_rpc x) in + let bt = Backtrace.t_of_sexp (Sexplib.Sexp.of_string t.Task.backtrace) in + Backtrace.add exn bt; + raise exn + | Task.Pending _ -> failwith "task pending" let wait_for_task queue_name dbg id = - let module Client = (val make_client queue_name : XENOPS) in - let finished = function - | Dynamic.Task id' -> - id = id' && (task_ended queue_name dbg id) - | _ -> - false in - let from = Client.UPDATES.last_id dbg in - if not(task_ended queue_name dbg id) then event_wait queue_name dbg ~from finished; - id + let module Client = (val make_client queue_name : XENOPS) in + let finished = function + | Dynamic.Task id' -> + id = id' && (task_ended queue_name dbg id) + | _ -> + false in + let from = Client.UPDATES.last_id dbg in + if not(task_ended queue_name dbg id) then event_wait queue_name dbg ~from finished; + id let xenapi_of_xenops_power_state = function - | Some Running -> `Running - | Some Halted -> `Halted - | Some Suspended -> `Suspended - | Some Paused -> `Paused - | None -> `Halted + | Some Running -> `Running + | Some Halted -> 
`Halted + | Some Suspended -> `Suspended + | Some Paused -> `Paused + | None -> `Halted let xenops_of_xenapi_power_state = function - | `Running -> Running - | `Halted -> Halted - | `Suspended -> Suspended - | `Paused -> Paused + | `Running -> Running + | `Halted -> Halted + | `Suspended -> Suspended + | `Paused -> Paused let xenops_vdi_locator_of_strings sr_uuid vdi_location = - Printf.sprintf "%s/%s" sr_uuid vdi_location + Printf.sprintf "%s/%s" sr_uuid vdi_location let xenops_vdi_locator ~__context ~self = - let sr = Db.VDI.get_SR ~__context ~self in - let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in - let vdi_location = Db.VDI.get_location ~__context ~self in - xenops_vdi_locator_of_strings sr_uuid vdi_location + let sr = Db.VDI.get_SR ~__context ~self in + let sr_uuid = Db.SR.get_uuid ~__context ~self:sr in + let vdi_location = Db.VDI.get_location ~__context ~self in + xenops_vdi_locator_of_strings sr_uuid vdi_location let disk_of_vdi ~__context ~self = - try Some (VDI (xenops_vdi_locator ~__context ~self)) with _ -> None + try Some (VDI (xenops_vdi_locator ~__context ~self)) with _ -> None let vdi_of_disk ~__context x = match String.split ~limit:2 '/' x with - | [ sr_uuid; location ] -> - let open Db_filter_types in - let sr = Db.SR.get_by_uuid ~__context ~uuid:sr_uuid in - begin match Db.VDI.get_records_where ~__context ~expr:(And((Eq (Field "location", Literal location)),Eq (Field "SR", Literal (Ref.string_of sr)))) with - | x :: _ -> Some x - | _ -> - error "Failed to find VDI: %s" x; - None - end - | _ -> - error "Failed to parse VDI name: %s" x; - None + | [ sr_uuid; location ] -> + let open Db_filter_types in + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr_uuid in + begin match Db.VDI.get_records_where ~__context ~expr:(And((Eq (Field "location", Literal location)),Eq (Field "SR", Literal (Ref.string_of sr)))) with + | x :: _ -> Some x + | _ -> + error "Failed to find VDI: %s" x; + None + end + | _ -> + error "Failed to parse VDI name: %s" x; + None 
let backend_of_network net = - if List.mem_assoc "backend_vm" net.API.network_other_config then begin - let backend_vm = List.assoc "backend_vm" net.API.network_other_config in - debug "Using VM %s as backend for VIF on network %s" backend_vm net.API.network_uuid; - Network.Remote (backend_vm, net.API.network_bridge) - end else - Network.Local net.API.network_bridge (* PR-1255 *) + if List.mem_assoc "backend_vm" net.API.network_other_config then begin + let backend_vm = List.assoc "backend_vm" net.API.network_other_config in + debug "Using VM %s as backend for VIF on network %s" backend_vm net.API.network_uuid; + Network.Remote (backend_vm, net.API.network_bridge) + end else + Network.Local net.API.network_bridge (* PR-1255 *) let find f map default feature = - try - let v = List.assoc feature map in - try f v - with e -> - warn "Failed to parse %s as value for %s: %s; Using default value." - v feature (Printexc.to_string e); - default - with Not_found -> default + try + let v = List.assoc feature map in + try f v + with e -> + warn "Failed to parse %s as value for %s: %s; Using default value." + v feature (Printexc.to_string e); + default + with Not_found -> default let string = find (fun x -> x) let int = find int_of_string let bool = find (function "1" -> true | "0" -> false | x -> bool_of_string x) let rtc_timeoffset_of_vm ~__context (vm, vm_t) vbds = - let timeoffset = string vm_t.API.vM_platform "0" Vm_platform.timeoffset in - (* If any VDI has on_boot = reset AND has a VDI.other_config:timeoffset - then we override the platform/timeoffset. This is needed because windows - stores the local time in timeoffset (the BIOS clock) but records whether - it has adjusted it for daylight savings in the system disk. If we reset - the system disk to an earlier snapshot then the BIOS clock needs to be - reset too. 
*) - let non_empty_vbds = List.filter (fun vbd -> not vbd.API.vBD_empty) vbds in - let vdis = List.map (fun vbd -> vbd.API.vBD_VDI) non_empty_vbds in - let vdis_with_timeoffset_to_be_reset_on_boot = - vdis - |> List.map (fun self -> (self, Db.VDI.get_record ~__context ~self)) - |> List.filter (fun (_, record) -> record.API.vDI_on_boot = `reset) - |> List.filter_map (fun (reference, record) -> - Opt.of_exception (fun () -> - reference, - List.assoc Vm_platform.timeoffset - record.API.vDI_other_config)) in - match vdis_with_timeoffset_to_be_reset_on_boot with - | [] -> - timeoffset - | [(reference, timeoffset)] -> - timeoffset - | reference_timeoffset_pairs -> - raise (Api_errors.Server_error ( - (Api_errors.vm_attached_to_more_than_one_vdi_with_timeoffset_marked_as_reset_on_boot), - (Ref.string_of vm) :: - (reference_timeoffset_pairs - |> List.map fst - |> List.map Ref.string_of))) + let timeoffset = string vm_t.API.vM_platform "0" Vm_platform.timeoffset in + (* If any VDI has on_boot = reset AND has a VDI.other_config:timeoffset + then we override the platform/timeoffset. This is needed because windows + stores the local time in timeoffset (the BIOS clock) but records whether + it has adjusted it for daylight savings in the system disk. If we reset + the system disk to an earlier snapshot then the BIOS clock needs to be + reset too. 
*) + let non_empty_vbds = List.filter (fun vbd -> not vbd.API.vBD_empty) vbds in + let vdis = List.map (fun vbd -> vbd.API.vBD_VDI) non_empty_vbds in + let vdis_with_timeoffset_to_be_reset_on_boot = + vdis + |> List.map (fun self -> (self, Db.VDI.get_record ~__context ~self)) + |> List.filter (fun (_, record) -> record.API.vDI_on_boot = `reset) + |> List.filter_map (fun (reference, record) -> + Opt.of_exception (fun () -> + reference, + List.assoc Vm_platform.timeoffset + record.API.vDI_other_config)) in + match vdis_with_timeoffset_to_be_reset_on_boot with + | [] -> + timeoffset + | [(reference, timeoffset)] -> + timeoffset + | reference_timeoffset_pairs -> + raise (Api_errors.Server_error ( + (Api_errors.vm_attached_to_more_than_one_vdi_with_timeoffset_marked_as_reset_on_boot), + (Ref.string_of vm) :: + (reference_timeoffset_pairs + |> List.map fst + |> List.map Ref.string_of))) (* /boot/ contains potentially sensitive files like xen-initrd, so we will only*) -(* allow directly booting guests from the subfolder /boot/guest/ *) +(* allow directly booting guests from the subfolder /boot/guest/ *) let allowed_dom0_directory_for_boot_files = "/boot/guest/" let is_boot_file_whitelisted filename = - let safe_str str = not (String.has_substr str "..") in - (* make sure the script prefix is the allowed dom0 directory *) - (String.startswith allowed_dom0_directory_for_boot_files filename) - (* avoid ..-style attacks and other weird things *) - &&(safe_str filename) + let safe_str str = not (String.has_substr str "..") in + (* make sure the script prefix is the allowed dom0 directory *) + (String.startswith allowed_dom0_directory_for_boot_files filename) + (* avoid ..-style attacks and other weird things *) + &&(safe_str filename) let builder_of_vm ~__context (vmref, vm) timeoffset pci_passthrough vgpu = - let open Vm in - - let video_mode = - if vgpu then Vgpu - else if (Vm_platform.is_true - ~key:Vm_platform.igd_passthru_key - ~platformdata:vm.API.vM_platform - 
~default:false) - then (IGD_passthrough GVT_d) - else - match string vm.API.vM_platform "cirrus" Vm_platform.vga with - | "std" -> Standard_VGA - | "cirrus" -> Cirrus - | x -> - error "Unknown platform/vga option: %s (expected 'std' or 'cirrus')" x; - Cirrus - in - - let pci_emulations = - let s = try Some (List.assoc "mtc_pci_emulations" vm.API.vM_other_config) with _ -> None in - match s with - | None -> [] - | Some x -> - try - let l = String.split ',' x in - List.map (String.strip String.isspace) l - with _ -> [] + let open Vm in + + let video_mode = + if vgpu then Vgpu + else if (Vm_platform.is_true + ~key:Vm_platform.igd_passthru_key + ~platformdata:vm.API.vM_platform + ~default:false) + then (IGD_passthrough GVT_d) + else + match string vm.API.vM_platform "cirrus" Vm_platform.vga with + | "std" -> Standard_VGA + | "cirrus" -> Cirrus + | x -> + error "Unknown platform/vga option: %s (expected 'std' or 'cirrus')" x; + Cirrus + in + + let pci_emulations = + let s = try Some (List.assoc "mtc_pci_emulations" vm.API.vM_other_config) with _ -> None in + match s with + | None -> [] + | Some x -> + try + let l = String.split ',' x in + List.map (String.strip String.isspace) l + with _ -> [] + in + + match Helpers.boot_method_of_vm ~__context ~vm with + | Helpers.HVM { Helpers.timeoffset = t } -> HVM { + hap = true; + shadow_multiplier = vm.API.vM_HVM_shadow_multiplier; + timeoffset = timeoffset; + video_mib = begin + (* For vGPU, make sure videoram is at least 16MiB. *) + let requested_videoram = int vm.API.vM_platform 4 "videoram" in + if video_mode = Vgpu + then max requested_videoram 16 + else requested_videoram + end; + video = video_mode; + acpi = bool vm.API.vM_platform true "acpi"; + serial = begin + (* The platform value should override the other_config value. If + * neither are set, use pty. 
*) + let key = "hvm_serial" in + let other_config_value = + try Some (List.assoc key vm.API.vM_other_config) + with Not_found -> None + in + let platform_value = + try Some (List.assoc key vm.API.vM_platform) + with Not_found -> None + in + match other_config_value, platform_value with + | None, None -> Some "pty" + | _, Some value -> Some value + | Some value, None -> Some value + end; + keymap = begin + try Some (List.assoc "keymap" vm.API.vM_platform) + with Not_found -> None + end; + vnc_ip = None (*None PR-1255*); + pci_emulations = pci_emulations; + pci_passthrough = pci_passthrough; + boot_order = string vm.API.vM_HVM_boot_params "cd" "order"; + qemu_disk_cmdline = bool vm.API.vM_platform false "qemu_disk_cmdline"; + qemu_stubdom = bool vm.API.vM_platform false "qemu_stubdom"; + } + | Helpers.DirectPV { Helpers.kernel = k; kernel_args = ka; ramdisk = initrd } -> + let k = if is_boot_file_whitelisted k then k else begin + debug "kernel %s is not in the whitelist: ignoring" k; + "" + end in + let initrd = Opt.map (fun x -> + if is_boot_file_whitelisted x then x else begin + debug "initrd %s is not in the whitelist: ignoring" k; + "" + end + ) initrd in + PV { + boot = Direct { kernel = k; cmdline = ka; ramdisk = initrd }; + framebuffer = bool vm.API.vM_platform false "pvfb"; + framebuffer_ip = None; (* None PR-1255 *) + vncterm = begin match List.mem_assoc "disable_pv_vnc" vm.API.vM_other_config with + |true -> false + |false -> true + end; + vncterm_ip = None (*None PR-1255*); + } + | Helpers.IndirectPV { Helpers.bootloader = b; extra_args = e; legacy_args = l; pv_bootloader_args = p; vdis = vdis } -> + PV { + boot = Indirect { bootloader = b; extra_args = e; legacy_args = l; bootloader_args = p; devices = List.filter_map (fun x -> disk_of_vdi ~__context ~self:x) vdis }; + framebuffer = bool vm.API.vM_platform false "pvfb"; + framebuffer_ip = None; (* None PR-1255 *) + vncterm = begin match List.mem_assoc "disable_pv_vnc" vm.API.vM_other_config with + |true 
-> false + |false -> true + end; + vncterm_ip = None (*None PR-1255*); + } + +module MD = struct + (** Convert between xapi DB records and xenopsd records *) + + let of_vbd ~__context ~vm ~vbd = + let hvm = vm.API.vM_HVM_boot_policy <> "" in + let device_number = Device_number.of_string hvm vbd.API.vBD_userdevice in + let open Vbd in + let ty = vbd.API.vBD_qos_algorithm_type in + let params = vbd.API.vBD_qos_algorithm_params in + + let qos_class params = + if List.mem_assoc "class" params then + match List.assoc "class" params with + | "highest" -> Highest + | "high" -> High + | "normal" -> Normal + | "low" -> Low + | "lowest" -> Lowest + | s -> + try Other (int_of_string s) + with _ -> + warn "Unknown VBD QoS scheduler class (try 'high' 'low' 'normal')"; + Normal + else + Normal in + let qos_scheduler params = + try + match List.assoc "sched" params with + | "rt" | "real-time" -> RealTime (qos_class params) + | "idle" -> Idle + | "best-effort" -> BestEffort (qos_class params) + | _ -> + warn "Unknown VBD QoS scheduler (try 'real-time' 'idle' 'best-effort')"; + BestEffort (qos_class params) + with Not_found -> + BestEffort (qos_class params) in + let qos = function + | "ionice" -> Some (Ionice (qos_scheduler params)) + | "" -> None + | x -> + warn "Unknown VBD QoS type: %s (try 'ionice')" x; + None in + + let other_config_keys ?(default=None) key = + let oc = vbd.API.vBD_other_config in + let k = key in + try + let v = List.assoc k oc in + [(k, v)] + with Not_found -> match default with None->[] | Some x->[(k, x)] in - match Helpers.boot_method_of_vm ~__context ~vm with - | Helpers.HVM { Helpers.timeoffset = t } -> HVM { - hap = true; - shadow_multiplier = vm.API.vM_HVM_shadow_multiplier; - timeoffset = timeoffset; - video_mib = begin - (* For vGPU, make sure videoram is at least 16MiB. 
*) - let requested_videoram = int vm.API.vM_platform 4 "videoram" in - if video_mode = Vgpu - then max requested_videoram 16 - else requested_videoram - end; - video = video_mode; - acpi = bool vm.API.vM_platform true "acpi"; - serial = begin - (* The platform value should override the other_config value. If - * neither are set, use pty. *) - let key = "hvm_serial" in - let other_config_value = - try Some (List.assoc key vm.API.vM_other_config) - with Not_found -> None - in - let platform_value = - try Some (List.assoc key vm.API.vM_platform) - with Not_found -> None - in - match other_config_value, platform_value with - | None, None -> Some "pty" - | _, Some value -> Some value - | Some value, None -> Some value - end; - keymap = begin - try Some (List.assoc "keymap" vm.API.vM_platform) - with Not_found -> None - end; - vnc_ip = None (*None PR-1255*); - pci_emulations = pci_emulations; - pci_passthrough = pci_passthrough; - boot_order = string vm.API.vM_HVM_boot_params "cd" "order"; - qemu_disk_cmdline = bool vm.API.vM_platform false "qemu_disk_cmdline"; - qemu_stubdom = bool vm.API.vM_platform false "qemu_stubdom"; - } - | Helpers.DirectPV { Helpers.kernel = k; kernel_args = ka; ramdisk = initrd } -> - let k = if is_boot_file_whitelisted k then k else begin - debug "kernel %s is not in the whitelist: ignoring" k; - "" - end in - let initrd = Opt.map (fun x -> - if is_boot_file_whitelisted x then x else begin - debug "initrd %s is not in the whitelist: ignoring" k; - "" - end - ) initrd in - PV { - boot = Direct { kernel = k; cmdline = ka; ramdisk = initrd }; - framebuffer = bool vm.API.vM_platform false "pvfb"; - framebuffer_ip = None; (* None PR-1255 *) - vncterm = begin match List.mem_assoc "disable_pv_vnc" vm.API.vM_other_config with - |true -> false - |false -> true - end; - vncterm_ip = None (*None PR-1255*); - } - | Helpers.IndirectPV { Helpers.bootloader = b; extra_args = e; legacy_args = l; pv_bootloader_args = p; vdis = vdis } -> - PV { - boot = Indirect 
{ bootloader = b; extra_args = e; legacy_args = l; bootloader_args = p; devices = List.filter_map (fun x -> disk_of_vdi ~__context ~self:x) vdis }; - framebuffer = bool vm.API.vM_platform false "pvfb"; - framebuffer_ip = None; (* None PR-1255 *) - vncterm = begin match List.mem_assoc "disable_pv_vnc" vm.API.vM_other_config with - |true -> false - |false -> true - end; - vncterm_ip = None (*None PR-1255*); - } + let in_range ~min ~max ~fallback values = + List.map (fun (k,v)-> k, + let value = try int_of_string v + with _-> + debug "%s: warning: value %s is not an integer. Using fallback value %d" k v fallback; + fallback + in + string_of_int ( + if value < min then min + else if value > max then max + else value + ) + ) + values + in -module MD = struct - (** Convert between xapi DB records and xenopsd records *) - - let of_vbd ~__context ~vm ~vbd = - let hvm = vm.API.vM_HVM_boot_policy <> "" in - let device_number = Device_number.of_string hvm vbd.API.vBD_userdevice in - let open Vbd in - let ty = vbd.API.vBD_qos_algorithm_type in - let params = vbd.API.vBD_qos_algorithm_params in - - let qos_class params = - if List.mem_assoc "class" params then - match List.assoc "class" params with - | "highest" -> Highest - | "high" -> High - | "normal" -> Normal - | "low" -> Low - | "lowest" -> Lowest - | s -> - try Other (int_of_string s) - with _ -> - warn "Unknown VBD QoS scheduler class (try 'high' 'low' 'normal')"; - Normal - else - Normal in - let qos_scheduler params = - try - match List.assoc "sched" params with - | "rt" | "real-time" -> RealTime (qos_class params) - | "idle" -> Idle - | "best-effort" -> BestEffort (qos_class params) - | _ -> - warn "Unknown VBD QoS scheduler (try 'real-time' 'idle' 'best-effort')"; - BestEffort (qos_class params) - with Not_found -> - BestEffort (qos_class params) in - let qos = function - | "ionice" -> Some (Ionice (qos_scheduler params)) - | "" -> None - | x -> - warn "Unknown VBD QoS type: %s (try 'ionice')" x; - None in - - let 
other_config_keys ?(default=None) key = - let oc = vbd.API.vBD_other_config in - let k = key in - try - let v = List.assoc k oc in - [(k, v)] - with Not_found -> match default with None->[] | Some x->[(k, x)] - in - - let in_range ~min ~max ~fallback values = - List.map (fun (k,v)-> k, - let value = try int_of_string v - with _-> - debug "%s: warning: value %s is not an integer. Using fallback value %d" k v fallback; - fallback - in - string_of_int ( - if value < min then min - else if value > max then max - else value - ) - ) - values - in - - let backend_kind_keys = other_config_keys Xapi_globs.vbd_backend_key in - let poll_duration_keys = in_range ~min:0 ~max:max_int - ~fallback:0 (* if user provides invalid integer, use 0 = disable polling *) - (other_config_keys Xapi_globs.vbd_polling_duration_key ~default:(Some (string_of_int !Xapi_globs.default_vbd3_polling_duration))) - in - let poll_idle_threshold_keys = in_range ~min:0 ~max:100 - ~fallback:50 (* if user provides invalid float, use 50 = default 50% *) - (other_config_keys Xapi_globs.vbd_polling_idle_threshold_key ~default:(Some (string_of_int !Xapi_globs.default_vbd3_polling_idle_threshold))) - in - - let backend_of_vbd vbd = - let vbd_oc = vbd.API.vBD_other_config in - if List.mem_assoc Xapi_globs.vbd_backend_local_key vbd_oc then - let path = List.assoc Xapi_globs.vbd_backend_local_key vbd_oc in - warn "Using local override for VBD backend: %s -> %s" vbd.API.vBD_uuid path; - Some (Local path) - else disk_of_vdi ~__context ~self:vbd.API.vBD_VDI - in - - { - id = (vm.API.vM_uuid, Device_number.to_linux_device device_number); - position = Some device_number; - mode = if vbd.API.vBD_mode = `RO then ReadOnly else ReadWrite; - backend = backend_of_vbd vbd; - ty = (match vbd.API.vBD_type with - | `Disk -> Disk - | `CD -> CDROM - | `Floppy -> Floppy); - unpluggable = vbd.API.vBD_unpluggable; - extra_backend_keys = backend_kind_keys @ poll_duration_keys @ poll_idle_threshold_keys; - extra_private_keys = []; - qos 
= qos ty; - persistent = (try Db.VDI.get_on_boot ~__context ~self:vbd.API.vBD_VDI = `persist with _ -> true); - } - - let of_vif ~__context ~vm ~vif = - let net = Db.Network.get_record ~__context ~self:vif.API.vIF_network in - let net_mtu = Int64.to_int (net.API.network_MTU) in - let mtu = - try - if List.mem_assoc "mtu" vif.API.vIF_other_config - then List.assoc "mtu" vif.API.vIF_other_config |> int_of_string - else net_mtu - with _ -> - error "Failed to parse VIF.other_config:mtu; defaulting to network.mtu"; - net_mtu in - let qos_type = vif.API.vIF_qos_algorithm_type in - let qos_params = vif.API.vIF_qos_algorithm_params in - let log_qos_failure reason = - warn "vif QoS failed: %s (vm=%s,vif=%s)" reason vm.API.vM_uuid vif.API.vIF_uuid in - let rate = match qos_type with - | "ratelimit" -> - let timeslice = - try Int64.of_string (List.assoc "timeslice_us" qos_params) - with _ -> 0L in - begin - try - let rate = Int64.of_string (List.assoc "kbps" qos_params) in - Some (rate, timeslice) - with - | Failure "int_of_string" -> - log_qos_failure "parameter \"kbps\" not an integer"; None - | Not_found -> - log_qos_failure "necessary parameter \"kbps\" not found"; None - | e -> - log_qos_failure (Printf.sprintf "unexpected error: %s" (Printexc.to_string e)); None - end - | "" -> None - | _ -> log_qos_failure (Printf.sprintf "unknown type: %s" qos_type); None in - let locking_mode = match vif.API.vIF_locking_mode, net.API.network_default_locking_mode with - | `network_default, `disabled -> Vif.Disabled - | `network_default, `unlocked -> Vif.Unlocked - | `locked, _ -> Vif.Locked { Vif.ipv4 = vif.API.vIF_ipv4_allowed; ipv6 = vif.API.vIF_ipv6_allowed } - | `unlocked, _ -> Vif.Unlocked - | `disabled, _ -> Vif.Disabled in - let carrier = - if !Xapi_globs.pass_through_pif_carrier then - (* We need to reflect the carrier of the local PIF on the network (if any) *) - let host = Helpers.get_localhost ~__context in - let pifs = Xapi_network_attach_helpers.get_local_pifs ~__context 
~network:vif.API.vIF_network ~host in - match pifs with - | [] -> true (* Internal network; consider as "always up" *) - | pif :: _ -> - try - let metrics = Db.PIF.get_metrics ~__context ~self:pif in - Db.PIF_metrics.get_carrier ~__context ~self:metrics - with _ -> true - else - (* If we don't need to reflect anything, the carrier is set to "true" *) - true - in - let ipv4_configuration = - match vif.API.vIF_ipv4_configuration_mode with - | `None -> Vif.Unspecified4 - | `Static -> - let gateway = if vif.API.vIF_ipv4_gateway = "" then None else Some vif.API.vIF_ipv4_gateway in - Vif.Static4 (vif.API.vIF_ipv4_addresses, gateway) - in - let ipv6_configuration = - match vif.API.vIF_ipv6_configuration_mode with - | `None -> Vif.Unspecified6 - | `Static -> - let gateway = if vif.API.vIF_ipv6_gateway = "" then None else Some vif.API.vIF_ipv6_gateway in - Vif.Static6 (vif.API.vIF_ipv6_addresses, gateway) - in - let open Vif in { - id = (vm.API.vM_uuid, vif.API.vIF_device); - position = int_of_string vif.API.vIF_device; - mac = vif.API.vIF_MAC; - carrier = carrier; - mtu = mtu; - rate = rate; - backend = backend_of_network net; - other_config = vif.API.vIF_other_config; - locking_mode = locking_mode; - extra_private_keys = [ - "vif-uuid", vif.API.vIF_uuid; - "network-uuid", net.API.network_uuid; - ]; - ipv4_configuration = ipv4_configuration; - ipv6_configuration = ipv6_configuration - } - - let pcis_of_vm ~__context (vmref, vm) = - let vgpu_pcidevs = Vgpuops.list_pcis_for_passthrough ~__context ~vm:vmref in - let devs = List.flatten (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs vgpu_pcidevs)) in - - (* The 'unmanaged' PCI devices are in the other_config key: *) - let other_pcidevs = Pciops.other_pcidevs_of_vm ~__context vm.API.vM_other_config in - - let unmanaged = List.flatten (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs other_pcidevs)) in - - let devs = devs @ unmanaged in - - let open Pci in - List.map - (fun (idx, (domain, bus, dev, fn)) -> { - id = 
(vm.API.vM_uuid, Printf.sprintf "%04x:%02x:%02x.%01x" domain bus dev fn); - position = idx; - address = {domain; bus; dev; fn}; - msitranslate = None; - power_mgmt = None; - }) - (List.combine (Range.to_list (Range.make 0 (List.length devs))) devs) - - let get_target_pci_address ~__context vgpu = - let pgpu = - if Db.is_valid_ref __context - vgpu.Db_actions.vGPU_scheduled_to_be_resident_on - then vgpu.Db_actions.vGPU_scheduled_to_be_resident_on - else vgpu.Db_actions.vGPU_resident_on - in - let pci = Db.PGPU.get_PCI ~__context ~self:pgpu in - let pci_address = Db.PCI.get_pci_id ~__context ~self:pci in - Xenops_interface.Pci.address_of_string pci_address - - let of_nvidia_vgpu ~__context vm vgpu = - let open Vgpu in - (* Get the PCI address. *) - let physical_pci_address = get_target_pci_address ~__context vgpu in - (* Get the vGPU config. *) - let vgpu_type = vgpu.Db_actions.vGPU_type in - let internal_config = - Db.VGPU_type.get_internal_config ~__context ~self:vgpu_type in - let config_file = - try List.assoc Xapi_globs.vgpu_config_key internal_config - with Not_found -> failwith "NVIDIA vGPU config file not specified" - in - let config_file = - try - let extra_args = - List.assoc Xapi_globs.vgpu_extra_args_key vm.API.vM_platform in - Printf.sprintf "%s,%s" config_file extra_args - with Not_found -> config_file - in - let implementation = - Nvidia { - physical_pci_address; - config_file; - } - in { - id = (vm.API.vM_uuid, vgpu.Db_actions.vGPU_device); - position = int_of_string vgpu.Db_actions.vGPU_device; - implementation; - } - - let of_gvt_g_vgpu ~__context vm vgpu = - let open Vgpu in - (* Get the PCI address. *) - let physical_pci_address = get_target_pci_address ~__context vgpu in - (* Get the vGPU config. 
*) - let vgpu_type = vgpu.Db_actions.vGPU_type in - let internal_config = - Db.VGPU_type.get_internal_config ~__context ~self:vgpu_type in - try - let implementation = - GVT_g { - physical_pci_address; - low_gm_sz = - List.assoc Xapi_globs.vgt_low_gm_sz internal_config - |> Int64.of_string; - high_gm_sz = - List.assoc Xapi_globs.vgt_high_gm_sz internal_config - |> Int64.of_string; - fence_sz = - List.assoc Xapi_globs.vgt_fence_sz internal_config - |> Int64.of_string; - monitor_config_file = - if List.mem_assoc Xapi_globs.vgt_monitor_config_file internal_config - then Some - (List.assoc Xapi_globs.vgt_monitor_config_file internal_config) - else None; - } - in { - id = (vm.API.vM_uuid, vgpu.Db_actions.vGPU_device); - position = int_of_string vgpu.Db_actions.vGPU_device; - implementation; - } - with - | Not_found -> failwith "Intel GVT-g settings not specified" - | Failure "int_of_string" -> - failwith "Intel GVT-g settings invalid" - - let vgpus_of_vm ~__context (vmref, vm) = - let open Vgpu in - if Vgpuops.vgpu_manual_setup_of_vm vm - && (List.mem_assoc Vm_platform.vgpu_pci_id vm.API.vM_platform) - && (List.mem_assoc Vm_platform.vgpu_config vm.API.vM_platform) - then begin - (* We're using the vGPU manual setup mode, so get the vGPU configuration - * from the VM platform keys. *) - let implementation = - Nvidia { - physical_pci_address = - Xenops_interface.Pci.address_of_string - (List.assoc Vm_platform.vgpu_pci_id vm.API.vM_platform); - config_file = List.assoc Vm_platform.vgpu_config vm.API.vM_platform; - } - in [{ - id = (vm.API.vM_uuid, "0"); - position = 0; - implementation; - }] - end else - List.fold_left - (fun acc vgpu -> - let vgpu_record = Db.VGPU.get_record_internal ~__context ~self:vgpu in - let implementation = - Db.VGPU_type.get_implementation ~__context - ~self:vgpu_record.Db_actions.vGPU_type - in - match implementation with - (* Passthrough VGPUs are dealt with in pcis_of_vm. 
*) - | `passthrough -> acc - | `nvidia -> - (of_nvidia_vgpu ~__context vm vgpu_record) :: acc - | `gvt_g -> - (of_gvt_g_vgpu ~__context vm vgpu_record) :: acc) - [] vm.API.vM_VGPUs - - let of_vm ~__context (vmref, vm) vbds pci_passthrough vgpu = - let on_crash_behaviour = function - | `preserve -> [ Vm.Pause ] - | `coredump_and_restart -> [ Vm.Coredump; Vm.Start ] - | `coredump_and_destroy -> [ Vm.Coredump; Vm.Shutdown ] - | `restart - | `rename_restart -> [ Vm.Start ] - | `destroy -> [ Vm.Shutdown ] in - let on_normal_exit_behaviour = function - | `restart -> [ Vm.Start ] - | `destroy -> [ Vm.Shutdown ] in - let open Vm in - let scheduler_params = - (* vcpu <-> pcpu affinity settings are stored here. - Format is either: - 1,2,3 :: all vCPUs receive this mask - 1,2,3; 4,5,6 :: vCPU n receives mask n. Unlisted vCPUs - receive first mask *) - let affinity = - try - List.map - (fun x -> List.map int_of_string (String.split ',' x)) - (String.split ';' (List.assoc "mask" vm.API.vM_VCPUs_params)) - with _ -> [] in - let localhost = Helpers.get_localhost ~__context in - let host_guest_VCPUs_params = Db.Host.get_guest_VCPUs_params ~__context ~self:localhost in - let host_cpu_mask = - try - List.map int_of_string (String.split ',' (List.assoc "mask" host_guest_VCPUs_params)) - with _ -> [] in - let affinity = - match affinity,host_cpu_mask with - | [],[] -> [] - | [],h -> [h] - | v,[] -> v - | affinity,mask -> - List.map - (fun vcpu_affinity -> - List.filter (fun x -> List.mem x mask) vcpu_affinity) affinity in - let priority = - let weight = - let default=256 in - try - let weight = List.assoc "weight" vm.API.vM_VCPUs_params in - int_of_string weight - with - | Not_found -> default - | e -> error "%s" (Printexc.to_string e); - debug "Could not parse weight value. Setting it to default value %d." 
default; default in - let cap = - let default=0 in - try - let cap = List.assoc "cap" vm.API.vM_VCPUs_params in - int_of_string cap - with - | Not_found -> default - | e -> error "%s" (Printexc.to_string e); - debug "Could not parse cap value. Setting it to default value %d." default; default in - Some ( weight , cap ) in - { priority = priority; affinity = affinity } in - - let platformdata = - Vm_platform.sanity_check - ~platformdata:vm.API.vM_platform - ~vcpu_max:vm.API.vM_VCPUs_max - ~vcpu_at_startup:vm.API.vM_VCPUs_at_startup - ~hvm:(Helpers.will_boot_hvm ~__context ~self:vmref) - ~filter_out_unknowns: - (not(Pool_features.is_enabled ~__context Features.No_platform_filter)) - in - (* Replace the timeoffset in the platform data too, to avoid confusion *) - let timeoffset = rtc_timeoffset_of_vm ~__context (vmref, vm) vbds in - let platformdata = - (Vm_platform.timeoffset, timeoffset) :: - (List.filter (fun (key, _) -> key <> Vm_platform.timeoffset) platformdata) in - let platformdata = - let genid = match vm.API.vM_generation_id with - | "0:0" -> Xapi_vm_helpers.vm_fresh_genid ~__context ~self:vmref - | _ -> vm.API.vM_generation_id in - (Vm_platform.generation_id, genid) :: platformdata - in - (* Add the CPUID feature set for the VM to the platform data. 
*) - let platformdata = - if not (List.mem_assoc Vm_platform.featureset platformdata) then - let featureset = - if List.mem_assoc Xapi_globs.cpu_info_features_key vm.API.vM_last_boot_CPU_flags then - List.assoc Xapi_globs.cpu_info_features_key vm.API.vM_last_boot_CPU_flags - else - failwith "VM's CPU featureset not initialised" - in - (Vm_platform.featureset, featureset) :: platformdata - else - platformdata - in - - let pci_msitranslate = true in (* default setting *) - (* CA-55754: allow VM.other_config:msitranslate to override the bus-wide setting *) - let pci_msitranslate = - if List.mem_assoc "msitranslate" vm.API.vM_other_config - then List.assoc "msitranslate" vm.API.vM_other_config = "1" - else pci_msitranslate in - (* CA-55754: temporarily disable msitranslate when GPU is passed through. *) - let pci_msitranslate = - if vm.API.vM_VGPUs <> [] then false else pci_msitranslate in - { - id = vm.API.vM_uuid; - name = vm.API.vM_name_label; - ssidref = 0l; - xsdata = vm.API.vM_xenstore_data; - platformdata = platformdata; - bios_strings = vm.API.vM_bios_strings; - ty = builder_of_vm ~__context (vmref, vm) timeoffset pci_passthrough vgpu; - suppress_spurious_page_faults = (try List.assoc "suppress-spurious-page-faults" vm.API.vM_other_config = "true" with _ -> false); - machine_address_size = (try Some(int_of_string (List.assoc "machine-address-size" vm.API.vM_other_config)) with _ -> None); - memory_static_max = vm.API.vM_memory_static_max; - memory_dynamic_max = vm.API.vM_memory_dynamic_max; - memory_dynamic_min = vm.API.vM_memory_dynamic_min; - vcpu_max = Int64.to_int vm.API.vM_VCPUs_max; - vcpus = Int64.to_int vm.API.vM_VCPUs_at_startup; - scheduler_params = scheduler_params; - on_crash = on_crash_behaviour vm.API.vM_actions_after_crash; - on_shutdown = on_normal_exit_behaviour vm.API.vM_actions_after_shutdown; - on_reboot = on_normal_exit_behaviour vm.API.vM_actions_after_reboot; - pci_msitranslate = pci_msitranslate; - pci_power_mgmt = false; - 
has_vendor_device = vm.API.vM_has_vendor_device - } + let backend_kind_keys = other_config_keys Xapi_globs.vbd_backend_key in + let poll_duration_keys = in_range ~min:0 ~max:max_int + ~fallback:0 (* if user provides invalid integer, use 0 = disable polling *) + (other_config_keys Xapi_globs.vbd_polling_duration_key ~default:(Some (string_of_int !Xapi_globs.default_vbd3_polling_duration))) + in + let poll_idle_threshold_keys = in_range ~min:0 ~max:100 + ~fallback:50 (* if user provides invalid float, use 50 = default 50% *) + (other_config_keys Xapi_globs.vbd_polling_idle_threshold_key ~default:(Some (string_of_int !Xapi_globs.default_vbd3_polling_idle_threshold))) + in + + let backend_of_vbd vbd = + let vbd_oc = vbd.API.vBD_other_config in + if List.mem_assoc Xapi_globs.vbd_backend_local_key vbd_oc then + let path = List.assoc Xapi_globs.vbd_backend_local_key vbd_oc in + warn "Using local override for VBD backend: %s -> %s" vbd.API.vBD_uuid path; + Some (Local path) + else disk_of_vdi ~__context ~self:vbd.API.vBD_VDI + in + + { + id = (vm.API.vM_uuid, Device_number.to_linux_device device_number); + position = Some device_number; + mode = if vbd.API.vBD_mode = `RO then ReadOnly else ReadWrite; + backend = backend_of_vbd vbd; + ty = (match vbd.API.vBD_type with + | `Disk -> Disk + | `CD -> CDROM + | `Floppy -> Floppy); + unpluggable = vbd.API.vBD_unpluggable; + extra_backend_keys = backend_kind_keys @ poll_duration_keys @ poll_idle_threshold_keys; + extra_private_keys = []; + qos = qos ty; + persistent = (try Db.VDI.get_on_boot ~__context ~self:vbd.API.vBD_VDI = `persist with _ -> true); + } + + let of_vif ~__context ~vm ~vif = + let net = Db.Network.get_record ~__context ~self:vif.API.vIF_network in + let net_mtu = Int64.to_int (net.API.network_MTU) in + let mtu = + try + if List.mem_assoc "mtu" vif.API.vIF_other_config + then List.assoc "mtu" vif.API.vIF_other_config |> int_of_string + else net_mtu + with _ -> + error "Failed to parse VIF.other_config:mtu; 
defaulting to network.mtu"; + net_mtu in + let qos_type = vif.API.vIF_qos_algorithm_type in + let qos_params = vif.API.vIF_qos_algorithm_params in + let log_qos_failure reason = + warn "vif QoS failed: %s (vm=%s,vif=%s)" reason vm.API.vM_uuid vif.API.vIF_uuid in + let rate = match qos_type with + | "ratelimit" -> + let timeslice = + try Int64.of_string (List.assoc "timeslice_us" qos_params) + with _ -> 0L in + begin + try + let rate = Int64.of_string (List.assoc "kbps" qos_params) in + Some (rate, timeslice) + with + | Failure "int_of_string" -> + log_qos_failure "parameter \"kbps\" not an integer"; None + | Not_found -> + log_qos_failure "necessary parameter \"kbps\" not found"; None + | e -> + log_qos_failure (Printf.sprintf "unexpected error: %s" (Printexc.to_string e)); None + end + | "" -> None + | _ -> log_qos_failure (Printf.sprintf "unknown type: %s" qos_type); None in + let locking_mode = match vif.API.vIF_locking_mode, net.API.network_default_locking_mode with + | `network_default, `disabled -> Vif.Disabled + | `network_default, `unlocked -> Vif.Unlocked + | `locked, _ -> Vif.Locked { Vif.ipv4 = vif.API.vIF_ipv4_allowed; ipv6 = vif.API.vIF_ipv6_allowed } + | `unlocked, _ -> Vif.Unlocked + | `disabled, _ -> Vif.Disabled in + let carrier = + if !Xapi_globs.pass_through_pif_carrier then + (* We need to reflect the carrier of the local PIF on the network (if any) *) + let host = Helpers.get_localhost ~__context in + let pifs = Xapi_network_attach_helpers.get_local_pifs ~__context ~network:vif.API.vIF_network ~host in + match pifs with + | [] -> true (* Internal network; consider as "always up" *) + | pif :: _ -> + try + let metrics = Db.PIF.get_metrics ~__context ~self:pif in + Db.PIF_metrics.get_carrier ~__context ~self:metrics + with _ -> true + else + (* If we don't need to reflect anything, the carrier is set to "true" *) + true + in + let ipv4_configuration = + match vif.API.vIF_ipv4_configuration_mode with + | `None -> Vif.Unspecified4 + | `Static -> + 
let gateway = if vif.API.vIF_ipv4_gateway = "" then None else Some vif.API.vIF_ipv4_gateway in + Vif.Static4 (vif.API.vIF_ipv4_addresses, gateway) + in + let ipv6_configuration = + match vif.API.vIF_ipv6_configuration_mode with + | `None -> Vif.Unspecified6 + | `Static -> + let gateway = if vif.API.vIF_ipv6_gateway = "" then None else Some vif.API.vIF_ipv6_gateway in + Vif.Static6 (vif.API.vIF_ipv6_addresses, gateway) + in + let open Vif in { + id = (vm.API.vM_uuid, vif.API.vIF_device); + position = int_of_string vif.API.vIF_device; + mac = vif.API.vIF_MAC; + carrier = carrier; + mtu = mtu; + rate = rate; + backend = backend_of_network net; + other_config = vif.API.vIF_other_config; + locking_mode = locking_mode; + extra_private_keys = [ + "vif-uuid", vif.API.vIF_uuid; + "network-uuid", net.API.network_uuid; + ]; + ipv4_configuration = ipv4_configuration; + ipv6_configuration = ipv6_configuration + } + + let pcis_of_vm ~__context (vmref, vm) = + let vgpu_pcidevs = Vgpuops.list_pcis_for_passthrough ~__context ~vm:vmref in + let devs = List.flatten (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs vgpu_pcidevs)) in + + (* The 'unmanaged' PCI devices are in the other_config key: *) + let other_pcidevs = Pciops.other_pcidevs_of_vm ~__context vm.API.vM_other_config in + + let unmanaged = List.flatten (List.map (fun (_, dev) -> dev) (Pciops.sort_pcidevs other_pcidevs)) in + + let devs = devs @ unmanaged in + + let open Pci in + List.map + (fun (idx, (domain, bus, dev, fn)) -> { + id = (vm.API.vM_uuid, Printf.sprintf "%04x:%02x:%02x.%01x" domain bus dev fn); + position = idx; + address = {domain; bus; dev; fn}; + msitranslate = None; + power_mgmt = None; + }) + (List.combine (Range.to_list (Range.make 0 (List.length devs))) devs) + + let get_target_pci_address ~__context vgpu = + let pgpu = + if Db.is_valid_ref __context + vgpu.Db_actions.vGPU_scheduled_to_be_resident_on + then vgpu.Db_actions.vGPU_scheduled_to_be_resident_on + else vgpu.Db_actions.vGPU_resident_on + 
in + let pci = Db.PGPU.get_PCI ~__context ~self:pgpu in + let pci_address = Db.PCI.get_pci_id ~__context ~self:pci in + Xenops_interface.Pci.address_of_string pci_address + + let of_nvidia_vgpu ~__context vm vgpu = + let open Vgpu in + (* Get the PCI address. *) + let physical_pci_address = get_target_pci_address ~__context vgpu in + (* Get the vGPU config. *) + let vgpu_type = vgpu.Db_actions.vGPU_type in + let internal_config = + Db.VGPU_type.get_internal_config ~__context ~self:vgpu_type in + let config_file = + try List.assoc Xapi_globs.vgpu_config_key internal_config + with Not_found -> failwith "NVIDIA vGPU config file not specified" + in + let config_file = + try + let extra_args = + List.assoc Xapi_globs.vgpu_extra_args_key vm.API.vM_platform in + Printf.sprintf "%s,%s" config_file extra_args + with Not_found -> config_file + in + let implementation = + Nvidia { + physical_pci_address; + config_file; + } + in { + id = (vm.API.vM_uuid, vgpu.Db_actions.vGPU_device); + position = int_of_string vgpu.Db_actions.vGPU_device; + implementation; + } + + let of_gvt_g_vgpu ~__context vm vgpu = + let open Vgpu in + (* Get the PCI address. *) + let physical_pci_address = get_target_pci_address ~__context vgpu in + (* Get the vGPU config. 
 *) + let vgpu_type = vgpu.Db_actions.vGPU_type in + let internal_config = + Db.VGPU_type.get_internal_config ~__context ~self:vgpu_type in + try + let implementation = + GVT_g { + physical_pci_address; + low_gm_sz = + List.assoc Xapi_globs.vgt_low_gm_sz internal_config + |> Int64.of_string; + high_gm_sz = + List.assoc Xapi_globs.vgt_high_gm_sz internal_config + |> Int64.of_string; + fence_sz = + List.assoc Xapi_globs.vgt_fence_sz internal_config + |> Int64.of_string; + monitor_config_file = + if List.mem_assoc Xapi_globs.vgt_monitor_config_file internal_config + then Some + (List.assoc Xapi_globs.vgt_monitor_config_file internal_config) + else None; + } + in { + id = (vm.API.vM_uuid, vgpu.Db_actions.vGPU_device); + position = int_of_string vgpu.Db_actions.vGPU_device; + implementation; + } + with + | Not_found -> failwith "Intel GVT-g settings not specified" + | Failure _ -> + failwith "Intel GVT-g settings invalid" + + let vgpus_of_vm ~__context (vmref, vm) = + let open Vgpu in + if Vgpuops.vgpu_manual_setup_of_vm vm + && (List.mem_assoc Vm_platform.vgpu_pci_id vm.API.vM_platform) + && (List.mem_assoc Vm_platform.vgpu_config vm.API.vM_platform) + then begin + (* We're using the vGPU manual setup mode, so get the vGPU configuration + * from the VM platform keys. *) + let implementation = + Nvidia { + physical_pci_address = + Xenops_interface.Pci.address_of_string + (List.assoc Vm_platform.vgpu_pci_id vm.API.vM_platform); + config_file = List.assoc Vm_platform.vgpu_config vm.API.vM_platform; + } + in [{ + id = (vm.API.vM_uuid, "0"); + position = 0; + implementation; + }] + end else + List.fold_left + (fun acc vgpu -> + let vgpu_record = Db.VGPU.get_record_internal ~__context ~self:vgpu in + let implementation = + Db.VGPU_type.get_implementation ~__context + ~self:vgpu_record.Db_actions.vGPU_type + in + match implementation with + (* Passthrough VGPUs are dealt with in pcis_of_vm. 
*) + | `passthrough -> acc + | `nvidia -> + (of_nvidia_vgpu ~__context vm vgpu_record) :: acc + | `gvt_g -> + (of_gvt_g_vgpu ~__context vm vgpu_record) :: acc) + [] vm.API.vM_VGPUs + + let of_vm ~__context (vmref, vm) vbds pci_passthrough vgpu = + let on_crash_behaviour = function + | `preserve -> [ Vm.Pause ] + | `coredump_and_restart -> [ Vm.Coredump; Vm.Start ] + | `coredump_and_destroy -> [ Vm.Coredump; Vm.Shutdown ] + | `restart + | `rename_restart -> [ Vm.Start ] + | `destroy -> [ Vm.Shutdown ] in + let on_normal_exit_behaviour = function + | `restart -> [ Vm.Start ] + | `destroy -> [ Vm.Shutdown ] in + let open Vm in + let scheduler_params = + (* vcpu <-> pcpu affinity settings are stored here. + Format is either: + 1,2,3 :: all vCPUs receive this mask + 1,2,3; 4,5,6 :: vCPU n receives mask n. Unlisted vCPUs + receive first mask *) + let affinity = + try + List.map + (fun x -> List.map int_of_string (String.split ',' x)) + (String.split ';' (List.assoc "mask" vm.API.vM_VCPUs_params)) + with _ -> [] in + let localhost = Helpers.get_localhost ~__context in + let host_guest_VCPUs_params = Db.Host.get_guest_VCPUs_params ~__context ~self:localhost in + let host_cpu_mask = + try + List.map int_of_string (String.split ',' (List.assoc "mask" host_guest_VCPUs_params)) + with _ -> [] in + let affinity = + match affinity,host_cpu_mask with + | [],[] -> [] + | [],h -> [h] + | v,[] -> v + | affinity,mask -> + List.map + (fun vcpu_affinity -> + List.filter (fun x -> List.mem x mask) vcpu_affinity) affinity in + let priority = + let weight = + let default=256 in + try + let weight = List.assoc "weight" vm.API.vM_VCPUs_params in + int_of_string weight + with + | Not_found -> default + | e -> error "%s" (Printexc.to_string e); + debug "Could not parse weight value. Setting it to default value %d." 
default; default in + let cap = + let default=0 in + try + let cap = List.assoc "cap" vm.API.vM_VCPUs_params in + int_of_string cap + with + | Not_found -> default + | e -> error "%s" (Printexc.to_string e); + debug "Could not parse cap value. Setting it to default value %d." default; default in + Some ( weight , cap ) in + { priority = priority; affinity = affinity } in + + let platformdata = + Vm_platform.sanity_check + ~platformdata:vm.API.vM_platform + ~vcpu_max:vm.API.vM_VCPUs_max + ~vcpu_at_startup:vm.API.vM_VCPUs_at_startup + ~hvm:(Helpers.will_boot_hvm ~__context ~self:vmref) + ~filter_out_unknowns: + (not(Pool_features.is_enabled ~__context Features.No_platform_filter)) + in + (* Replace the timeoffset in the platform data too, to avoid confusion *) + let timeoffset = rtc_timeoffset_of_vm ~__context (vmref, vm) vbds in + let platformdata = + (Vm_platform.timeoffset, timeoffset) :: + (List.filter (fun (key, _) -> key <> Vm_platform.timeoffset) platformdata) in + let platformdata = + let genid = match vm.API.vM_generation_id with + | "0:0" -> Xapi_vm_helpers.vm_fresh_genid ~__context ~self:vmref + | _ -> vm.API.vM_generation_id in + (Vm_platform.generation_id, genid) :: platformdata + in + (* Add the CPUID feature set for the VM to the platform data. 
*) + let platformdata = + if not (List.mem_assoc Vm_platform.featureset platformdata) then + let featureset = + if List.mem_assoc Xapi_globs.cpu_info_features_key vm.API.vM_last_boot_CPU_flags then + List.assoc Xapi_globs.cpu_info_features_key vm.API.vM_last_boot_CPU_flags + else + failwith "VM's CPU featureset not initialised" + in + (Vm_platform.featureset, featureset) :: platformdata + else + platformdata + in + + let pci_msitranslate = true in (* default setting *) + (* CA-55754: allow VM.other_config:msitranslate to override the bus-wide setting *) + let pci_msitranslate = + if List.mem_assoc "msitranslate" vm.API.vM_other_config + then List.assoc "msitranslate" vm.API.vM_other_config = "1" + else pci_msitranslate in + (* CA-55754: temporarily disable msitranslate when GPU is passed through. *) + let pci_msitranslate = + if vm.API.vM_VGPUs <> [] then false else pci_msitranslate in + { + id = vm.API.vM_uuid; + name = vm.API.vM_name_label; + ssidref = 0l; + xsdata = vm.API.vM_xenstore_data; + platformdata = platformdata; + bios_strings = vm.API.vM_bios_strings; + ty = builder_of_vm ~__context (vmref, vm) timeoffset pci_passthrough vgpu; + suppress_spurious_page_faults = (try List.assoc "suppress-spurious-page-faults" vm.API.vM_other_config = "true" with _ -> false); + machine_address_size = (try Some(int_of_string (List.assoc "machine-address-size" vm.API.vM_other_config)) with _ -> None); + memory_static_max = vm.API.vM_memory_static_max; + memory_dynamic_max = vm.API.vM_memory_dynamic_max; + memory_dynamic_min = vm.API.vM_memory_dynamic_min; + vcpu_max = Int64.to_int vm.API.vM_VCPUs_max; + vcpus = Int64.to_int vm.API.vM_VCPUs_at_startup; + scheduler_params = scheduler_params; + on_crash = on_crash_behaviour vm.API.vM_actions_after_crash; + on_shutdown = on_normal_exit_behaviour vm.API.vM_actions_after_shutdown; + on_reboot = on_normal_exit_behaviour vm.API.vM_actions_after_reboot; + pci_msitranslate = pci_msitranslate; + pci_power_mgmt = false; + 
has_vendor_device = vm.API.vM_has_vendor_device + } end @@ -739,383 +739,383 @@ open Xenops_interface open Fun module Guest_agent_features = struct - module Xapi = struct - let auto_update_enabled = "auto_update_enabled" - let auto_update_url = "auto_update_url" - end - - module Xenopsd = struct - let auto_update_enabled = "enabled" - let auto_update_url = "update_url" - - let enabled = "1" - let disabled = "0" - end - - let auto_update_parameters_of_config config = - let auto_update_enabled = - match - if List.mem_assoc Xapi.auto_update_enabled config - then Some - (* bool_of_string should be safe as the setter in xapi_pool.ml only - * allows "true" or "false" to be put into the database. *) - (bool_of_string (List.assoc Xapi.auto_update_enabled config)) - else None - with - | Some true -> [Xenopsd.auto_update_enabled, Xenopsd.enabled] - | Some false -> [Xenopsd.auto_update_enabled, Xenopsd.disabled] - | None -> [] - in - let auto_update_url = - if List.mem_assoc Xapi.auto_update_url config - then [Xenopsd.auto_update_url, List.assoc Xapi.auto_update_url config] - else [] - in - auto_update_enabled @ auto_update_url - - let of_config ~__context config = - let open Features in - let vss = - let name = Features.name_of_feature VSS in - let licensed = Pool_features.is_enabled ~__context VSS in - let parameters = [] in - Host.({ - name; - licensed; - parameters; - }) - in - let guest_agent_auto_update = - let name = Features.name_of_feature Guest_agent_auto_update in - let licensed = - Pool_features.is_enabled ~__context Guest_agent_auto_update in - let parameters = auto_update_parameters_of_config config in - Host.({ - name; - licensed; - parameters; - }) - in - [vss; guest_agent_auto_update] + module Xapi = struct + let auto_update_enabled = "auto_update_enabled" + let auto_update_url = "auto_update_url" + end + + module Xenopsd = struct + let auto_update_enabled = "enabled" + let auto_update_url = "update_url" + + let enabled = "1" + let disabled = "0" + end + + 
let auto_update_parameters_of_config config = + let auto_update_enabled = + match + if List.mem_assoc Xapi.auto_update_enabled config + then Some + (* bool_of_string should be safe as the setter in xapi_pool.ml only + * allows "true" or "false" to be put into the database. *) + (bool_of_string (List.assoc Xapi.auto_update_enabled config)) + else None + with + | Some true -> [Xenopsd.auto_update_enabled, Xenopsd.enabled] + | Some false -> [Xenopsd.auto_update_enabled, Xenopsd.disabled] + | None -> [] + in + let auto_update_url = + if List.mem_assoc Xapi.auto_update_url config + then [Xenopsd.auto_update_url, List.assoc Xapi.auto_update_url config] + else [] + in + auto_update_enabled @ auto_update_url + + let of_config ~__context config = + let open Features in + let vss = + let name = Features.name_of_feature VSS in + let licensed = Pool_features.is_enabled ~__context VSS in + let parameters = [] in + Host.({ + name; + licensed; + parameters; + }) + in + let guest_agent_auto_update = + let name = Features.name_of_feature Guest_agent_auto_update in + let licensed = + Pool_features.is_enabled ~__context Guest_agent_auto_update in + let parameters = auto_update_parameters_of_config config in + Host.({ + name; + licensed; + parameters; + }) + in + [vss; guest_agent_auto_update] end let apply_guest_agent_config ~__context config = - let dbg = Context.string_of_task __context in - let features = Guest_agent_features.of_config ~__context config in - let module Client = (val make_client (default_xenopsd ()): XENOPS) in - Client.HOST.update_guest_agent_features dbg features + let dbg = Context.string_of_task __context in + let features = Guest_agent_features.of_config ~__context config in + let module Client = (val make_client (default_xenopsd ()): XENOPS) in + Client.HOST.update_guest_agent_features dbg features (* If a VM was suspended pre-xenopsd it won't have a last_booted_record of the format understood by xenopsd. 
*) (* If we can parse the last_booted_record according to the old syntax, update it before attempting to resume. *) let generate_xenops_state ~__context ~self ~vm ~vbds ~pcis ~vgpus = - try - let vm_to_resume = { - (Helpers.parse_boot_record vm.API.vM_last_booted_record) with - API.vM_VBDs = vm.API.vM_VBDs - } in - debug "Successfully parsed old last_booted_record format - translating to new format so that xenopsd can resume the VM."; - let module Client = (val make_client (queue_of_vmr vm): XENOPS) in - let vm = MD.of_vm ~__context - (self, vm_to_resume) vbds (pcis <> []) (vgpus <> []) - in - let dbg = Context.string_of_task __context in - Client.VM.generate_state_string dbg vm - with Xml.Error _ -> - debug "last_booted_record is not of the old format, so we should be able to resume the VM."; - vm.API.vM_last_booted_record + try + let vm_to_resume = { + (Helpers.parse_boot_record vm.API.vM_last_booted_record) with + API.vM_VBDs = vm.API.vM_VBDs + } in + debug "Successfully parsed old last_booted_record format - translating to new format so that xenopsd can resume the VM."; + let module Client = (val make_client (queue_of_vmr vm): XENOPS) in + let vm = MD.of_vm ~__context + (self, vm_to_resume) vbds (pcis <> []) (vgpus <> []) + in + let dbg = Context.string_of_task __context in + Client.VM.generate_state_string dbg vm + with Xml.Error _ -> + debug "last_booted_record is not of the old format, so we should be able to resume the VM."; + vm.API.vM_last_booted_record (* Create an instance of Metadata.t, suitable for uploading to the xenops service *) let create_metadata ~__context ~upgrade ~self = - let vm = Db.VM.get_record ~__context ~self in - let vbds = List.filter (fun vbd -> vbd.API.vBD_currently_attached) - (List.map (fun self -> Db.VBD.get_record ~__context ~self) vm.API.vM_VBDs) in - let vbds' = List.map (fun vbd -> MD.of_vbd ~__context ~vm ~vbd) vbds in - let vifs = List.filter (fun vif -> vif.API.vIF_currently_attached) - (List.map (fun self -> 
Db.VIF.get_record ~__context ~self) vm.API.vM_VIFs) in - let vifs' = List.map (fun vif -> MD.of_vif ~__context ~vm ~vif) vifs in - let pcis = MD.pcis_of_vm ~__context (self, vm) in - let vgpus = MD.vgpus_of_vm ~__context (self, vm) in - let domains = - (* For suspended VMs, we may need to translate the last_booted_record from the old format. *) - if vm.API.vM_power_state = `Suspended || upgrade then begin - (* We need to recall the currently_attached devices *) - Some(generate_xenops_state ~__context ~self ~vm ~vbds ~pcis ~vgpus) - end else None in - let open Metadata in { - vm = MD.of_vm ~__context (self, vm) vbds (pcis <> []) (vgpus <> []); - vbds = vbds'; - vifs = vifs'; - pcis = pcis; - vgpus = vgpus; - domains = domains - } + let vm = Db.VM.get_record ~__context ~self in + let vbds = List.filter (fun vbd -> vbd.API.vBD_currently_attached) + (List.map (fun self -> Db.VBD.get_record ~__context ~self) vm.API.vM_VBDs) in + let vbds' = List.map (fun vbd -> MD.of_vbd ~__context ~vm ~vbd) vbds in + let vifs = List.filter (fun vif -> vif.API.vIF_currently_attached) + (List.map (fun self -> Db.VIF.get_record ~__context ~self) vm.API.vM_VIFs) in + let vifs' = List.map (fun vif -> MD.of_vif ~__context ~vm ~vif) vifs in + let pcis = MD.pcis_of_vm ~__context (self, vm) in + let vgpus = MD.vgpus_of_vm ~__context (self, vm) in + let domains = + (* For suspended VMs, we may need to translate the last_booted_record from the old format. 
*) + if vm.API.vM_power_state = `Suspended || upgrade then begin + (* We need to recall the currently_attached devices *) + Some(generate_xenops_state ~__context ~self ~vm ~vbds ~pcis ~vgpus) + end else None in + let open Metadata in { + vm = MD.of_vm ~__context (self, vm) vbds (pcis <> []) (vgpus <> []); + vbds = vbds'; + vifs = vifs'; + pcis = pcis; + vgpus = vgpus; + domains = domains + } let id_of_vm ~__context ~self = Db.VM.get_uuid ~__context ~self let vm_of_id ~__context uuid = Db.VM.get_by_uuid ~__context ~uuid let vm_exists_in_xenopsd queue_name dbg id = - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.exists dbg id + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.exists dbg id let string_of_exn = function - | Api_errors.Server_error(code, params) -> Printf.sprintf "%s [ %s ]" code (String.concat "; " params) - | e -> Printexc.to_string e + | Api_errors.Server_error(code, params) -> Printf.sprintf "%s [ %s ]" code (String.concat "; " params) + | e -> Printexc.to_string e (* Serialise updates to the metadata caches *) let metadata_m = Mutex.create () module Xapi_cache = struct - (** Keep a cache of the "xenops-translation" of XenAPI VM configuration, - updated whenever we receive an event from xapi. *) + (** Keep a cache of the "xenops-translation" of XenAPI VM configuration, + updated whenever we receive an event from xapi. 
*) - let cache = Hashtbl.create 10 (* indexed by Vm.id *) + let cache = Hashtbl.create 10 (* indexed by Vm.id *) - let _register_nolock id initial_value = - debug "xapi_cache: creating cache for %s" id; - if not(Hashtbl.mem cache id) || (Hashtbl.find cache id = None) - then Hashtbl.replace cache id initial_value + let _register_nolock id initial_value = + debug "xapi_cache: creating cache for %s" id; + if not(Hashtbl.mem cache id) || (Hashtbl.find cache id = None) + then Hashtbl.replace cache id initial_value - let _unregister_nolock id = - debug "xapi_cache: deleting cache for %s" id; - Hashtbl.remove cache id + let _unregister_nolock id = + debug "xapi_cache: deleting cache for %s" id; + Hashtbl.remove cache id - let find_nolock id = - if Hashtbl.mem cache id - then Hashtbl.find cache id - else None + let find_nolock id = + if Hashtbl.mem cache id + then Hashtbl.find cache id + else None - let update_nolock id t = - if Hashtbl.mem cache id then begin - debug "xapi_cache: updating cache for %s" id; - Hashtbl.replace cache id t - end else debug "xapi_cache: not updating cache for %s" id + let update_nolock id t = + if Hashtbl.mem cache id then begin + debug "xapi_cache: updating cache for %s" id; + Hashtbl.replace cache id t + end else debug "xapi_cache: not updating cache for %s" id - let list_nolock () = Hashtbl.fold (fun id _ acc -> id :: acc) cache [] + let list_nolock () = Hashtbl.fold (fun id _ acc -> id :: acc) cache [] end module Xenops_cache = struct - (** Remember the last events received from xenopsd so we can compute - field-level differences. This allows us to minimise the number of - database writes we issue upwards. 
*) - - type t = { - vm: Vm.state option; - vbds: (Vbd.id * Vbd.state) list; - vifs: (Vif.id * Vif.state) list; - pcis: (Pci.id * Pci.state) list; - vgpus: (Vgpu.id * Vgpu.state) list; - } - let empty = { - vm = None; - vbds = []; - vifs = []; - pcis = []; - vgpus = []; - } - - let cache = Hashtbl.create 10 (* indexed by Vm.id *) - - let _register_nolock id = - debug "xenops_cache: creating empty cache for %s" id; - Hashtbl.replace cache id empty - - let _unregister_nolock id = - debug "xenops_cache: deleting cache for %s" id; - Hashtbl.remove cache id - - let find id : t option = - Mutex.execute metadata_m - (fun () -> - if Hashtbl.mem cache id - then Some (Hashtbl.find cache id) - else None - ) - - let find_vm id : Vm.state option = - match find id with - | Some { vm = Some vm } -> Some vm - | _ -> None - - let find_vbd id : Vbd.state option = - match find (fst id) with - | Some { vbds = vbds } -> - if List.mem_assoc id vbds - then Some (List.assoc id vbds) - else None - | _ -> None - - let find_vif id : Vif.state option = - match find (fst id) with - | Some { vifs = vifs } -> - if List.mem_assoc id vifs - then Some (List.assoc id vifs) - else None - | _ -> None - - let find_pci id : Pci.state option = - match find (fst id) with - | Some { pcis = pcis } -> - if List.mem_assoc id pcis - then Some (List.assoc id pcis) - else None - | _ -> None - - let find_vgpu id : Vgpu.state option = - match find (fst id) with - | Some { vgpus = vgpus } -> - if List.mem_assoc id vgpus - then Some (List.assoc id vgpus) - else None - | _ -> None - - let update id t = - Mutex.execute metadata_m - (fun () -> - if Hashtbl.mem cache id - then Hashtbl.replace cache id t - else debug "xenops_cache: Not updating cache for unregistered VM %s" id - ) - - let update_vbd id info = - let existing = Opt.default empty (find (fst id)) in - let vbds' = List.filter (fun (vbd_id, _) -> vbd_id <> id) existing.vbds in - update (fst id) { existing with vbds = Opt.default vbds' (Opt.map (fun info -> (id, 
info) :: vbds') info) } - - let update_vif id info = - let existing = Opt.default empty (find (fst id)) in - let vifs' = List.filter (fun (vif_id, _) -> vif_id <> id) existing.vifs in - update (fst id) { existing with vifs = Opt.default vifs' (Opt.map (fun info -> (id, info) :: vifs') info) } - - let update_pci id info = - let existing = Opt.default empty (find (fst id)) in - let pcis' = List.filter (fun (pci_id, _) -> pci_id <> id) existing.pcis in - update (fst id) { existing with pcis = Opt.default pcis' (Opt.map (fun info -> (id, info) :: pcis') info) } - - let update_vgpu id info = - let existing = Opt.default empty (find (fst id)) in - let vgpus' = List.filter (fun (vgpu_id, _) -> vgpu_id <> id) existing.vgpus in - update (fst id) { existing with vgpus = Opt.default vgpus' (Opt.map (fun info -> (id, info) :: vgpus') info) } - - let update_vm id info = - let existing = Opt.default empty (find id) in - update id { existing with vm = info } - - let list_nolock () = Hashtbl.fold (fun id _ acc -> id :: acc) cache [] + (** Remember the last events received from xenopsd so we can compute + field-level differences. This allows us to minimise the number of + database writes we issue upwards. 
*) + + type t = { + vm: Vm.state option; + vbds: (Vbd.id * Vbd.state) list; + vifs: (Vif.id * Vif.state) list; + pcis: (Pci.id * Pci.state) list; + vgpus: (Vgpu.id * Vgpu.state) list; + } + let empty = { + vm = None; + vbds = []; + vifs = []; + pcis = []; + vgpus = []; + } + + let cache = Hashtbl.create 10 (* indexed by Vm.id *) + + let _register_nolock id = + debug "xenops_cache: creating empty cache for %s" id; + Hashtbl.replace cache id empty + + let _unregister_nolock id = + debug "xenops_cache: deleting cache for %s" id; + Hashtbl.remove cache id + + let find id : t option = + Mutex.execute metadata_m + (fun () -> + if Hashtbl.mem cache id + then Some (Hashtbl.find cache id) + else None + ) + + let find_vm id : Vm.state option = + match find id with + | Some { vm = Some vm } -> Some vm + | _ -> None + + let find_vbd id : Vbd.state option = + match find (fst id) with + | Some { vbds = vbds } -> + if List.mem_assoc id vbds + then Some (List.assoc id vbds) + else None + | _ -> None + + let find_vif id : Vif.state option = + match find (fst id) with + | Some { vifs = vifs } -> + if List.mem_assoc id vifs + then Some (List.assoc id vifs) + else None + | _ -> None + + let find_pci id : Pci.state option = + match find (fst id) with + | Some { pcis = pcis } -> + if List.mem_assoc id pcis + then Some (List.assoc id pcis) + else None + | _ -> None + + let find_vgpu id : Vgpu.state option = + match find (fst id) with + | Some { vgpus = vgpus } -> + if List.mem_assoc id vgpus + then Some (List.assoc id vgpus) + else None + | _ -> None + + let update id t = + Mutex.execute metadata_m + (fun () -> + if Hashtbl.mem cache id + then Hashtbl.replace cache id t + else debug "xenops_cache: Not updating cache for unregistered VM %s" id + ) + + let update_vbd id info = + let existing = Opt.default empty (find (fst id)) in + let vbds' = List.filter (fun (vbd_id, _) -> vbd_id <> id) existing.vbds in + update (fst id) { existing with vbds = Opt.default vbds' (Opt.map (fun info -> (id, 
info) :: vbds') info) } + + let update_vif id info = + let existing = Opt.default empty (find (fst id)) in + let vifs' = List.filter (fun (vif_id, _) -> vif_id <> id) existing.vifs in + update (fst id) { existing with vifs = Opt.default vifs' (Opt.map (fun info -> (id, info) :: vifs') info) } + + let update_pci id info = + let existing = Opt.default empty (find (fst id)) in + let pcis' = List.filter (fun (pci_id, _) -> pci_id <> id) existing.pcis in + update (fst id) { existing with pcis = Opt.default pcis' (Opt.map (fun info -> (id, info) :: pcis') info) } + + let update_vgpu id info = + let existing = Opt.default empty (find (fst id)) in + let vgpus' = List.filter (fun (vgpu_id, _) -> vgpu_id <> id) existing.vgpus in + update (fst id) { existing with vgpus = Opt.default vgpus' (Opt.map (fun info -> (id, info) :: vgpus') info) } + + let update_vm id info = + let existing = Opt.default empty (find id) in + update id { existing with vm = info } + + let list_nolock () = Hashtbl.fold (fun id _ acc -> id :: acc) cache [] end module Xenopsd_metadata = struct - (** Manage the lifetime of VM metadata pushed to xenopsd *) - - (* If the VM has Xapi_globs.persist_xenopsd_md -> filename in its other_config, - we persist the xenopsd metadata to a well-known location in the filesystem *) - let maybe_persist_md ~__context ~self md = - let oc = Db.VM.get_other_config ~__context ~self in - if List.mem_assoc Xapi_globs.persist_xenopsd_md oc then begin - let file_path = - Filename.concat Xapi_globs.persist_xenopsd_md_root (List.assoc Xapi_globs.persist_xenopsd_md oc) |> - Stdext.Unixext.resolve_dot_and_dotdot in - - if not (String.startswith Xapi_globs.persist_xenopsd_md_root file_path) then begin - warn "Not persisting xenopsd metadata to bad location: '%s'" file_path - end else begin - Unixext.mkdir_safe Xapi_globs.persist_xenopsd_md_root 0o755; - Unixext.write_string_to_file file_path md - end - end - - let push ~__context ~upgrade ~self = - Mutex.execute metadata_m (fun () -> - 
let md = create_metadata ~__context ~upgrade ~self in - let txt = md |> Metadata.rpc_of_t |> Jsonrpc.to_string in - info "xenops: VM.import_metadata %s" txt; - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self) : XENOPS) in - let id = Client.VM.import_metadata dbg txt in - - maybe_persist_md ~__context ~self txt; - - Xapi_cache._register_nolock id (Some txt); - Xenops_cache._register_nolock id; - id - ) - - let delete_nolock ~__context id = - let dbg = Context.string_of_task __context in - info "xenops: VM.remove %s" id; - try - let module Client = (val make_client (queue_of_vm ~__context ~self:(vm_of_id ~__context id)) : XENOPS) in - Client.VM.remove dbg id; - - (* Once the VM has been successfully removed from xenopsd, remove the caches *) - Xenops_cache._unregister_nolock id; - Xapi_cache._unregister_nolock id - - with - | Bad_power_state(_, _) -> - (* This can fail during a localhost live migrate; but this is safe to ignore *) - debug "We have not removed metadata from xenopsd because VM %s is still running" id - | Does_not_exist(_) -> - debug "Metadata for VM %s was already removed" id - - - (* Unregisters a VM with xenopsd, and cleans up metadata and caches *) - let pull ~__context id = - Mutex.execute metadata_m - (fun () -> - info "xenops: VM.export_metadata %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self:(vm_of_id ~__context id)) : XENOPS) in - let md = Client.VM.export_metadata dbg id |> Jsonrpc.of_string |> Metadata.t_of_rpc in - - delete_nolock ~__context id; - - md) - - let delete ~__context id = - Mutex.execute metadata_m - (fun () -> - delete_nolock ~__context id - ) - - let update ~__context ~self = - let id = id_of_vm ~__context ~self in - let queue_name = queue_of_vm ~__context ~self in - Mutex.execute metadata_m - (fun () -> - let dbg = Context.string_of_task __context in - if vm_exists_in_xenopsd 
queue_name dbg id - then - let txt = create_metadata ~__context ~upgrade:false ~self |> Metadata.rpc_of_t |> Jsonrpc.to_string in - begin match Xapi_cache.find_nolock id with - | Some old when old = txt -> () - | _ -> - debug "VM %s metadata has changed: updating xenopsd" id; - info "xenops: VM.import_metadata %s" txt; - maybe_persist_md ~__context ~self txt; - Xapi_cache.update_nolock id (Some txt); - let module Client = (val make_client queue_name : XENOPS) in - let (_: Vm.id) = Client.VM.import_metadata dbg txt in - () - end - ) + (** Manage the lifetime of VM metadata pushed to xenopsd *) + + (* If the VM has Xapi_globs.persist_xenopsd_md -> filename in its other_config, + we persist the xenopsd metadata to a well-known location in the filesystem *) + let maybe_persist_md ~__context ~self md = + let oc = Db.VM.get_other_config ~__context ~self in + if List.mem_assoc Xapi_globs.persist_xenopsd_md oc then begin + let file_path = + Filename.concat Xapi_globs.persist_xenopsd_md_root (List.assoc Xapi_globs.persist_xenopsd_md oc) |> + Stdext.Unixext.resolve_dot_and_dotdot in + + if not (String.startswith Xapi_globs.persist_xenopsd_md_root file_path) then begin + warn "Not persisting xenopsd metadata to bad location: '%s'" file_path + end else begin + Unixext.mkdir_safe Xapi_globs.persist_xenopsd_md_root 0o755; + Unixext.write_string_to_file file_path md + end + end + + let push ~__context ~upgrade ~self = + Mutex.execute metadata_m (fun () -> + let md = create_metadata ~__context ~upgrade ~self in + let txt = md |> Metadata.rpc_of_t |> Jsonrpc.to_string in + info "xenops: VM.import_metadata %s" txt; + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self) : XENOPS) in + let id = Client.VM.import_metadata dbg txt in + + maybe_persist_md ~__context ~self txt; + + Xapi_cache._register_nolock id (Some txt); + Xenops_cache._register_nolock id; + id + ) + + let delete_nolock ~__context id = + let dbg = 
Context.string_of_task __context in + info "xenops: VM.remove %s" id; + try + let module Client = (val make_client (queue_of_vm ~__context ~self:(vm_of_id ~__context id)) : XENOPS) in + Client.VM.remove dbg id; + + (* Once the VM has been successfully removed from xenopsd, remove the caches *) + Xenops_cache._unregister_nolock id; + Xapi_cache._unregister_nolock id + + with + | Bad_power_state(_, _) -> + (* This can fail during a localhost live migrate; but this is safe to ignore *) + debug "We have not removed metadata from xenopsd because VM %s is still running" id + | Does_not_exist(_) -> + debug "Metadata for VM %s was already removed" id + + + (* Unregisters a VM with xenopsd, and cleans up metadata and caches *) + let pull ~__context id = + Mutex.execute metadata_m + (fun () -> + info "xenops: VM.export_metadata %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self:(vm_of_id ~__context id)) : XENOPS) in + let md = Client.VM.export_metadata dbg id |> Jsonrpc.of_string |> Metadata.t_of_rpc in + + delete_nolock ~__context id; + + md) + + let delete ~__context id = + Mutex.execute metadata_m + (fun () -> + delete_nolock ~__context id + ) + + let update ~__context ~self = + let id = id_of_vm ~__context ~self in + let queue_name = queue_of_vm ~__context ~self in + Mutex.execute metadata_m + (fun () -> + let dbg = Context.string_of_task __context in + if vm_exists_in_xenopsd queue_name dbg id + then + let txt = create_metadata ~__context ~upgrade:false ~self |> Metadata.rpc_of_t |> Jsonrpc.to_string in + begin match Xapi_cache.find_nolock id with + | Some old when old = txt -> () + | _ -> + debug "VM %s metadata has changed: updating xenopsd" id; + info "xenops: VM.import_metadata %s" txt; + maybe_persist_md ~__context ~self txt; + Xapi_cache.update_nolock id (Some txt); + let module Client = (val make_client queue_name : XENOPS) in + let (_: Vm.id) = Client.VM.import_metadata dbg txt in + () + 
end + ) end let add_caches id = - Mutex.execute metadata_m - (fun () -> - Xapi_cache._register_nolock id None; - Xenops_cache._register_nolock id; - ) + Mutex.execute metadata_m + (fun () -> + Xapi_cache._register_nolock id None; + Xenops_cache._register_nolock id; + ) let to_xenops_console_protocol = let open Vm in function - | `rfb -> Rfb - | `vt100 -> Vt100 - | `rdp -> Rfb (* RDP was never used in the XenAPI so this never happens *) + | `rfb -> Rfb + | `vt100 -> Vt100 + | `rdp -> Rfb (* RDP was never used in the XenAPI so this never happens *) let to_xenapi_console_protocol = let open Vm in function - | Rfb -> `rfb - | Vt100 -> `vt100 + | Rfb -> `rfb + | Vt100 -> `vt100 (* Event handling: When we tell the xenopsd to start a VM, we wait for the task to complete. @@ -1128,666 +1128,666 @@ let to_xenapi_console_protocol = let open Vm in function (* If a xapi event thread is blocked, wake it up and cause it to re-register. This should be called after updating Host.resident_VMs *) let trigger_xenapi_reregister = - ref (fun () -> - debug "No xapi event thread to wake up" - ) + ref (fun () -> + debug "No xapi event thread to wake up" + ) module Events_from_xenopsd = struct - type t = { - mutable finished: bool; - m: Mutex.t; - c: Condition.t; - } - let make () = { - finished = false; - m = Mutex.create (); - c = Condition.create (); - } - let active = Hashtbl.create 10 - let active_m = Mutex.create () - let register = - let counter = ref 0 in - fun t -> - Mutex.execute active_m - (fun () -> - let id = !counter in - incr counter; - Hashtbl.replace active id t; - id - ) - let wait queue_name dbg vm_id () = - let module Client = (val make_client queue_name : XENOPS) in - let t = make () in - let id = register t in - debug "Client.UPDATES.inject_barrier %d" id; - Client.UPDATES.inject_barrier dbg vm_id id; - Mutex.execute t.m - (fun () -> - while not t.finished do Condition.wait t.c t.m done - ) - let wakeup queue_name dbg id = - let module Client = (val make_client 
queue_name : XENOPS) in - Client.UPDATES.remove_barrier dbg id; - let t = Mutex.execute active_m - (fun () -> - if not(Hashtbl.mem active id) - then (warn "Events_from_xenopsd.wakeup: unknown id %d" id; None) - else - let t = Hashtbl.find active id in - Hashtbl.remove active id; - Some t - ) in - Opt.iter - (fun t -> - Mutex.execute t.m - (fun () -> - t.finished <- true; - Condition.signal t.c - ) - ) t - - let events_suppressed_on = Hashtbl.create 10 - let events_suppressed_on_m = Mutex.create () - let events_suppressed_on_c = Condition.create () - let are_suppressed vm = - Hashtbl.mem events_suppressed_on vm - - let with_suppressed queue_name dbg vm_id f = - debug "suppressing xenops events on VM: %s" vm_id; - let module Client = (val make_client queue_name : XENOPS) in - Mutex.execute events_suppressed_on_m (fun () -> - Hashtbl.add events_suppressed_on vm_id (); - ); - finally f (fun () -> - Mutex.execute events_suppressed_on_m (fun () -> - Hashtbl.remove events_suppressed_on vm_id; - if not (Hashtbl.mem events_suppressed_on vm_id) then begin - debug "re-enabled xenops events on VM: %s; refreshing VM" vm_id; - Client.UPDATES.refresh_vm dbg vm_id; - wait queue_name dbg vm_id (); - Condition.broadcast events_suppressed_on_c; - end else while are_suppressed vm_id do - debug "waiting for events to become re-enabled"; - Condition.wait events_suppressed_on_c events_suppressed_on_m - done; - ); - ) + type t = { + mutable finished: bool; + m: Mutex.t; + c: Condition.t; + } + let make () = { + finished = false; + m = Mutex.create (); + c = Condition.create (); + } + let active = Hashtbl.create 10 + let active_m = Mutex.create () + let register = + let counter = ref 0 in + fun t -> + Mutex.execute active_m + (fun () -> + let id = !counter in + incr counter; + Hashtbl.replace active id t; + id + ) + let wait queue_name dbg vm_id () = + let module Client = (val make_client queue_name : XENOPS) in + let t = make () in + let id = register t in + debug 
"Client.UPDATES.inject_barrier %d" id; + Client.UPDATES.inject_barrier dbg vm_id id; + Mutex.execute t.m + (fun () -> + while not t.finished do Condition.wait t.c t.m done + ) + let wakeup queue_name dbg id = + let module Client = (val make_client queue_name : XENOPS) in + Client.UPDATES.remove_barrier dbg id; + let t = Mutex.execute active_m + (fun () -> + if not(Hashtbl.mem active id) + then (warn "Events_from_xenopsd.wakeup: unknown id %d" id; None) + else + let t = Hashtbl.find active id in + Hashtbl.remove active id; + Some t + ) in + Opt.iter + (fun t -> + Mutex.execute t.m + (fun () -> + t.finished <- true; + Condition.signal t.c + ) + ) t + + let events_suppressed_on = Hashtbl.create 10 + let events_suppressed_on_m = Mutex.create () + let events_suppressed_on_c = Condition.create () + let are_suppressed vm = + Hashtbl.mem events_suppressed_on vm + + let with_suppressed queue_name dbg vm_id f = + debug "suppressing xenops events on VM: %s" vm_id; + let module Client = (val make_client queue_name : XENOPS) in + Mutex.execute events_suppressed_on_m (fun () -> + Hashtbl.add events_suppressed_on vm_id (); + ); + finally f (fun () -> + Mutex.execute events_suppressed_on_m (fun () -> + Hashtbl.remove events_suppressed_on vm_id; + if not (Hashtbl.mem events_suppressed_on vm_id) then begin + debug "re-enabled xenops events on VM: %s; refreshing VM" vm_id; + Client.UPDATES.refresh_vm dbg vm_id; + wait queue_name dbg vm_id (); + Condition.broadcast events_suppressed_on_c; + end else while are_suppressed vm_id do + debug "waiting for events to become re-enabled"; + Condition.wait events_suppressed_on_c events_suppressed_on_m + done; + ); + ) end let update_vm ~__context id = - try - let open Vm in - if Events_from_xenopsd.are_suppressed id - then debug "xenopsd event: ignoring event for VM (VM %s migrating away)" id - else - let self = Db.VM.get_by_uuid ~__context ~uuid:id in - let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context 
~self <> localhost - then debug "xenopsd event: ignoring event for VM (VM %s not resident)" id - else - let previous = Xenops_cache.find_vm id in - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self) : XENOPS) in - let info = try Some (Client.VM.stat dbg id) with _ -> None in - if Opt.map snd info = previous - then debug "xenopsd event: ignoring event for VM %s: metadata has not changed" id - else begin - debug "xenopsd event: processing event for VM %s" id; - if info = None then debug "xenopsd event: VM state missing: assuming VM has shut down"; - let should_update_allowed_operations = ref false in - let different f = - let a = Opt.map (fun x -> f (snd x)) info in - let b = Opt.map f previous in - a <> b in - (* Notes on error handling: if something fails we log and continue, to - maximise the amount of state which is correctly synced. If something - does fail then we may end up permanently out-of-sync until either a - process restart or an event is generated. We may wish to periodically - inject artificial events IF there has been an event sync failure? *) - if different (fun x -> x.power_state) then begin - try - debug "Will update VM.allowed_operations because power_state has changed."; - should_update_allowed_operations := true; - let power_state = xenapi_of_xenops_power_state (Opt.map (fun x -> (snd x).power_state) info) in - debug "xenopsd event: Updating VM %s power_state <- %s" id (Record_util.power_state_to_string power_state); - (* This will mark VBDs, VIFs as detached and clear resident_on - if the VM has permanently shutdown. 
current-operations - should not be reset as there maybe a checkpoint is ongoing*) - Xapi_vm_lifecycle.force_state_reset_keep_current_operations ~__context ~self ~value:power_state; - - if power_state = `Suspended || power_state = `Halted then begin - Xapi_network.detach_for_vm ~__context ~host:localhost ~vm:self; - Storage_access.reset ~__context ~vm:self; - end; - if power_state = `Halted - then Xenopsd_metadata.delete ~__context id; - if power_state = `Suspended then begin - let md = Xenopsd_metadata.pull ~__context id in - match md.Metadata.domains with - | None -> - error "Suspended VM has no domain-specific metadata" - | Some x -> - Db.VM.set_last_booted_record ~__context ~self ~value:x; - debug "VM %s last_booted_record set to %s" (Ref.string_of self) x; - Xenopsd_metadata.delete ~__context id - end; - if power_state = `Halted then ( - !trigger_xenapi_reregister () - ); - with e -> - error "Caught %s: while updating VM %s power_state" (Printexc.to_string e) id - end; - if different (fun x -> x.domids) then begin - try - debug "Will update VM.allowed_operations because domid has changed."; - should_update_allowed_operations := true; - debug "xenopsd event: Updating VM %s domid" id; - Opt.iter - (fun (_, state) -> - match state.domids with - | value :: _ -> - Db.VM.set_domid ~__context ~self ~value:(Int64.of_int value) - | [] -> () (* happens when the VM is shutdown *) - ) info; - (* If this is a storage domain, attempt to plug the PBD *) - Opt.iter (fun pbd -> - let (_: Thread.t) = Thread.create (fun () -> - (* Don't block the database update thread *) - Xapi_pbd.plug ~__context ~self:pbd - ) () in - () - ) (System_domains.pbd_of_vm ~__context ~vm:self) - with e -> - error "Caught %s: while updating VM %s domids" (Printexc.to_string e) id - end; - (* consoles *) - if different (fun x -> x.consoles) then begin - try - debug "xenopsd event: Updating VM %s consoles" id; - Opt.iter - (fun (_, state) -> - let localhost = Helpers.get_localhost ~__context in - let 
address = Db.Host.get_address ~__context ~self:localhost in - let uri = Printf.sprintf "https://%s%s" address Constants.console_uri in - let get_uri_from_location loc = - try - let n = String.index loc '?' in - String.sub loc 0 n - with Not_found -> loc - in - let current_protocols = List.map - (fun self -> - (Db.Console.get_protocol ~__context ~self |> to_xenops_console_protocol, - Db.Console.get_location ~__context ~self |> get_uri_from_location), - self) - (Db.VM.get_consoles ~__context ~self) in - let new_protocols = List.map (fun c -> (c.protocol, uri), c) state.consoles in - (* Destroy consoles that have gone away *) - List.iter - (fun protocol -> - let self = List.assoc protocol current_protocols in - Db.Console.destroy ~__context ~self - ) (List.set_difference (List.map fst current_protocols) (List.map fst new_protocols)); - (* Create consoles that have appeared *) - List.iter - (fun (protocol, _) -> - let ref = Ref.make () in - let uuid = Uuid.to_string (Uuid.make_uuid ()) in - let location = Printf.sprintf "%s?uuid=%s" uri uuid in - let port = - try Int64.of_int ((List.find (fun c -> c.protocol = protocol) state.consoles).port) - with Not_found -> -1L - in - Db.Console.create ~__context ~ref ~uuid - ~protocol:(to_xenapi_console_protocol protocol) ~location ~vM:self - ~other_config:[] ~port - ) (List.set_difference (List.map fst new_protocols) (List.map fst current_protocols)); - ) info; - with e -> - error "Caught %s: while updating VM %s consoles" (Printexc.to_string e) id - end; - if different (fun x -> x.memory_target) then begin - try - Opt.iter - (fun (_, state) -> - debug "xenopsd event: Updating VM %s memory_target <- %Ld" id state.memory_target; - Db.VM.set_memory_target ~__context ~self ~value:state.memory_target - ) info - with e -> - error "Caught %s: while updating VM %s consoles" (Printexc.to_string e) id - end; - if different (fun x -> x.rtc_timeoffset) then begin - try - Opt.iter - (fun (_, state) -> - if state.rtc_timeoffset <> "" then 
begin - debug "xenopsd event: Updating VM %s platform:timeoffset <- %s" id state.rtc_timeoffset; - (try Db.VM.remove_from_platform ~__context ~self ~key:Vm_platform.timeoffset with _ -> ()); - Db.VM.add_to_platform ~__context ~self ~key:Vm_platform.timeoffset ~value:state.rtc_timeoffset; - end - ) info - with e -> - error "Caught %s: while updating VM %s rtc/timeoffset" (Printexc.to_string e) id - end; - let check_guest_agent () = - Opt.iter - (fun (_, state) -> - Opt.iter (fun oldstate -> - let old_ga = oldstate.guest_agent in - let new_ga = state.guest_agent in - - (* Remove memory keys *) - let ignored_keys = [ "data/meminfo_free"; "data/updated"; "data/update_cnt" ] in - let remove_ignored ga = - List.fold_left (fun acc k -> List.filter (fun x -> fst x <> k) acc) ga ignored_keys in - let old_ga = remove_ignored old_ga in - let new_ga = remove_ignored new_ga in - if new_ga <> old_ga then begin - debug "Will update VM.allowed_operations because guest_agent has changed."; - should_update_allowed_operations := true - end else begin - debug "Supressing VM.allowed_operations update because guest_agent data is largely the same" - end - ) previous; - List.iter - (fun domid -> - let lookup key = - if List.mem_assoc key state.guest_agent then Some (List.assoc key state.guest_agent) else None in - let list dir = - let dir = if dir.[0] = '/' then String.sub dir 1 (String.length dir - 1) else dir in - let results = Listext.List.filter_map (fun (path, value) -> - if String.startswith dir path then begin - let rest = String.sub path (String.length dir) (String.length path - (String.length dir)) in - match List.filter (fun x -> x <> "") (String.split '/' rest) with - | x :: _ -> Some x - | _ -> None - end else None - ) state.guest_agent |> Listext.List.setify in - results in - try - debug "xenopsd event: Updating VM %s domid %d guest_agent" id domid; - Xapi_guest_agent.all lookup list ~__context ~domid ~uuid:id - with e -> - error "Caught %s: while updating VM %s guest_agent" 
(Printexc.to_string e) id - ) state.domids - ) info in - if different (fun x -> x.hvm) then begin - Opt.iter - (fun (_, state) -> - let metrics = Db.VM.get_metrics ~__context ~self in - debug "xenopsd event: Updating VM %s hvm <- %s" - id (string_of_bool state.Vm.hvm); - Db.VM_metrics.set_hvm ~__context ~self:metrics - ~value:state.Vm.hvm; - ) - info - end; - if different (fun x -> x.nomigrate) then begin - Opt.iter - (fun (_, state) -> - let metrics = Db.VM.get_metrics ~__context ~self in - debug "xenopsd event: Updating VM %s nomigrate <- %s" - id (string_of_bool state.Vm.nomigrate); - Db.VM_metrics.set_nomigrate ~__context ~self:metrics - ~value:state.Vm.nomigrate; - ) - info - end; - if different (fun x -> x.nested_virt) then begin - Opt.iter - (fun (_, state) -> - let metrics = Db.VM.get_metrics ~__context ~self in - debug "xenopsd event: Updating VM %s nested_virt <- %s" - id (string_of_bool state.Vm.nested_virt); - Db.VM_metrics.set_nested_virt ~__context ~self:metrics - ~value:state.Vm.nested_virt; - ) - info - end; - let update_pv_drivers_detected () = - Opt.iter - (fun (_, state) -> - let gm = Db.VM.get_guest_metrics ~__context ~self in - debug "xenopsd event: Updating VM %s PV drivers detected %b" id state.pv_drivers_detected; - Db.VM_guest_metrics.set_PV_drivers_detected ~__context ~self:gm ~value:state.pv_drivers_detected; - Db.VM_guest_metrics.set_PV_drivers_up_to_date ~__context ~self:gm ~value:state.pv_drivers_detected - ) info in - Opt.iter - (fun (_, state) -> - List.iter - (fun domid -> - if different (fun x -> x.uncooperative_balloon_driver) then begin - debug "xenopsd event: VM %s domid %d uncooperative_balloon_driver = %b" id domid state.uncooperative_balloon_driver; - end; - if different (fun x -> x.guest_agent) then check_guest_agent (); - if different (fun x -> x.pv_drivers_detected) then update_pv_drivers_detected (); - - if different (fun x -> x.xsdata_state) then begin - try - debug "xenopsd event: Updating VM %s domid %d xsdata" id 
domid; - Db.VM.set_xenstore_data ~__context ~self ~value:state.xsdata_state - with e -> - error "Caught %s: while updating VM %s xsdata" (Printexc.to_string e) id - end; - if different (fun x -> x.memory_target) then begin - try - debug "xenopsd event: Updating VM %s domid %d memory target" id domid; - Rrdd.update_vm_memory_target ~domid ~target:state.memory_target; - with e -> - error "Caught %s: while updating VM %s memory_target" (Printexc.to_string e) id - end; - ) state.domids; - ) info; - if different (fun x -> x.vcpu_target) then begin - Opt.iter - (fun (_, state) -> - try - debug "xenopsd event: Updating VM %s vcpu_target <- %d" id state.Vm.vcpu_target; - let metrics = Db.VM.get_metrics ~__context ~self in - Db.VM_metrics.set_VCPUs_number ~__context ~self:metrics ~value:(Int64.of_int state.Vm.vcpu_target); - with e -> - error "Caught %s: while updating VM %s VCPUs_number" (Printexc.to_string e) id - ) info - end; - if different (fun x -> x.last_start_time) then begin - try - Opt.iter - (fun (_, state) -> - debug "xenopsd event: Updating VM %s last_start_time <- %s" id (Date.to_string (Date.of_float state.last_start_time)); - let metrics = Db.VM.get_metrics ~__context ~self in - let start_time = Date.of_float state.last_start_time in - Db.VM_metrics.set_start_time ~__context ~self:metrics ~value:start_time; - begin - try - let gm = Db.VM.get_guest_metrics ~__context ~self in - let update_time = Db.VM_guest_metrics.get_last_updated ~__context ~self:gm in - if update_time < start_time then begin - debug "VM %s guest metrics update time (%s) < VM start time (%s): deleting" - id (Date.to_string update_time) (Date.to_string start_time); - Xapi_vm_helpers.delete_guest_metrics ~__context ~self; - check_guest_agent (); - end - with _ -> () (* The guest metrics didn't exist *) - end - ) info - with e -> - error "Caught %s: while updating VM %s last_start_time" (Printexc.to_string e) id - end; - if different (fun x -> x.shadow_multiplier_target) then begin - try - 
Opt.iter - (fun (_, state) -> - debug "xenopsd event: Updating VM %s shadow_multiplier <- %.2f" id state.shadow_multiplier_target; - if state.power_state <> Halted && state.shadow_multiplier_target >= 0.0 then - Db.VM.set_HVM_shadow_multiplier ~__context ~self ~value:state.shadow_multiplier_target - ) info - with e -> - error "Caught %s: while updating VM %s HVM_shadow_multiplier" (Printexc.to_string e) id - end; - Xenops_cache.update_vm id (Opt.map snd info); - if !should_update_allowed_operations then - Helpers.call_api_functions ~__context - (fun rpc session_id -> XenAPI.VM.update_allowed_operations ~rpc ~session_id ~self); - end - with e -> - error "xenopsd event: Caught %s while updating VM: has this VM been removed while this host is offline?" (string_of_exn e) + try + let open Vm in + if Events_from_xenopsd.are_suppressed id + then debug "xenopsd event: ignoring event for VM (VM %s migrating away)" id + else + let self = Db.VM.get_by_uuid ~__context ~uuid:id in + let localhost = Helpers.get_localhost ~__context in + if Db.VM.get_resident_on ~__context ~self <> localhost + then debug "xenopsd event: ignoring event for VM (VM %s not resident)" id + else + let previous = Xenops_cache.find_vm id in + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self) : XENOPS) in + let info = try Some (Client.VM.stat dbg id) with _ -> None in + if Opt.map snd info = previous + then debug "xenopsd event: ignoring event for VM %s: metadata has not changed" id + else begin + debug "xenopsd event: processing event for VM %s" id; + if info = None then debug "xenopsd event: VM state missing: assuming VM has shut down"; + let should_update_allowed_operations = ref false in + let different f = + let a = Opt.map (fun x -> f (snd x)) info in + let b = Opt.map f previous in + a <> b in + (* Notes on error handling: if something fails we log and continue, to + maximise the amount of state which is correctly synced. 
If something + does fail then we may end up permanently out-of-sync until either a + process restart or an event is generated. We may wish to periodically + inject artificial events IF there has been an event sync failure? *) + if different (fun x -> x.power_state) then begin + try + debug "Will update VM.allowed_operations because power_state has changed."; + should_update_allowed_operations := true; + let power_state = xenapi_of_xenops_power_state (Opt.map (fun x -> (snd x).power_state) info) in + debug "xenopsd event: Updating VM %s power_state <- %s" id (Record_util.power_state_to_string power_state); + (* This will mark VBDs, VIFs as detached and clear resident_on + if the VM has permanently shutdown. current-operations + should not be reset as there maybe a checkpoint is ongoing*) + Xapi_vm_lifecycle.force_state_reset_keep_current_operations ~__context ~self ~value:power_state; + + if power_state = `Suspended || power_state = `Halted then begin + Xapi_network.detach_for_vm ~__context ~host:localhost ~vm:self; + Storage_access.reset ~__context ~vm:self; + end; + if power_state = `Halted + then Xenopsd_metadata.delete ~__context id; + if power_state = `Suspended then begin + let md = Xenopsd_metadata.pull ~__context id in + match md.Metadata.domains with + | None -> + error "Suspended VM has no domain-specific metadata" + | Some x -> + Db.VM.set_last_booted_record ~__context ~self ~value:x; + debug "VM %s last_booted_record set to %s" (Ref.string_of self) x; + Xenopsd_metadata.delete ~__context id + end; + if power_state = `Halted then ( + !trigger_xenapi_reregister () + ); + with e -> + error "Caught %s: while updating VM %s power_state" (Printexc.to_string e) id + end; + if different (fun x -> x.domids) then begin + try + debug "Will update VM.allowed_operations because domid has changed."; + should_update_allowed_operations := true; + debug "xenopsd event: Updating VM %s domid" id; + Opt.iter + (fun (_, state) -> + match state.domids with + | value :: _ -> + 
Db.VM.set_domid ~__context ~self ~value:(Int64.of_int value) + | [] -> () (* happens when the VM is shutdown *) + ) info; + (* If this is a storage domain, attempt to plug the PBD *) + Opt.iter (fun pbd -> + let (_: Thread.t) = Thread.create (fun () -> + (* Don't block the database update thread *) + Xapi_pbd.plug ~__context ~self:pbd + ) () in + () + ) (System_domains.pbd_of_vm ~__context ~vm:self) + with e -> + error "Caught %s: while updating VM %s domids" (Printexc.to_string e) id + end; + (* consoles *) + if different (fun x -> x.consoles) then begin + try + debug "xenopsd event: Updating VM %s consoles" id; + Opt.iter + (fun (_, state) -> + let localhost = Helpers.get_localhost ~__context in + let address = Db.Host.get_address ~__context ~self:localhost in + let uri = Printf.sprintf "https://%s%s" address Constants.console_uri in + let get_uri_from_location loc = + try + let n = String.index loc '?' in + String.sub loc 0 n + with Not_found -> loc + in + let current_protocols = List.map + (fun self -> + (Db.Console.get_protocol ~__context ~self |> to_xenops_console_protocol, + Db.Console.get_location ~__context ~self |> get_uri_from_location), + self) + (Db.VM.get_consoles ~__context ~self) in + let new_protocols = List.map (fun c -> (c.protocol, uri), c) state.consoles in + (* Destroy consoles that have gone away *) + List.iter + (fun protocol -> + let self = List.assoc protocol current_protocols in + Db.Console.destroy ~__context ~self + ) (List.set_difference (List.map fst current_protocols) (List.map fst new_protocols)); + (* Create consoles that have appeared *) + List.iter + (fun (protocol, _) -> + let ref = Ref.make () in + let uuid = Uuid.to_string (Uuid.make_uuid ()) in + let location = Printf.sprintf "%s?uuid=%s" uri uuid in + let port = + try Int64.of_int ((List.find (fun c -> c.protocol = protocol) state.consoles).port) + with Not_found -> -1L + in + Db.Console.create ~__context ~ref ~uuid + ~protocol:(to_xenapi_console_protocol protocol) ~location 
~vM:self + ~other_config:[] ~port + ) (List.set_difference (List.map fst new_protocols) (List.map fst current_protocols)); + ) info; + with e -> + error "Caught %s: while updating VM %s consoles" (Printexc.to_string e) id + end; + if different (fun x -> x.memory_target) then begin + try + Opt.iter + (fun (_, state) -> + debug "xenopsd event: Updating VM %s memory_target <- %Ld" id state.memory_target; + Db.VM.set_memory_target ~__context ~self ~value:state.memory_target + ) info + with e -> + error "Caught %s: while updating VM %s consoles" (Printexc.to_string e) id + end; + if different (fun x -> x.rtc_timeoffset) then begin + try + Opt.iter + (fun (_, state) -> + if state.rtc_timeoffset <> "" then begin + debug "xenopsd event: Updating VM %s platform:timeoffset <- %s" id state.rtc_timeoffset; + (try Db.VM.remove_from_platform ~__context ~self ~key:Vm_platform.timeoffset with _ -> ()); + Db.VM.add_to_platform ~__context ~self ~key:Vm_platform.timeoffset ~value:state.rtc_timeoffset; + end + ) info + with e -> + error "Caught %s: while updating VM %s rtc/timeoffset" (Printexc.to_string e) id + end; + let check_guest_agent () = + Opt.iter + (fun (_, state) -> + Opt.iter (fun oldstate -> + let old_ga = oldstate.guest_agent in + let new_ga = state.guest_agent in + + (* Remove memory keys *) + let ignored_keys = [ "data/meminfo_free"; "data/updated"; "data/update_cnt" ] in + let remove_ignored ga = + List.fold_left (fun acc k -> List.filter (fun x -> fst x <> k) acc) ga ignored_keys in + let old_ga = remove_ignored old_ga in + let new_ga = remove_ignored new_ga in + if new_ga <> old_ga then begin + debug "Will update VM.allowed_operations because guest_agent has changed."; + should_update_allowed_operations := true + end else begin + debug "Supressing VM.allowed_operations update because guest_agent data is largely the same" + end + ) previous; + List.iter + (fun domid -> + let lookup key = + if List.mem_assoc key state.guest_agent then Some (List.assoc key 
state.guest_agent) else None in + let list dir = + let dir = if dir.[0] = '/' then String.sub dir 1 (String.length dir - 1) else dir in + let results = Listext.List.filter_map (fun (path, value) -> + if String.startswith dir path then begin + let rest = String.sub path (String.length dir) (String.length path - (String.length dir)) in + match List.filter (fun x -> x <> "") (String.split '/' rest) with + | x :: _ -> Some x + | _ -> None + end else None + ) state.guest_agent |> Listext.List.setify in + results in + try + debug "xenopsd event: Updating VM %s domid %d guest_agent" id domid; + Xapi_guest_agent.all lookup list ~__context ~domid ~uuid:id + with e -> + error "Caught %s: while updating VM %s guest_agent" (Printexc.to_string e) id + ) state.domids + ) info in + if different (fun x -> x.hvm) then begin + Opt.iter + (fun (_, state) -> + let metrics = Db.VM.get_metrics ~__context ~self in + debug "xenopsd event: Updating VM %s hvm <- %s" + id (string_of_bool state.Vm.hvm); + Db.VM_metrics.set_hvm ~__context ~self:metrics + ~value:state.Vm.hvm; + ) + info + end; + if different (fun x -> x.nomigrate) then begin + Opt.iter + (fun (_, state) -> + let metrics = Db.VM.get_metrics ~__context ~self in + debug "xenopsd event: Updating VM %s nomigrate <- %s" + id (string_of_bool state.Vm.nomigrate); + Db.VM_metrics.set_nomigrate ~__context ~self:metrics + ~value:state.Vm.nomigrate; + ) + info + end; + if different (fun x -> x.nested_virt) then begin + Opt.iter + (fun (_, state) -> + let metrics = Db.VM.get_metrics ~__context ~self in + debug "xenopsd event: Updating VM %s nested_virt <- %s" + id (string_of_bool state.Vm.nested_virt); + Db.VM_metrics.set_nested_virt ~__context ~self:metrics + ~value:state.Vm.nested_virt; + ) + info + end; + let update_pv_drivers_detected () = + Opt.iter + (fun (_, state) -> + let gm = Db.VM.get_guest_metrics ~__context ~self in + debug "xenopsd event: Updating VM %s PV drivers detected %b" id state.pv_drivers_detected; + 
Db.VM_guest_metrics.set_PV_drivers_detected ~__context ~self:gm ~value:state.pv_drivers_detected; + Db.VM_guest_metrics.set_PV_drivers_up_to_date ~__context ~self:gm ~value:state.pv_drivers_detected + ) info in + Opt.iter + (fun (_, state) -> + List.iter + (fun domid -> + if different (fun x -> x.uncooperative_balloon_driver) then begin + debug "xenopsd event: VM %s domid %d uncooperative_balloon_driver = %b" id domid state.uncooperative_balloon_driver; + end; + if different (fun x -> x.guest_agent) then check_guest_agent (); + if different (fun x -> x.pv_drivers_detected) then update_pv_drivers_detected (); + + if different (fun x -> x.xsdata_state) then begin + try + debug "xenopsd event: Updating VM %s domid %d xsdata" id domid; + Db.VM.set_xenstore_data ~__context ~self ~value:state.xsdata_state + with e -> + error "Caught %s: while updating VM %s xsdata" (Printexc.to_string e) id + end; + if different (fun x -> x.memory_target) then begin + try + debug "xenopsd event: Updating VM %s domid %d memory target" id domid; + Rrdd.update_vm_memory_target ~domid ~target:state.memory_target; + with e -> + error "Caught %s: while updating VM %s memory_target" (Printexc.to_string e) id + end; + ) state.domids; + ) info; + if different (fun x -> x.vcpu_target) then begin + Opt.iter + (fun (_, state) -> + try + debug "xenopsd event: Updating VM %s vcpu_target <- %d" id state.Vm.vcpu_target; + let metrics = Db.VM.get_metrics ~__context ~self in + Db.VM_metrics.set_VCPUs_number ~__context ~self:metrics ~value:(Int64.of_int state.Vm.vcpu_target); + with e -> + error "Caught %s: while updating VM %s VCPUs_number" (Printexc.to_string e) id + ) info + end; + if different (fun x -> x.last_start_time) then begin + try + Opt.iter + (fun (_, state) -> + debug "xenopsd event: Updating VM %s last_start_time <- %s" id (Date.to_string (Date.of_float state.last_start_time)); + let metrics = Db.VM.get_metrics ~__context ~self in + let start_time = Date.of_float state.last_start_time in + 
Db.VM_metrics.set_start_time ~__context ~self:metrics ~value:start_time; + begin + try + let gm = Db.VM.get_guest_metrics ~__context ~self in + let update_time = Db.VM_guest_metrics.get_last_updated ~__context ~self:gm in + if update_time < start_time then begin + debug "VM %s guest metrics update time (%s) < VM start time (%s): deleting" + id (Date.to_string update_time) (Date.to_string start_time); + Xapi_vm_helpers.delete_guest_metrics ~__context ~self; + check_guest_agent (); + end + with _ -> () (* The guest metrics didn't exist *) + end + ) info + with e -> + error "Caught %s: while updating VM %s last_start_time" (Printexc.to_string e) id + end; + if different (fun x -> x.shadow_multiplier_target) then begin + try + Opt.iter + (fun (_, state) -> + debug "xenopsd event: Updating VM %s shadow_multiplier <- %.2f" id state.shadow_multiplier_target; + if state.power_state <> Halted && state.shadow_multiplier_target >= 0.0 then + Db.VM.set_HVM_shadow_multiplier ~__context ~self ~value:state.shadow_multiplier_target + ) info + with e -> + error "Caught %s: while updating VM %s HVM_shadow_multiplier" (Printexc.to_string e) id + end; + Xenops_cache.update_vm id (Opt.map snd info); + if !should_update_allowed_operations then + Helpers.call_api_functions ~__context + (fun rpc session_id -> XenAPI.VM.update_allowed_operations ~rpc ~session_id ~self); + end + with e -> + error "xenopsd event: Caught %s while updating VM: has this VM been removed while this host is offline?" 
(string_of_exn e) let update_vbd ~__context (id: (string * string)) = - try - let open Vbd in - if Events_from_xenopsd.are_suppressed (fst id) - then debug "xenopsd event: ignoring event for VM (VM %s migrating away)" (fst id) - else - let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in - let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost - then debug "xenopsd event: ignoring event for VBD (VM %s not resident)" (fst id) - else - let previous = Xenops_cache.find_vbd id in - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in - let info = try Some(Client.VBD.stat dbg id) with _ -> None in - if Opt.map snd info = previous - then debug "xenopsd event: ignoring event for VBD %s.%s: metadata has not changed" (fst id) (snd id) - else begin - let vbds = Db.VM.get_VBDs ~__context ~self:vm in - let vbdrs = List.map (fun self -> self, Db.VBD.get_record ~__context ~self) vbds in - let linux_device = snd id in - let device_number = Device_number.of_linux_device linux_device in - (* only try matching against disk number if the device is not a floppy (as "0" shouldn't match "fda") *) - let disk_number = - match Device_number.spec device_number with - | (Device_number.Ide,_,_) - | (Device_number.Xen,_,_) -> Some (device_number |> Device_number.to_disk_number |> string_of_int) - | _ -> None in - debug "VM %s VBD userdevices = [ %s ]" (fst id) (String.concat "; " (List.map (fun (_,r) -> r.API.vBD_userdevice) vbdrs)); - let vbd, vbd_r = List.find (fun (_, vbdr) -> vbdr.API.vBD_userdevice = linux_device || - (Opt.is_some disk_number && vbdr.API.vBD_userdevice = Opt.unbox disk_number)) vbdrs in - debug "VBD %s.%s matched device %s" (fst id) (snd id) vbd_r.API.vBD_userdevice; - Opt.iter - (fun (vb, state) -> - let currently_attached = state.plugged || state.active in - debug "xenopsd event: Updating VBD %s.%s device <- %s; currently_attached <- 
%b" (fst id) (snd id) linux_device currently_attached; - Db.VBD.set_device ~__context ~self:vbd ~value:linux_device; - Db.VBD.set_currently_attached ~__context ~self:vbd ~value:currently_attached; - if state.plugged then begin - match state.backend_present with - | Some (VDI x) -> - Opt.iter - (fun (vdi, _) -> - debug "VBD %s.%s backend_present = %s" (fst id) (snd id) x; - Db.VBD.set_VDI ~__context ~self:vbd ~value:vdi; - Db.VBD.set_empty ~__context ~self:vbd ~value:false; - Xapi_vdi.update_allowed_operations ~__context ~self:vdi; - ) (vdi_of_disk ~__context x) - | Some disk -> - error "VBD %s.%s backend_present has unknown disk = %s" (fst id) (snd id) (disk |> rpc_of_disk |> Jsonrpc.to_string) - | None -> - if vbd_r.API.vBD_type = `CD then begin - debug "VBD %s.%s backend_present = None (empty)" (fst id) (snd id); - Db.VBD.set_empty ~__context ~self:vbd ~value:true; - Db.VBD.set_VDI ~__context ~self:vbd ~value:Ref.null - end else error "VBD %s.%s is empty but is not a CD" (fst id) (snd id) - end; - if not(state.plugged || state.active) then begin - debug "VBD.remove %s.%s" (fst id) (snd id); - (try Client.VBD.remove dbg id with e -> debug "VBD.remove failed: %s" (Printexc.to_string e)) - end - ) info; - Xenops_cache.update_vbd id (Opt.map snd info); - Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; - if not (Db.VBD.get_empty ~__context ~self:vbd) then - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - Xapi_vdi.update_allowed_operations ~__context ~self:vdi - end - with e -> - error "xenopsd event: Caught %s while updating VBD" (string_of_exn e) + try + let open Vbd in + if Events_from_xenopsd.are_suppressed (fst id) + then debug "xenopsd event: ignoring event for VM (VM %s migrating away)" (fst id) + else + let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in + let localhost = Helpers.get_localhost ~__context in + if Db.VM.get_resident_on ~__context ~self:vm <> localhost + then debug "xenopsd event: ignoring event for VBD (VM %s not 
resident)" (fst id) + else + let previous = Xenops_cache.find_vbd id in + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in + let info = try Some(Client.VBD.stat dbg id) with _ -> None in + if Opt.map snd info = previous + then debug "xenopsd event: ignoring event for VBD %s.%s: metadata has not changed" (fst id) (snd id) + else begin + let vbds = Db.VM.get_VBDs ~__context ~self:vm in + let vbdrs = List.map (fun self -> self, Db.VBD.get_record ~__context ~self) vbds in + let linux_device = snd id in + let device_number = Device_number.of_linux_device linux_device in + (* only try matching against disk number if the device is not a floppy (as "0" shouldn't match "fda") *) + let disk_number = + match Device_number.spec device_number with + | (Device_number.Ide,_,_) + | (Device_number.Xen,_,_) -> Some (device_number |> Device_number.to_disk_number |> string_of_int) + | _ -> None in + debug "VM %s VBD userdevices = [ %s ]" (fst id) (String.concat "; " (List.map (fun (_,r) -> r.API.vBD_userdevice) vbdrs)); + let vbd, vbd_r = List.find (fun (_, vbdr) -> vbdr.API.vBD_userdevice = linux_device || + (Opt.is_some disk_number && vbdr.API.vBD_userdevice = Opt.unbox disk_number)) vbdrs in + debug "VBD %s.%s matched device %s" (fst id) (snd id) vbd_r.API.vBD_userdevice; + Opt.iter + (fun (vb, state) -> + let currently_attached = state.plugged || state.active in + debug "xenopsd event: Updating VBD %s.%s device <- %s; currently_attached <- %b" (fst id) (snd id) linux_device currently_attached; + Db.VBD.set_device ~__context ~self:vbd ~value:linux_device; + Db.VBD.set_currently_attached ~__context ~self:vbd ~value:currently_attached; + if state.plugged then begin + match state.backend_present with + | Some (VDI x) -> + Opt.iter + (fun (vdi, _) -> + debug "VBD %s.%s backend_present = %s" (fst id) (snd id) x; + Db.VBD.set_VDI ~__context ~self:vbd ~value:vdi; + Db.VBD.set_empty ~__context ~self:vbd 
~value:false; + Xapi_vdi.update_allowed_operations ~__context ~self:vdi; + ) (vdi_of_disk ~__context x) + | Some disk -> + error "VBD %s.%s backend_present has unknown disk = %s" (fst id) (snd id) (disk |> rpc_of_disk |> Jsonrpc.to_string) + | None -> + if vbd_r.API.vBD_type = `CD then begin + debug "VBD %s.%s backend_present = None (empty)" (fst id) (snd id); + Db.VBD.set_empty ~__context ~self:vbd ~value:true; + Db.VBD.set_VDI ~__context ~self:vbd ~value:Ref.null + end else error "VBD %s.%s is empty but is not a CD" (fst id) (snd id) + end; + if not(state.plugged || state.active) then begin + debug "VBD.remove %s.%s" (fst id) (snd id); + (try Client.VBD.remove dbg id with e -> debug "VBD.remove failed: %s" (Printexc.to_string e)) + end + ) info; + Xenops_cache.update_vbd id (Opt.map snd info); + Xapi_vbd_helpers.update_allowed_operations ~__context ~self:vbd; + if not (Db.VBD.get_empty ~__context ~self:vbd) then + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + Xapi_vdi.update_allowed_operations ~__context ~self:vdi + end + with e -> + error "xenopsd event: Caught %s while updating VBD" (string_of_exn e) let update_vif ~__context id = - try - if Events_from_xenopsd.are_suppressed (fst id) - then debug "xenopsd event: ignoring event for VIF (VM %s migrating away)" (fst id) - else - let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in - let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost - then debug "xenopsd event: ignoring event for VIF (VM %s not resident)" (fst id) - else - let open Vif in - let previous = Xenops_cache.find_vif id in - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in - let info = try Some (Client.VIF.stat dbg id) with _ -> None in - if Opt.map snd info = previous - then debug "xenopsd event: ignoring event for VIF %s.%s: metadata has not changed" (fst id) (snd id) - else begin - let vifs = 
Db.VM.get_VIFs ~__context ~self:vm in - let vifrs = List.map (fun self -> self, Db.VIF.get_record ~__context ~self) vifs in - let vif, vifr = List.find (fun (_, vifr) -> vifr.API.vIF_device = (snd id)) vifrs in - Opt.iter - (fun (vf, state) -> - if not (state.plugged || state.active) then begin - (try - Xapi_network.deregister_vif ~__context vif - with e -> - error "Failed to deregister vif: %s" (Printexc.to_string e)); - debug "VIF.remove %s.%s" (fst id) (snd id); - (try Client.VIF.remove dbg id with e -> debug "VIF.remove failed: %s" (Printexc.to_string e)) - end; - - if state.plugged then begin - (* sync MTU *) - (try - let device = "vif" ^ (Int64.to_string (Db.VM.get_domid ~__context ~self:vm)) ^ "." ^ (snd id) in - let dbg = Context.string_of_task __context in - let mtu = Net.Interface.get_mtu dbg ~name:device in - Db.VIF.set_MTU ~__context ~self:vif ~value:(Int64.of_int mtu) - with _ -> - debug "could not update MTU field on VIF %s.%s" (fst id) (snd id)); - - (* Clear monitor cache for associated PIF if pass_through_pif_carrier is set *) - if !Xapi_globs.pass_through_pif_carrier then - let host = Helpers.get_localhost ~__context in - let pifs = Xapi_network_attach_helpers.get_local_pifs ~__context ~network:vifr.API.vIF_network ~host in - List.iter (fun pif -> - let pif_name = Db.PIF.get_device ~__context ~self:pif in - Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name - ) pifs - end; - debug "xenopsd event: Updating VIF %s.%s currently_attached <- %b" (fst id) (snd id) (state.plugged || state.active); - Db.VIF.set_currently_attached ~__context ~self:vif ~value:(state.plugged || state.active) - ) info; - Xenops_cache.update_vif id (Opt.map snd info); - Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif - end - with e -> - error "xenopsd event: Caught %s while updating VIF" (string_of_exn e) + try + if Events_from_xenopsd.are_suppressed (fst id) + then debug "xenopsd event: ignoring event for VIF (VM %s migrating away)" (fst id) + else + let vm = 
Db.VM.get_by_uuid ~__context ~uuid:(fst id) in + let localhost = Helpers.get_localhost ~__context in + if Db.VM.get_resident_on ~__context ~self:vm <> localhost + then debug "xenopsd event: ignoring event for VIF (VM %s not resident)" (fst id) + else + let open Vif in + let previous = Xenops_cache.find_vif id in + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in + let info = try Some (Client.VIF.stat dbg id) with _ -> None in + if Opt.map snd info = previous + then debug "xenopsd event: ignoring event for VIF %s.%s: metadata has not changed" (fst id) (snd id) + else begin + let vifs = Db.VM.get_VIFs ~__context ~self:vm in + let vifrs = List.map (fun self -> self, Db.VIF.get_record ~__context ~self) vifs in + let vif, vifr = List.find (fun (_, vifr) -> vifr.API.vIF_device = (snd id)) vifrs in + Opt.iter + (fun (vf, state) -> + if not (state.plugged || state.active) then begin + (try + Xapi_network.deregister_vif ~__context vif + with e -> + error "Failed to deregister vif: %s" (Printexc.to_string e)); + debug "VIF.remove %s.%s" (fst id) (snd id); + (try Client.VIF.remove dbg id with e -> debug "VIF.remove failed: %s" (Printexc.to_string e)) + end; + + if state.plugged then begin + (* sync MTU *) + (try + let device = "vif" ^ (Int64.to_string (Db.VM.get_domid ~__context ~self:vm)) ^ "." 
^ (snd id) in + let dbg = Context.string_of_task __context in + let mtu = Net.Interface.get_mtu dbg ~name:device in + Db.VIF.set_MTU ~__context ~self:vif ~value:(Int64.of_int mtu) + with _ -> + debug "could not update MTU field on VIF %s.%s" (fst id) (snd id)); + + (* Clear monitor cache for associated PIF if pass_through_pif_carrier is set *) + if !Xapi_globs.pass_through_pif_carrier then + let host = Helpers.get_localhost ~__context in + let pifs = Xapi_network_attach_helpers.get_local_pifs ~__context ~network:vifr.API.vIF_network ~host in + List.iter (fun pif -> + let pif_name = Db.PIF.get_device ~__context ~self:pif in + Monitor_dbcalls_cache.clear_cache_for_pif ~pif_name + ) pifs + end; + debug "xenopsd event: Updating VIF %s.%s currently_attached <- %b" (fst id) (snd id) (state.plugged || state.active); + Db.VIF.set_currently_attached ~__context ~self:vif ~value:(state.plugged || state.active) + ) info; + Xenops_cache.update_vif id (Opt.map snd info); + Xapi_vif_helpers.update_allowed_operations ~__context ~self:vif + end + with e -> + error "xenopsd event: Caught %s while updating VIF" (string_of_exn e) let update_pci ~__context id = - try - if Events_from_xenopsd.are_suppressed (fst id) - then debug "xenopsd event: ignoring event for PCI (VM %s migrating away)" (fst id) - else - let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in - let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost - then debug "xenopsd event: ignoring event for PCI (VM %s not resident)" (fst id) - else - let open Pci in - let previous = Xenops_cache.find_pci id in - let dbg = Context.string_of_task __context in - let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in - let info = try Some (Client.PCI.stat dbg id) with _ -> None in - if Opt.map snd info = previous - then debug "xenopsd event: ignoring event for PCI %s.%s: metadata has not changed" (fst id) (snd id) - else begin - let pcis = 
Db.Host.get_PCIs ~__context ~self:localhost in - let pcirs = List.map (fun self -> self, Db.PCI.get_record ~__context ~self) pcis in - - let pci, _ = List.find (fun (_, pcir) -> pcir.API.pCI_pci_id = (snd id)) pcirs in - - (* Assumption: a VM can have only one vGPU *) - let vgpu_opt = - let pci_class = Db.PCI.get_class_id ~__context ~self:pci in - if Xapi_pci.(is_class_of_kind Display_controller @@ int_of_id pci_class) - then - match Db.VM.get_VGPUs ~__context ~self:vm with - | vgpu :: _ -> Some vgpu - | _ -> None - else None in - let attached_in_db = List.mem vm (Db.PCI.get_attached_VMs ~__context ~self:pci) in - Opt.iter - (fun (_, state) -> - debug "xenopsd event: Updating PCI %s.%s currently_attached <- %b" (fst id) (snd id) state.plugged; - if attached_in_db && (not state.plugged) - then Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:vm - else if (not attached_in_db) && state.plugged - then Db.PCI.add_attached_VMs ~__context ~self:pci ~value:vm; - - match vgpu_opt with - | Some vgpu -> begin - let scheduled = - Db.VGPU.get_scheduled_to_be_resident_on ~__context ~self:vgpu - in - if Db.is_valid_ref __context scheduled && state.plugged - then - Helpers.call_api_functions ~__context - (fun rpc session_id -> - XenAPI.VGPU.atomic_set_resident_on ~rpc ~session_id - ~self:vgpu ~value:scheduled) - end - | None -> (); - - Opt.iter - (fun vgpu -> - debug "xenopsd event: Update VGPU %s.%s currently_attached <- %b" (fst id) (snd id) state.plugged; - Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:state.plugged - ) vgpu_opt - ) info; - Xenops_cache.update_pci id (Opt.map snd info); - end - with e -> - error "xenopsd event: Caught %s while updating PCI" (string_of_exn e) + try + if Events_from_xenopsd.are_suppressed (fst id) + then debug "xenopsd event: ignoring event for PCI (VM %s migrating away)" (fst id) + else + let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in + let localhost = Helpers.get_localhost ~__context in + if Db.VM.get_resident_on 
~__context ~self:vm <> localhost + then debug "xenopsd event: ignoring event for PCI (VM %s not resident)" (fst id) + else + let open Pci in + let previous = Xenops_cache.find_pci id in + let dbg = Context.string_of_task __context in + let module Client = (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) in + let info = try Some (Client.PCI.stat dbg id) with _ -> None in + if Opt.map snd info = previous + then debug "xenopsd event: ignoring event for PCI %s.%s: metadata has not changed" (fst id) (snd id) + else begin + let pcis = Db.Host.get_PCIs ~__context ~self:localhost in + let pcirs = List.map (fun self -> self, Db.PCI.get_record ~__context ~self) pcis in + + let pci, _ = List.find (fun (_, pcir) -> pcir.API.pCI_pci_id = (snd id)) pcirs in + + (* Assumption: a VM can have only one vGPU *) + let vgpu_opt = + let pci_class = Db.PCI.get_class_id ~__context ~self:pci in + if Xapi_pci.(is_class_of_kind Display_controller @@ int_of_id pci_class) + then + match Db.VM.get_VGPUs ~__context ~self:vm with + | vgpu :: _ -> Some vgpu + | _ -> None + else None in + let attached_in_db = List.mem vm (Db.PCI.get_attached_VMs ~__context ~self:pci) in + Opt.iter + (fun (_, state) -> + debug "xenopsd event: Updating PCI %s.%s currently_attached <- %b" (fst id) (snd id) state.plugged; + if attached_in_db && (not state.plugged) + then Db.PCI.remove_attached_VMs ~__context ~self:pci ~value:vm + else if (not attached_in_db) && state.plugged + then Db.PCI.add_attached_VMs ~__context ~self:pci ~value:vm; + + match vgpu_opt with + | Some vgpu -> begin + let scheduled = + Db.VGPU.get_scheduled_to_be_resident_on ~__context ~self:vgpu + in + if Db.is_valid_ref __context scheduled && state.plugged + then + Helpers.call_api_functions ~__context + (fun rpc session_id -> + XenAPI.VGPU.atomic_set_resident_on ~rpc ~session_id + ~self:vgpu ~value:scheduled) + end + | None -> (); + + Opt.iter + (fun vgpu -> + debug "xenopsd event: Update VGPU %s.%s currently_attached <- %b" (fst id) 
(snd id) state.plugged; + Db.VGPU.set_currently_attached ~__context ~self:vgpu ~value:state.plugged + ) vgpu_opt + ) info; + Xenops_cache.update_pci id (Opt.map snd info); + end + with e -> + error "xenopsd event: Caught %s while updating PCI" (string_of_exn e) let update_vgpu ~__context id = - try - if Events_from_xenopsd.are_suppressed (fst id) - then debug "xenopsd event: ignoring event for VGPU (VM %s migrating away)" (fst id) - else - let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in - let localhost = Helpers.get_localhost ~__context in - if Db.VM.get_resident_on ~__context ~self:vm <> localhost - then debug "xenopsd event: ignoring event for VGPU (VM %s not resident)" (fst id) - else - let open Vgpu in - let previous = Xenops_cache.find_vgpu id in - let dbg = Context.string_of_task __context in - let module Client = - (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) - in - let info = try Some (Client.VGPU.stat dbg id) with _ -> None in - if Opt.map snd info = previous - then debug "xenopsd event: ignoring event for VGPU %s.%s: metadata has not changed" (fst id) (snd id) - else begin - let vgpus = Db.VM.get_VGPUs ~__context ~self:vm in - let vgpu_records = - List.map - (fun self -> self, Db.VGPU.get_record ~__context ~self) - vgpus - in - let vgpu, vgpu_record = - List.find - (fun (_, vgpu_record) -> vgpu_record.API.vGPU_device = (snd id)) - vgpu_records - in - Opt.iter - (fun (xenopsd_vgpu, state) -> - if state.plugged then begin - let scheduled = - Db.VGPU.get_scheduled_to_be_resident_on ~__context ~self:vgpu - in - if Db.is_valid_ref __context scheduled - then begin - Helpers.call_api_functions ~__context - (fun rpc session_id -> - XenAPI.VGPU.atomic_set_resident_on ~rpc ~session_id - ~self:vgpu ~value:scheduled) - end; - if not vgpu_record.API.vGPU_currently_attached - then Db.VGPU.set_currently_attached ~__context - ~self:vgpu ~value:true - end else begin - if vgpu_record.API.vGPU_currently_attached - then Db.VGPU.set_currently_attached 
~__context - ~self:vgpu ~value:false; - try Client.VGPU.remove dbg id - with e -> debug "VGPU.remove failed: %s" (Printexc.to_string e) - end) info; - Xenops_cache.update_vgpu id (Opt.map snd info) - end - with e -> - error "xenopsd event: Caught %s while updating VGPU" (string_of_exn e) + try + if Events_from_xenopsd.are_suppressed (fst id) + then debug "xenopsd event: ignoring event for VGPU (VM %s migrating away)" (fst id) + else + let vm = Db.VM.get_by_uuid ~__context ~uuid:(fst id) in + let localhost = Helpers.get_localhost ~__context in + if Db.VM.get_resident_on ~__context ~self:vm <> localhost + then debug "xenopsd event: ignoring event for VGPU (VM %s not resident)" (fst id) + else + let open Vgpu in + let previous = Xenops_cache.find_vgpu id in + let dbg = Context.string_of_task __context in + let module Client = + (val make_client (queue_of_vm ~__context ~self:vm) : XENOPS) + in + let info = try Some (Client.VGPU.stat dbg id) with _ -> None in + if Opt.map snd info = previous + then debug "xenopsd event: ignoring event for VGPU %s.%s: metadata has not changed" (fst id) (snd id) + else begin + let vgpus = Db.VM.get_VGPUs ~__context ~self:vm in + let vgpu_records = + List.map + (fun self -> self, Db.VGPU.get_record ~__context ~self) + vgpus + in + let vgpu, vgpu_record = + List.find + (fun (_, vgpu_record) -> vgpu_record.API.vGPU_device = (snd id)) + vgpu_records + in + Opt.iter + (fun (xenopsd_vgpu, state) -> + if state.plugged then begin + let scheduled = + Db.VGPU.get_scheduled_to_be_resident_on ~__context ~self:vgpu + in + if Db.is_valid_ref __context scheduled + then begin + Helpers.call_api_functions ~__context + (fun rpc session_id -> + XenAPI.VGPU.atomic_set_resident_on ~rpc ~session_id + ~self:vgpu ~value:scheduled) + end; + if not vgpu_record.API.vGPU_currently_attached + then Db.VGPU.set_currently_attached ~__context + ~self:vgpu ~value:true + end else begin + if vgpu_record.API.vGPU_currently_attached + then Db.VGPU.set_currently_attached 
~__context + ~self:vgpu ~value:false; + try Client.VGPU.remove dbg id + with e -> debug "VGPU.remove failed: %s" (Printexc.to_string e) + end) info; + Xenops_cache.update_vgpu id (Opt.map snd info) + end + with e -> + error "xenopsd event: Caught %s while updating VGPU" (string_of_exn e) exception Not_a_xenops_task let wrap queue_name id = TaskHelper.Xenops (queue_name, id) @@ -1796,372 +1796,372 @@ let register_task __context queue_name id = TaskHelper.register_task __context ( let unregister_task __context queue_name id = TaskHelper.unregister_task __context (wrap queue_name id); id let update_task ~__context queue_name id = - try - let self = TaskHelper.id_to_task_exn (TaskHelper.Xenops (queue_name, id)) in (* throws Not_found *) - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - let task_t = Client.TASK.stat dbg id in - match task_t.Task.state with - | Task.Pending x -> - Db.Task.set_progress ~__context ~self ~value:x - | _ -> () - with Not_found -> - (* Since this is called on all tasks, possibly after the task has been - destroyed, it's safe to ignore a Not_found exception here. *) - () - | e -> - error "xenopsd event: Caught %s while updating task" (string_of_exn e) + try + let self = TaskHelper.id_to_task_exn (TaskHelper.Xenops (queue_name, id)) in (* throws Not_found *) + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + let task_t = Client.TASK.stat dbg id in + match task_t.Task.state with + | Task.Pending x -> + Db.Task.set_progress ~__context ~self ~value:x + | _ -> () + with Not_found -> + (* Since this is called on all tasks, possibly after the task has been + destroyed, it's safe to ignore a Not_found exception here. 
*) + () + | e -> + error "xenopsd event: Caught %s while updating task" (string_of_exn e) let rec events_watch ~__context queue_name from = - let dbg = Context.string_of_task __context in - if Xapi_fist.delay_xenopsd_event_threads () then Thread.delay 30.0; - let module Client = (val make_client queue_name : XENOPS) in - let barriers, events, next = Client.UPDATES.get dbg from None in - let done_events = ref [] in - let already_done x = List.mem x !done_events in - let add_event x = done_events := (x :: !done_events) in - let do_updates l = - let open Dynamic in - List.iter - (fun ev -> - debug "Processing event: %s" (ev |> Dynamic.rpc_of_id |> Jsonrpc.to_string); - if (already_done ev) then - debug "Skipping (already processed this round)" - else begin - add_event ev; - match ev with - | Vm id -> - if Events_from_xenopsd.are_suppressed id - then debug "ignoring xenops event on VM %s" id - else begin - debug "xenops event on VM %s" id; - update_vm ~__context id; - end - | Vbd id -> - if Events_from_xenopsd.are_suppressed (fst id) - then debug "ignoring xenops event on VBD %s.%s" (fst id) (snd id) - else begin - debug "xenops event on VBD %s.%s" (fst id) (snd id); - update_vbd ~__context id - end - | Vif id -> - if Events_from_xenopsd.are_suppressed (fst id) - then debug "ignoring xenops event on VIF %s.%s" (fst id) (snd id) - else begin - debug "xenops event on VIF %s.%s" (fst id) (snd id); - update_vif ~__context id - end - | Pci id -> - if Events_from_xenopsd.are_suppressed (fst id) - then debug "ignoring xenops event on PCI %s.%s" (fst id) (snd id) - else begin - debug "xenops event on PCI %s.%s" (fst id) (snd id); - update_pci ~__context id - end - | Vgpu id -> - if Events_from_xenopsd.are_suppressed (fst id) - then debug "ignoring xenops event on VGPU %s.%s" (fst id) (snd id) - else begin - debug "xenops event on VGPU %s.%s" (fst id) (snd id); - update_vgpu ~__context id - end - | Task id -> - debug "xenops event on Task %s" id; - update_task ~__context 
queue_name id - end) l - in - List.iter (fun (id,b_events) -> - debug "Processing barrier %d" id; - do_updates b_events; - Events_from_xenopsd.wakeup queue_name dbg id) barriers; - do_updates events; - events_watch ~__context queue_name (Some next) + let dbg = Context.string_of_task __context in + if Xapi_fist.delay_xenopsd_event_threads () then Thread.delay 30.0; + let module Client = (val make_client queue_name : XENOPS) in + let barriers, events, next = Client.UPDATES.get dbg from None in + let done_events = ref [] in + let already_done x = List.mem x !done_events in + let add_event x = done_events := (x :: !done_events) in + let do_updates l = + let open Dynamic in + List.iter + (fun ev -> + debug "Processing event: %s" (ev |> Dynamic.rpc_of_id |> Jsonrpc.to_string); + if (already_done ev) then + debug "Skipping (already processed this round)" + else begin + add_event ev; + match ev with + | Vm id -> + if Events_from_xenopsd.are_suppressed id + then debug "ignoring xenops event on VM %s" id + else begin + debug "xenops event on VM %s" id; + update_vm ~__context id; + end + | Vbd id -> + if Events_from_xenopsd.are_suppressed (fst id) + then debug "ignoring xenops event on VBD %s.%s" (fst id) (snd id) + else begin + debug "xenops event on VBD %s.%s" (fst id) (snd id); + update_vbd ~__context id + end + | Vif id -> + if Events_from_xenopsd.are_suppressed (fst id) + then debug "ignoring xenops event on VIF %s.%s" (fst id) (snd id) + else begin + debug "xenops event on VIF %s.%s" (fst id) (snd id); + update_vif ~__context id + end + | Pci id -> + if Events_from_xenopsd.are_suppressed (fst id) + then debug "ignoring xenops event on PCI %s.%s" (fst id) (snd id) + else begin + debug "xenops event on PCI %s.%s" (fst id) (snd id); + update_pci ~__context id + end + | Vgpu id -> + if Events_from_xenopsd.are_suppressed (fst id) + then debug "ignoring xenops event on VGPU %s.%s" (fst id) (snd id) + else begin + debug "xenops event on VGPU %s.%s" (fst id) (snd id); + 
update_vgpu ~__context id + end + | Task id -> + debug "xenops event on Task %s" id; + update_task ~__context queue_name id + end) l + in + List.iter (fun (id,b_events) -> + debug "Processing barrier %d" id; + do_updates b_events; + Events_from_xenopsd.wakeup queue_name dbg id) barriers; + do_updates events; + events_watch ~__context queue_name (Some next) let events_from_xenopsd queue_name = - Server_helpers.exec_with_new_task (Printf.sprintf "%s events" queue_name) - (fun __context -> - while true do - try - events_watch ~__context queue_name None; - with e -> - error "%s event thread caught: %s" queue_name (string_of_exn e); - Thread.delay 10. - done - ) + Server_helpers.exec_with_new_task (Printf.sprintf "%s events" queue_name) + (fun __context -> + while true do + try + events_watch ~__context queue_name None; + with e -> + error "%s event thread caught: %s" queue_name (string_of_exn e); + Thread.delay 10. + done + ) let refresh_vm ~__context ~self = - let id = id_of_vm ~__context ~self in - info "xenops: UPDATES.refresh_vm %s" id; - let dbg = Context.string_of_task __context in - let queue_name = queue_of_vm ~__context ~self in - let module Client = (val make_client queue_name : XENOPS) in - Client.UPDATES.refresh_vm dbg id; - Events_from_xenopsd.wait queue_name dbg id () + let id = id_of_vm ~__context ~self in + info "xenops: UPDATES.refresh_vm %s" id; + let dbg = Context.string_of_task __context in + let queue_name = queue_of_vm ~__context ~self in + let module Client = (val make_client queue_name : XENOPS) in + Client.UPDATES.refresh_vm dbg id; + Events_from_xenopsd.wait queue_name dbg id () let on_xapi_restart ~__context = - let dbg = Context.string_of_task __context in - let localhost = Helpers.get_localhost ~__context in - - (* For all available xenopsds, start the event thread *) - List.iter (fun queue_name -> - let (_: Thread.t) = Thread.create events_from_xenopsd queue_name in - () - ) (all_known_xenopsds ()); - - (* Get a list of all the ids of VMs 
that Xapi thinks are resident here *) - let resident_vms_in_db = - List.filter (fun self -> - not (Db.VM.get_is_control_domain ~__context ~self) - ) (Db.Host.get_resident_VMs ~__context ~self:localhost) - |> List.map (fun self -> (id_of_vm ~__context ~self, self)) in - - (* Get a list of VMs that the xenopsds know about with their xenopsd client *) - let vms_in_xenopsds = - List.map (fun queue_name -> - let module Client = (val make_client queue_name : XENOPS) in - let vms = Client.VM.list dbg () in - List.map (fun (vm, state) -> ((vm.Vm.id, state), queue_name)) vms - ) (all_known_xenopsds ()) - |> List.flatten in - - let xenopsd_vms_in_xapi, xenopsd_vms_not_in_xapi = - List.partition (fun ((id, _), _) -> - try vm_of_id ~__context id |> ignore; true with _ -> false - ) vms_in_xenopsds in - - let xapi_vms_not_in_xenopsd = - List.filter (fun (id, _) -> - not (List.exists (fun ((id', _), _) -> id' = id) vms_in_xenopsds) - ) resident_vms_in_db in - - (* Destroy any VMs running that aren't in Xapi's database *) - List.iter (fun ((id, state), queue_name) -> - let module Client = (val make_client queue_name : XENOPS) in - info "VM %s is running here but isn't in the database: terminating" id; - if state.Vm.power_state <> Halted then - Client.VM.shutdown dbg id None |> wait_for_task queue_name dbg |> ignore; - Client.VM.remove dbg id - ) xenopsd_vms_not_in_xapi; - - (* Sync VM state in Xapi for VMs running by local Xenopsds *) - List.iter (fun ((id, state), queue_name) -> - let vm = vm_of_id ~__context id in - let xapi_power_state = - xenapi_of_xenops_power_state (Some state.Vm.power_state) in - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:xapi_power_state; - match xapi_power_state with - | `Running | `Paused -> - Db.VM.set_resident_on ~__context ~self:vm ~value:localhost; - add_caches id; - refresh_vm ~__context ~self:vm; - | `Suspended | `Halted -> - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.remove dbg id; - if List.exists 
(fun (id', _) -> id' = id) resident_vms_in_db - then Db.VM.set_resident_on ~__context ~self:vm ~value:Ref.null; - ) xenopsd_vms_in_xapi; - - (* Sync VM state in Xapi for VMs not running on this host *) - List.iter (fun (id, vm) -> - Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; - Db.VM.set_resident_on ~__context ~self:vm ~value:Ref.null; - ) xapi_vms_not_in_xenopsd + let dbg = Context.string_of_task __context in + let localhost = Helpers.get_localhost ~__context in + + (* For all available xenopsds, start the event thread *) + List.iter (fun queue_name -> + let (_: Thread.t) = Thread.create events_from_xenopsd queue_name in + () + ) (all_known_xenopsds ()); + + (* Get a list of all the ids of VMs that Xapi thinks are resident here *) + let resident_vms_in_db = + List.filter (fun self -> + not (Db.VM.get_is_control_domain ~__context ~self) + ) (Db.Host.get_resident_VMs ~__context ~self:localhost) + |> List.map (fun self -> (id_of_vm ~__context ~self, self)) in + + (* Get a list of VMs that the xenopsds know about with their xenopsd client *) + let vms_in_xenopsds = + List.map (fun queue_name -> + let module Client = (val make_client queue_name : XENOPS) in + let vms = Client.VM.list dbg () in + List.map (fun (vm, state) -> ((vm.Vm.id, state), queue_name)) vms + ) (all_known_xenopsds ()) + |> List.flatten in + + let xenopsd_vms_in_xapi, xenopsd_vms_not_in_xapi = + List.partition (fun ((id, _), _) -> + try vm_of_id ~__context id |> ignore; true with _ -> false + ) vms_in_xenopsds in + + let xapi_vms_not_in_xenopsd = + List.filter (fun (id, _) -> + not (List.exists (fun ((id', _), _) -> id' = id) vms_in_xenopsds) + ) resident_vms_in_db in + + (* Destroy any VMs running that aren't in Xapi's database *) + List.iter (fun ((id, state), queue_name) -> + let module Client = (val make_client queue_name : XENOPS) in + info "VM %s is running here but isn't in the database: terminating" id; + if state.Vm.power_state <> Halted then + Client.VM.shutdown 
dbg id None |> wait_for_task queue_name dbg |> ignore; + Client.VM.remove dbg id + ) xenopsd_vms_not_in_xapi; + + (* Sync VM state in Xapi for VMs running by local Xenopsds *) + List.iter (fun ((id, state), queue_name) -> + let vm = vm_of_id ~__context id in + let xapi_power_state = + xenapi_of_xenops_power_state (Some state.Vm.power_state) in + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:xapi_power_state; + match xapi_power_state with + | `Running | `Paused -> + Db.VM.set_resident_on ~__context ~self:vm ~value:localhost; + add_caches id; + refresh_vm ~__context ~self:vm; + | `Suspended | `Halted -> + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.remove dbg id; + if List.exists (fun (id', _) -> id' = id) resident_vms_in_db + then Db.VM.set_resident_on ~__context ~self:vm ~value:Ref.null; + ) xenopsd_vms_in_xapi; + + (* Sync VM state in Xapi for VMs not running on this host *) + List.iter (fun (id, vm) -> + Xapi_vm_lifecycle.force_state_reset ~__context ~self:vm ~value:`Halted; + Db.VM.set_resident_on ~__context ~self:vm ~value:Ref.null; + ) xapi_vms_not_in_xenopsd let assert_resident_on ~__context ~self = - let localhost = Helpers.get_localhost ~__context in - assert (Db.VM.get_resident_on ~__context ~self = localhost) + let localhost = Helpers.get_localhost ~__context in + assert (Db.VM.get_resident_on ~__context ~self = localhost) module Events_from_xapi = struct - let greatest_token = ref "" - let c = Condition.create () - let m = Mutex.create () - - let wait ~__context ~self = - assert_resident_on ~__context ~self; - let t = Helpers.call_api_functions ~__context - (fun rpc session_id -> - XenAPI.Event.inject ~rpc ~session_id ~_class:"VM" ~_ref:(Ref.string_of self) - ) in - debug "Waiting for token greater than: %s" t; - Mutex.execute m - (fun () -> - while !greatest_token < t do Condition.wait c m done - ) - - let broadcast new_token = - Mutex.execute m - (fun () -> - greatest_token := new_token; - 
Condition.broadcast c - ) + let greatest_token = ref "" + let c = Condition.create () + let m = Mutex.create () + + let wait ~__context ~self = + assert_resident_on ~__context ~self; + let t = Helpers.call_api_functions ~__context + (fun rpc session_id -> + XenAPI.Event.inject ~rpc ~session_id ~_class:"VM" ~_ref:(Ref.string_of self) + ) in + debug "Waiting for token greater than: %s" t; + Mutex.execute m + (fun () -> + while !greatest_token < t do Condition.wait c m done + ) + + let broadcast new_token = + Mutex.execute m + (fun () -> + greatest_token := new_token; + Condition.broadcast c + ) end (* XXX: PR-1255: this will be receiving too many events and we may wish to synchronise updates to the VM metadata and resident_on fields *) (* XXX: PR-1255: we also want to only listen for events on VMs and fields we care about *) let events_from_xapi () = - let open Event_types in - Server_helpers.exec_with_new_task "xapi events" - (fun __context -> - let localhost = Helpers.get_localhost ~__context in - let token = ref "" in - while true do - try - Helpers.call_api_functions ~__context - (fun rpc session_id -> - trigger_xenapi_reregister := - (fun () -> - try - (* This causes Event.next () and Event.from () to return SESSION_INVALID *) - debug "triggering xapi event thread to re-register via session.logout"; - XenAPI.Session.logout ~rpc ~session_id - with - | Api_errors.Server_error(code, _) when code = Api_errors.session_invalid -> - debug "Event thread has already woken up" - | e -> - error "Waking up the xapi event thread: %s" (string_of_exn e) - ); - (* We register for events on resident_VMs only *) - let resident_VMs = Db.Host.get_resident_VMs ~__context ~self:localhost in - - let uuids = List.map (fun self -> Db.VM.get_uuid ~__context ~self) resident_VMs in - let cached = Mutex.execute metadata_m (fun () -> Xenops_cache.list_nolock ()) in - let missing_in_cache = Listext.List.set_difference uuids cached in - let extra_in_cache = Listext.List.set_difference cached 
uuids in - if missing_in_cache <> [] - then error "events_from_xapi: missing from the cache: [ %s ]" (String.concat "; " missing_in_cache); - if extra_in_cache <> [] - then error "events_from_xapi: extra items in the cache: [ %s ]" (String.concat "; " extra_in_cache); - - let classes = List.map (fun x -> Printf.sprintf "VM/%s" (Ref.string_of x)) resident_VMs in - (* NB we re-use the old token so we don't get events we've already - received BUT we will not necessarily receive events for the new VMs *) - - while true do - let api_timeout = 60. in - let timeout = 30. +. api_timeout +. !Xapi_globs.master_connection_reset_timeout in - let otherwise () = info "Event.from timed out in %f seconds, abort the events listening loop and retry" timeout; raise Exit in - let from = Helpers.timebox ~timeout ~otherwise (fun () -> XenAPI.Event.from ~rpc ~session_id ~classes ~token:!token ~timeout:api_timeout |> event_from_of_rpc) in - if List.length from.events > 200 then warn "Warning: received more than 200 events!"; - List.iter - (function - | { ty = "vm"; reference = vm' } -> - let vm = Ref.of_string vm' in - begin - try - let id = id_of_vm ~__context ~self:vm in - let resident_here = Db.VM.get_resident_on ~__context ~self:vm = localhost in - debug "Event on VM %s; resident_here = %b" id resident_here; - if resident_here - then Xenopsd_metadata.update ~__context ~self:vm |> ignore - with e -> - if not(Db.is_valid_ref __context vm) - then debug "VM %s has been removed: event on it will be ignored" (Ref.string_of vm) - else begin - error "Caught %s while processing XenAPI event for VM %s" (Printexc.to_string e) (Ref.string_of vm); - raise e - end - end - | _ -> warn "Received event for something we didn't register for!" 
- ) from.events; - token := from.token; - Events_from_xapi.broadcast !token; - done - ) - with - | Api_errors.Server_error(code, _) when code = Api_errors.session_invalid -> - debug "Woken event thread: updating list of event subscriptions" - | e -> - debug "Caught %s listening to events from xapi" (string_of_exn e); - (* Start from scratch *) - token := ""; - Thread.delay 15. - done - ) + let open Event_types in + Server_helpers.exec_with_new_task "xapi events" + (fun __context -> + let localhost = Helpers.get_localhost ~__context in + let token = ref "" in + while true do + try + Helpers.call_api_functions ~__context + (fun rpc session_id -> + trigger_xenapi_reregister := + (fun () -> + try + (* This causes Event.next () and Event.from () to return SESSION_INVALID *) + debug "triggering xapi event thread to re-register via session.logout"; + XenAPI.Session.logout ~rpc ~session_id + with + | Api_errors.Server_error(code, _) when code = Api_errors.session_invalid -> + debug "Event thread has already woken up" + | e -> + error "Waking up the xapi event thread: %s" (string_of_exn e) + ); + (* We register for events on resident_VMs only *) + let resident_VMs = Db.Host.get_resident_VMs ~__context ~self:localhost in + + let uuids = List.map (fun self -> Db.VM.get_uuid ~__context ~self) resident_VMs in + let cached = Mutex.execute metadata_m (fun () -> Xenops_cache.list_nolock ()) in + let missing_in_cache = Listext.List.set_difference uuids cached in + let extra_in_cache = Listext.List.set_difference cached uuids in + if missing_in_cache <> [] + then error "events_from_xapi: missing from the cache: [ %s ]" (String.concat "; " missing_in_cache); + if extra_in_cache <> [] + then error "events_from_xapi: extra items in the cache: [ %s ]" (String.concat "; " extra_in_cache); + + let classes = List.map (fun x -> Printf.sprintf "VM/%s" (Ref.string_of x)) resident_VMs in + (* NB we re-use the old token so we don't get events we've already + received BUT we will not necessarily 
receive events for the new VMs *) + + while true do + let api_timeout = 60. in + let timeout = 30. +. api_timeout +. !Xapi_globs.master_connection_reset_timeout in + let otherwise () = info "Event.from timed out in %f seconds, abort the events listening loop and retry" timeout; raise Exit in + let from = Helpers.timebox ~timeout ~otherwise (fun () -> XenAPI.Event.from ~rpc ~session_id ~classes ~token:!token ~timeout:api_timeout |> event_from_of_rpc) in + if List.length from.events > 200 then warn "Warning: received more than 200 events!"; + List.iter + (function + | { ty = "vm"; reference = vm' } -> + let vm = Ref.of_string vm' in + begin + try + let id = id_of_vm ~__context ~self:vm in + let resident_here = Db.VM.get_resident_on ~__context ~self:vm = localhost in + debug "Event on VM %s; resident_here = %b" id resident_here; + if resident_here + then Xenopsd_metadata.update ~__context ~self:vm |> ignore + with e -> + if not(Db.is_valid_ref __context vm) + then debug "VM %s has been removed: event on it will be ignored" (Ref.string_of vm) + else begin + error "Caught %s while processing XenAPI event for VM %s" (Printexc.to_string e) (Ref.string_of vm); + raise e + end + end + | _ -> warn "Received event for something we didn't register for!" + ) from.events; + token := from.token; + Events_from_xapi.broadcast !token; + done + ) + with + | Api_errors.Server_error(code, _) when code = Api_errors.session_invalid -> + debug "Woken event thread: updating list of event subscriptions" + | e -> + debug "Caught %s listening to events from xapi" (string_of_exn e); + (* Start from scratch *) + token := ""; + Thread.delay 15. 
+ done + ) let success_task queue_name f dbg id = - let module Client = (val make_client queue_name : XENOPS) in - finally - (fun () -> - let t = Client.TASK.stat dbg id in - match t.Task.state with - | Task.Completed r -> f t;r.Task.result - | Task.Failed x -> - let exn = exn_of_exnty (Exception.exnty_of_rpc x) in - let bt = Backtrace.t_of_sexp (Sexplib.Sexp.of_string t.Task.backtrace) in - Backtrace.add exn bt; - raise exn - | Task.Pending _ -> failwith "task pending" - ) (fun () -> Client.TASK.destroy dbg id) + let module Client = (val make_client queue_name : XENOPS) in + finally + (fun () -> + let t = Client.TASK.stat dbg id in + match t.Task.state with + | Task.Completed r -> f t;r.Task.result + | Task.Failed x -> + let exn = exn_of_exnty (Exception.exnty_of_rpc x) in + let bt = Backtrace.t_of_sexp (Sexplib.Sexp.of_string t.Task.backtrace) in + Backtrace.add exn bt; + raise exn + | Task.Pending _ -> failwith "task pending" + ) (fun () -> Client.TASK.destroy dbg id) (* Catch any uncaught xenops exceptions and transform into the most relevant XenAPI error. We do not want a XenAPI client to see a raw xenopsd error. 
*) let transform_xenops_exn ~__context ~vm queue_name f = - try - f () - with e -> - Backtrace.is_important e; - let reraise code params = - error "Re-raising as %s [ %s ]" code (String.concat "; " params); - let e' = Api_errors.Server_error(code, params) in - Backtrace.reraise e e' in - let internal fmt = Printf.kprintf - (fun x -> - reraise Api_errors.internal_error [ x ] - ) fmt in - begin match e with - | Internal_error msg -> internal "xenopsd internal error: %s" msg - | Already_exists(thing, id) -> internal "Object with type %s and id %s already exists in xenopsd" thing id - | Does_not_exist(thing, id) -> internal "Object with type %s and id %s does not exist in xenopsd" thing id - | Unimplemented(fn) -> reraise Api_errors.not_implemented [ fn ] - | Domain_not_built -> internal "domain has not been built" - | Invalid_vcpus n -> internal "the maximum number of vcpus configured for this VM is currently: %d" n - | Bad_power_state(found, expected) -> - let f x = xenapi_of_xenops_power_state (Some x) |> Record_util.power_state_to_string in - let found = f found and expected = f expected in - reraise Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; found ] - | Failed_to_acknowledge_shutdown_request -> - reraise Api_errors.vm_failed_shutdown_ack [] - | Failed_to_shutdown(id, timeout) -> - reraise Api_errors.vm_shutdown_timeout [ vm_of_id ~__context id |> Ref.string_of; string_of_float timeout ] - | Device_is_connected -> - internal "Cannot remove device because it is connected to a VM" - | Device_not_connected -> - internal "Device is not connected" - | Device_detach_rejected(cls, id, msg) -> - reraise Api_errors.device_detach_rejected [ cls; id; msg ] - | Media_not_ejectable -> internal "the media in this drive cannot be ejected" - | Media_present -> internal "there is already media in this drive" - | Media_not_present -> internal "there is no media in this drive" - | No_bootable_device -> internal "there is no bootable device" - | Bootloader_error 
(uuid, msg) -> - let vm = Db.VM.get_by_uuid ~__context ~uuid in - reraise Api_errors.bootloader_failed [Ref.string_of vm; msg] - | Cannot_free_this_much_memory(needed, free) -> - reraise Api_errors.host_not_enough_free_memory [ Int64.to_string needed; Int64.to_string free ] - | Vms_failed_to_cooperate vms -> - let vms' = List.map (fun uuid -> Db.VM.get_by_uuid ~__context ~uuid |> Ref.string_of) vms in - reraise Api_errors.vms_failed_to_cooperate vms' - | Ballooning_error(code, descr) -> internal "ballooning error: %s %s" code descr - | IO_error -> reraise Api_errors.vdi_io_error ["I/O error saving VM suspend image"] - | Failed_to_contact_remote_service x -> internal "failed to contact: %s" x - | Hook_failed(script, reason, stdout, i) -> reraise Api_errors.xapi_hook_failed [ script; reason; stdout; i ] - | Not_enough_memory needed -> internal "there was not enough memory (needed %Ld bytes)" needed - | Cancelled id -> - let task = - try - TaskHelper.id_to_task_exn (TaskHelper.Xenops (queue_name, id)) - with _ -> - debug "xenopsd task id %s is not associated with a XenAPI task" id; - Ref.null in - reraise Api_errors.task_cancelled [ Ref.string_of task ] - | Storage_backend_error(code, params) -> reraise code params - | PCIBack_not_loaded -> internal "pciback has not loaded" - | Failed_to_start_emulator (uuid, name, msg) -> - let vm = Db.VM.get_by_uuid ~__context ~uuid in - reraise Api_errors.failed_to_start_emulator [Ref.string_of vm; name; msg] - | e -> raise e - end + try + f () + with e -> + Backtrace.is_important e; + let reraise code params = + error "Re-raising as %s [ %s ]" code (String.concat "; " params); + let e' = Api_errors.Server_error(code, params) in + Backtrace.reraise e e' in + let internal fmt = Printf.kprintf + (fun x -> + reraise Api_errors.internal_error [ x ] + ) fmt in + begin match e with + | Internal_error msg -> internal "xenopsd internal error: %s" msg + | Already_exists(thing, id) -> internal "Object with type %s and id %s already exists in 
xenopsd" thing id + | Does_not_exist(thing, id) -> internal "Object with type %s and id %s does not exist in xenopsd" thing id + | Unimplemented(fn) -> reraise Api_errors.not_implemented [ fn ] + | Domain_not_built -> internal "domain has not been built" + | Invalid_vcpus n -> internal "the maximum number of vcpus configured for this VM is currently: %d" n + | Bad_power_state(found, expected) -> + let f x = xenapi_of_xenops_power_state (Some x) |> Record_util.power_state_to_string in + let found = f found and expected = f expected in + reraise Api_errors.vm_bad_power_state [ Ref.string_of vm; expected; found ] + | Failed_to_acknowledge_shutdown_request -> + reraise Api_errors.vm_failed_shutdown_ack [] + | Failed_to_shutdown(id, timeout) -> + reraise Api_errors.vm_shutdown_timeout [ vm_of_id ~__context id |> Ref.string_of; string_of_float timeout ] + | Device_is_connected -> + internal "Cannot remove device because it is connected to a VM" + | Device_not_connected -> + internal "Device is not connected" + | Device_detach_rejected(cls, id, msg) -> + reraise Api_errors.device_detach_rejected [ cls; id; msg ] + | Media_not_ejectable -> internal "the media in this drive cannot be ejected" + | Media_present -> internal "there is already media in this drive" + | Media_not_present -> internal "there is no media in this drive" + | No_bootable_device -> internal "there is no bootable device" + | Bootloader_error (uuid, msg) -> + let vm = Db.VM.get_by_uuid ~__context ~uuid in + reraise Api_errors.bootloader_failed [Ref.string_of vm; msg] + | Cannot_free_this_much_memory(needed, free) -> + reraise Api_errors.host_not_enough_free_memory [ Int64.to_string needed; Int64.to_string free ] + | Vms_failed_to_cooperate vms -> + let vms' = List.map (fun uuid -> Db.VM.get_by_uuid ~__context ~uuid |> Ref.string_of) vms in + reraise Api_errors.vms_failed_to_cooperate vms' + | Ballooning_error(code, descr) -> internal "ballooning error: %s %s" code descr + | IO_error -> reraise 
Api_errors.vdi_io_error ["I/O error saving VM suspend image"] + | Failed_to_contact_remote_service x -> internal "failed to contact: %s" x + | Hook_failed(script, reason, stdout, i) -> reraise Api_errors.xapi_hook_failed [ script; reason; stdout; i ] + | Not_enough_memory needed -> internal "there was not enough memory (needed %Ld bytes)" needed + | Cancelled id -> + let task = + try + TaskHelper.id_to_task_exn (TaskHelper.Xenops (queue_name, id)) + with _ -> + debug "xenopsd task id %s is not associated with a XenAPI task" id; + Ref.null in + reraise Api_errors.task_cancelled [ Ref.string_of task ] + | Storage_backend_error(code, params) -> reraise code params + | PCIBack_not_loaded -> internal "pciback has not loaded" + | Failed_to_start_emulator (uuid, name, msg) -> + let vm = Db.VM.get_by_uuid ~__context ~uuid in + reraise Api_errors.failed_to_start_emulator [Ref.string_of vm; name; msg] + | e -> raise e + end (* After this function is called, locally-generated events will be reflected in the xapi pool metadata. When this function returns we believe that the @@ -2174,642 +2174,642 @@ let transform_xenops_exn ~__context ~vm queue_name f = should not be any other suppression going on. 
*) let set_resident_on ~__context ~self = - let id = id_of_vm ~__context ~self in - debug "VM %s set_resident_on" id; - let localhost = Helpers.get_localhost ~__context in - Helpers.call_api_functions ~__context - (fun rpc session_id -> XenAPI.VM.atomic_set_resident_on rpc session_id self localhost); - debug "Signalling xenapi event thread to re-register, and xenopsd events to sync"; - refresh_vm ~__context ~self; - !trigger_xenapi_reregister (); - (* Any future XenAPI updates will trigger events, but we might have missed one so: *) - Xenopsd_metadata.update ~__context ~self + let id = id_of_vm ~__context ~self in + debug "VM %s set_resident_on" id; + let localhost = Helpers.get_localhost ~__context in + Helpers.call_api_functions ~__context + (fun rpc session_id -> XenAPI.VM.atomic_set_resident_on rpc session_id self localhost); + debug "Signalling xenapi event thread to re-register, and xenopsd events to sync"; + refresh_vm ~__context ~self; + !trigger_xenapi_reregister (); + (* Any future XenAPI updates will trigger events, but we might have missed one so: *) + Xenopsd_metadata.update ~__context ~self let update_debug_info __context t = - let task = Context.get_task_id __context in - let debug_info = List.map (fun (k, v) -> "debug_info:" ^ k, v) t.Task.debug_info in - List.iter - (fun (k, v) -> - try - Db.Task.add_to_other_config ~__context ~self:task ~key:k ~value:v - with e -> - debug "Failed to add %s = %s to task %s: %s" k v (Ref.string_of task) (Printexc.to_string e) - ) debug_info + let task = Context.get_task_id __context in + let debug_info = List.map (fun (k, v) -> "debug_info:" ^ k, v) t.Task.debug_info in + List.iter + (fun (k, v) -> + try + Db.Task.add_to_other_config ~__context ~self:task ~key:k ~value:v + with e -> + debug "Failed to add %s = %s to task %s: %s" k v (Ref.string_of task) (Printexc.to_string e) + ) debug_info let sync_with_task_result __context queue_name x = - let dbg = Context.string_of_task __context in - x |> register_task 
__context queue_name |> wait_for_task queue_name dbg |> unregister_task __context queue_name |> success_task queue_name (update_debug_info __context) dbg + let dbg = Context.string_of_task __context in + x |> register_task __context queue_name |> wait_for_task queue_name dbg |> unregister_task __context queue_name |> success_task queue_name (update_debug_info __context) dbg let sync_with_task __context queue_name x = sync_with_task_result __context queue_name x |> ignore let sync __context queue_name x = - let dbg = Context.string_of_task __context in - x |> wait_for_task queue_name dbg |> success_task queue_name (update_debug_info __context) dbg |> ignore + let dbg = Context.string_of_task __context in + x |> wait_for_task queue_name dbg |> success_task queue_name (update_debug_info __context) dbg |> ignore let pause ~__context ~self = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.pause %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.pause dbg id |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id (); - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Paused - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.pause %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.pause dbg id |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id (); + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Paused + ) let unpause ~__context ~self = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let 
id = id_of_vm ~__context ~self in - debug "xenops: VM.unpause %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.unpause dbg id |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id (); - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.unpause %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.unpause dbg id |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id (); + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running + ) let request_rdp ~__context ~self enabled = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.request_rdp %s %b" id enabled; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.request_rdp dbg id enabled |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id () - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.request_rdp %s %b" id enabled; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.request_rdp dbg id enabled |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id () + ) let run_script ~__context ~self script = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = 
id_of_vm ~__context ~self in - debug "xenops: VM.run_script %s %s" id script; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - let r = Client.VM.run_script dbg id script |> sync_with_task_result __context queue_name in - let r = match r with None -> "" | Some rpc -> Jsonrpc.to_string rpc in - Events_from_xenopsd.wait queue_name dbg id (); - r - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.run_script %s %s" id script; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + let r = Client.VM.run_script dbg id script |> sync_with_task_result __context queue_name in + let r = match r with None -> "" | Some rpc -> Jsonrpc.to_string rpc in + Events_from_xenopsd.wait queue_name dbg id (); + r + ) let set_xenstore_data ~__context ~self xsdata = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.set_xenstore_data %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.set_xsdata dbg id xsdata |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id (); - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.set_xenstore_data %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.set_xsdata dbg id xsdata |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id (); + ) let set_vcpus ~__context ~self n = - let queue_name = queue_of_vm ~__context ~self in - 
transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.set_vcpus %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - try - Client.VM.set_vcpus dbg id (Int64.to_int n) |> sync_with_task __context queue_name; - Db.VM.set_VCPUs_at_startup ~__context ~self ~value:n; - let metrics = Db.VM.get_metrics ~__context ~self in - if metrics <> Ref.null then - Db.VM_metrics.set_VCPUs_number ~__context ~self:metrics ~value:n; - Events_from_xenopsd.wait queue_name dbg id (); - with - | Invalid_vcpus n -> - raise (Api_errors.Server_error(Api_errors.invalid_value, [ - "VCPU values must satisfy: 0 < VCPUs ≤ VCPUs_max"; - string_of_int n - ])) - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.set_vcpus %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + try + Client.VM.set_vcpus dbg id (Int64.to_int n) |> sync_with_task __context queue_name; + Db.VM.set_VCPUs_at_startup ~__context ~self ~value:n; + let metrics = Db.VM.get_metrics ~__context ~self in + if metrics <> Ref.null then + Db.VM_metrics.set_VCPUs_number ~__context ~self:metrics ~value:n; + Events_from_xenopsd.wait queue_name dbg id (); + with + | Invalid_vcpus n -> + raise (Api_errors.Server_error(Api_errors.invalid_value, [ + "VCPU values must satisfy: 0 < VCPUs ≤ VCPUs_max"; + string_of_int n + ])) + ) let set_shadow_multiplier ~__context ~self target = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.set_shadow_multiplier %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - try - 
Client.VM.set_shadow_multiplier dbg id target |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id (); - with - | Not_enough_memory needed -> - let host = Db.VM.get_resident_on ~__context ~self in - let free_mem_b = Memory_check.host_compute_free_memory_with_maximum_compression ~__context ~host None in - raise (Api_errors.Server_error(Api_errors.host_not_enough_free_memory, [ Int64.to_string needed; Int64.to_string free_mem_b ])) - | Unimplemented _ -> - (* The existing behaviour is to ignore this failure *) - error "VM.set_shadow_multiplier: not supported for PV domains" - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.set_shadow_multiplier %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + try + Client.VM.set_shadow_multiplier dbg id target |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id (); + with + | Not_enough_memory needed -> + let host = Db.VM.get_resident_on ~__context ~self in + let free_mem_b = Memory_check.host_compute_free_memory_with_maximum_compression ~__context ~host None in + raise (Api_errors.Server_error(Api_errors.host_not_enough_free_memory, [ Int64.to_string needed; Int64.to_string free_mem_b ])) + | Unimplemented _ -> + (* The existing behaviour is to ignore this failure *) + error "VM.set_shadow_multiplier: not supported for PV domains" + ) let set_memory_dynamic_range ~__context ~self min max = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - debug "xenops: VM.set_memory_dynamic_range %s" id; - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VM.set_memory_dynamic_range dbg id min max |> 
sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id () - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + debug "xenops: VM.set_memory_dynamic_range %s" id; + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VM.set_memory_dynamic_range dbg id min max |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id () + ) let maybe_cleanup_vm ~__context ~self = - let dbg = Context.string_of_task __context in - let queue_name = queue_of_vm ~__context ~self in - let id = id_of_vm ~__context ~self in - if vm_exists_in_xenopsd queue_name dbg id then begin - warn "Stale VM detected in Xenopsd, flushing outstanding events"; - (* By calling with_events_suppressed we can guarentee that an refresh_vm - * will be called with events enabled and therefore we get Xenopsd into a - * consistent state with Xapi *) - Events_from_xenopsd.with_suppressed queue_name dbg id (fun _ -> ()); - Xenopsd_metadata.delete ~__context id; - end + let dbg = Context.string_of_task __context in + let queue_name = queue_of_vm ~__context ~self in + let id = id_of_vm ~__context ~self in + if vm_exists_in_xenopsd queue_name dbg id then begin + warn "Stale VM detected in Xenopsd, flushing outstanding events"; + (* By calling with_events_suppressed we can guarentee that an refresh_vm + * will be called with events enabled and therefore we get Xenopsd into a + * consistent state with Xapi *) + Events_from_xenopsd.with_suppressed queue_name dbg id (fun _ -> ()); + Xenopsd_metadata.delete ~__context id; + end let start ~__context ~self paused force = - let dbg = Context.string_of_task __context in - let queue_name = queue_of_vm ~__context ~self in - let vm_id = id_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name (fun () -> - maybe_cleanup_vm 
~__context ~self; - if vm_exists_in_xenopsd queue_name dbg vm_id then - raise (Bad_power_state (Running, Halted)); - (* For all devices which we want xenopsd to manage, set currently_attached = true - so the metadata is pushed. *) - let vbds = - (* xenopsd only manages empty VBDs for HVM guests *) - let hvm = Helpers.will_boot_hvm ~__context ~self in - let vbds = Db.VM.get_VBDs ~__context ~self in - if hvm then vbds else (List.filter (fun self -> not(Db.VBD.get_empty ~__context ~self)) vbds) in - List.iter (fun self -> Db.VBD.set_currently_attached ~__context ~self ~value:true) vbds; - List.iter (fun self -> Db.VIF.set_currently_attached ~__context ~self ~value:true) (Db.VM.get_VIFs ~__context ~self); - - let module Client = (val make_client queue_name : XENOPS) in - debug "Sending VM %s configuration to xenopsd" (Ref.string_of self); - try - let id = Xenopsd_metadata.push ~__context ~upgrade:false ~self in - Xapi_network.with_networks_attached_for_vm ~__context ~vm:self (fun () -> - info "xenops: VM.start %s" id; - if not paused then begin - let vm_start = Client.VM.start dbg id force in - info "xenops: Queueing VM.unpause %s" id; - let vm_unpause = Client.VM.unpause dbg id in - begin - try - sync_with_task __context queue_name vm_start; - with e -> - (* If the VM.start throws an error, clean up the unpause - which will fail in an irrelevant manor, then reraise - the original error *) - begin - try sync __context queue_name vm_unpause with _ -> () - end; - raise e - end; - - (* At this point, the start paused has succeeded. 
Now - we _do_ care about any error from unpause *) - - sync_with_task __context queue_name vm_unpause - end else - Client.VM.start dbg id force |> sync_with_task __context queue_name); - - set_resident_on ~__context ~self; - (* set_resident_on syncs both xenopsd and with the xapi event mechanism *) - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:(if paused then `Paused else `Running) - with e -> - error "Caught exception starting VM: %s" (string_of_exn e); - set_resident_on ~__context ~self; - raise e - ) + let dbg = Context.string_of_task __context in + let queue_name = queue_of_vm ~__context ~self in + let vm_id = id_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name (fun () -> + maybe_cleanup_vm ~__context ~self; + if vm_exists_in_xenopsd queue_name dbg vm_id then + raise (Bad_power_state (Running, Halted)); + (* For all devices which we want xenopsd to manage, set currently_attached = true + so the metadata is pushed. *) + let vbds = + (* xenopsd only manages empty VBDs for HVM guests *) + let hvm = Helpers.will_boot_hvm ~__context ~self in + let vbds = Db.VM.get_VBDs ~__context ~self in + if hvm then vbds else (List.filter (fun self -> not(Db.VBD.get_empty ~__context ~self)) vbds) in + List.iter (fun self -> Db.VBD.set_currently_attached ~__context ~self ~value:true) vbds; + List.iter (fun self -> Db.VIF.set_currently_attached ~__context ~self ~value:true) (Db.VM.get_VIFs ~__context ~self); + + let module Client = (val make_client queue_name : XENOPS) in + debug "Sending VM %s configuration to xenopsd" (Ref.string_of self); + try + let id = Xenopsd_metadata.push ~__context ~upgrade:false ~self in + Xapi_network.with_networks_attached_for_vm ~__context ~vm:self (fun () -> + info "xenops: VM.start %s" id; + if not paused then begin + let vm_start = Client.VM.start dbg id force in + info "xenops: Queueing VM.unpause %s" id; + let vm_unpause = Client.VM.unpause dbg id in + begin + try + sync_with_task __context 
queue_name vm_start; + with e -> + (* If the VM.start throws an error, clean up the unpause + which will fail in an irrelevant manor, then reraise + the original error *) + begin + try sync __context queue_name vm_unpause with _ -> () + end; + raise e + end; + + (* At this point, the start paused has succeeded. Now + we _do_ care about any error from unpause *) + + sync_with_task __context queue_name vm_unpause + end else + Client.VM.start dbg id force |> sync_with_task __context queue_name); + + set_resident_on ~__context ~self; + (* set_resident_on syncs both xenopsd and with the xapi event mechanism *) + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:(if paused then `Paused else `Running) + with e -> + error "Caught exception starting VM: %s" (string_of_exn e); + set_resident_on ~__context ~self; + raise e + ) let start ~__context ~self paused force = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - try - start ~__context ~self paused force - with Bad_power_state(a, b) as e -> - Backtrace.is_important e; - let power_state = function - | Running -> "Running" - | Halted -> "Halted" - | Suspended -> "Suspended" - | Paused -> "Paused" in - let exn = Api_errors.Server_error(Api_errors.vm_bad_power_state, [ Ref.string_of self; power_state a; power_state b ]) in - Backtrace.reraise e exn - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + try + start ~__context ~self paused force + with Bad_power_state(a, b) as e -> + Backtrace.is_important e; + let power_state = function + | Running -> "Running" + | Halted -> "Halted" + | Suspended -> "Suspended" + | Paused -> "Paused" in + let exn = Api_errors.Server_error(Api_errors.vm_bad_power_state, [ Ref.string_of self; power_state a; power_state b ]) in + Backtrace.reraise e exn + ) let reboot ~__context ~self timeout = - let queue_name = queue_of_vm ~__context ~self 
in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - assert_resident_on ~__context ~self; - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - maybe_cleanup_vm ~__context ~self; - (* If Xenopsd no longer knows about the VM after cleanup it was shutdown. - This also means our caches have been removed. *) - if not (vm_exists_in_xenopsd queue_name dbg id) then - raise (Bad_power_state (Halted, Running)); - (* Ensure we have the latest version of the VM metadata before the reboot *) - Events_from_xapi.wait ~__context ~self; - info "xenops: VM.reboot %s" id; - let module Client = (val make_client queue_name : XENOPS ) in - let () = Pervasiveext.finally - (fun () -> - Client.VM.reboot dbg id timeout |> sync_with_task __context queue_name) - (fun () -> - Events_from_xenopsd.wait queue_name dbg id ()) - in - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + assert_resident_on ~__context ~self; + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + maybe_cleanup_vm ~__context ~self; + (* If Xenopsd no longer knows about the VM after cleanup it was shutdown. + This also means our caches have been removed. 
*) + if not (vm_exists_in_xenopsd queue_name dbg id) then + raise (Bad_power_state (Halted, Running)); + (* Ensure we have the latest version of the VM metadata before the reboot *) + Events_from_xapi.wait ~__context ~self; + info "xenops: VM.reboot %s" id; + let module Client = (val make_client queue_name : XENOPS ) in + let () = Pervasiveext.finally + (fun () -> + Client.VM.reboot dbg id timeout |> sync_with_task __context queue_name) + (fun () -> + Events_from_xenopsd.wait queue_name dbg id ()) + in + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Running + ) let shutdown ~__context ~self timeout = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - assert_resident_on ~__context ~self; - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - info "xenops: VM.shutdown %s" id; - let module Client = (val make_client queue_name : XENOPS ) in - let () = Pervasiveext.finally - (fun () -> - Client.VM.shutdown dbg id timeout |> sync_with_task __context queue_name) - (fun () -> - Events_from_xenopsd.wait queue_name dbg id ()) - in - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Halted; - (* force_state_reset called from the xenopsd event loop above *) - assert (Db.VM.get_resident_on ~__context ~self = Ref.null); - List.iter - (fun vbd -> - assert (not(Db.VBD.get_currently_attached ~__context ~self:vbd)) - ) (Db.VM.get_VBDs ~__context ~self) - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + assert_resident_on ~__context ~self; + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + info "xenops: VM.shutdown %s" id; + let module Client = (val make_client queue_name : XENOPS ) in + let () = Pervasiveext.finally + (fun () -> + Client.VM.shutdown dbg id timeout |> sync_with_task __context queue_name) + (fun () -> + 
Events_from_xenopsd.wait queue_name dbg id ()) + in + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Halted; + (* force_state_reset called from the xenopsd event loop above *) + assert (Db.VM.get_resident_on ~__context ~self = Ref.null); + List.iter + (fun vbd -> + assert (not(Db.VBD.get_currently_attached ~__context ~self:vbd)) + ) (Db.VM.get_VBDs ~__context ~self) + ) let suspend ~__context ~self = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - assert_resident_on ~__context ~self; - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - let vm_t, state = Client.VM.stat dbg id in - (* XXX: this needs to be at boot time *) - let space_needed = Int64.(add (of_float (to_float vm_t.Vm.memory_static_max *. 1.2 *. 1.05)) 104857600L) in - let suspend_SR = Helpers.choose_suspend_sr ~__context ~vm:self in - let sm_config = [ - Xapi_globs._sm_vm_hint, id; - (* Fully inflate the VDI if the SR supports thin provisioning *) - Xapi_globs._sm_initial_allocation, (Int64.to_string space_needed); - ] in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - let vdi = - XenAPI.VDI.create ~rpc ~session_id - ~name_label:"Suspend image" - ~name_description:"Suspend image" - ~sR:suspend_SR ~virtual_size:space_needed - ~sharable:false ~read_only:false ~_type:`suspend - ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in - let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in - Db.VM.set_suspend_VDI ~__context ~self ~value:vdi; - try - let dbg = Context.string_of_task __context in - info "xenops: VM.suspend %s to %s" id (disk |> rpc_of_disk |> Jsonrpc.to_string); - Client.VM.suspend dbg id disk |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id (); - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Suspended; - assert 
(Db.VM.get_resident_on ~__context ~self = Ref.null); - with e -> - error "Caught exception suspending VM: %s" (string_of_exn e); - (* If the domain has suspended, we have to shut it down *) - Events_from_xenopsd.wait queue_name dbg id (); - if Db.VM.get_power_state ~__context ~self = `Suspended then begin - info "VM has already suspended; we must perform a hard_shutdown"; - Xapi_vm_lifecycle.force_state_reset ~__context ~self ~value:`Halted; - !trigger_xenapi_reregister (); - end else info "VM is still running after failed suspend"; - XenAPI.VDI.destroy ~rpc ~session_id ~self:vdi; - Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null; - raise e - ) - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + assert_resident_on ~__context ~self; + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + let vm_t, state = Client.VM.stat dbg id in + (* XXX: this needs to be at boot time *) + let space_needed = Int64.(add (of_float (to_float vm_t.Vm.memory_static_max *. 1.2 *. 
1.05)) 104857600L) in + let suspend_SR = Helpers.choose_suspend_sr ~__context ~vm:self in + let sm_config = [ + Xapi_globs._sm_vm_hint, id; + (* Fully inflate the VDI if the SR supports thin provisioning *) + Xapi_globs._sm_initial_allocation, (Int64.to_string space_needed); + ] in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + let vdi = + XenAPI.VDI.create ~rpc ~session_id + ~name_label:"Suspend image" + ~name_description:"Suspend image" + ~sR:suspend_SR ~virtual_size:space_needed + ~sharable:false ~read_only:false ~_type:`suspend + ~other_config:[] ~xenstore_data:[] ~sm_config ~tags:[] in + let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in + Db.VM.set_suspend_VDI ~__context ~self ~value:vdi; + try + let dbg = Context.string_of_task __context in + info "xenops: VM.suspend %s to %s" id (disk |> rpc_of_disk |> Jsonrpc.to_string); + Client.VM.suspend dbg id disk |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id (); + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:`Suspended; + assert (Db.VM.get_resident_on ~__context ~self = Ref.null); + with e -> + error "Caught exception suspending VM: %s" (string_of_exn e); + (* If the domain has suspended, we have to shut it down *) + Events_from_xenopsd.wait queue_name dbg id (); + if Db.VM.get_power_state ~__context ~self = `Suspended then begin + info "VM has already suspended; we must perform a hard_shutdown"; + Xapi_vm_lifecycle.force_state_reset ~__context ~self ~value:`Halted; + !trigger_xenapi_reregister (); + end else info "VM is still running after failed suspend"; + XenAPI.VDI.destroy ~rpc ~session_id ~self:vdi; + Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null; + raise e + ) + ) let resume ~__context ~self ~start_paused ~force = - let dbg = Context.string_of_task __context in - let queue_name = queue_of_vm ~__context ~self in - let vm_id = id_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun 
() -> - maybe_cleanup_vm ~__context ~self; - if vm_exists_in_xenopsd queue_name dbg vm_id then - raise (Bad_power_state (Running, Suspended)); - let vdi = Db.VM.get_suspend_VDI ~__context ~self in - let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in - let module Client = (val make_client queue_name : XENOPS) in - (* NB we don't set resident_on because we don't want to - modify the VM.power_state, {VBD,VIF}.currently_attached in the - failures cases. This means we must remove the metadata from - xenopsd on failure. *) - begin try - Events_from_xenopsd.with_suppressed queue_name dbg vm_id - (fun () -> - debug "Sending VM %s configuration to xenopsd" (Ref.string_of self); - let id = Xenopsd_metadata.push ~__context ~upgrade:false ~self in - Xapi_network.with_networks_attached_for_vm ~__context ~vm:self - (fun () -> - info "xenops: VM.resume %s from %s" id (disk |> rpc_of_disk |> Jsonrpc.to_string); - Client.VM.resume dbg id disk |> sync_with_task __context queue_name; - if not start_paused then begin - info "xenops: VM.unpause %s" id; - Client.VM.unpause dbg id |> sync_with_task __context queue_name; - end; - ) - ) - with e -> - error "Caught exception resuming VM: %s" (string_of_exn e); - let id = id_of_vm ~__context ~self in - Xenopsd_metadata.delete ~__context id; - raise e - end; - set_resident_on ~__context ~self; - Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null; - Helpers.call_api_functions ~__context - (fun rpc session_id -> - XenAPI.VDI.destroy rpc session_id vdi - ); - Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:(if start_paused then `Paused else `Running) - ) + let dbg = Context.string_of_task __context in + let queue_name = queue_of_vm ~__context ~self in + let vm_id = id_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + maybe_cleanup_vm ~__context ~self; + if vm_exists_in_xenopsd queue_name dbg vm_id then + raise (Bad_power_state (Running, Suspended)); + let vdi = 
Db.VM.get_suspend_VDI ~__context ~self in + let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in + let module Client = (val make_client queue_name : XENOPS) in + (* NB we don't set resident_on because we don't want to + modify the VM.power_state, {VBD,VIF}.currently_attached in the + failures cases. This means we must remove the metadata from + xenopsd on failure. *) + begin try + Events_from_xenopsd.with_suppressed queue_name dbg vm_id + (fun () -> + debug "Sending VM %s configuration to xenopsd" (Ref.string_of self); + let id = Xenopsd_metadata.push ~__context ~upgrade:false ~self in + Xapi_network.with_networks_attached_for_vm ~__context ~vm:self + (fun () -> + info "xenops: VM.resume %s from %s" id (disk |> rpc_of_disk |> Jsonrpc.to_string); + Client.VM.resume dbg id disk |> sync_with_task __context queue_name; + if not start_paused then begin + info "xenops: VM.unpause %s" id; + Client.VM.unpause dbg id |> sync_with_task __context queue_name; + end; + ) + ) + with e -> + error "Caught exception resuming VM: %s" (string_of_exn e); + let id = id_of_vm ~__context ~self in + Xenopsd_metadata.delete ~__context id; + raise e + end; + set_resident_on ~__context ~self; + Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null; + Helpers.call_api_functions ~__context + (fun rpc session_id -> + XenAPI.VDI.destroy rpc session_id vdi + ); + Xapi_vm_lifecycle.assert_power_state_is ~__context ~self ~expected:(if start_paused then `Paused else `Running) + ) let s3suspend ~__context ~self = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - debug "xenops: VM.s3suspend %s" id; - Client.VM.s3suspend dbg id |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id () - ) + let queue_name = queue_of_vm ~__context ~self in + 
transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + debug "xenops: VM.s3suspend %s" id; + Client.VM.s3suspend dbg id |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id () + ) let s3resume ~__context ~self = - let queue_name = queue_of_vm ~__context ~self in - transform_xenops_exn ~__context ~vm:self queue_name - (fun () -> - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - debug "xenops: VM.s3resume %s" id; - Client.VM.s3resume dbg id |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg id () - ) + let queue_name = queue_of_vm ~__context ~self in + transform_xenops_exn ~__context ~vm:self queue_name + (fun () -> + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + debug "xenops: VM.s3resume %s" id; + Client.VM.s3resume dbg id |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg id () + ) let is_vm_running ~__context ~self = - let id = id_of_vm ~__context ~self in - let dbg = Context.string_of_task __context in - let queue_name = queue_of_vm ~__context ~self in - let module Client = (val make_client queue_name : XENOPS) in - debug "xenops: VM.stat %s" id; - (* If the metadata is still present, VM is "Running" *) - try Client.VM.stat dbg id |> ignore; true with _ -> false + let id = id_of_vm ~__context ~self in + let dbg = Context.string_of_task __context in + let queue_name = queue_of_vm ~__context ~self in + let module Client = (val make_client queue_name : XENOPS) in + debug "xenops: VM.stat %s" id; + (* If the metadata is still present, VM is "Running" *) + try Client.VM.stat dbg id |> ignore; true with _ -> 
false let md_of_vbd ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - MD.of_vbd ~__context ~vm:(Db.VM.get_record ~__context ~self:vm) ~vbd:(Db.VBD.get_record ~__context ~self) + let vm = Db.VBD.get_VM ~__context ~self in + MD.of_vbd ~__context ~vm:(Db.VM.get_record ~__context ~self:vm) ~vbd:(Db.VBD.get_record ~__context ~self) let vbd_plug ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let vm_id = id_of_vm ~__context ~self:vm in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - Events_from_xapi.wait ~__context ~self:vm; - let vbd = md_of_vbd ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Events_from_xenopsd.with_suppressed queue_name dbg vm_id (fun () -> - info "xenops: VBD.add %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); - let id = Client.VBD.add dbg vbd in - info "xenops: VBD.plug %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); - Client.VBD.plug dbg id |> sync_with_task __context queue_name; - ); - assert (Db.VBD.get_currently_attached ~__context ~self) - ) + let vm = Db.VBD.get_VM ~__context ~self in + let vm_id = id_of_vm ~__context ~self:vm in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + Events_from_xapi.wait ~__context ~self:vm; + let vbd = md_of_vbd ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Events_from_xenopsd.with_suppressed queue_name dbg vm_id (fun () -> + info "xenops: VBD.add %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); + let id = Client.VBD.add dbg vbd in + info "xenops: VBD.plug %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); + Client.VBD.plug dbg id |> sync_with_task __context queue_name; + ); + assert (Db.VBD.get_currently_attached 
~__context ~self) + ) let vbd_unplug ~__context ~self force = - let vm = Db.VBD.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vbd = md_of_vbd ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - begin - try - info "xenops: VBD.unplug %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); - Client.VBD.unplug dbg vbd.Vbd.id force |> sync_with_task __context queue_name; - with Device_detach_rejected(_, _, _) -> - raise (Api_errors.Server_error(Api_errors.device_detach_rejected, [ "VBD"; Ref.string_of self; "" ])) - end; - Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); - assert (not(Db.VBD.get_currently_attached ~__context ~self)) - ) + let vm = Db.VBD.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vbd = md_of_vbd ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + begin + try + info "xenops: VBD.unplug %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); + Client.VBD.unplug dbg vbd.Vbd.id force |> sync_with_task __context queue_name; + with Device_detach_rejected(_, _, _) -> + raise (Api_errors.Server_error(Api_errors.device_detach_rejected, [ "VBD"; Ref.string_of self; "" ])) + end; + Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); + assert (not(Db.VBD.get_currently_attached ~__context ~self)) + ) let vbd_eject_hvm ~__context ~self = - let vm = Db.VBD.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vbd = md_of_vbd ~__context ~self in - info "xenops: VBD.eject %s.%s" (fst vbd.Vbd.id) 
(snd vbd.Vbd.id); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VBD.eject dbg vbd.Vbd.id |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); - Events_from_xapi.wait ~__context ~self:vm; - assert (Db.VBD.get_empty ~__context ~self); - assert (Db.VBD.get_VDI ~__context ~self = Ref.null) - ) + let vm = Db.VBD.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vbd = md_of_vbd ~__context ~self in + info "xenops: VBD.eject %s.%s" (fst vbd.Vbd.id) (snd vbd.Vbd.id); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VBD.eject dbg vbd.Vbd.id |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); + Events_from_xapi.wait ~__context ~self:vm; + assert (Db.VBD.get_empty ~__context ~self); + assert (Db.VBD.get_VDI ~__context ~self = Ref.null) + ) let vbd_insert_hvm ~__context ~self ~vdi = - let vm = Db.VBD.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vbd = md_of_vbd ~__context ~self in - let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in - info "xenops: VBD.insert %s.%s %s" (fst vbd.Vbd.id) (snd vbd.Vbd.id) (disk |> rpc_of_disk |> Jsonrpc.to_string); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VBD.insert dbg vbd.Vbd.id disk |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); - Events_from_xapi.wait ~__context ~self:vm; - assert (not(Db.VBD.get_empty ~__context ~self)); - assert (Db.VBD.get_VDI ~__context ~self = vdi) - ) + let 
vm = Db.VBD.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vbd = md_of_vbd ~__context ~self in + let disk = disk_of_vdi ~__context ~self:vdi |> Opt.unbox in + info "xenops: VBD.insert %s.%s %s" (fst vbd.Vbd.id) (snd vbd.Vbd.id) (disk |> rpc_of_disk |> Jsonrpc.to_string); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VBD.insert dbg vbd.Vbd.id disk |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vbd.Vbd.id) (); + Events_from_xapi.wait ~__context ~self:vm; + assert (not(Db.VBD.get_empty ~__context ~self)); + assert (Db.VBD.get_VDI ~__context ~self = vdi) + ) let ejectable ~__context ~self = - let dbg = Context.string_of_task __context in - let vm = Db.VBD.get_VM ~__context ~self in - let id = Db.VM.get_uuid ~__context ~self:vm in - let queue_name = queue_of_vm ~__context ~self:vm in - let module Client = (val make_client queue_name : XENOPS) in - let _, state = Client.VM.stat dbg id in - state.Vm.hvm + let dbg = Context.string_of_task __context in + let vm = Db.VBD.get_VM ~__context ~self in + let id = Db.VM.get_uuid ~__context ~self:vm in + let queue_name = queue_of_vm ~__context ~self:vm in + let module Client = (val make_client queue_name : XENOPS) in + let _, state = Client.VM.stat dbg id in + state.Vm.hvm let vbd_eject ~__context ~self = - if ejectable ~__context ~self - then vbd_eject_hvm ~__context ~self - else begin - vbd_unplug ~__context ~self false; - Db.VBD.set_empty ~__context ~self ~value:true; - Db.VBD.set_VDI ~__context ~self ~value:Ref.null; - end + if ejectable ~__context ~self + then vbd_eject_hvm ~__context ~self + else begin + vbd_unplug ~__context ~self false; + Db.VBD.set_empty ~__context ~self ~value:true; + Db.VBD.set_VDI ~__context ~self ~value:Ref.null; + end let vbd_insert ~__context 
~self ~vdi = - if ejectable ~__context ~self - then vbd_insert_hvm ~__context ~self ~vdi - else begin - Db.VBD.set_VDI ~__context ~self ~value:vdi; - Db.VBD.set_empty ~__context ~self ~value:false; - vbd_plug ~__context ~self - end + if ejectable ~__context ~self + then vbd_insert_hvm ~__context ~self ~vdi + else begin + Db.VBD.set_VDI ~__context ~self ~value:vdi; + Db.VBD.set_empty ~__context ~self ~value:false; + vbd_plug ~__context ~self + end let md_of_vif ~__context ~self = - let vm = Db.VIF.get_VM ~__context ~self in - MD.of_vif ~__context ~vm:(Db.VM.get_record ~__context ~self:vm) ~vif:(Db.VIF.get_record ~__context ~self) + let vm = Db.VIF.get_VM ~__context ~self in + MD.of_vif ~__context ~vm:(Db.VM.get_record ~__context ~self:vm) ~vif:(Db.VIF.get_record ~__context ~self) let vif_plug ~__context ~self = - let vm = Db.VIF.get_VM ~__context ~self in - let vm_id = id_of_vm ~__context ~self:vm in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - Events_from_xapi.wait ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Xapi_network.with_networks_attached_for_vm ~__context ~vm (fun () -> - Events_from_xenopsd.with_suppressed queue_name dbg vm_id (fun () -> - info "xenops: VIF.add %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let id = Client.VIF.add dbg vif in - info "xenops: VIF.plug %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - Client.VIF.plug dbg id |> sync_with_task __context queue_name; - ); - ); - assert (Db.VIF.get_currently_attached ~__context ~self) - ) + let vm = Db.VIF.get_VM ~__context ~self in + let vm_id = id_of_vm ~__context ~self:vm in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + Events_from_xapi.wait 
~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Xapi_network.with_networks_attached_for_vm ~__context ~vm (fun () -> + Events_from_xenopsd.with_suppressed queue_name dbg vm_id (fun () -> + info "xenops: VIF.add %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let id = Client.VIF.add dbg vif in + info "xenops: VIF.plug %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + Client.VIF.plug dbg id |> sync_with_task __context queue_name; + ); + ); + assert (Db.VIF.get_currently_attached ~__context ~self) + ) let vm_set_vm_data ~__context ~self = () let vif_set_locking_mode ~__context ~self = - let vm = Db.VIF.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - info "xenops: VIF.set_locking_mode %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VIF.set_locking_mode dbg vif.Vif.id vif.Vif.locking_mode |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); - ) + let vm = Db.VIF.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + info "xenops: VIF.set_locking_mode %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VIF.set_locking_mode dbg vif.Vif.id vif.Vif.locking_mode |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); + ) let vif_unplug ~__context ~self force = - let vm = Db.VIF.get_VM ~__context 
~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - info "xenops: VIF.unplug %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VIF.unplug dbg vif.Vif.id force |> sync_with_task __context queue_name; - (* We need to make sure VIF.stat still works so: wait before calling VIF.remove *) - Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); - assert (not(Db.VIF.get_currently_attached ~__context ~self)) - ) + let vm = Db.VIF.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + info "xenops: VIF.unplug %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VIF.unplug dbg vif.Vif.id force |> sync_with_task __context queue_name; + (* We need to make sure VIF.stat still works so: wait before calling VIF.remove *) + Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); + assert (not(Db.VIF.get_currently_attached ~__context ~self)) + ) let vif_move ~__context ~self network = - let vm = Db.VIF.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - info "xenops: VIF.move %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let network = Db.Network.get_record ~__context ~self:network in - let backend = backend_of_network network in - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - (* Nb., at this 
point, the database shows the vif on the new network *) - Xapi_network.attach_for_vif ~__context ~vif:self (); - Client.VIF.move dbg vif.Vif.id backend |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); - assert (Db.VIF.get_currently_attached ~__context ~self) - ) + let vm = Db.VIF.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + info "xenops: VIF.move %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let network = Db.Network.get_record ~__context ~self:network in + let backend = backend_of_network network in + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + (* Nb., at this point, the database shows the vif on the new network *) + Xapi_network.attach_for_vif ~__context ~vif:self (); + Client.VIF.move dbg vif.Vif.id backend |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); + assert (Db.VIF.get_currently_attached ~__context ~self) + ) let vif_set_ipv4_configuration ~__context ~self = - let vm = Db.VIF.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - info "xenops: VIF.set_ipv4_configuration %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VIF.set_ipv4_configuration dbg vif.Vif.id vif.Vif.ipv4_configuration |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); - ) + let vm = Db.VIF.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm 
queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + info "xenops: VIF.set_ipv4_configuration %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VIF.set_ipv4_configuration dbg vif.Vif.id vif.Vif.ipv4_configuration |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); + ) let vif_set_ipv6_configuration ~__context ~self = - let vm = Db.VIF.get_VM ~__context ~self in - let queue_name = queue_of_vm ~__context ~self:vm in - transform_xenops_exn ~__context ~vm queue_name - (fun () -> - assert_resident_on ~__context ~self:vm; - let vif = md_of_vif ~__context ~self in - info "xenops: VIF.set_ipv6_configuration %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); - let dbg = Context.string_of_task __context in - let module Client = (val make_client queue_name : XENOPS) in - Client.VIF.set_ipv6_configuration dbg vif.Vif.id vif.Vif.ipv6_configuration |> sync_with_task __context queue_name; - Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); - ) + let vm = Db.VIF.get_VM ~__context ~self in + let queue_name = queue_of_vm ~__context ~self:vm in + transform_xenops_exn ~__context ~vm queue_name + (fun () -> + assert_resident_on ~__context ~self:vm; + let vif = md_of_vif ~__context ~self in + info "xenops: VIF.set_ipv6_configuration %s.%s" (fst vif.Vif.id) (snd vif.Vif.id); + let dbg = Context.string_of_task __context in + let module Client = (val make_client queue_name : XENOPS) in + Client.VIF.set_ipv6_configuration dbg vif.Vif.id vif.Vif.ipv6_configuration |> sync_with_task __context queue_name; + Events_from_xenopsd.wait queue_name dbg (fst vif.Vif.id) (); + ) let task_cancel ~__context ~self = - try - let queue_name, id = TaskHelper.task_to_id_exn self |> unwrap in - let module Client = (val make_client queue_name : XENOPS) in - let dbg = Context.string_of_task 
__context in - info "xenops: TASK.cancel %s" id; - Client.TASK.cancel dbg id |> ignore; (* it might actually have completed, we don't care *) - true - with - | Not_found -> false - | Not_a_xenops_task -> false + try + let queue_name, id = TaskHelper.task_to_id_exn self |> unwrap in + let module Client = (val make_client queue_name : XENOPS) in + let dbg = Context.string_of_task __context in + info "xenops: TASK.cancel %s" id; + Client.TASK.cancel dbg id |> ignore; (* it might actually have completed, we don't care *) + true + with + | Not_found -> false + | Not_a_xenops_task -> false diff --git a/ocaml/xapi/xapi_xenops_queue.ml b/ocaml/xapi/xapi_xenops_queue.ml index 2cc8b0cba7f..e1088064767 100644 --- a/ocaml/xapi/xapi_xenops_queue.ml +++ b/ocaml/xapi/xapi_xenops_queue.ml @@ -20,27 +20,27 @@ open Xenops_interface module type XENOPS = module type of Xenops_client.Client let make_client queue_name = - let module Client = Xenops_interface.Client(struct - let rpc x = - if !Xcp_client.use_switch - then Xcp_client.json_switch_rpc queue_name x - else Xcp_client.http_rpc Xmlrpc.string_of_call Xmlrpc.response_of_string ~srcstr:"xapi" ~dststr:"xenops" Xenops_interface.default_uri x - end) in - (module Client: XENOPS) + let module Client = Xenops_interface.Client(struct + let rpc x = + if !Xcp_client.use_switch + then Xcp_client.json_switch_rpc queue_name x + else Xcp_client.http_rpc Xmlrpc.string_of_call Xmlrpc.response_of_string ~srcstr:"xapi" ~dststr:"xenops" Xenops_interface.default_uri x + end) in + (module Client: XENOPS) let all_known_xenopsds () = !Xapi_globs.xenopsd_queues let default_xenopsd () = !Xapi_globs.default_xenopsd let queue_of_other_config oc = - if List.mem_assoc "xenops" oc then begin - let queue_name = List.assoc "xenops" oc in - if List.mem queue_name (all_known_xenopsds ()) - then queue_name - else begin - error "Unknown xenops queue: %s, using default %s" queue_name (default_xenopsd ()); - default_xenopsd () - end - end else default_xenopsd () + if 
List.mem_assoc "xenops" oc then begin + let queue_name = List.assoc "xenops" oc in + if List.mem queue_name (all_known_xenopsds ()) + then queue_name + else begin + error "Unknown xenops queue: %s, using default %s" queue_name (default_xenopsd ()); + default_xenopsd () + end + end else default_xenopsd () let queue_of_vmr vm = queue_of_other_config vm.API.vM_other_config diff --git a/ocaml/xapi/xenopsMemory.ml b/ocaml/xapi/xenopsMemory.ml index d9ccbfeadc6..5e56807e461 100644 --- a/ocaml/xapi/xenopsMemory.ml +++ b/ocaml/xapi/xenopsMemory.ml @@ -36,13 +36,13 @@ let pages_per_mib = bytes_per_mib /// bytes_per_page (** Returns true if (and only if) the specified argument is a power of 2. *) let is_power_of_2 n = - (n > 0) && (n land (0 - n) = n) + (n > 0) && (n land (0 - n) = n) let round_down_to_multiple_of x y = - (x /// y) *** y + (x /// y) *** y let round_up_to_multiple_of x y = - ((x +++ y --- 1L) /// y) *** y + ((x +++ y --- 1L) /// y) *** y (* === Memory rounding functions ============================================ *) @@ -59,10 +59,10 @@ let round_pages_up_to_nearest_mib v = round_up v pages_per_mib (* === Division functions =================================================== *) let divide_rounding_down numerator denominator = - numerator /// denominator + numerator /// denominator let divide_rounding_up numerator denominator = - (numerator +++ denominator --- 1L) /// denominator + (numerator +++ denominator --- 1L) /// denominator (* === Memory unit conversion functions ===================================== *) @@ -116,49 +116,49 @@ let mib_of_pages_used value = divide_rounding_up value pages_per_mib (* === Domain memory breakdown: HVM guests ================================== *) module type MEMORY_MODEL_DATA = sig - val extra_internal_mib : int64 - val extra_external_mib : int64 + val extra_internal_mib : int64 + val extra_external_mib : int64 end module HVM_memory_model_data : MEMORY_MODEL_DATA = struct - let extra_internal_mib = 1L - let extra_external_mib 
= 1L + let extra_internal_mib = 1L + let extra_external_mib = 1L end module Linux_memory_model_data : MEMORY_MODEL_DATA = struct - let extra_internal_mib = 0L - let extra_external_mib = 1L + let extra_internal_mib = 0L + let extra_external_mib = 1L end module Memory_model (D : MEMORY_MODEL_DATA) = struct - let build_max_mib static_max_mib video_mib = static_max_mib --- (Int64.of_int video_mib) + let build_max_mib static_max_mib video_mib = static_max_mib --- (Int64.of_int video_mib) - let build_start_mib target_mib video_mib = target_mib --- (Int64.of_int video_mib) + let build_start_mib target_mib video_mib = target_mib --- (Int64.of_int video_mib) - let xen_max_offset_mib = D.extra_internal_mib + let xen_max_offset_mib = D.extra_internal_mib - let xen_max_mib target_mib = target_mib +++ xen_max_offset_mib + let xen_max_mib target_mib = target_mib +++ xen_max_offset_mib - let shadow_mib static_max_mib vcpu_count multiplier = - let vcpu_pages = 256L *** (Int64.of_int vcpu_count) in - let p2m_map_pages = static_max_mib in - let shadow_resident_pages = static_max_mib in - let total_mib = mib_of_pages_used - (vcpu_pages +++ p2m_map_pages +++ shadow_resident_pages) in - let total_mib_multiplied = - Int64.of_float ((Int64.to_float total_mib) *. multiplier) in - max 1L total_mib_multiplied + let shadow_mib static_max_mib vcpu_count multiplier = + let vcpu_pages = 256L *** (Int64.of_int vcpu_count) in + let p2m_map_pages = static_max_mib in + let shadow_resident_pages = static_max_mib in + let total_mib = mib_of_pages_used + (vcpu_pages +++ p2m_map_pages +++ shadow_resident_pages) in + let total_mib_multiplied = + Int64.of_float ((Int64.to_float total_mib) *. 
multiplier) in + max 1L total_mib_multiplied - let overhead_mib static_max_mib vcpu_count multiplier = - D.extra_internal_mib +++ - D.extra_external_mib +++ - (shadow_mib static_max_mib vcpu_count multiplier) + let overhead_mib static_max_mib vcpu_count multiplier = + D.extra_internal_mib +++ + D.extra_external_mib +++ + (shadow_mib static_max_mib vcpu_count multiplier) - let footprint_mib target_mib static_max_mib vcpu_count multiplier = - target_mib +++ (overhead_mib static_max_mib vcpu_count multiplier) + let footprint_mib target_mib static_max_mib vcpu_count multiplier = + target_mib +++ (overhead_mib static_max_mib vcpu_count multiplier) - let shadow_multiplier_default = 1.0 + let shadow_multiplier_default = 1.0 end diff --git a/ocaml/xapi/xenstore_copy.ml b/ocaml/xapi/xenstore_copy.ml index eaf78e0ca82..451966a97ab 100644 --- a/ocaml/xapi/xenstore_copy.ml +++ b/ocaml/xapi/xenstore_copy.ml @@ -16,10 +16,10 @@ open Xenstore_dump open Xenstore -let _ = +let _ = let src = ref "" and dest = ref "" in - Arg.parse + Arg.parse [ "-src", Arg.Set_string src, "source path"; "-dest", Arg.Set_string dest, "destination path" ] (fun x -> Printf.fprintf stderr "Ignoring unknown parameter: %s\n" x) diff --git a/ocaml/xapi/xenstore_dump.ml b/ocaml/xapi/xenstore_dump.ml index 607546e4e2d..342d7a6476a 100644 --- a/ocaml/xapi/xenstore_dump.ml +++ b/ocaml/xapi/xenstore_dump.ml @@ -15,7 +15,7 @@ which can be used to preserve selected parts of xenstore across suspend/resume/migrate. Permissions have to be handled separately. 
- *) +*) open Xstringext open Xenstore @@ -30,11 +30,11 @@ let dump ~xs (path: string) : Xml.xml = let children = List.map (fun x -> ls_R (Filename.concat prefix x) (Filename.concat path x)) files in let relative_paths = List.map (Filename.concat prefix) files in let absolute_paths = List.map (Filename.concat path) files in - let kvpairs = List.map - (fun (relative, absolute) -> relative, handle_enoent xs.Xs.read absolute) - (List.combine relative_paths absolute_paths) in + let kvpairs = List.map + (fun (relative, absolute) -> relative, handle_enoent xs.Xs.read absolute) + (List.combine relative_paths absolute_paths) in - List.concat (kvpairs :: children) + List.concat (kvpairs :: children) in let all = ls_R "" path in let list = List.map (fun (k, v) -> Xml.Element("n", [ "path", k; "value", v ], [])) all in @@ -42,15 +42,15 @@ let dump ~xs (path: string) : Xml.xml = let restore ~xs (path: string) (dump: Xml.xml) = match dump with | Xml.Element("xenstore-dump", [ "version", _ ], nodes) -> - let node = function - | Xml.Element("n", attr, _) -> - if not(List.mem_assoc "path" attr) - then failwith "expected path attribute"; - if not(List.mem_assoc "value" attr) - then failwith "expected value attribute"; - List.assoc "path" attr, List.assoc "value" attr - | _ -> failwith "expected element" in - let nodes = List.map node nodes in - xs.Xs.writev path nodes + let node = function + | Xml.Element("n", attr, _) -> + if not(List.mem_assoc "path" attr) + then failwith "expected path attribute"; + if not(List.mem_assoc "value" attr) + then failwith "expected value attribute"; + List.assoc "path" attr, List.assoc "value" attr + | _ -> failwith "expected element" in + let nodes = List.map node nodes in + xs.Xs.writev path nodes | _ -> failwith "expected element" diff --git a/ocaml/xapi/xha_errno.ml b/ocaml/xapi/xha_errno.ml index 42245b379bb..bd14b149906 100644 --- a/ocaml/xapi/xha_errno.ml +++ b/ocaml/xapi/xha_errno.ml @@ -12,92 +12,92 @@ * GNU Lesser General Public License for 
more details. *) (* Autogenerated by ./scripts/mtcerrno-to-ocaml.py -- do not edit *) -type code = -| Mtc_exit_success -| Mtc_exit_invalid_parameter -| Mtc_exit_system_error -| Mtc_exit_transient_system_error -| Mtc_exit_watchdog_error -| Mtc_exit_improper_license -| Mtc_exit_can_not_read_config_file -| Mtc_exit_invalid_config_file -| Mtc_exit_can_not_access_statefile -| Mtc_exit_invalid_state_file -| Mtc_exit_generation_uuid_mismatch -| Mtc_exit_invalid_pool_state -| Mtc_exit_bootjoin_timeout -| Mtc_exit_can_not_join_existing_liveset -| Mtc_exit_daemon_is_not_present -| Mtc_exit_daemon_is_present -| Mtc_exit_invalid_environment -| Mtc_exit_invalid_localhost_state -| Mtc_exit_boot_blocked_by_excluded -| Mtc_exit_set_excluded -| Mtc_exit_internal_bug +type code = + | Mtc_exit_success + | Mtc_exit_invalid_parameter + | Mtc_exit_system_error + | Mtc_exit_transient_system_error + | Mtc_exit_watchdog_error + | Mtc_exit_improper_license + | Mtc_exit_can_not_read_config_file + | Mtc_exit_invalid_config_file + | Mtc_exit_can_not_access_statefile + | Mtc_exit_invalid_state_file + | Mtc_exit_generation_uuid_mismatch + | Mtc_exit_invalid_pool_state + | Mtc_exit_bootjoin_timeout + | Mtc_exit_can_not_join_existing_liveset + | Mtc_exit_daemon_is_not_present + | Mtc_exit_daemon_is_present + | Mtc_exit_invalid_environment + | Mtc_exit_invalid_localhost_state + | Mtc_exit_boot_blocked_by_excluded + | Mtc_exit_set_excluded + | Mtc_exit_internal_bug let to_string : code -> string = function -| Mtc_exit_success -> "MTC_EXIT_SUCCESS" -| Mtc_exit_invalid_parameter -> "MTC_EXIT_INVALID_PARAMETER" -| Mtc_exit_system_error -> "MTC_EXIT_SYSTEM_ERROR" -| Mtc_exit_transient_system_error -> "MTC_EXIT_TRANSIENT_SYSTEM_ERROR" -| Mtc_exit_watchdog_error -> "MTC_EXIT_WATCHDOG_ERROR" -| Mtc_exit_improper_license -> "MTC_EXIT_IMPROPER_LICENSE" -| Mtc_exit_can_not_read_config_file -> "MTC_EXIT_CAN_NOT_READ_CONFIG_FILE" -| Mtc_exit_invalid_config_file -> "MTC_EXIT_INVALID_CONFIG_FILE" -| 
Mtc_exit_can_not_access_statefile -> "MTC_EXIT_CAN_NOT_ACCESS_STATEFILE" -| Mtc_exit_invalid_state_file -> "MTC_EXIT_INVALID_STATE_FILE" -| Mtc_exit_generation_uuid_mismatch -> "MTC_EXIT_GENERATION_UUID_MISMATCH" -| Mtc_exit_invalid_pool_state -> "MTC_EXIT_INVALID_POOL_STATE" -| Mtc_exit_bootjoin_timeout -> "MTC_EXIT_BOOTJOIN_TIMEOUT" -| Mtc_exit_can_not_join_existing_liveset -> "MTC_EXIT_CAN_NOT_JOIN_EXISTING_LIVESET" -| Mtc_exit_daemon_is_not_present -> "MTC_EXIT_DAEMON_IS_NOT_PRESENT" -| Mtc_exit_daemon_is_present -> "MTC_EXIT_DAEMON_IS_PRESENT" -| Mtc_exit_invalid_environment -> "MTC_EXIT_INVALID_ENVIRONMENT" -| Mtc_exit_invalid_localhost_state -> "MTC_EXIT_INVALID_LOCALHOST_STATE" -| Mtc_exit_boot_blocked_by_excluded -> "MTC_EXIT_BOOT_BLOCKED_BY_EXCLUDED" -| Mtc_exit_set_excluded -> "MTC_EXIT_SET_EXCLUDED" -| Mtc_exit_internal_bug -> "MTC_EXIT_INTERNAL_BUG" + | Mtc_exit_success -> "MTC_EXIT_SUCCESS" + | Mtc_exit_invalid_parameter -> "MTC_EXIT_INVALID_PARAMETER" + | Mtc_exit_system_error -> "MTC_EXIT_SYSTEM_ERROR" + | Mtc_exit_transient_system_error -> "MTC_EXIT_TRANSIENT_SYSTEM_ERROR" + | Mtc_exit_watchdog_error -> "MTC_EXIT_WATCHDOG_ERROR" + | Mtc_exit_improper_license -> "MTC_EXIT_IMPROPER_LICENSE" + | Mtc_exit_can_not_read_config_file -> "MTC_EXIT_CAN_NOT_READ_CONFIG_FILE" + | Mtc_exit_invalid_config_file -> "MTC_EXIT_INVALID_CONFIG_FILE" + | Mtc_exit_can_not_access_statefile -> "MTC_EXIT_CAN_NOT_ACCESS_STATEFILE" + | Mtc_exit_invalid_state_file -> "MTC_EXIT_INVALID_STATE_FILE" + | Mtc_exit_generation_uuid_mismatch -> "MTC_EXIT_GENERATION_UUID_MISMATCH" + | Mtc_exit_invalid_pool_state -> "MTC_EXIT_INVALID_POOL_STATE" + | Mtc_exit_bootjoin_timeout -> "MTC_EXIT_BOOTJOIN_TIMEOUT" + | Mtc_exit_can_not_join_existing_liveset -> "MTC_EXIT_CAN_NOT_JOIN_EXISTING_LIVESET" + | Mtc_exit_daemon_is_not_present -> "MTC_EXIT_DAEMON_IS_NOT_PRESENT" + | Mtc_exit_daemon_is_present -> "MTC_EXIT_DAEMON_IS_PRESENT" + | Mtc_exit_invalid_environment -> 
"MTC_EXIT_INVALID_ENVIRONMENT" + | Mtc_exit_invalid_localhost_state -> "MTC_EXIT_INVALID_LOCALHOST_STATE" + | Mtc_exit_boot_blocked_by_excluded -> "MTC_EXIT_BOOT_BLOCKED_BY_EXCLUDED" + | Mtc_exit_set_excluded -> "MTC_EXIT_SET_EXCLUDED" + | Mtc_exit_internal_bug -> "MTC_EXIT_INTERNAL_BUG" let to_description_string : code -> string = function -| Mtc_exit_success -> "" -| Mtc_exit_invalid_parameter -> "Invalid parameter" -| Mtc_exit_system_error -> "Fatal system error" -| Mtc_exit_transient_system_error -> "Transient system error" -| Mtc_exit_watchdog_error -> "Watchdog error" -| Mtc_exit_improper_license -> "Improper license" -| Mtc_exit_can_not_read_config_file -> "Config-file is inaccessible" -| Mtc_exit_invalid_config_file -> "Invalid config-file contents" -| Mtc_exit_can_not_access_statefile -> "State-File is inaccessible" -| Mtc_exit_invalid_state_file -> "Invalid State-File contents" -| Mtc_exit_generation_uuid_mismatch -> "Generation UUID mismatch" -| Mtc_exit_invalid_pool_state -> "Invalid pool state" -| Mtc_exit_bootjoin_timeout -> "Join timeout during start" -| Mtc_exit_can_not_join_existing_liveset -> "Join is not allowed" -| Mtc_exit_daemon_is_not_present -> "Daemon is not present" -| Mtc_exit_daemon_is_present -> "Daemon is (already) present" -| Mtc_exit_invalid_environment -> "Invalid operation environment" -| Mtc_exit_invalid_localhost_state -> "Invalid local host state" -| Mtc_exit_boot_blocked_by_excluded -> "Start failed" -| Mtc_exit_set_excluded -> "Exclude flag is set while the daemon is operating" -| Mtc_exit_internal_bug -> "Internal bug" + | Mtc_exit_success -> "" + | Mtc_exit_invalid_parameter -> "Invalid parameter" + | Mtc_exit_system_error -> "Fatal system error" + | Mtc_exit_transient_system_error -> "Transient system error" + | Mtc_exit_watchdog_error -> "Watchdog error" + | Mtc_exit_improper_license -> "Improper license" + | Mtc_exit_can_not_read_config_file -> "Config-file is inaccessible" + | Mtc_exit_invalid_config_file -> "Invalid 
config-file contents" + | Mtc_exit_can_not_access_statefile -> "State-File is inaccessible" + | Mtc_exit_invalid_state_file -> "Invalid State-File contents" + | Mtc_exit_generation_uuid_mismatch -> "Generation UUID mismatch" + | Mtc_exit_invalid_pool_state -> "Invalid pool state" + | Mtc_exit_bootjoin_timeout -> "Join timeout during start" + | Mtc_exit_can_not_join_existing_liveset -> "Join is not allowed" + | Mtc_exit_daemon_is_not_present -> "Daemon is not present" + | Mtc_exit_daemon_is_present -> "Daemon is (already) present" + | Mtc_exit_invalid_environment -> "Invalid operation environment" + | Mtc_exit_invalid_localhost_state -> "Invalid local host state" + | Mtc_exit_boot_blocked_by_excluded -> "Start failed" + | Mtc_exit_set_excluded -> "Exclude flag is set while the daemon is operating" + | Mtc_exit_internal_bug -> "Internal bug" let of_int : int -> code = function -| 0 -> Mtc_exit_success -| 1 -> Mtc_exit_invalid_parameter -| 2 -> Mtc_exit_system_error -| 3 -> Mtc_exit_transient_system_error -| 4 -> Mtc_exit_watchdog_error -| 5 -> Mtc_exit_improper_license -| 6 -> Mtc_exit_can_not_read_config_file -| 7 -> Mtc_exit_invalid_config_file -| 8 -> Mtc_exit_can_not_access_statefile -| 9 -> Mtc_exit_invalid_state_file -| 10 -> Mtc_exit_generation_uuid_mismatch -| 11 -> Mtc_exit_invalid_pool_state -| 12 -> Mtc_exit_bootjoin_timeout -| 13 -> Mtc_exit_can_not_join_existing_liveset -| 14 -> Mtc_exit_daemon_is_not_present -| 15 -> Mtc_exit_daemon_is_present -| 16 -> Mtc_exit_invalid_environment -| 17 -> Mtc_exit_invalid_localhost_state -| 18 -> Mtc_exit_boot_blocked_by_excluded -| 19 -> Mtc_exit_set_excluded -| 127 -> Mtc_exit_internal_bug -| x -> failwith (Printf.sprintf "Unrecognised MTC exit code: %d" x) + | 0 -> Mtc_exit_success + | 1 -> Mtc_exit_invalid_parameter + | 2 -> Mtc_exit_system_error + | 3 -> Mtc_exit_transient_system_error + | 4 -> Mtc_exit_watchdog_error + | 5 -> Mtc_exit_improper_license + | 6 -> Mtc_exit_can_not_read_config_file + | 7 -> 
Mtc_exit_invalid_config_file + | 8 -> Mtc_exit_can_not_access_statefile + | 9 -> Mtc_exit_invalid_state_file + | 10 -> Mtc_exit_generation_uuid_mismatch + | 11 -> Mtc_exit_invalid_pool_state + | 12 -> Mtc_exit_bootjoin_timeout + | 13 -> Mtc_exit_can_not_join_existing_liveset + | 14 -> Mtc_exit_daemon_is_not_present + | 15 -> Mtc_exit_daemon_is_present + | 16 -> Mtc_exit_invalid_environment + | 17 -> Mtc_exit_invalid_localhost_state + | 18 -> Mtc_exit_boot_blocked_by_excluded + | 19 -> Mtc_exit_set_excluded + | 127 -> Mtc_exit_internal_bug + | x -> failwith (Printf.sprintf "Unrecognised MTC exit code: %d" x) diff --git a/ocaml/xapi/xha_interface.ml b/ocaml/xapi/xha_interface.ml index 65d59eea447..3613a8a0ce4 100644 --- a/ocaml/xapi/xha_interface.ml +++ b/ocaml/xapi/xha_interface.ml @@ -21,40 +21,40 @@ open Listext (** Generates an XML leaf element of the form: *) (** value *) let xml_leaf_element name value = - Xml.Element ( - name, [], [Xml.PCData value] - ) + Xml.Element ( + name, [], [Xml.PCData value] + ) (** Returns true iff. the given element matches the given name. *) let xml_element_has_name name element = - match element with - | Xml.Element (name_, _, _) -> name = name_ - | _ -> false + match element with + | Xml.Element (name_, _, _) -> name = name_ + | _ -> false (** Returns a sub-list of the given element list, containing *) (** only those elements with the specified name. *) let xml_elements_with_name elements name = - List.filter (xml_element_has_name name) elements + List.filter (xml_element_has_name name) elements (** Returns the first element with the specified name from *) (** the given element list. *) let first_xml_element_with_name elements name = - try - Some (List.find (xml_element_has_name name) elements) - with - Not_found -> None + try + Some (List.find (xml_element_has_name name) elements) + with + Not_found -> None (** Parses an XML element of the form "value". 
*) (** Returns a (name, value) string pair, where the arguments *) (** are stripped of leading and trailing whitespace. *) let hash_table_entry_of_leaf_xml_element = function - | Xml.Element (name, _, Xml.PCData (value) :: values) -> - Some ( - String.strip String.isspace name, - String.strip String.isspace value - ) - | Xml.Element (name, _, []) -> Some (String.strip String.isspace name, "") - | _ -> None + | Xml.Element (name, _, Xml.PCData (value) :: values) -> + Some ( + String.strip String.isspace name, + String.strip String.isspace value + ) + | Xml.Element (name, _, []) -> Some (String.strip String.isspace name, "") + | _ -> None (** Parses a list of XML elements of the form: *) (** value0 *) @@ -68,176 +68,176 @@ let hash_table_entry_of_leaf_xml_element = function (** (name2 -> value2) *) (** ... *) let hash_table_of_leaf_xml_element_list list = - Hashtblext.of_list ( - List.filter_map hash_table_entry_of_leaf_xml_element list - ) + Hashtblext.of_list ( + List.filter_map hash_table_entry_of_leaf_xml_element list + ) (* === Daemon configuration === *) module DaemonConfiguration = struct - (* Taken from Marathon's spec section 4.1.4.4 *) - let filename = Filename.concat "/etc/xensource" "xhad.conf" - - module Host = struct - - type t = { - uuid : string; - address : string - } - - (** Simple type convertor. *) - let of_host_t host_t = { - uuid = host_t.host_uuid ; - address = host_t.host_address; - } - - (** Converts the given HA daemon host configuration *) - (** into an XML element tree. 
*) - let to_xml_element host = - Xml.Element ( - "host", [], [ - (xml_leaf_element "HostID" host.uuid ); - (xml_leaf_element "IPaddress" host.address); - ] - ) - - end - - type t = { - common_generation_uuid : string; - common_udp_port : int; - common_hosts : Host.t list; - local_host_uuid : string; - local_heart_beat_interface : string; - local_heart_beat_physical_interface : string; - local_state_file : string; - heart_beat_interval : int option; - state_file_interval : int option; - heart_beat_timeout : int option; - state_file_timeout : int option; - heart_beat_watchdog_timeout : int option; - state_file_watchdog_timeout : int option; - boot_join_timeout : int option; - enable_join_timeout : int option; - xapi_healthcheck_interval : int option; - xapi_healthcheck_timeout : int option; - xapi_restart_attempts : int option; - xapi_restart_timeout : int option; - xapi_licensecheck_timeout : int option; - } - - (** See interface. *) - let create - ?(common_udp_port = 49154) - ?heart_beat_interval - ?state_file_interval - ?heart_beat_timeout - ?state_file_timeout - ?heart_beat_watchdog_timeout - ?state_file_watchdog_timeout - ?boot_join_timeout - ?enable_join_timeout - ?xapi_healthcheck_interval - ?xapi_healthcheck_timeout - ?xapi_restart_attempts - ?xapi_restart_timeout - ?xapi_licensecheck_timeout - ~common_generation_uuid - ~local_heart_beat_interface - ~local_heart_beat_physical_interface - ~local_state_file - ~__context - () = - let records = Db.Host.get_all_records ~__context in - let common_hosts = List.map - (fun (_, host) -> Host.of_host_t host) - records in - let local_host_uuid = - Db.Host.get_uuid - ~__context ~self:!Xapi_globs.localhost_ref in - { - common_hosts = common_hosts; - common_generation_uuid = (Uuid.to_string common_generation_uuid); - common_udp_port = common_udp_port; - local_host_uuid = local_host_uuid; - local_heart_beat_interface = local_heart_beat_interface; - local_heart_beat_physical_interface = local_heart_beat_physical_interface; - 
local_state_file = local_state_file; - heart_beat_interval = heart_beat_interval; - state_file_interval = state_file_interval; - heart_beat_timeout = heart_beat_timeout; - state_file_timeout = state_file_timeout; - heart_beat_watchdog_timeout = heart_beat_watchdog_timeout; - state_file_watchdog_timeout = state_file_watchdog_timeout; - boot_join_timeout = boot_join_timeout; - enable_join_timeout = enable_join_timeout; - xapi_healthcheck_interval = xapi_healthcheck_interval; - xapi_healthcheck_timeout = xapi_healthcheck_timeout; - xapi_restart_attempts = xapi_restart_attempts; - xapi_restart_timeout = xapi_restart_timeout; - xapi_licensecheck_timeout = xapi_licensecheck_timeout; - } - - let int_parameter (name, param) = - Opt.default [] (Opt.map (fun x -> [ xml_leaf_element name (string_of_int x) ]) param) - - (** Converts the given HA daemon configuration *) - (** into an XML element tree. *) - let to_xml_element config = Xml.Element ( - "xhad-config", - [("version", "1.0")], - [ - Xml.Element ( - "common-config", [], - xml_leaf_element "GenerationUUID" ( config.common_generation_uuid) :: - xml_leaf_element "UDPport" (string_of_int config.common_udp_port ) :: - List.map Host.to_xml_element config.common_hosts @ - [ - Xml.Element ("parameters", [], - List.concat (List.map int_parameter - [ "HeartbeatInterval", config.heart_beat_interval; - "HeartbeatTimeout", config.heart_beat_timeout; - "StateFileInterval", config.state_file_interval; - "StateFileTimeout", config.state_file_timeout; - "HeartbeatWatchdogTimeout", config.heart_beat_watchdog_timeout; - "StateFileWatchdogTimeout", config.state_file_watchdog_timeout; - "BootJoinTimeout", config.boot_join_timeout; - "EnableJoinTimeout", config.enable_join_timeout; - "XapiHealthCheckInterval", config.xapi_healthcheck_interval; - "XapiHealthCheckTimeout", config.xapi_healthcheck_timeout; - "XapiRestartAttempts", config.xapi_restart_attempts; - "XapiRestartTimeout", config.xapi_restart_timeout; - "XapiLicenseCheckTimeout", 
config.xapi_licensecheck_timeout; - ]) - ) - ] - ); - Xml.Element ( - "local-config", [], - [ - Xml.Element ( - "localhost", [], - [ - xml_leaf_element "HostID" config.local_host_uuid ; - xml_leaf_element "HeartbeatInterface" config.local_heart_beat_interface; - xml_leaf_element "HeartbeatPhysicalInterface" config.local_heart_beat_physical_interface; - xml_leaf_element "StateFile" config.local_state_file ; - ] - ) - ] - ) - ] - ) - - (** Converts the given HA daemon configuration *) - (** into an XML string. *) - let to_xml_string config = - "\n" ^ ( - Xml.to_string_fmt ( - to_xml_element config - ) - ) + (* Taken from Marathon's spec section 4.1.4.4 *) + let filename = Filename.concat "/etc/xensource" "xhad.conf" + + module Host = struct + + type t = { + uuid : string; + address : string + } + + (** Simple type convertor. *) + let of_host_t host_t = { + uuid = host_t.host_uuid ; + address = host_t.host_address; + } + + (** Converts the given HA daemon host configuration *) + (** into an XML element tree. *) + let to_xml_element host = + Xml.Element ( + "host", [], [ + (xml_leaf_element "HostID" host.uuid ); + (xml_leaf_element "IPaddress" host.address); + ] + ) + + end + + type t = { + common_generation_uuid : string; + common_udp_port : int; + common_hosts : Host.t list; + local_host_uuid : string; + local_heart_beat_interface : string; + local_heart_beat_physical_interface : string; + local_state_file : string; + heart_beat_interval : int option; + state_file_interval : int option; + heart_beat_timeout : int option; + state_file_timeout : int option; + heart_beat_watchdog_timeout : int option; + state_file_watchdog_timeout : int option; + boot_join_timeout : int option; + enable_join_timeout : int option; + xapi_healthcheck_interval : int option; + xapi_healthcheck_timeout : int option; + xapi_restart_attempts : int option; + xapi_restart_timeout : int option; + xapi_licensecheck_timeout : int option; + } + + (** See interface. 
*) + let create + ?(common_udp_port = 49154) + ?heart_beat_interval + ?state_file_interval + ?heart_beat_timeout + ?state_file_timeout + ?heart_beat_watchdog_timeout + ?state_file_watchdog_timeout + ?boot_join_timeout + ?enable_join_timeout + ?xapi_healthcheck_interval + ?xapi_healthcheck_timeout + ?xapi_restart_attempts + ?xapi_restart_timeout + ?xapi_licensecheck_timeout + ~common_generation_uuid + ~local_heart_beat_interface + ~local_heart_beat_physical_interface + ~local_state_file + ~__context + () = + let records = Db.Host.get_all_records ~__context in + let common_hosts = List.map + (fun (_, host) -> Host.of_host_t host) + records in + let local_host_uuid = + Db.Host.get_uuid + ~__context ~self:!Xapi_globs.localhost_ref in + { + common_hosts = common_hosts; + common_generation_uuid = (Uuid.to_string common_generation_uuid); + common_udp_port = common_udp_port; + local_host_uuid = local_host_uuid; + local_heart_beat_interface = local_heart_beat_interface; + local_heart_beat_physical_interface = local_heart_beat_physical_interface; + local_state_file = local_state_file; + heart_beat_interval = heart_beat_interval; + state_file_interval = state_file_interval; + heart_beat_timeout = heart_beat_timeout; + state_file_timeout = state_file_timeout; + heart_beat_watchdog_timeout = heart_beat_watchdog_timeout; + state_file_watchdog_timeout = state_file_watchdog_timeout; + boot_join_timeout = boot_join_timeout; + enable_join_timeout = enable_join_timeout; + xapi_healthcheck_interval = xapi_healthcheck_interval; + xapi_healthcheck_timeout = xapi_healthcheck_timeout; + xapi_restart_attempts = xapi_restart_attempts; + xapi_restart_timeout = xapi_restart_timeout; + xapi_licensecheck_timeout = xapi_licensecheck_timeout; + } + + let int_parameter (name, param) = + Opt.default [] (Opt.map (fun x -> [ xml_leaf_element name (string_of_int x) ]) param) + + (** Converts the given HA daemon configuration *) + (** into an XML element tree. 
*) + let to_xml_element config = Xml.Element ( + "xhad-config", + [("version", "1.0")], + [ + Xml.Element ( + "common-config", [], + xml_leaf_element "GenerationUUID" ( config.common_generation_uuid) :: + xml_leaf_element "UDPport" (string_of_int config.common_udp_port ) :: + List.map Host.to_xml_element config.common_hosts @ + [ + Xml.Element ("parameters", [], + List.concat (List.map int_parameter + [ "HeartbeatInterval", config.heart_beat_interval; + "HeartbeatTimeout", config.heart_beat_timeout; + "StateFileInterval", config.state_file_interval; + "StateFileTimeout", config.state_file_timeout; + "HeartbeatWatchdogTimeout", config.heart_beat_watchdog_timeout; + "StateFileWatchdogTimeout", config.state_file_watchdog_timeout; + "BootJoinTimeout", config.boot_join_timeout; + "EnableJoinTimeout", config.enable_join_timeout; + "XapiHealthCheckInterval", config.xapi_healthcheck_interval; + "XapiHealthCheckTimeout", config.xapi_healthcheck_timeout; + "XapiRestartAttempts", config.xapi_restart_attempts; + "XapiRestartTimeout", config.xapi_restart_timeout; + "XapiLicenseCheckTimeout", config.xapi_licensecheck_timeout; + ]) + ) + ] + ); + Xml.Element ( + "local-config", [], + [ + Xml.Element ( + "localhost", [], + [ + xml_leaf_element "HostID" config.local_host_uuid ; + xml_leaf_element "HeartbeatInterface" config.local_heart_beat_interface; + xml_leaf_element "HeartbeatPhysicalInterface" config.local_heart_beat_physical_interface; + xml_leaf_element "StateFile" config.local_state_file ; + ] + ) + ] + ) + ] + ) + + (** Converts the given HA daemon configuration *) + (** into an XML string. 
*) + let to_xml_string config = + "\n" ^ ( + Xml.to_string_fmt ( + to_xml_element config + ) + ) end @@ -245,251 +245,251 @@ end module LiveSetInformation = struct - module Status = struct - - type t = Online | Offline | Starting - - let of_string string = - match String.lowercase string with - | "online" -> Some (Online) - | "offline" -> Some (Offline) - | "starting" -> Some Starting - | _ -> invalid_arg "Invalid status string." - - let to_string = function - | Online -> "online" - | Offline -> "offline" - | Starting -> "starting" - - end - - module Host = struct - - type t = { - id: [`host] Uuid.t; - liveness: bool; - master: bool; - state_file_access: bool; - state_file_corrupted: bool; - excluded: bool - } - - (** Creates a new host record from a host XML element. *) - (** The element must contain valid child elements for *) - (** each member of the host record type. *) - let of_xml_element = function - | Xml.Element ("host", _, children) -> - begin - let table = hash_table_of_leaf_xml_element_list children in - let find x = - try Hashtbl.find table x - with Not_found -> - invalid_arg (Printf.sprintf "Missig entry '%s' within 'host' element" x) in - let bool s = - try bool_of_string (String.lowercase s) - with Invalid_argument _ -> - invalid_arg (Printf.sprintf "Invalid boolean value '%s' within 'host' element" s) in - - let uuid = Uuid.of_string in - Some ({ - id = uuid (find "HostID" ); - liveness = bool (find "liveness" ); - master = bool (find "master" ); - state_file_access = bool (find "statefile_access" ); - state_file_corrupted = bool (find "statefile_corrupted"); - excluded = bool (find "excluded" ) - }) - end - | _ -> - None - - end - - module HostRawData = struct - type t = { - id: [`host] Uuid.t; - time_since_last_update_on_statefile: int; - time_since_last_heartbeat: int; - time_since_xapi_restart_first_attempted: int; - heartbeat_active_list_on_heartbeat: [`host] Uuid.t list; - heartbeat_active_list_on_statefile: [`host] Uuid.t list; - (* ... 
*) - } - let of_xml_element = function - | Xml.Element("host_raw_data", _, children) -> - let table = hash_table_of_leaf_xml_element_list children in - let find x = - try Hashtbl.find table x - with Not_found -> - invalid_arg (Printf.sprintf "Missing entry '%s' within 'host_raw_data' element" x) in - let int s = - try int_of_string (String.lowercase s) - with Invalid_argument _ -> - invalid_arg (Printf.sprintf "Invalid integer value '%s' within 'host_raw_data' element" s) in - let uuid = Uuid.of_string in - let set f x = List.map f (String.split_f String.isspace x) in - Some ({ - id = uuid (find "HostID"); - time_since_last_update_on_statefile = int (find "time_since_last_update_on_statefile" ); - time_since_last_heartbeat = int (find "time_since_last_heartbeat" ); - time_since_xapi_restart_first_attempted = int (find "time_since_xapi_restart_first_attempted"); - heartbeat_active_list_on_heartbeat = set uuid (find "heartbeat_active_list_on_heartbeat"); - heartbeat_active_list_on_statefile = set uuid (find "heartbeat_active_list_on_statefile"); - }) - | _ -> None - - end - - module Warning = struct - type t = { - statefile_lost: bool; - heartbeat_approaching_timeout: bool; - statefile_approaching_timeout: bool; - xapi_healthcheck_approaching_timeout: bool; - network_bonding_error: bool; - } - let of_xml_element = function - | Xml.Element("warning_on_local_host", _, children) -> - begin - let table = hash_table_of_leaf_xml_element_list children in - let find x = - try Hashtbl.find table x - with Not_found -> - invalid_arg (Printf.sprintf "Missing entry '%s' within 'warning_on_local_host' element" x) in - let bool x = find x = "TRUE" in - Some({ - statefile_lost = bool "statefile_lost"; - heartbeat_approaching_timeout = bool "heartbeat_approaching_timeout"; - statefile_approaching_timeout = bool "statefile_approaching_timeout"; - xapi_healthcheck_approaching_timeout = bool "Xapi_healthcheck_approaching_timeout"; - network_bonding_error = bool "network_bonding_error"; 
- }) - end - | _ -> - None - end - - module RawStatus = struct - type t = { - statefile_latency: int; - statefile_min: int; - statefile_max: int; - heartbeat_latency: int; - heartbeat_min: int; - heartbeat_max: int; - xapi_healthcheck_latency: int; - xapi_healthcheck_min: int; - xapi_healthcheck_max: int; - host_raw_data: ([`host] Uuid.t, HostRawData.t) Hashtbl.t; - } - let of_xml_element = function - | Xml.Element("raw_status_on_local_host", _, children) -> - begin - let table = hash_table_of_leaf_xml_element_list children in - let find x = - try Hashtbl.find table x - with Not_found -> - invalid_arg (Printf.sprintf "Missing entry '%s' within 'raw_status_on_local_host' element" x) in - let int s = - try int_of_string (String.lowercase s) - with Invalid_argument _ -> - invalid_arg (Printf.sprintf "Invalid integer value '%s' within 'raw_status_on_local_host' element" s) in - let host_raw_data = Hashtblext.of_list ( - List.map - (fun host -> (host.HostRawData.id, host)) - (List.filter_map HostRawData.of_xml_element children) - ) in - - Some({ - statefile_latency = int (find "statefile_latency" ); - statefile_min = int (find "statefile_latency_min" ); - statefile_max = int (find "statefile_latency_max" ); - heartbeat_latency = int (find "heartbeat_latency" ); - heartbeat_min = int (find "heartbeat_latency_min" ); - heartbeat_max = int (find "heartbeat_latency_max" ); - xapi_healthcheck_latency = int (find "Xapi_healthcheck_latency" ); - xapi_healthcheck_min = int (find "Xapi_healthcheck_latency_min"); - xapi_healthcheck_max = int (find "Xapi_healthcheck_latency_max"); - host_raw_data = host_raw_data; - }) - end - | _ -> - None - end - - - - type t = { - status: Status.t; - local_host_id: [`host] Uuid.t; - hosts: ([`host] Uuid.t, Host.t) Hashtbl.t; - raw_status_on_local_host: RawStatus.t option; - warning_on_local_host: Warning.t option; - } - - (** Creates a new HA live set information record *) - (** from the given list of XML elements. 
*) - let of_xml_element_list elements = { - hosts = Hashtblext.of_list ( - List.map - (fun host -> (host.Host.id, host)) - (List.filter_map Host.of_xml_element elements) - ); - local_host_id = ( - match first_xml_element_with_name elements "localhost" with - | Some Xml.Element - (_, _ , [Xml.Element ("HostID", _, [Xml.PCData (local_host_id)])]) -> - Uuid.of_string local_host_id - | _ -> - invalid_arg "Invalid or missing 'localhost' element." - ); - status = ( - let status_option = - match first_xml_element_with_name elements "status" with - | Some Xml.Element (_, _, [Xml.PCData (status_string)]) -> - Status.of_string status_string - | _ -> - None in - match status_option with - | Some (status) -> status - | _ -> invalid_arg "Invalid or missing 'status' element." - ); - raw_status_on_local_host = ( - match first_xml_element_with_name elements "raw_status_on_local_host" with - | Some x -> RawStatus.of_xml_element x - | None -> None - ); - warning_on_local_host = ( - match first_xml_element_with_name elements "warning_on_local_host" with - | Some x -> Warning.of_xml_element x - | None -> None - ); - } - - (** Creates a new HA live set information record *) - (** from the given root XML element. *) - let of_xml_element = function - | Xml.Element ("ha_liveset_info", _, children) -> - of_xml_element_list children - | _ -> - invalid_arg "Invalid or missing 'ha_liveset_info' element." - - (** See interface. *) - let of_xml_string string = - of_xml_element (Xml.parse_string string) - - (** See interface. 
*) - let to_summary_string t = - let status = Status.to_string t.status in - let host h = Printf.sprintf "%s [%s%s%s%s%s%s]" - (Uuid.string_of_uuid h.Host.id) - (if h.Host.id = t.local_host_id then "*" else " ") - (if h.Host.liveness then "L" else " ") - (if h.Host.master then "M" else " ") - (if h.Host.excluded then "X" else " ") - (if h.Host.state_file_access then "A" else " ") - (if h.Host.state_file_corrupted then "X" else " ") in - status ^ " " ^ (Hashtbl.fold (fun _ h acc -> host h ^ "; " ^ acc) t.hosts "") + module Status = struct + + type t = Online | Offline | Starting + + let of_string string = + match String.lowercase string with + | "online" -> Some (Online) + | "offline" -> Some (Offline) + | "starting" -> Some Starting + | _ -> invalid_arg "Invalid status string." + + let to_string = function + | Online -> "online" + | Offline -> "offline" + | Starting -> "starting" + + end + + module Host = struct + + type t = { + id: [`host] Uuid.t; + liveness: bool; + master: bool; + state_file_access: bool; + state_file_corrupted: bool; + excluded: bool + } + + (** Creates a new host record from a host XML element. *) + (** The element must contain valid child elements for *) + (** each member of the host record type. 
*) + let of_xml_element = function + | Xml.Element ("host", _, children) -> + begin + let table = hash_table_of_leaf_xml_element_list children in + let find x = + try Hashtbl.find table x + with Not_found -> + invalid_arg (Printf.sprintf "Missig entry '%s' within 'host' element" x) in + let bool s = + try bool_of_string (String.lowercase s) + with Invalid_argument _ -> + invalid_arg (Printf.sprintf "Invalid boolean value '%s' within 'host' element" s) in + + let uuid = Uuid.of_string in + Some ({ + id = uuid (find "HostID" ); + liveness = bool (find "liveness" ); + master = bool (find "master" ); + state_file_access = bool (find "statefile_access" ); + state_file_corrupted = bool (find "statefile_corrupted"); + excluded = bool (find "excluded" ) + }) + end + | _ -> + None + + end + + module HostRawData = struct + type t = { + id: [`host] Uuid.t; + time_since_last_update_on_statefile: int; + time_since_last_heartbeat: int; + time_since_xapi_restart_first_attempted: int; + heartbeat_active_list_on_heartbeat: [`host] Uuid.t list; + heartbeat_active_list_on_statefile: [`host] Uuid.t list; + (* ... 
*) + } + let of_xml_element = function + | Xml.Element("host_raw_data", _, children) -> + let table = hash_table_of_leaf_xml_element_list children in + let find x = + try Hashtbl.find table x + with Not_found -> + invalid_arg (Printf.sprintf "Missing entry '%s' within 'host_raw_data' element" x) in + let int s = + try int_of_string (String.lowercase s) + with Invalid_argument _ -> + invalid_arg (Printf.sprintf "Invalid integer value '%s' within 'host_raw_data' element" s) in + let uuid = Uuid.of_string in + let set f x = List.map f (String.split_f String.isspace x) in + Some ({ + id = uuid (find "HostID"); + time_since_last_update_on_statefile = int (find "time_since_last_update_on_statefile" ); + time_since_last_heartbeat = int (find "time_since_last_heartbeat" ); + time_since_xapi_restart_first_attempted = int (find "time_since_xapi_restart_first_attempted"); + heartbeat_active_list_on_heartbeat = set uuid (find "heartbeat_active_list_on_heartbeat"); + heartbeat_active_list_on_statefile = set uuid (find "heartbeat_active_list_on_statefile"); + }) + | _ -> None + + end + + module Warning = struct + type t = { + statefile_lost: bool; + heartbeat_approaching_timeout: bool; + statefile_approaching_timeout: bool; + xapi_healthcheck_approaching_timeout: bool; + network_bonding_error: bool; + } + let of_xml_element = function + | Xml.Element("warning_on_local_host", _, children) -> + begin + let table = hash_table_of_leaf_xml_element_list children in + let find x = + try Hashtbl.find table x + with Not_found -> + invalid_arg (Printf.sprintf "Missing entry '%s' within 'warning_on_local_host' element" x) in + let bool x = find x = "TRUE" in + Some({ + statefile_lost = bool "statefile_lost"; + heartbeat_approaching_timeout = bool "heartbeat_approaching_timeout"; + statefile_approaching_timeout = bool "statefile_approaching_timeout"; + xapi_healthcheck_approaching_timeout = bool "Xapi_healthcheck_approaching_timeout"; + network_bonding_error = bool "network_bonding_error"; 
+ }) + end + | _ -> + None + end + + module RawStatus = struct + type t = { + statefile_latency: int; + statefile_min: int; + statefile_max: int; + heartbeat_latency: int; + heartbeat_min: int; + heartbeat_max: int; + xapi_healthcheck_latency: int; + xapi_healthcheck_min: int; + xapi_healthcheck_max: int; + host_raw_data: ([`host] Uuid.t, HostRawData.t) Hashtbl.t; + } + let of_xml_element = function + | Xml.Element("raw_status_on_local_host", _, children) -> + begin + let table = hash_table_of_leaf_xml_element_list children in + let find x = + try Hashtbl.find table x + with Not_found -> + invalid_arg (Printf.sprintf "Missing entry '%s' within 'raw_status_on_local_host' element" x) in + let int s = + try int_of_string (String.lowercase s) + with Invalid_argument _ -> + invalid_arg (Printf.sprintf "Invalid integer value '%s' within 'raw_status_on_local_host' element" s) in + let host_raw_data = Hashtblext.of_list ( + List.map + (fun host -> (host.HostRawData.id, host)) + (List.filter_map HostRawData.of_xml_element children) + ) in + + Some({ + statefile_latency = int (find "statefile_latency" ); + statefile_min = int (find "statefile_latency_min" ); + statefile_max = int (find "statefile_latency_max" ); + heartbeat_latency = int (find "heartbeat_latency" ); + heartbeat_min = int (find "heartbeat_latency_min" ); + heartbeat_max = int (find "heartbeat_latency_max" ); + xapi_healthcheck_latency = int (find "Xapi_healthcheck_latency" ); + xapi_healthcheck_min = int (find "Xapi_healthcheck_latency_min"); + xapi_healthcheck_max = int (find "Xapi_healthcheck_latency_max"); + host_raw_data = host_raw_data; + }) + end + | _ -> + None + end + + + + type t = { + status: Status.t; + local_host_id: [`host] Uuid.t; + hosts: ([`host] Uuid.t, Host.t) Hashtbl.t; + raw_status_on_local_host: RawStatus.t option; + warning_on_local_host: Warning.t option; + } + + (** Creates a new HA live set information record *) + (** from the given list of XML elements. 
*) + let of_xml_element_list elements = { + hosts = Hashtblext.of_list ( + List.map + (fun host -> (host.Host.id, host)) + (List.filter_map Host.of_xml_element elements) + ); + local_host_id = ( + match first_xml_element_with_name elements "localhost" with + | Some Xml.Element + (_, _ , [Xml.Element ("HostID", _, [Xml.PCData (local_host_id)])]) -> + Uuid.of_string local_host_id + | _ -> + invalid_arg "Invalid or missing 'localhost' element." + ); + status = ( + let status_option = + match first_xml_element_with_name elements "status" with + | Some Xml.Element (_, _, [Xml.PCData (status_string)]) -> + Status.of_string status_string + | _ -> + None in + match status_option with + | Some (status) -> status + | _ -> invalid_arg "Invalid or missing 'status' element." + ); + raw_status_on_local_host = ( + match first_xml_element_with_name elements "raw_status_on_local_host" with + | Some x -> RawStatus.of_xml_element x + | None -> None + ); + warning_on_local_host = ( + match first_xml_element_with_name elements "warning_on_local_host" with + | Some x -> Warning.of_xml_element x + | None -> None + ); + } + + (** Creates a new HA live set information record *) + (** from the given root XML element. *) + let of_xml_element = function + | Xml.Element ("ha_liveset_info", _, children) -> + of_xml_element_list children + | _ -> + invalid_arg "Invalid or missing 'ha_liveset_info' element." + + (** See interface. *) + let of_xml_string string = + of_xml_element (Xml.parse_string string) + + (** See interface. 
*) + let to_summary_string t = + let status = Status.to_string t.status in + let host h = Printf.sprintf "%s [%s%s%s%s%s%s]" + (Uuid.string_of_uuid h.Host.id) + (if h.Host.id = t.local_host_id then "*" else " ") + (if h.Host.liveness then "L" else " ") + (if h.Host.master then "M" else " ") + (if h.Host.excluded then "X" else " ") + (if h.Host.state_file_access then "A" else " ") + (if h.Host.state_file_corrupted then "X" else " ") in + status ^ " " ^ (Hashtbl.fold (fun _ h acc -> host h ^ "; " ^ acc) t.hosts "") end @@ -542,7 +542,7 @@ module DaemonConfigurationTest = struct end let ($) a b = b a - + let _ = { common_hosts = HostTest.mock_hosts ; @@ -591,7 +591,7 @@ module LiveSetInformationTest = struct "}" let ($) f a = a f - + let _ = if Array.length Sys.argv != 2 then print_endline "usage: xha_interface " diff --git a/ocaml/xapi/xha_interface.mli b/ocaml/xapi/xha_interface.mli index 4003240c436..e7b88c3db76 100644 --- a/ocaml/xapi/xha_interface.mli +++ b/ocaml/xapi/xha_interface.mli @@ -11,148 +11,148 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** +(** * @group High Availability (HA) - *) - +*) + module DaemonConfiguration : sig - module Host : sig - - type t = { - uuid : string; - address : string - } - - end - - type t = { - common_generation_uuid : string; - common_udp_port : int; - common_hosts : Host.t list; - local_host_uuid : string; - local_heart_beat_interface : string; - local_heart_beat_physical_interface : string; - local_state_file : string; - (* if None we defer to xHA built-in default *) - heart_beat_interval : int option; - state_file_interval : int option; - heart_beat_timeout : int option; - state_file_timeout : int option; - heart_beat_watchdog_timeout : int option; - state_file_watchdog_timeout : int option; - boot_join_timeout : int option; - enable_join_timeout : int option; - xapi_healthcheck_interval : int option; - xapi_healthcheck_timeout : int option; - xapi_restart_attempts : int option; - xapi_restart_timeout : int option; - xapi_licensecheck_timeout : int option; - } - - val create : - ?common_udp_port : int -> - ?heart_beat_interval : int -> - ?state_file_interval : int -> - ?heart_beat_timeout : int -> - ?state_file_timeout : int -> - ?heart_beat_watchdog_timeout : int -> - ?state_file_watchdog_timeout : int -> - ?boot_join_timeout : int -> - ?enable_join_timeout : int -> - ?xapi_healthcheck_interval : int -> - ?xapi_healthcheck_timeout : int -> - ?xapi_restart_attempts : int -> - ?xapi_restart_timeout : int -> - ?xapi_licensecheck_timeout : int -> - common_generation_uuid : [`generation] Uuid.t -> - local_heart_beat_interface : string -> - local_heart_beat_physical_interface : string -> - local_state_file : string -> - __context : Context.t -> - unit -> t - - val to_xml_string : t -> string - - (** Path of the xha configuration file in domain 0 *) - val filename : string + module Host : sig + + type t = { + uuid : string; + address : string + } + + end + + type t = { + common_generation_uuid : string; + common_udp_port : int; + common_hosts : Host.t list; + 
local_host_uuid : string; + local_heart_beat_interface : string; + local_heart_beat_physical_interface : string; + local_state_file : string; + (* if None we defer to xHA built-in default *) + heart_beat_interval : int option; + state_file_interval : int option; + heart_beat_timeout : int option; + state_file_timeout : int option; + heart_beat_watchdog_timeout : int option; + state_file_watchdog_timeout : int option; + boot_join_timeout : int option; + enable_join_timeout : int option; + xapi_healthcheck_interval : int option; + xapi_healthcheck_timeout : int option; + xapi_restart_attempts : int option; + xapi_restart_timeout : int option; + xapi_licensecheck_timeout : int option; + } + + val create : + ?common_udp_port : int -> + ?heart_beat_interval : int -> + ?state_file_interval : int -> + ?heart_beat_timeout : int -> + ?state_file_timeout : int -> + ?heart_beat_watchdog_timeout : int -> + ?state_file_watchdog_timeout : int -> + ?boot_join_timeout : int -> + ?enable_join_timeout : int -> + ?xapi_healthcheck_interval : int -> + ?xapi_healthcheck_timeout : int -> + ?xapi_restart_attempts : int -> + ?xapi_restart_timeout : int -> + ?xapi_licensecheck_timeout : int -> + common_generation_uuid : [`generation] Uuid.t -> + local_heart_beat_interface : string -> + local_heart_beat_physical_interface : string -> + local_state_file : string -> + __context : Context.t -> + unit -> t + + val to_xml_string : t -> string + + (** Path of the xha configuration file in domain 0 *) + val filename : string end module LiveSetInformation : sig - module Status : sig - - type t = Online | Offline | Starting - - end - - module Host : sig - - type t = { - id: [`host] Uuid.t; - liveness: bool; - master: bool; - state_file_access: bool; - state_file_corrupted: bool; - excluded: bool - } - - - end - - module HostRawData : sig - type t = { - id: [`host] Uuid.t; - time_since_last_update_on_statefile: int; - time_since_last_heartbeat: int; - time_since_xapi_restart_first_attempted: int; - 
heartbeat_active_list_on_heartbeat: [`host] Uuid.t list; - heartbeat_active_list_on_statefile: [`host] Uuid.t list; - (* ... *) - } - end - - module RawStatus : sig - - type t = { - statefile_latency: int; - statefile_min: int; - statefile_max: int; - heartbeat_latency: int; - heartbeat_min: int; - heartbeat_max: int; - xapi_healthcheck_latency: int; - xapi_healthcheck_min: int; - xapi_healthcheck_max: int; - host_raw_data: ([`host] Uuid.t, HostRawData.t) Hashtbl.t; - } - end - - module Warning : sig - type t = { - statefile_lost: bool; - heartbeat_approaching_timeout: bool; - statefile_approaching_timeout: bool; - xapi_healthcheck_approaching_timeout: bool; - network_bonding_error: bool; - } - end - - type t = { - status: Status.t; - local_host_id: [`host] Uuid.t; - hosts: ([`host] Uuid.t, Host.t) Hashtbl.t; - raw_status_on_local_host: RawStatus.t option; - warning_on_local_host: Warning.t option; - } - - (** Creates a new HA live set information record from the *) - (** given XML document string. Raises Invalid_argument if *) - (** the given string is either invalid or incomplete. *) - val of_xml_string : string -> t - - (** Creates a compact one-line summary suitable for debug logging *) - val to_summary_string : t -> string + module Status : sig + + type t = Online | Offline | Starting + + end + + module Host : sig + + type t = { + id: [`host] Uuid.t; + liveness: bool; + master: bool; + state_file_access: bool; + state_file_corrupted: bool; + excluded: bool + } + + + end + + module HostRawData : sig + type t = { + id: [`host] Uuid.t; + time_since_last_update_on_statefile: int; + time_since_last_heartbeat: int; + time_since_xapi_restart_first_attempted: int; + heartbeat_active_list_on_heartbeat: [`host] Uuid.t list; + heartbeat_active_list_on_statefile: [`host] Uuid.t list; + (* ... 
*) + } + end + + module RawStatus : sig + + type t = { + statefile_latency: int; + statefile_min: int; + statefile_max: int; + heartbeat_latency: int; + heartbeat_min: int; + heartbeat_max: int; + xapi_healthcheck_latency: int; + xapi_healthcheck_min: int; + xapi_healthcheck_max: int; + host_raw_data: ([`host] Uuid.t, HostRawData.t) Hashtbl.t; + } + end + + module Warning : sig + type t = { + statefile_lost: bool; + heartbeat_approaching_timeout: bool; + statefile_approaching_timeout: bool; + xapi_healthcheck_approaching_timeout: bool; + network_bonding_error: bool; + } + end + + type t = { + status: Status.t; + local_host_id: [`host] Uuid.t; + hosts: ([`host] Uuid.t, Host.t) Hashtbl.t; + raw_status_on_local_host: RawStatus.t option; + warning_on_local_host: Warning.t option; + } + + (** Creates a new HA live set information record from the *) + (** given XML document string. Raises Invalid_argument if *) + (** the given string is either invalid or incomplete. *) + val of_xml_string : string -> t + + (** Creates a compact one-line summary suitable for debug logging *) + val to_summary_string : t -> string end diff --git a/ocaml/xapi/xha_metadata_vdi.ml b/ocaml/xapi/xha_metadata_vdi.ml index b382b79ee56..8cdfa9f486e 100644 --- a/ocaml/xapi/xha_metadata_vdi.ml +++ b/ocaml/xapi/xha_metadata_vdi.ml @@ -11,9 +11,9 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** Manage the lifecycle of HA metadata VDI +(** Manage the lifecycle of HA metadata VDI * @group High Availability (HA) - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -23,51 +23,51 @@ open Stdext open Listext open Xstringext -let create ~__context ~sr = +let create ~__context ~sr = Helpers.call_api_functions ~__context (fun rpc session_id -> Client.VDI.create ~rpc ~session_id - ~name_label:"Metadata for HA" - ~name_description:"Used for master failover" + ~name_label:"Metadata for HA" + ~name_description:"Used for master failover" ~sR:sr ~virtual_size:Redo_log.minimum_vdi_size ~_type:`redo_log ~sharable:true ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:Redo_log.redo_log_sm_config ~tags:[] ) (** Return a reference to a valid metadata VDI in the given SR. This function prefers to reuse existing VDIs to avoid leaking the VDI when HA is disabled without statefile access. *) -let find_or_create ~__context ~sr = +let find_or_create ~__context ~sr = match - List.filter - (fun self -> true - && (Db.VDI.get_type ~__context ~self = `redo_log) - && (Db.VDI.get_virtual_size ~__context ~self >= Redo_log.minimum_vdi_size)) - (Db.SR.get_VDIs ~__context ~self:sr) with - | x :: _ -> - info "re-using existing metadata VDI: %s" (Db.VDI.get_uuid ~__context ~self:x); - x - | [] -> - info "no suitable existing metadata VDI found; creating a fresh one"; - create ~__context ~sr + List.filter + (fun self -> true + && (Db.VDI.get_type ~__context ~self = `redo_log) + && (Db.VDI.get_virtual_size ~__context ~self >= Redo_log.minimum_vdi_size)) + (Db.SR.get_VDIs ~__context ~self:sr) with + | x :: _ -> + info "re-using existing metadata VDI: %s" (Db.VDI.get_uuid ~__context ~self:x); + x + | [] -> + info "no suitable existing metadata VDI found; creating a fresh one"; + create ~__context ~sr -let list_existing () = - List.filter (fun x -> x.Static_vdis.reason = Xapi_globs.ha_metadata_vdi_reason) (Static_vdis.list ()) +let list_existing () = + 
List.filter (fun x -> x.Static_vdis.reason = Xapi_globs.ha_metadata_vdi_reason) (Static_vdis.list ()) (** Detach all statefiles attached with reason, to clear stale state *) -let detach_existing ~__context = +let detach_existing ~__context = let vdis = list_existing() in List.iter (fun x -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid:x.Static_vdis.uuid) vdis (** Added for CA-48539 *) let deactivate_and_detach_existing ~__context = - let vdi_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing ()) in - List.iter (fun vdi_uuid -> Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid:vdi_uuid) vdi_uuids ; - List.iter (fun vdi_uuid -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid:vdi_uuid) vdi_uuids + let vdi_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing ()) in + List.iter (fun vdi_uuid -> Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid:vdi_uuid) vdi_uuids ; + List.iter (fun vdi_uuid -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid:vdi_uuid) vdi_uuids open Pervasiveext (** Attempt to flush the database to the metadata VDI *) -let flush_database ~__context log = +let flush_database ~__context log = try Redo_log.flush_db_to_redo_log (Db_ref.get_database (Db_backend.make ())) log with _ -> false diff --git a/ocaml/xapi/xha_scripts.ml b/ocaml/xapi/xha_scripts.ml index 812a3d483aa..9216a62bf78 100644 --- a/ocaml/xapi/xha_scripts.ml +++ b/ocaml/xapi/xha_scripts.ml @@ -16,8 +16,8 @@ module D = Debug.Make(struct let name="xapi_ha" end) open D let ha_dir () = - let stack = Localdb.get Constants.ha_cluster_stack in - Filename.concat !Xapi_globs.cluster_stack_root stack + let stack = Localdb.get Constants.ha_cluster_stack in + Filename.concat !Xapi_globs.cluster_stack_root stack let ha_set_pool_state = "ha_set_pool_state" let ha_start_daemon = "ha_start_daemon" @@ -33,12 +33,12 @@ let ha_supported_srs = "ha_supported_srs" * If the file does not exist, then we assume that there are no 
constraints * for backwards compatibility. *) let get_supported_srs cluster_stack = - let fname = Filename.concat !Xapi_globs.cluster_stack_root cluster_stack in - try - let open Stdext.Xstringext.String in - Some (Stdext.Unixext.string_of_file fname |> strip isspace |> split_f isspace) - with _ -> - None + let fname = Filename.concat !Xapi_globs.cluster_stack_root cluster_stack in + try + let open Stdext.Xstringext.String in + Some (Stdext.Unixext.string_of_file fname |> strip isspace |> split_f isspace) + with _ -> + None (** The xHA scripts throw these exceptions: *) exception Xha_error of Xha_errno.code @@ -47,14 +47,14 @@ exception Xha_error of Xha_errno.code let ha_script_m = Mutex.create () let call_script ?log_successful_output script args = - let path = ha_dir () in - let script' = Filename.concat path script in - let env = [| (Printf.sprintf "PATH=%s:%s" (Sys.getenv "PATH") path) |] in - try - Stdext.Threadext.Mutex.execute ha_script_m - (fun () -> Helpers.call_script ?log_successful_output ~env script' args) - with Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n) -> - let code = Xha_errno.of_int n in - warn "%s %s returned %s (%s)" script' (String.concat " " args) - (Xha_errno.to_string code) (Xha_errno.to_description_string code); - raise (Xha_error code) + let path = ha_dir () in + let script' = Filename.concat path script in + let env = [| (Printf.sprintf "PATH=%s:%s" (Sys.getenv "PATH") path) |] in + try + Stdext.Threadext.Mutex.execute ha_script_m + (fun () -> Helpers.call_script ?log_successful_output ~env script' args) + with Forkhelpers.Spawn_internal_error(stderr, stdout, Unix.WEXITED n) -> + let code = Xha_errno.of_int n in + warn "%s %s returned %s (%s)" script' (String.concat " " args) + (Xha_errno.to_string code) (Xha_errno.to_description_string code); + raise (Xha_error code) diff --git a/ocaml/xapi/xha_statefile.ml b/ocaml/xapi/xha_statefile.ml index fb264edc532..289514fb0c3 100644 --- a/ocaml/xapi/xha_statefile.ml +++ 
b/ocaml/xapi/xha_statefile.ml @@ -13,7 +13,7 @@ *) (** Manage the lifecycle of HA statefiles * @group High Availability (HA) - *) +*) module D = Debug.Make(struct let name="xapi" end) open D @@ -32,62 +32,62 @@ open Xstringext (** Return the minimum size of an HA statefile, as of XenServer HA state-file description vsn 1.3 *) let minimum_size = - let ( ** ) = Int64.mul - and ( ++ ) = Int64.add in + let ( ** ) = Int64.mul + and ( ++ ) = Int64.add in - let global_section_size = 4096L - and host_section_size = 4096L + let global_section_size = 4096L + and host_section_size = 4096L and maximum_number_of_hosts = 64L in - global_section_size ++ maximum_number_of_hosts ** host_section_size + global_section_size ++ maximum_number_of_hosts ** host_section_size let assert_sr_can_host_statefile ~__context ~sr ~cluster_stack = - (* Check that each host has a PBD to this SR *) - let pbds = Db.SR.get_PBDs ~__context ~self:sr in - let connected_hosts = List.setify (List.map (fun self -> Db.PBD.get_host ~__context ~self) pbds) in - let all_hosts = Db.Host.get_all ~__context in - if List.length connected_hosts < (List.length all_hosts) then begin - error "Cannot place statefile in SR %s: some hosts lack a PBD: [ %s ]" - (Ref.string_of sr) - (String.concat "; " (List.map Ref.string_of (List.set_difference all_hosts connected_hosts))); - raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) - end; - (* Check that each PBD is plugged in *) - List.iter (fun self -> - if not(Db.PBD.get_currently_attached ~__context ~self) then begin - error "Cannot place statefile in SR %s: PBD %s is not plugged" - (Ref.string_of sr) (Ref.string_of self); - (* Same exception is used in this case (see Helpers.assert_pbd_is_plugged) *) - raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) - end) pbds; - (* Check cluster stack constraints *) - Cluster_stack_constraints.assert_sr_compatible ~__context ~cluster_stack ~sr; - (* Check the exported capabilities of 
the SR's SM plugin *) - let srtype = Db.SR.get_type ~__context ~self:sr in - let open Db_filter_types in - match Db.SM.get_internal_records_where ~__context ~expr:(Eq (Field "type", Literal srtype)) with - | [] -> - (* This should never happen because the PBDs are plugged in *) - raise (Api_errors.Server_error(Api_errors.internal_error, [ "SR does not have corresponding SM record"; Ref.string_of sr; srtype ])) - | (_, sm) :: _ -> - if not (List.mem_assoc "VDI_GENERATE_CONFIG" sm.Db_actions.sM_features) - && not (List.mem_assoc "VDI_ATTACH_OFFLINE" sm.Db_actions.sM_features) - then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of sr])) + (* Check that each host has a PBD to this SR *) + let pbds = Db.SR.get_PBDs ~__context ~self:sr in + let connected_hosts = List.setify (List.map (fun self -> Db.PBD.get_host ~__context ~self) pbds) in + let all_hosts = Db.Host.get_all ~__context in + if List.length connected_hosts < (List.length all_hosts) then begin + error "Cannot place statefile in SR %s: some hosts lack a PBD: [ %s ]" + (Ref.string_of sr) + (String.concat "; " (List.map Ref.string_of (List.set_difference all_hosts connected_hosts))); + raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) + end; + (* Check that each PBD is plugged in *) + List.iter (fun self -> + if not(Db.PBD.get_currently_attached ~__context ~self) then begin + error "Cannot place statefile in SR %s: PBD %s is not plugged" + (Ref.string_of sr) (Ref.string_of self); + (* Same exception is used in this case (see Helpers.assert_pbd_is_plugged) *) + raise (Api_errors.Server_error(Api_errors.sr_no_pbds, [ Ref.string_of sr ])) + end) pbds; + (* Check cluster stack constraints *) + Cluster_stack_constraints.assert_sr_compatible ~__context ~cluster_stack ~sr; + (* Check the exported capabilities of the SR's SM plugin *) + let srtype = Db.SR.get_type ~__context ~self:sr in + let open Db_filter_types in + match Db.SM.get_internal_records_where 
~__context ~expr:(Eq (Field "type", Literal srtype)) with + | [] -> + (* This should never happen because the PBDs are plugged in *) + raise (Api_errors.Server_error(Api_errors.internal_error, [ "SR does not have corresponding SM record"; Ref.string_of sr; srtype ])) + | (_, sm) :: _ -> + if not (List.mem_assoc "VDI_GENERATE_CONFIG" sm.Db_actions.sM_features) + && not (List.mem_assoc "VDI_ATTACH_OFFLINE" sm.Db_actions.sM_features) + then raise (Api_errors.Server_error (Api_errors.sr_operation_not_supported, [Ref.string_of sr])) let list_srs_which_can_host_statefile ~__context ~cluster_stack = - List.filter (fun sr -> try assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; true - with _ -> false) (Db.SR.get_all ~__context) + List.filter (fun sr -> try assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; true + with _ -> false) (Db.SR.get_all ~__context) let create ~__context ~sr ~cluster_stack = - assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; - let size = minimum_size in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.VDI.create ~rpc ~session_id - ~name_label:"Statefile for HA" - ~name_description:"Used for storage heartbeating" - ~sR:sr ~virtual_size:size ~_type:`ha_statefile - ~sharable:true ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:statefile_sm_config ~tags:[] - ) + assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; + let size = minimum_size in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.VDI.create ~rpc ~session_id + ~name_label:"Statefile for HA" + ~name_description:"Used for storage heartbeating" + ~sR:sr ~virtual_size:size ~_type:`ha_statefile + ~sharable:true ~read_only:false ~other_config:[] ~xenstore_data:[] ~sm_config:statefile_sm_config ~tags:[] + ) (** Return a reference to a valid statefile VDI in the given SR. 
This function prefers to reuse existing VDIs to avoid confusing the heartbeat component: @@ -95,32 +95,32 @@ let create ~__context ~sr ~cluster_stack = when using LVM-based SRs the VDI could be deleted on the master but the slaves would still have access to stale data. *) let find_or_create ~__context ~sr ~cluster_stack = - assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; - let size = minimum_size in - match - List.filter - (fun self -> true - && (Db.VDI.get_type ~__context ~self = `ha_statefile) - && (Db.VDI.get_virtual_size ~__context ~self >= size)) - (Db.SR.get_VDIs ~__context ~self:sr) with - | x :: _ -> - info "re-using existing statefile: %s" (Db.VDI.get_uuid ~__context ~self:x); - x - | [] -> - info "no suitable existing statefile found; creating a fresh one"; - create ~__context ~sr ~cluster_stack + assert_sr_can_host_statefile ~__context ~sr ~cluster_stack; + let size = minimum_size in + match + List.filter + (fun self -> true + && (Db.VDI.get_type ~__context ~self = `ha_statefile) + && (Db.VDI.get_virtual_size ~__context ~self >= size)) + (Db.SR.get_VDIs ~__context ~self:sr) with + | x :: _ -> + info "re-using existing statefile: %s" (Db.VDI.get_uuid ~__context ~self:x); + x + | [] -> + info "no suitable existing statefile found; creating a fresh one"; + create ~__context ~sr ~cluster_stack let list_existing_statefiles () = - List.filter (fun x -> x.Static_vdis.reason = reason) (Static_vdis.list ()) + List.filter (fun x -> x.Static_vdis.reason = reason) (Static_vdis.list ()) (** Detach all statefiles attached with reason 'HA statefile', to clear stale state *) let detach_existing_statefiles ~__context = - let statefile_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing_statefiles ()) in - List.iter (fun uuid -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) statefile_uuids + let statefile_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing_statefiles ()) in + List.iter (fun uuid -> 
Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) statefile_uuids (** Added for CA-48539. Deactivate and detach all statefiles attached - with reason 'HA statefile', to clear stale state *) + with reason 'HA statefile', to clear stale state *) let deactivate_and_detach_existing_statefiles ~__context = - let statefile_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing_statefiles ()) in - List.iter (fun uuid -> Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid) statefile_uuids ; - List.iter (fun uuid -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) statefile_uuids + let statefile_uuids = List.map (fun vdi -> vdi.Static_vdis.uuid) (list_existing_statefiles ()) in + List.iter (fun uuid -> Static_vdis.permanent_vdi_deactivate_by_uuid ~__context ~uuid) statefile_uuids ; + List.iter (fun uuid -> Static_vdis.permanent_vdi_detach_by_uuid ~__context ~uuid) statefile_uuids diff --git a/ocaml/xapi/xmlrpc_sexpr.ml b/ocaml/xapi/xmlrpc_sexpr.ml index 8f446feada3..43db1283c53 100644 --- a/ocaml/xapi/xmlrpc_sexpr.ml +++ b/ocaml/xapi/xmlrpc_sexpr.ml @@ -29,57 +29,57 @@ open Stdext.Xstringext with contents (tag child1 child2 ... childn) where: - tag is an SExpr.String - child is an SExpr.t (String or Node) - exception: - - 'member' tags are not in sexpr because they + exception: + - 'member' tags are not in sexpr because they are basically redundant information inside struct children. - security notes: + security notes: 1. there is no verification that the incoming xml-rpc tree conforms to the xml-rpc specification. an incorrect xml-rpc tree might result in an unexpected sexpr mapping. therefore, this function should not be used to process unsanitized/untrusted xml-rpc trees. 
*) -let xmlrpc_to_sexpr (root:xml) = - - let rec visit (h:int) (xml_lt:xml list) = match (h, xml_lt) with - | h, [] -> [] - | h, (PCData text)::_ -> - let text = String.strip String.isspace text in - SExpr.String text::[] - - (* empty s have default value '' *) - | h,((Element ("value", _, []))::siblings) -> - (SExpr.String "")::(visit (h) siblings) - - (* ,, tags: ignore them and go to children *) - | h,((Element ("data", _, children))::siblings) - | h,((Element ("value", _, children))::siblings) - | h,((Element ("name", _, children))::siblings) -> - (visit (h+1) children)@(visit (h) siblings) - - (* tags *) - | h,((Element ("member", _, children))::siblings) -> - let (mychildren:SExpr.t list) = visit (h+1) children in - let anode = (SExpr.Node (mychildren)) in - let (mysiblings:SExpr.t list) = visit (h) siblings in - if (List.length mychildren) = 2 then (*name & value?*) - begin match (List.nth mychildren 0) with - |(SExpr.String name) -> (*is name a string?*) - anode::mysiblings (*then add member anode*) - |_ -> - mysiblings (*ignore incorrect member*) - end - else mysiblings (*ignore incorrect member*) - - (* any other element *) - | h,((Element (tag, _, children))::siblings) -> - let tag = String.strip String.isspace tag in - let mytag = (SExpr.String tag) in - let (mychildren:SExpr.t list) = visit (h+1) children in - let anode = (SExpr.Node (mytag::mychildren)) in - let (mysiblings:SExpr.t list) = visit (h) siblings in - anode::mysiblings - in - List.hd (visit 0 (root::[])) +let xmlrpc_to_sexpr (root:xml) = + + let rec visit (h:int) (xml_lt:xml list) = match (h, xml_lt) with + | h, [] -> [] + | h, (PCData text)::_ -> + let text = String.strip String.isspace text in + SExpr.String text::[] + + (* empty s have default value '' *) + | h,((Element ("value", _, []))::siblings) -> + (SExpr.String "")::(visit (h) siblings) + + (* ,, tags: ignore them and go to children *) + | h,((Element ("data", _, children))::siblings) + | h,((Element ("value", _, 
children))::siblings) + | h,((Element ("name", _, children))::siblings) -> + (visit (h+1) children)@(visit (h) siblings) + + (* tags *) + | h,((Element ("member", _, children))::siblings) -> + let (mychildren:SExpr.t list) = visit (h+1) children in + let anode = (SExpr.Node (mychildren)) in + let (mysiblings:SExpr.t list) = visit (h) siblings in + if (List.length mychildren) = 2 then (*name & value?*) + begin match (List.nth mychildren 0) with + |(SExpr.String name) -> (*is name a string?*) + anode::mysiblings (*then add member anode*) + |_ -> + mysiblings (*ignore incorrect member*) + end + else mysiblings (*ignore incorrect member*) + + (* any other element *) + | h,((Element (tag, _, children))::siblings) -> + let tag = String.strip String.isspace tag in + let mytag = (SExpr.String tag) in + let (mychildren:SExpr.t list) = visit (h+1) children in + let anode = (SExpr.Node (mytag::mychildren)) in + let (mysiblings:SExpr.t list) = visit (h) siblings in + anode::mysiblings + in + List.hd (visit 0 (root::[])) (** Accepts a tree of s-expressions of type SExpr.t with contents (tag child1 child2 ... childn) @@ -89,76 +89,76 @@ let xmlrpc_to_sexpr (root:xml) = with contents [child1] [child2] ... [childn] where: - tag is an xml tag. - child is an xml tag or a pcdata. - exception: - - 'member' tags are not in sexpr because they + exception: + - 'member' tags are not in sexpr because they are redundant information inside struct children. - security notes: + security notes: 1. there is no verification that the incoming sexpr trees conforms to the output of xmlrpc_to_sexpr. an incorrect sexpr tree might result in an unexpected xml-rpc mapping. therefore, this function should not be used to process unsanitized/untrusted sexpr trees. 
*) -let sexpr_to_xmlrpc (root:SExpr.t) = - - let encase_with (container:string) (el:xml) = - (Element (container,[],el::[])) - in - let is_not_empty_tag (el:xml) = match (el) with - | (Element ("",_,_)) -> false - | _ -> true - in - let rec visit (h:int) (parent:SExpr.t) (sexpr:SExpr.t) = match (h, parent, sexpr) with - - (* sexpr representing a struct with member tags *) - | h, (SExpr.Node (SExpr.String "struct"::_)), (SExpr.Node (SExpr.String name:: avalue ::_))-> - begin match (avalue) with - |SExpr.String "" -> - (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],[])::[])) - |SExpr.String value -> - (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],PCData value::[])::[])) - |SExpr.Node _ as somenode -> - (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],(visit (h+1) (SExpr.String "member") (somenode))::[])::[])) - |_ -> (Element ("WRONG_SEXPR_MEMBER",[],[])) - end - - (* member tag without values - wrong format - defaults to empty value *) - | h, (SExpr.Node (SExpr.String "struct"::_)), (SExpr.Node (SExpr.String name:: []))-> - (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],[])::[])) - - (* sexpr representing array tags *) - | h, _, (SExpr.Node (SExpr.String "array"::values)) -> - let xmlvalues = (List.map (visit (h+1) sexpr) values) in - (Element ("array",[],Element ("data",[],List.map (encase_with "value") xmlvalues)::[])) - - (* sexpr representing any other tag with children *) - | h, _, (SExpr.Node (SExpr.String tag::atail)) -> - let xmlvalues = (List.map (visit (h+1) sexpr) atail) in - let xml_noemptytags = List.filter (is_not_empty_tag) xmlvalues in - (Element (tag, [], xml_noemptytags)) - - (* sexpr representing a pcdata *) - | h, _, (SExpr.String s) -> - (PCData s) - - (* sexpr representing a nameless tag *) - | h, _, (SExpr.Node []) -> - (Element ("EMPTY_SEXPR",[],[])) - - (* otherwise, we reached a senseless sexpr *) - | _ -> (Element 
("WRONG_SEXPR",[],[])) - in - (encase_with "value" (visit 0 (SExpr.Node []) root)) +let sexpr_to_xmlrpc (root:SExpr.t) = + + let encase_with (container:string) (el:xml) = + (Element (container,[],el::[])) + in + let is_not_empty_tag (el:xml) = match (el) with + | (Element ("",_,_)) -> false + | _ -> true + in + let rec visit (h:int) (parent:SExpr.t) (sexpr:SExpr.t) = match (h, parent, sexpr) with + + (* sexpr representing a struct with member tags *) + | h, (SExpr.Node (SExpr.String "struct"::_)), (SExpr.Node (SExpr.String name:: avalue ::_))-> + begin match (avalue) with + |SExpr.String "" -> + (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],[])::[])) + |SExpr.String value -> + (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],PCData value::[])::[])) + |SExpr.Node _ as somenode -> + (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],(visit (h+1) (SExpr.String "member") (somenode))::[])::[])) + |_ -> (Element ("WRONG_SEXPR_MEMBER",[],[])) + end + + (* member tag without values - wrong format - defaults to empty value *) + | h, (SExpr.Node (SExpr.String "struct"::_)), (SExpr.Node (SExpr.String name:: []))-> + (Element ("member",[],Element ("name",[],PCData name::[])::Element ("value",[],[])::[])) + + (* sexpr representing array tags *) + | h, _, (SExpr.Node (SExpr.String "array"::values)) -> + let xmlvalues = (List.map (visit (h+1) sexpr) values) in + (Element ("array",[],Element ("data",[],List.map (encase_with "value") xmlvalues)::[])) + + (* sexpr representing any other tag with children *) + | h, _, (SExpr.Node (SExpr.String tag::atail)) -> + let xmlvalues = (List.map (visit (h+1) sexpr) atail) in + let xml_noemptytags = List.filter (is_not_empty_tag) xmlvalues in + (Element (tag, [], xml_noemptytags)) + + (* sexpr representing a pcdata *) + | h, _, (SExpr.String s) -> + (PCData s) + + (* sexpr representing a nameless tag *) + | h, _, (SExpr.Node []) -> + (Element 
("EMPTY_SEXPR",[],[])) + + (* otherwise, we reached a senseless sexpr *) + | _ -> (Element ("WRONG_SEXPR",[],[])) + in + (encase_with "value" (visit 0 (SExpr.Node []) root)) (** helper function that maps between sexpr strings and xml-rpc trees *) let sexpr_str_to_xmlrpc (sexpr_str:string) = - let sroot1 = SExpr_TS.of_string sexpr_str in - let xroot1 = sexpr_to_xmlrpc sroot1 in - match xroot1 with - | Element("value", [], [Element("WRONG_SEXPR",[],[])]) -> None - | _ -> Some xroot1 + let sroot1 = SExpr_TS.of_string sexpr_str in + let xroot1 = sexpr_to_xmlrpc sroot1 in + match xroot1 with + | Element("value", [], [Element("WRONG_SEXPR",[],[])]) -> None + | _ -> Some xroot1 (** helper function that maps between xml-rpc trees and sexpr strings *) let xmlrpc_to_sexpr_str xml= - let sroot1 = xmlrpc_to_sexpr xml in - SExpr.string_of sroot1 + let sroot1 = xmlrpc_to_sexpr xml in + SExpr.string_of sroot1 diff --git a/ocaml/xe-cli/newcli.ml b/ocaml/xe-cli/newcli.ml index d9aa185f3d8..faf0475c32c 100644 --- a/ocaml/xe-cli/newcli.ml +++ b/ocaml/xe-cli/newcli.ml @@ -56,8 +56,8 @@ let debug fmt = exception Usage let usage () = - error "Usage: %s [-s server] [-p port] ([-u username] [-pw password] or [-pwf ]) \n" Sys.argv.(0); - error "\nA full list of commands can be obtained by running \n\t%s help -s -p \n" Sys.argv.(0) + error "Usage: %s [-s server] [-p port] ([-u username] [-pw password] or [-pwf ]) \n" Sys.argv.(0); + error "\nA full list of commands can be obtained by running \n\t%s help -s -p \n" Sys.argv.(0) let is_localhost ip = ip = "127.0.0.1" @@ -183,17 +183,17 @@ let parse_args = let rec process_args = function | [] -> [] | args -> - match parse_opt args with - | Some(k, v, rest) -> - if set_keyword(k, v) then process_args rest else process_eql args - | None -> - process_eql args + match parse_opt args with + | Some(k, v, rest) -> + if set_keyword(k, v) then process_args rest else process_eql args + | None -> + process_eql args and process_eql = function | [] -> [] | 
arg :: args -> - match parse_eql arg with - | Some(k, v) when set_keyword(k,v) -> process_args args - | _ -> arg :: process_args args in + match parse_eql arg with + | Some(k, v) when set_keyword(k,v) -> process_args args + | _ -> arg :: process_args args in fun args -> let rcs = Options.read_rc() in @@ -206,12 +206,12 @@ let parse_args = while !pos < String.length extra_args do if extra_args.[!pos] = ',' then (incr pos; i := !pos) else - if !i >= String.length extra_args - || extra_args.[!i] = ',' && extra_args.[!i-1] <> '\\' then - (let seg = String.sub extra_args !pos (!i - !pos) in - l := String.filter_chars seg ((<>) '\\') :: !l; - incr i; pos := !i) - else incr i + if !i >= String.length extra_args + || extra_args.[!i] = ',' && extra_args.[!i-1] <> '\\' then + (let seg = String.sub extra_args !pos (!i - !pos) in + l := String.filter_chars seg ((<>) '\\') :: !l; + incr i; pos := !i) + else incr i done; List.rev !l in let extras_rest = process_args extras in @@ -237,15 +237,15 @@ let open_tcp_ssl server = (match !ciphersuites with None -> "" | Some c -> " with ciphersuites "^c); Stunnel.set_legacy_protocol_and_ciphersuites_allowed !allow_ssl_legacy; (match !ciphersuites with - | None -> () - | Some c -> (* Use only the specified ones, none of Stunnel's built-in defaults. *) - Stunnel.set_good_ciphersuites c; - Stunnel.set_legacy_ciphersuites "" + | None -> () + | Some c -> (* Use only the specified ones, none of Stunnel's built-in defaults. *) + Stunnel.set_good_ciphersuites c; + Stunnel.set_legacy_ciphersuites "" ); (* We don't bother closing fds since this requires our close_and_exec wrapper *) let x = Stunnel.connect ~use_fork_exec_helper:false - ~write_to_log:(fun x -> debug "stunnel: %s\n%!" x) - ~extended_diagnosis:(!debug_file <> None) server port in + ~write_to_log:(fun x -> debug "stunnel: %s\n%!" 
x) + ~extended_diagnosis:(!debug_file <> None) server port in if !stunnel_process = None then stunnel_process := Some x; Unix.in_channel_of_descr x.Stunnel.fd, Unix.out_channel_of_descr x.Stunnel.fd @@ -272,23 +272,23 @@ let http_response_code x = match String.split ' ' x with | _ -> failwith "Bad response from HTTP server" let copy_with_heartbeat ?(block=65536) in_ch out_ch heartbeat_fun = - let buf = String.make block '\000' in - let last_heartbeat = ref (Unix.time()) in - let finish = ref false in - while not !finish do - let bytes = input in_ch buf 0 block in - if bytes <> 0 then - output out_ch buf 0 bytes - else begin - flush out_ch; - finish := true - end; - let now = Unix.time () in - if now -. !last_heartbeat >= heartbeat_interval then begin - heartbeat_fun (); - last_heartbeat := now - end - done + let buf = String.make block '\000' in + let last_heartbeat = ref (Unix.time()) in + let finish = ref false in + while not !finish do + let bytes = input in_ch buf 0 block in + if bytes <> 0 then + output out_ch buf 0 bytes + else begin + flush out_ch; + finish := true + end; + let now = Unix.time () in + if now -. 
!last_heartbeat >= heartbeat_interval then begin + heartbeat_fun (); + last_heartbeat := now + end + done exception Http_failure exception Connect_failure @@ -299,29 +299,29 @@ exception Unexpected_msg of message exception Server_internal_error let handle_unmarshal_failure ex ifd = match ex with - | Unmarshal_failure (e, s) -> - let s = s ^ Stdext.Unixext.try_read_string ifd in - debug "Read: %s\n" s; - if String.length s >= 4 && String.uppercase (String.sub s 0 4) = "HTTP" - then raise Server_internal_error - else raise e - | e -> raise e + | Unmarshal_failure (e, s) -> + let s = s ^ Stdext.Unixext.try_read_string ifd in + debug "Read: %s\n" s; + if String.length s >= 4 && String.uppercase (String.sub s 0 4) = "HTTP" + then raise Server_internal_error + else raise e + | e -> raise e let main_loop ifd ofd = (* Intially exchange version information *) let major', minor' = - try unmarshal_protocol ifd with - | Unmarshal_failure (_, "") -> raise Connect_failure - | e -> handle_unmarshal_failure e ifd in + try unmarshal_protocol ifd with + | Unmarshal_failure (_, "") -> raise Connect_failure + | e -> handle_unmarshal_failure e ifd in let msg = Printf.sprintf "Server has protocol version %d.%d. Client has %d.%d" major' minor' major minor in debug "%s\n%!" msg; if major' <> major then raise (Protocol_version_mismatch msg); let with_heartbeat = - major' * 10 + minor' >= int_of_float (heartbeat_version *. 10.) in + major' * 10 + minor' >= int_of_float (heartbeat_version *. 10.) in let heartbeat_fun = - if with_heartbeat then (fun () -> marshal ofd (Response Wait)) - else ignore in + if with_heartbeat then (fun () -> marshal ofd (Response Wait)) + else ignore in marshal_protocol ofd; let exit_code = ref None in @@ -330,351 +330,351 @@ let main_loop ifd ofd = of Stunnel every now and then, for better debug/dignosis. 
*) while (match Unix.select [ifd] [] [] 5.0 with - | _ :: _, _, _ -> false - | _ -> - match !stunnel_process with - | Some { Stunnel.pid = Stunnel.FEFork pid } -> begin - match Forkhelpers.waitpid_nohang pid with - | 0, _ -> true - | i, e -> raise (Stunnel_exit (i, e)) - end - | Some {Stunnel.pid = Stunnel.StdFork pid} -> begin - match Unix.waitpid [Unix.WNOHANG] pid with - | 0, _ -> true - | i, e -> raise (Stunnel_exit (i, e)) - end - | _ -> true) do () + | _ :: _, _, _ -> false + | _ -> + match !stunnel_process with + | Some { Stunnel.pid = Stunnel.FEFork pid } -> begin + match Forkhelpers.waitpid_nohang pid with + | 0, _ -> true + | i, e -> raise (Stunnel_exit (i, e)) + end + | Some {Stunnel.pid = Stunnel.StdFork pid} -> begin + match Unix.waitpid [Unix.WNOHANG] pid with + | 0, _ -> true + | i, e -> raise (Stunnel_exit (i, e)) + end + | _ -> true) do () done; - let cmd = - try unmarshal ifd - with e -> handle_unmarshal_failure e ifd in + let cmd = + try unmarshal ifd + with e -> handle_unmarshal_failure e ifd in debug "Read: %s\n%!" (string_of_message cmd); flush stderr; match cmd with | Command (Print x) -> print_endline x; flush stdout | Command (PrintStderr x) -> Printf.fprintf stderr "%s%!" x | Command (Debug x) -> debug "debug from server: %s\n%!" 
x | Command (Load x) -> - begin - try - let fd = Unix.openfile x [ Unix.O_RDONLY ] 0 in - marshal ofd (Response OK); - let length = (Unix.stat x).Unix.st_size in - marshal ofd (Blob (Chunk (Int32.of_int length))); - let buffer = String.make (1024 * 1024 * 10) '\000' in - let left = ref length in - while !left > 0 do - let n = Unix.read fd buffer 0 (min (String.length buffer) !left) in - Stdext.Unixext.really_write ofd buffer 0 n; - left := !left - n - done; - marshal ofd (Blob End); - Unix.close fd - with - | e -> marshal ofd (Response Failed) - end + begin + try + let fd = Unix.openfile x [ Unix.O_RDONLY ] 0 in + marshal ofd (Response OK); + let length = (Unix.stat x).Unix.st_size in + marshal ofd (Blob (Chunk (Int32.of_int length))); + let buffer = String.make (1024 * 1024 * 10) '\000' in + let left = ref length in + while !left > 0 do + let n = Unix.read fd buffer 0 (min (String.length buffer) !left) in + Stdext.Unixext.really_write ofd buffer 0 n; + left := !left - n + done; + marshal ofd (Blob End); + Unix.close fd + with + | e -> marshal ofd (Response Failed) + end | Command (HttpConnect(url)) -> - let server, path = parse_url url in - (* The releatively complex design here helps to buffer input/output - when the underlying connection temporarily breaks, hence provides - seemingly continous connection. 
*) - let block = 65536 in - let buf_local = String.make block '\000' in - let buf_local_end = ref 0 in - let buf_local_start = ref 0 in - let buf_remote = String.make block '\000' in - let buf_remote_end = ref 0 in - let buf_remote_start = ref 0 in - let final = ref false in - let tc_save = ref None in - let connection ic oc = - let fd = Unix.descr_of_out_channel oc in - Printf.fprintf oc "CONNECT %s HTTP/1.0\r\ncontent-length: 0\r\n\r\n" path; - flush oc; - let resultline = input_line ic in - let _ = read_rest_of_headers ic in - (* Get the result header immediately *) - begin match http_response_code resultline with - | 200 -> - if !tc_save = None then begin - (* Remember the current terminal state so we can restore it *) - let tc = Unix.tcgetattr Unix.stdin in - (* Switch into a raw mode, passing through stuff like Control + C *) - let tc' = { - tc with - Unix.c_ignbrk = false; - Unix.c_brkint = false; - Unix.c_parmrk = false; - Unix.c_istrip = false; - Unix.c_inlcr = false; - Unix.c_igncr = false; - Unix.c_icrnl = false; - Unix.c_ixon = false; - Unix.c_opost = false; - Unix.c_echo = false; - Unix.c_echonl = false; - Unix.c_icanon = false; - Unix.c_isig = false; - (* IEXTEN? 
*) - Unix.c_csize = 8; - Unix.c_parenb = false; - Unix.c_vmin = 0; - Unix.c_vtime = 0; - } in - Unix.tcsetattr Unix.stdin Unix.TCSANOW tc'; - tc_save := Some tc - end; - let finished = ref false in - let last_heartbeat = ref (Unix.time ()) in - while not !finished do - if !buf_local_start <> !buf_local_end then begin - let b = Unix.write Unix.stdout buf_local - !buf_local_start (!buf_local_end - !buf_local_start) - in - buf_local_start := !buf_local_start + b; - if !buf_local_start = !buf_local_end then - (buf_local_start := 0; buf_local_end := 0) - end - else if !buf_remote_start <> !buf_remote_end then begin - let b = Unix.write fd buf_remote !buf_remote_start - (!buf_remote_end - !buf_remote_start) in - buf_remote_start := !buf_remote_start + b; - if !buf_remote_start = !buf_remote_end then - (buf_remote_start := 0; buf_remote_end := 0) - end - else if !final then finished := true - else begin - let r, _, _ = - Unix.select [Unix.stdin; fd] [] [] heartbeat_interval in - let now = Unix.time () in - if now -. 
!last_heartbeat >= heartbeat_interval then begin - heartbeat_fun (); - last_heartbeat := now - end; - if List.mem Unix.stdin r then begin - let b = Unix.read Unix.stdin buf_remote - !buf_remote_end (block - !buf_remote_end) in - let i = ref !buf_remote_end in - while !i < !buf_remote_end + b && Char.code buf_remote.[!i] <> 0x1d do incr i; done; - if !i < !buf_remote_end + b then final := true; - buf_remote_end := !i - end; - if List.mem fd r then begin - let b = Unix.read fd buf_local - !buf_local_end (block - !buf_local_end) in - buf_local_end := !buf_local_end + b - end - end - done; - marshal ofd (Response OK) - | 404 -> - Printf.fprintf stderr "Server replied with HTTP 404: the console is not available\n"; - marshal ofd (Response Failed) - | x -> - Printf.fprintf stderr "Server said: %s" resultline; - marshal ofd (Response Failed) - end in - let delay = ref 0.1 in - let rec keep_connection () = - try - let ic, oc = open_tcp server in - delay := 0.1; - Stdext.Pervasiveext.finally - (fun () -> connection ic oc) - (fun () -> try close_in ic with _ -> ()) - with - | Unix.Unix_error (_, _, _) - when !delay <= long_connection_retry_timeout -> - ignore (Unix.select [] [] [] !delay); - delay := !delay *. 2.; - keep_connection () - | e -> - prerr_endline (Printexc.to_string e); - marshal ofd (Response Failed) in - keep_connection (); - (match !tc_save with - | Some tc -> - Unix.tcsetattr Unix.stdin Unix.TCSANOW tc; - print_endline "\r" - | None -> ()) + let server, path = parse_url url in + (* The releatively complex design here helps to buffer input/output + when the underlying connection temporarily breaks, hence provides + seemingly continous connection. 
*) + let block = 65536 in + let buf_local = String.make block '\000' in + let buf_local_end = ref 0 in + let buf_local_start = ref 0 in + let buf_remote = String.make block '\000' in + let buf_remote_end = ref 0 in + let buf_remote_start = ref 0 in + let final = ref false in + let tc_save = ref None in + let connection ic oc = + let fd = Unix.descr_of_out_channel oc in + Printf.fprintf oc "CONNECT %s HTTP/1.0\r\ncontent-length: 0\r\n\r\n" path; + flush oc; + let resultline = input_line ic in + let _ = read_rest_of_headers ic in + (* Get the result header immediately *) + begin match http_response_code resultline with + | 200 -> + if !tc_save = None then begin + (* Remember the current terminal state so we can restore it *) + let tc = Unix.tcgetattr Unix.stdin in + (* Switch into a raw mode, passing through stuff like Control + C *) + let tc' = { + tc with + Unix.c_ignbrk = false; + Unix.c_brkint = false; + Unix.c_parmrk = false; + Unix.c_istrip = false; + Unix.c_inlcr = false; + Unix.c_igncr = false; + Unix.c_icrnl = false; + Unix.c_ixon = false; + Unix.c_opost = false; + Unix.c_echo = false; + Unix.c_echonl = false; + Unix.c_icanon = false; + Unix.c_isig = false; + (* IEXTEN? 
*) + Unix.c_csize = 8; + Unix.c_parenb = false; + Unix.c_vmin = 0; + Unix.c_vtime = 0; + } in + Unix.tcsetattr Unix.stdin Unix.TCSANOW tc'; + tc_save := Some tc + end; + let finished = ref false in + let last_heartbeat = ref (Unix.time ()) in + while not !finished do + if !buf_local_start <> !buf_local_end then begin + let b = Unix.write Unix.stdout buf_local + !buf_local_start (!buf_local_end - !buf_local_start) + in + buf_local_start := !buf_local_start + b; + if !buf_local_start = !buf_local_end then + (buf_local_start := 0; buf_local_end := 0) + end + else if !buf_remote_start <> !buf_remote_end then begin + let b = Unix.write fd buf_remote !buf_remote_start + (!buf_remote_end - !buf_remote_start) in + buf_remote_start := !buf_remote_start + b; + if !buf_remote_start = !buf_remote_end then + (buf_remote_start := 0; buf_remote_end := 0) + end + else if !final then finished := true + else begin + let r, _, _ = + Unix.select [Unix.stdin; fd] [] [] heartbeat_interval in + let now = Unix.time () in + if now -. 
!last_heartbeat >= heartbeat_interval then begin + heartbeat_fun (); + last_heartbeat := now + end; + if List.mem Unix.stdin r then begin + let b = Unix.read Unix.stdin buf_remote + !buf_remote_end (block - !buf_remote_end) in + let i = ref !buf_remote_end in + while !i < !buf_remote_end + b && Char.code buf_remote.[!i] <> 0x1d do incr i; done; + if !i < !buf_remote_end + b then final := true; + buf_remote_end := !i + end; + if List.mem fd r then begin + let b = Unix.read fd buf_local + !buf_local_end (block - !buf_local_end) in + buf_local_end := !buf_local_end + b + end + end + done; + marshal ofd (Response OK) + | 404 -> + Printf.fprintf stderr "Server replied with HTTP 404: the console is not available\n"; + marshal ofd (Response Failed) + | x -> + Printf.fprintf stderr "Server said: %s" resultline; + marshal ofd (Response Failed) + end in + let delay = ref 0.1 in + let rec keep_connection () = + try + let ic, oc = open_tcp server in + delay := 0.1; + Stdext.Pervasiveext.finally + (fun () -> connection ic oc) + (fun () -> try close_in ic with _ -> ()) + with + | Unix.Unix_error (_, _, _) + when !delay <= long_connection_retry_timeout -> + ignore (Unix.select [] [] [] !delay); + delay := !delay *. 2.; + keep_connection () + | e -> + prerr_endline (Printexc.to_string e); + marshal ofd (Response Failed) in + keep_connection (); + (match !tc_save with + | Some tc -> + Unix.tcsetattr Unix.stdin Unix.TCSANOW tc; + print_endline "\r" + | None -> ()) | Command (HttpPut(filename, url)) -> - begin - try - let rec doit url = - let (server,path) = parse_url url in - if not (Sys.file_exists filename) then - raise (ClientSideError (Printf.sprintf "file '%s' does not exist" filename)); - (* If we can tell the file size then supply a content-length header-- - this will make the progress bar work. If we can't tell the file size - (e.g. because it's a pipe) then we provide no header and rely on EOF - to signal the upload is complete. 
*) - let content_length = - let stats = Unix.LargeFile.stat filename in - if stats.Unix.LargeFile.st_kind = Unix.S_REG - then Printf.sprintf "\r\nContent-length: %Ld" stats.Unix.LargeFile.st_size - else "" in - let file_ch = open_in_bin filename in - let ic, oc = open_tcp server in - debug "PUTting to path [%s]\n%!" path; - Printf.fprintf oc "PUT %s HTTP/1.0%s\r\n\r\n" path content_length; - flush oc; - let resultline = input_line ic in - let headers = read_rest_of_headers ic in - (* Get the result header immediately *) - match http_response_code resultline with - | 200 -> - Stdext.Pervasiveext.finally - (fun () -> - copy_with_heartbeat file_ch oc heartbeat_fun; - marshal ofd (Response OK)) - (fun () -> - (try close_in ic with _ -> ()); - (try close_in file_ch with _ -> ())) - | 302 -> - let newloc = List.assoc "location" headers in - (try close_in ic with _ -> ()); (* Nb. Unix.close_connection only requires the in_channel *) - doit newloc - | _ -> failwith "Unhandled response code" - in - doit url - with - | ClientSideError msg -> - marshal ofd (Response Failed); - Printf.fprintf stderr "Operation failed. Error: %s\n" msg; - exit_code := Some 1 - | e -> - debug "HttpPut failure: %s\n%!" (Printexc.to_string e); - (* Assume the server will figure out what's wrong and tell us over - the normal communication channel *) - marshal ofd (Response Failed) - end + begin + try + let rec doit url = + let (server,path) = parse_url url in + if not (Sys.file_exists filename) then + raise (ClientSideError (Printf.sprintf "file '%s' does not exist" filename)); + (* If we can tell the file size then supply a content-length header-- + this will make the progress bar work. If we can't tell the file size + (e.g. because it's a pipe) then we provide no header and rely on EOF + to signal the upload is complete. 
*) + let content_length = + let stats = Unix.LargeFile.stat filename in + if stats.Unix.LargeFile.st_kind = Unix.S_REG + then Printf.sprintf "\r\nContent-length: %Ld" stats.Unix.LargeFile.st_size + else "" in + let file_ch = open_in_bin filename in + let ic, oc = open_tcp server in + debug "PUTting to path [%s]\n%!" path; + Printf.fprintf oc "PUT %s HTTP/1.0%s\r\n\r\n" path content_length; + flush oc; + let resultline = input_line ic in + let headers = read_rest_of_headers ic in + (* Get the result header immediately *) + match http_response_code resultline with + | 200 -> + Stdext.Pervasiveext.finally + (fun () -> + copy_with_heartbeat file_ch oc heartbeat_fun; + marshal ofd (Response OK)) + (fun () -> + (try close_in ic with _ -> ()); + (try close_in file_ch with _ -> ())) + | 302 -> + let newloc = List.assoc "location" headers in + (try close_in ic with _ -> ()); (* Nb. Unix.close_connection only requires the in_channel *) + doit newloc + | _ -> failwith "Unhandled response code" + in + doit url + with + | ClientSideError msg -> + marshal ofd (Response Failed); + Printf.fprintf stderr "Operation failed. Error: %s\n" msg; + exit_code := Some 1 + | e -> + debug "HttpPut failure: %s\n%!" (Printexc.to_string e); + (* Assume the server will figure out what's wrong and tell us over + the normal communication channel *) + marshal ofd (Response Failed) + end | Command (HttpGet(filename, url)) -> - begin - try - let rec doit url = - let (server,path) = parse_url url in - debug "Opening connection to server '%s' path '%s'\n%!" server path; - let ic, oc = open_tcp server in - Printf.fprintf oc "GET %s HTTP/1.0\r\n\r\n" path; - flush oc; - (* Get the result header immediately *) - let resultline = input_line ic in - debug "Got %s\n%!" 
resultline; - match http_response_code resultline with - | 200 -> - let file_ch = - if filename = "" then - Unix.out_channel_of_descr (Unix.dup Unix.stdout) - else - try open_out_gen [Open_wronly; Open_creat; Open_excl] 0o600 filename - with e -> raise (ClientSideError (Printexc.to_string e)) - in - while input_line ic <> "\r" do () done; - Stdext.Pervasiveext.finally - (fun () -> - copy_with_heartbeat ic file_ch heartbeat_fun; - marshal ofd (Response OK)) - (fun () -> - (try close_in ic with _ -> ()); - (try close_out file_ch with _ -> ())) - | 302 -> - let headers = read_rest_of_headers ic in - let newloc = List.assoc "location" headers in - (try close_in ic with _ -> ()); (* Nb. Unix.close_connection only requires the in_channel *) - doit newloc - | _ -> failwith "Unhandled response code" - in - doit url - with - | ClientSideError msg -> - marshal ofd (Response Failed); - Printf.fprintf stderr "Operation failed. Error: %s\n" msg; - exit_code := Some 1 - | e -> - debug "HttpGet failure: %s\n%!" (Printexc.to_string e); - marshal ofd (Response Failed) - end + begin + try + let rec doit url = + let (server,path) = parse_url url in + debug "Opening connection to server '%s' path '%s'\n%!" server path; + let ic, oc = open_tcp server in + Printf.fprintf oc "GET %s HTTP/1.0\r\n\r\n" path; + flush oc; + (* Get the result header immediately *) + let resultline = input_line ic in + debug "Got %s\n%!" 
resultline; + match http_response_code resultline with + | 200 -> + let file_ch = + if filename = "" then + Unix.out_channel_of_descr (Unix.dup Unix.stdout) + else + try open_out_gen [Open_wronly; Open_creat; Open_excl] 0o600 filename + with e -> raise (ClientSideError (Printexc.to_string e)) + in + while input_line ic <> "\r" do () done; + Stdext.Pervasiveext.finally + (fun () -> + copy_with_heartbeat ic file_ch heartbeat_fun; + marshal ofd (Response OK)) + (fun () -> + (try close_in ic with _ -> ()); + (try close_out file_ch with _ -> ())) + | 302 -> + let headers = read_rest_of_headers ic in + let newloc = List.assoc "location" headers in + (try close_in ic with _ -> ()); (* Nb. Unix.close_connection only requires the in_channel *) + doit newloc + | _ -> failwith "Unhandled response code" + in + doit url + with + | ClientSideError msg -> + marshal ofd (Response Failed); + Printf.fprintf stderr "Operation failed. Error: %s\n" msg; + exit_code := Some 1 + | e -> + debug "HttpGet failure: %s\n%!" 
(Printexc.to_string e); + marshal ofd (Response Failed) + end | Command Prompt -> - let data = input_line stdin in - marshal ofd (Blob (Chunk (Int32.of_int (String.length data)))); - ignore (Unix.write ofd data 0 (String.length data)); - marshal ofd (Blob End) + let data = input_line stdin in + marshal ofd (Blob (Chunk (Int32.of_int (String.length data)))); + ignore (Unix.write ofd data 0 (String.length data)); + marshal ofd (Blob End) | Command (Error(code, params)) -> - error "Error code: %s\n" code; - error "Error parameters: %s\n" (String.concat ", " params) + error "Error code: %s\n" code; + error "Error parameters: %s\n" (String.concat ", " params) | Command (Exit c) -> - exit_code := Some c + exit_code := Some c | x -> - raise (Unexpected_msg x) + raise (Unexpected_msg x) done; match !exit_code with Some c -> c | _ -> assert false let main () = let exit_status = ref 1 in let _ = try - Sys.set_signal Sys.sigpipe Sys.Signal_ignore; - Sys.set_signal Sys.sigint (Sys.Signal_handle (fun _ -> exit 1)); - let xe, args = - match Array.to_list Sys.argv with - | h :: t -> h, t - | _ -> assert false in - if List.mem "-version" args then begin - Printf.printf "ThinCLI protocol: %d.%d\n" major minor; - exit 0 - end; + Sys.set_signal Sys.sigpipe Sys.Signal_ignore; + Sys.set_signal Sys.sigint (Sys.Signal_handle (fun _ -> exit 1)); + let xe, args = + match Array.to_list Sys.argv with + | h :: t -> h, t + | _ -> assert false in + if List.mem "-version" args then begin + Printf.printf "ThinCLI protocol: %d.%d\n" major minor; + exit 0 + end; - let args = parse_args args in + let args = parse_args args in - if List.length args < 1 then raise Usage else - begin - let ic, oc = open_channels () in - Printf.fprintf oc "POST /cli HTTP/1.0\r\n"; - let args = args @ [("username="^ !xapiuname);("password="^ !xapipword)] in - let args = String.concat "\n" args in - Printf.fprintf oc "User-agent: xe-cli/Unix/%d.%d\r\n" major minor; - Printf.fprintf oc "content-length: %d\r\n\r\n" 
(String.length args); - Printf.fprintf oc "%s" args; - flush_all (); - - let in_fd = Unix.descr_of_in_channel ic - and out_fd = Unix.descr_of_out_channel oc in - exit_status := main_loop in_fd out_fd - end - with - | Usage -> + if List.length args < 1 then raise Usage else + begin + let ic, oc = open_channels () in + Printf.fprintf oc "POST /cli HTTP/1.0\r\n"; + let args = args @ [("username="^ !xapiuname);("password="^ !xapipword)] in + let args = String.concat "\n" args in + Printf.fprintf oc "User-agent: xe-cli/Unix/%d.%d\r\n" major minor; + Printf.fprintf oc "content-length: %d\r\n\r\n" (String.length args); + Printf.fprintf oc "%s" args; + flush_all (); + + let in_fd = Unix.descr_of_in_channel ic + and out_fd = Unix.descr_of_out_channel oc in + exit_status := main_loop in_fd out_fd + end + with + | Usage -> exit_status := 0; usage (); - | Not_a_cli_server -> + | Not_a_cli_server -> error "Failed to contact a running management agent.\n"; error "Try specifying a server name and port.\n"; usage(); - | Protocol_version_mismatch x -> + | Protocol_version_mismatch x -> error "Protocol version mismatch: %s.\n" x; error "Try specifying a server name and port on the command-line.\n"; usage(); - | Not_found -> + | Not_found -> error "Host '%s' not found.\n" !xapiserver; - | Unix.Unix_error(err,fn,arg) -> + | Unix.Unix_error(err,fn,arg) -> error "Error: %s (calling %s %s)\n" (Unix.error_message err) fn arg - | Connect_failure -> + | Connect_failure -> error "Unable to contact server. 
Please check server and port settings.\n" - | Stunnel.Stunnel_binary_missing -> + | Stunnel.Stunnel_binary_missing -> error "Please install the stunnel package or define the XE_STUNNEL environment variable to point to the binary.\n" - | End_of_file -> + | End_of_file -> error "Lost connection to the server.\n" - | Unexpected_msg m -> - error "Unexpected message from server: %s" (string_of_message m) - | Server_internal_error -> - error "Server internal error.\n" - | Stunnel_exit (i, e) -> + | Unexpected_msg m -> + error "Unexpected message from server: %s" (string_of_message m) + | Server_internal_error -> + error "Server internal error.\n" + | Stunnel_exit (i, e) -> error "Stunnel process %d %s.\n" i (match e with | Unix.WEXITED c -> "existed with exit code " ^ string_of_int c | Unix.WSIGNALED c -> "killed by signal " ^ (Stdext.Unixext.string_of_signal c) | Unix.WSTOPPED c -> "stopped by signal " ^ string_of_int c) - | e -> + | e -> error "Unhandled exception\n%s\n" (Printexc.to_string e) in begin match !stunnel_process with - | Some p -> + | Some p -> if Sys.file_exists p.Stunnel.logfile then begin if !exit_status <> 0 then @@ -684,18 +684,18 @@ let main () = try Unix.unlink p.Stunnel.logfile with _ -> () end; Stunnel.disconnect ~wait:false ~force:true p - | None -> () + | None -> () end; begin match !debug_file, !debug_channel with - | Some f, Some ch -> begin - close_out ch; - if !exit_status <> 0 then begin - output_string stderr "\nDebug info:\n\n"; - output_string stderr (Stdext.Unixext.string_of_file f) - end; - try Unix.unlink f with _ -> () - end - | _ -> () + | Some f, Some ch -> begin + close_out ch; + if !exit_status <> 0 then begin + output_string stderr "\nDebug info:\n\n"; + output_string stderr (Stdext.Unixext.string_of_file f) + end; + try Unix.unlink f with _ -> () + end + | _ -> () end; exit !exit_status diff --git a/ocaml/xe-cli/options.ml b/ocaml/xe-cli/options.ml index 3554230fb05..7de52822267 100644 --- a/ocaml/xe-cli/options.ml +++ 
b/ocaml/xe-cli/options.ml @@ -15,31 +15,31 @@ let parse_lines ls = let rec inner ls cur = - try + try match ls with ""::ls -> inner ls cur (* skip blank lines *) | l::ls -> - let colon = String.index l '=' in - let token = String.sub l 0 colon in - let value = String.sub l (colon+1) (String.length l - colon - 1) in - inner ls ((token,value)::cur) + let colon = String.index l '=' in + let token = String.sub l 0 colon in + let value = String.sub l (colon+1) (String.length l - colon - 1) in + inner ls ((token,value)::cur) | _ -> cur - with Not_found -> + with Not_found -> Printf.fprintf stderr "Error parsing rc file. No defaults loaded\n"; [] in inner ls [] - + let read_rc () = try let home = Sys.getenv "HOME" in let rc_file = open_in (home^"/.xe") in let rec getlines cur = - try - let line = input_line rc_file in - getlines (line::cur) + try + let line = input_line rc_file in + getlines (line::cur) with - _ -> cur + _ -> cur in let lines = getlines [] in parse_lines lines @@ -49,5 +49,5 @@ let read_rc () = let insert_params params = let rc = read_rc () in let fold_func params (param,value) = - if List.mem_assoc param params then params else (param,value)::params + if List.mem_assoc param params then params else (param,value)::params in List.fold_left fold_func params rc diff --git a/ocaml/xe-cli/rt/cliops.ml b/ocaml/xe-cli/rt/cliops.ml index 5eabeef25f2..0de5ab259e9 100644 --- a/ocaml/xe-cli/rt/cliops.ml +++ b/ocaml/xe-cli/rt/cliops.ml @@ -27,8 +27,8 @@ let pic=ref "" let is_success rc s = match rc with - Unix.WEXITED i -> i=0 - | _ -> false + Unix.WEXITED i -> i=0 + | _ -> false let expect_success f = let (s,rc) = f() in @@ -43,14 +43,14 @@ let expect_failure f = if not (is_success rc s) then s else raise (CliOpFailed s) let random_mac () = - let macs = [0x00; 0x16; 0x3e] @ (List.map Random.int [0x80; 0x100; 0x100]) in - String.concat ":" (List.map (Printf.sprintf "%02x") macs) - + let macs = [0x00; 0x16; 0x3e] @ (List.map Random.int [0x80; 0x100; 0x100]) in + 
String.concat ":" (List.map (Printf.sprintf "%02x") macs) + (* Mapping of vmid to ip *) let ipmap = ref ( [] : (string * string) list) -let iface = ref "eth0" +let iface = ref "eth0" let use_gt = ref true - + (* Operations using other programs *) let get_client_domid vmid = try @@ -70,9 +70,9 @@ let get_domid_state domid = let domain = List.filter (fun (d,_,_) -> d=domid) dominfo in let (_,_,state) = List.hd domain in state - with + with _ -> "gone" - + (* Guest operations *) let run_ga_command ip command = expect_success_retry (fun () -> run_command (Printf.sprintf "%s %s %s" !Commands.guest_agent_client ip command)) @@ -86,83 +86,83 @@ let read_xs_file path = let (lines,rc) = run_command ("xenstore-read "^path) in lines with _ -> ["error"] - + (* Specific XE things *) let get_version (cli : Util.t_cli) = - let host = List.hd (expect_success (fun () -> cli "pool-list" ["params","master";"minimal","true"])) in + let host = List.hd (expect_success (fun () -> cli "pool-list" ["params","master";"minimal","true"])) in let lines = expect_success (fun()->cli "host-param-get" ["param-name","software-version"; "uuid",host]) in List.hd lines let get_short_version (cli : Util.t_cli) = - let host = List.hd (expect_success (fun () -> cli "pool-list" ["params","master";"minimal","true"])) in + let host = List.hd (expect_success (fun () -> cli "pool-list" ["params","master";"minimal","true"])) in let lines = expect_success (fun()->cli "host-param-get" ["param-name","software-version"; "param-key","build_number"; "uuid",host]) in List.hd lines let reset_xapi_log (cli : Util.t_cli) = -(* (try Sys.remove "/tmp/xapi.log" with _ -> ()); - ignore (expect_success (fun () -> cli "log-set-output" ["output","nil"])); - ignore (expect_success (fun () -> cli "log-set-output" ["output","file:/tmp/xapi.log"]))*) () + (* (try Sys.remove "/tmp/xapi.log" with _ -> ()); + ignore (expect_success (fun () -> cli "log-set-output" ["output","nil"])); + ignore (expect_success (fun () -> cli 
"log-set-output" ["output","file:/tmp/xapi.log"]))*) () let get_xapi_log (cli : Util.t_cli) = try let ic = open_in "/tmp/xapi.log" in let rec r lines = - let nextline = + let nextline = try Some (input_line ic) with _ -> None in match nextline with Some x -> r (x::lines) | None -> List.rev lines in r [] with _ -> [] - + let get_vms cli = - let lines = expect_success (fun()->cli "vm-list" - ["params","name-label"; - "minimal","true"]) in + let lines = expect_success (fun()->cli "vm-list" + ["params","name-label"; + "minimal","true"]) in String.split ',' (List.hd lines) let get_networks cli = - let lines = expect_success (fun()->cli "network-list" - ["params","uuid"; - "minimal","true"]) in + let lines = expect_success (fun()->cli "network-list" + ["params","uuid"; + "minimal","true"]) in String.split ',' (List.hd lines) let get_state cli vmid = - let lines = expect_success (fun()->cli "vm-param-get" - ["uuid",vmid; "param-name","power-state"]) in + let lines = expect_success (fun()->cli "vm-param-get" + ["uuid",vmid; "param-name","power-state"]) in List.hd lines let get_uuid cli name = - let lines = expect_success (fun () -> cli "vm-list" - ["name-label",name; "params","uuid"; "minimal","true"]) in + let lines = expect_success (fun () -> cli "vm-list" + ["name-label",name; "params","uuid"; "minimal","true"]) in List.hd lines let get_all_uuids cli = - let lines = expect_success (fun () -> cli "vm-list" - ["params","uuid"; "minimal","true"]) in + let lines = expect_success (fun () -> cli "vm-list" + ["params","uuid"; "minimal","true"]) in String.split ',' (List.hd lines) let get_vm_names cli = - let lines = expect_success (fun () -> cli "vm-list" - ["params","name-label"; "minimal","true"]) in + let lines = expect_success (fun () -> cli "vm-list" + ["params","name-label"; "minimal","true"]) in String.split ',' (List.hd lines) let get_disks cli vmid = - let lines = expect_success (fun () -> cli "vbd-list" - [("vm-uuid",vmid); "params","device"; "minimal","true"]) in + 
let lines = expect_success (fun () -> cli "vbd-list" + [("vm-uuid",vmid); "params","device"; "minimal","true"]) in String.split ',' (List.hd lines) let get_nics cli vmid = - let lines = expect_success (fun () -> cli "vif-list" - [("vm-uuid",vmid); "params","uuid"; "minimal","true"]) in + let lines = expect_success (fun () -> cli "vif-list" + [("vm-uuid",vmid); "params","uuid"; "minimal","true"]) in String.split ',' (List.hd lines) - + let get_pifs cli = - let lines = expect_success (fun () -> cli "pif-list" - ["params","uuid";"minimal","true"]) in + let lines = expect_success (fun () -> cli "pif-list" + ["params","uuid";"minimal","true"]) in String.split ',' (List.hd lines) let get_pif_device cli uuid = let lines = expect_success (fun () -> cli "pif-list" - ["params","device";"uuid",uuid;"minimal","true"]) in + ["params","device";"uuid",uuid;"minimal","true"]) in List.hd lines let shutdown cli force vmid = @@ -181,7 +181,7 @@ let export cli vmid filename = let clone cli vmid newname = let params = [("vm",vmid); ("new-name-label", newname)] in expect_success (fun () -> cli "vm-clone" params) - + let uninstall cli vmid = expect_success (fun () -> cli "vm-uninstall" [("vm",vmid); ("force","true")]) @@ -189,30 +189,30 @@ let uninstall_all_vms cli = let uuids = get_all_uuids cli in List.iter (fun u -> ignore (shutdown cli true u)) uuids; List.iter (fun u -> ignore (uninstall cli u)) uuids - + let install_guest cli (template,name) = let params = [("template-name",template); - ("name",name)] in + ("name",name)] in let _ = expect_success (fun () -> cli "vm-install" params) in let new_uuid = get_uuid cli name in get_state cli new_uuid let add_disk cli vmid (device,disksize) = let sruuid = List.hd (expect_success (fun () -> cli "pool-list" - ["params","default-SR";"minimal","true"])) in + ["params","default-SR";"minimal","true"])) in let params = [("sr-uuid",sruuid); - ("name-label","test-disk"); - ("type","user"); - ("virtual-size",disksize);] in + 
("name-label","test-disk"); + ("type","user"); + ("virtual-size",disksize);] in let vdiuuid=List.hd (expect_success (fun () -> cli "vdi-create" params)) in let vbduuid=List.hd (expect_success (fun () -> cli "vbd-create" - ["vm-uuid",vmid; "vdi-uuid",vdiuuid; "device",device])) in + ["vm-uuid",vmid; "vdi-uuid",vdiuuid; "device",device])) in (vdiuuid,vbduuid) let attach_disk cli vmid diskuuid device = let params = [("vm-uuid",vmid); - ("vdi-uuid",diskuuid); - ("device",device)] in + ("vdi-uuid",diskuuid); + ("device",device)] in let uuid = List.hd (expect_success (fun () -> cli "vbd-create" params)) in (try ignore(cli "vbd-plug" ["uuid",uuid]) with _ -> ()); uuid @@ -223,9 +223,9 @@ let detach_disk cli vmid vbduuid = let attach_cd cli vmid vdiuuid device = let params = [("vm-uuid",vmid); - ("vdi-uuid",vdiuuid); - ("device",device); - ("mode","RO"); + ("vdi-uuid",vdiuuid); + ("device",device); + ("mode","RO"); ("type","CD")] in let uuid = List.hd (expect_success (fun () -> cli "vbd-create" params)) in (try ignore(cli "vbd-plug" ["uuid",uuid]) with _ -> ()); @@ -233,7 +233,7 @@ let attach_cd cli vmid vdiuuid device = let get_dom0 cli = let params = [("is-control-domain","true"); - ("minimal","true")] in + ("minimal","true")] in let dom0 = List.hd (expect_success (fun () -> cli "vm-list" params)) in dom0 @@ -241,22 +241,22 @@ let make_bootable cli vmid = let disks_to_try = ["hda"; "sda"] in let dodisk disk = let lines = expect_success( fun () -> cli "vbd-list" - ["vm-uuid",vmid; "device",disk; "minimal", "true"]) in + ["vm-uuid",vmid; "device",disk; "minimal", "true"]) in if List.length lines > 0 then let uuid = List.hd lines in ignore(expect_success (fun () -> cli "vbd-param-set" - ["uuid",uuid; "bootable","true"])) + ["uuid",uuid; "bootable","true"])) in List.iter dodisk disks_to_try - + let get_attached_cds cli vmid = let lines = expect_success (fun () -> cli "vbd-list" [ - "vm-uuid",vmid;"type","CD";"minimal","true"]) in + 
"vm-uuid",vmid;"type","CD";"minimal","true"]) in try let result = String.split ',' (List.hd lines) in List.filter (fun s -> String.length s > 2) result with - _ -> [] + _ -> [] let detach_cd cli vbduuid = (try ignore(cli "vbd-unplug" ["uuid",vbduuid; "timeout","5.0"]) with _ -> ()); @@ -264,7 +264,7 @@ let detach_cd cli vbduuid = let get_vdi_uuid_from_name cli name = let lines = expect_success (fun () -> cli "vdi-list" ["name-label",name; - "minimal","true"]) in + "minimal","true"]) in List.hd lines (* Check that the VM has each of the listed cds attached *) @@ -279,7 +279,7 @@ let check_disk_ok cli vmid device = let allowed_vbds = String.split ';' (List.hd lines) in let allowed_vbds = List.map (Parsers.zap_whitespace) allowed_vbds in List.mem device allowed_vbds - + let check_disk cli vmid device = let lines = expect_success (fun () -> cli "vbd-list" [("vm-uuid",vmid);("userdevice",device); "minimal","true"]) in let lines2 = List.filter (fun x -> String.length x > 2) lines in @@ -312,19 +312,19 @@ let destroy_disk cli (vdiuuid,vbduuid) = let params = [("uuid",vdiuuid)] in ignore(expect_success (fun () -> cli "vdi-destroy" params)) -let add_network cli netname = +let add_network cli netname = let params = [ ("name-label", netname) ] in List.hd (expect_success (fun () -> cli "network-create" params)) - -let remove_network cli netname = + +let remove_network cli netname = let params = [ ("uuid", netname) ] in expect_success (fun () -> cli "network-destroy" params) - -let add_nic cli vmid (nicname,mac,network) = + +let add_nic cli vmid (nicname,mac,network) = let params = [("vm-uuid",vmid); - ("device",nicname); - ("mac",mac); - ("network-uuid",network)] in + ("device",nicname); + ("mac",mac); + ("network-uuid",network)] in List.hd (expect_success (fun () -> cli "vif-create" params)) let plug_nic cli uuid = @@ -337,13 +337,13 @@ let remove_nic cli uuid = let params = [("uuid",uuid)] in expect_success (fun () -> cli "vif-destroy" params) -let create_vlan cli (pif_uuid 
: string) (tag: string) (network_uuid: string) : string = +let create_vlan cli (pif_uuid : string) (tag: string) (network_uuid: string) : string = let params = [("pif-uuid", pif_uuid); - ("vlan", tag); - ("network-uuid", network_uuid) ] in + ("vlan", tag); + ("network-uuid", network_uuid) ] in List.hd (expect_success (fun () -> cli "vlan-create" params)) -let remove_pif cli uuid = +let remove_pif cli uuid = let params = [("uuid", uuid)] in expect_success (fun () -> cli "vlan-destroy" params) @@ -355,15 +355,15 @@ let get_nic_params cli uuid = let mac = get_nic_param cli uuid "MAC" in let network = get_nic_param cli uuid "network-uuid" in (device,mac,network) - + let set_param cli vmid param_name value = let params = [("uuid",vmid); - (param_name,value)] in + (param_name,value)] in expect_success (fun () -> cli "vm-param-set" params) let get_param cli vmid param_name = let params = [("uuid",vmid); - ("param-name",param_name)] in + ("param-name",param_name)] in List.nth (expect_success (fun () -> cli "vm-param-get" params)) 0 let is_currently_suspendable cli vmid = @@ -378,8 +378,8 @@ let event_wait cli vmid cls params timeout = let params = [("class",cls); ("uuid",vmid)]@params in let cmdargs = String.concat " " (List.map (fun (a,b) -> a^"="^b) params) in match run_command_with_timeout ("xe event-wait "^cmdargs) timeout with - Some (lines,rc) -> () - | None -> raise (CliOpFailed (["Timeout waiting for event";cmdargs])) + Some (lines,rc) -> () + | None -> raise (CliOpFailed (["Timeout waiting for event";cmdargs])) (** Higher level operations *) @@ -389,16 +389,16 @@ type vmop = Start | Shutdown | Reboot | Resume | Suspend let change_vm_state_fail cli vmid st = let params = [("vm",vmid)] in ignore (match st with - Start -> - expect_failure (fun () -> cli "vm-start" params) - | Shutdown -> - expect_failure (fun () -> cli "vm-shutdown" params) - | Suspend -> - expect_failure (fun () -> cli "vm-suspend" params) - | Reboot -> - expect_failure (fun () -> cli "vm-reboot" 
params) - | Resume -> - expect_failure (fun () -> cli "vm-resume" params)) + Start -> + expect_failure (fun () -> cli "vm-start" params) + | Shutdown -> + expect_failure (fun () -> cli "vm-shutdown" params) + | Suspend -> + expect_failure (fun () -> cli "vm-suspend" params) + | Reboot -> + expect_failure (fun () -> cli "vm-reboot" params) + | Resume -> + expect_failure (fun () -> cli "vm-resume" params)) let get_pic cli vmid = let domid = get_client_domid vmid in @@ -406,7 +406,7 @@ let get_pic cli vmid = let is_pv = String.endswith "pv" domainname in log Log.Info "Obtaining vnc snapshot for vm %s from domain %d" domainname domid; (if not is_pv then - ignore (expect_success (fun () -> run_command (Printf.sprintf "vncsnapshot localhost:%d /tmp/pic.jpg" (domid))))); + ignore (expect_success (fun () -> run_command (Printf.sprintf "vncsnapshot localhost:%d /tmp/pic.jpg" (domid))))); pic := "/tmp/pic.jpg" (** True if a vm can clean shutdown *) @@ -418,13 +418,13 @@ let can_clean_shutdown cli vmid = (* ping the guest agent *) exception GAFailed - + let rec ping ip attempts = begin if attempts > 500 then raise GAFailed; try ignore(run_ga_command ip "test"); - with _ -> + with _ -> Unix.sleep 1; ping ip (attempts+1) end @@ -437,27 +437,27 @@ let wait_for_up cli vmid = let mac = get_nic_param cli nic "MAC" in let iface = !iface in (* for now *) let command = (Printf.sprintf "xgetip %s %s" iface mac) in - let lines = + let lines = match run_command_with_timeout command 120.0 with - Some (lines,rc) -> lines - | None -> - get_pic cli vmid; - raise (CliOpFailed ["Timeout!"]) in + Some (lines,rc) -> lines + | None -> + get_pic cli vmid; + raise (CliOpFailed ["Timeout!"]) in let ip = List.hd lines in ipmap := (vmid,ip)::!ipmap; - ping ip 0 + ping ip 0 with - CliOpFailed ls -> - get_pic cli vmid; - log Log.Warn "wait_for_up: IP address not obtained. 
Message returned was:"; - List.iter (fun l -> log Log.Warn "%s" l) ls - | GAFailed -> - get_pic cli vmid; - log Log.Warn "wait_for_up: Guest agent failed to start"; - | e -> - get_pic cli vmid; - log Log.Warn "wait_for_up: Failed to get IP address - got exception %s" (Printexc.to_string e) - + CliOpFailed ls -> + get_pic cli vmid; + log Log.Warn "wait_for_up: IP address not obtained. Message returned was:"; + List.iter (fun l -> log Log.Warn "%s" l) ls + | GAFailed -> + get_pic cli vmid; + log Log.Warn "wait_for_up: Guest agent failed to start"; + | e -> + get_pic cli vmid; + log Log.Warn "wait_for_up: Failed to get IP address - got exception %s" (Printexc.to_string e) + (** Wait for a shutdown operation to complete. Returns true if xapi believes the domain shut down or false if it didn't *) let shutdown_wait cli vmid = try @@ -468,10 +468,10 @@ let shutdown_wait cli vmid = if domstate <> "gone" && xapistate <> "halted" then log Log.Error "Domain still up after shutdown! xapistate=%s xenstate=%s" xapistate domstate; true with - _ -> false + _ -> false (** Wait for a reboot operation to complete. Returns true if xapi believes the domain rebooted or false if it didn't *) -let reboot_wait cli vmid = +let reboot_wait cli vmid = try let start_time = get_param cli vmid "start-time" in event_wait cli vmid "vm" ["start-time", "/=" ^ start_time] 120.0; @@ -481,9 +481,9 @@ let reboot_wait cli vmid = if domstate = "gone" || xapistate = "halted" then log Log.Error "Domain went down after reboot! 
xapistate=%s xenstate=%s" xapistate domstate; true with - _ -> false + _ -> false -type run_command_output = string list * Unix.process_status +type run_command_output = string list * Unix.process_status (** Shutdown hard *) let shutdown_phase3 cli vmid = @@ -492,13 +492,13 @@ let shutdown_phase3 cli vmid = ignore(expect_success (fun () -> cli "vm-shutdown" params)) with CliOpFailed ls -> - log Log.Warn "shutdown_phase3: Shutdown failed - cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - let (_: run_command_output) = run_command !Commands.list_domains in - raise (OpFailed "Shutdown failed!") + log Log.Warn "shutdown_phase3: Shutdown failed - cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + let (_: run_command_output) = run_command !Commands.list_domains in + raise (OpFailed "Shutdown failed!") | e -> - log Log.Warn "shutdown_phase2: Exception caught: %s" (Printexc.to_string e); - raise (OpFailed "Shutdown failed!") + log Log.Warn "shutdown_phase2: Exception caught: %s" (Printexc.to_string e); + raise (OpFailed "Shutdown failed!") (** Shutdown clean *) let shutdown_phase2 cli vmid = @@ -507,47 +507,47 @@ let shutdown_phase2 cli vmid = try if can_clean_shutdown cli vmid then begin - ignore(expect_success (fun () -> cli "vm-shutdown" params)); - if shutdown_wait cli vmid - then () - else next () + ignore(expect_success (fun () -> cli "vm-shutdown" params)); + if shutdown_wait cli vmid + then () + else next () end else next () with CliOpFailed ls -> - log Log.Warn "shutdown_phase2: Shutdown failed - cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - let (_: run_command_output) = run_command !Commands.list_domains in - next () + log Log.Warn "shutdown_phase2: Shutdown failed - cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + let (_: run_command_output) = run_command !Commands.list_domains in + next () | (OpFailed x) as e -> - raise e + raise e | e -> - log Log.Warn "shutdown_phase2: Exception caught: %s" 
(Printexc.to_string e); - next () - + log Log.Warn "shutdown_phase2: Exception caught: %s" (Printexc.to_string e); + next () + (** Use the guest agent to shutdown the domain *) let shutdown_phase1 cli vmid = let next () = shutdown_phase2 cli vmid in if not !use_gt then next () else - try - if List.mem_assoc vmid !ipmap - then - begin - ignore(run_ga_command (List.assoc vmid !ipmap) "shutdown 10"); - if shutdown_wait cli vmid - then () - else next () - end - else - begin - log Log.Warn "shutdown_phase1: Use of guest agent requested, but IP address for VM is not known"; - next () - end - with - (OpFailed x) as e -> + try + if List.mem_assoc vmid !ipmap + then + begin + ignore(run_ga_command (List.assoc vmid !ipmap) "shutdown 10"); + if shutdown_wait cli vmid + then () + else next () + end + else + begin + log Log.Warn "shutdown_phase1: Use of guest agent requested, but IP address for VM is not known"; + next () + end + with + (OpFailed x) as e -> let (_: run_command_output) = run_command !Commands.list_domains in raise e - | e -> + | e -> log Log.Warn "shutdown_phase1: Exception caught: %s" (Printexc.to_string e); next () @@ -556,98 +556,98 @@ let reboot_phase2 cli vmid = try let params = [("vm",vmid)] in ignore(expect_success (fun () -> cli "vm-reboot" params)); -(* shutdown_wait cli vmid; -- vm-reboot only returns when the old domain has been destroyed - event_wait cli vmid "vm" ["power-state","running"] 10.0; *) + (* shutdown_wait cli vmid; -- vm-reboot only returns when the old domain has been destroyed + event_wait cli vmid "vm" ["power-state","running"] 10.0; *) wait_for_up cli vmid with CliOpFailed ls -> - log Log.Error "reboot_phase2: Reboot failed: cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - let (_: run_command_output) = run_command !Commands.list_domains in - raise (OpFailed "Failed to reboot") + log Log.Error "reboot_phase2: Reboot failed: cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + let (_: run_command_output) = 
run_command !Commands.list_domains in + raise (OpFailed "Failed to reboot") | e -> - log Log.Error "reboot_phase2: Reboot failed: exception caught: %s" (Printexc.to_string e); - raise (OpFailed "Failed to reboot") - + log Log.Error "reboot_phase2: Reboot failed: exception caught: %s" (Printexc.to_string e); + raise (OpFailed "Failed to reboot") + (** Reboot via the guest agent *) let reboot_phase1 cli vmid = let next () = reboot_phase2 cli vmid in if not !use_gt then next () else - try - if List.mem_assoc vmid !ipmap - then - begin - ignore(run_ga_command (List.assoc vmid !ipmap) "reboot 10"); - (* Guest powerstate nolonger glitches to Halted in the middle of a reboot *) - let (_: bool) = reboot_wait cli vmid in - wait_for_up cli vmid; - end - else - begin - log Log.Warn "reboot_phase1: Use of guest agent requested, but no IP address for VM available!"; - next () - end - with + try + if List.mem_assoc vmid !ipmap + then + begin + ignore(run_ga_command (List.assoc vmid !ipmap) "reboot 10"); + (* Guest powerstate nolonger glitches to Halted in the middle of a reboot *) + let (_: bool) = reboot_wait cli vmid in + wait_for_up cli vmid; + end + else + begin + log Log.Warn "reboot_phase1: Use of guest agent requested, but no IP address for VM available!"; + next () + end + with | (OpFailed x) as e -> (* next might have raise this, in which case, pass it through *) - let (_: run_command_output) = run_command !Commands.list_domains in - raise e + let (_: run_command_output) = run_command !Commands.list_domains in + raise e | CliOpFailed ls -> - log Log.Warn "reboot_phase1: Cli op failed: %s" (String.concat "; " ls); - next () + log Log.Warn "reboot_phase1: Cli op failed: %s" (String.concat "; " ls); + next () | e -> - log Log.Warn "reboot_phase1: Exception caught: %s" (Printexc.to_string e); - next () + log Log.Warn "reboot_phase1: Exception caught: %s" (Printexc.to_string e); + next () let change_vm_state cli vmid st = let params = [("vm",vmid)] in let domid = 
get_client_domid vmid in ignore begin match st with - Start -> - begin - try - let (_: string list) = expect_success (fun () -> cli "vm-start" params) in - let domid = get_client_domid vmid in - log Log.Info "New domid: %d" domid; - log Log.Info "Waiting for VM to start..."; - wait_for_up cli vmid; - with - CliOpFailed ls -> - log Log.Error "change_vm_state: VM start failed: cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - raise (OpFailed "Failed to start VM") - end - | Shutdown -> - shutdown_phase1 cli vmid; - (try ipmap := List.remove_assoc vmid !ipmap with _ -> ()) - | Suspend -> - begin - try - ignore (expect_success (fun () -> cli "vm-suspend" params)) - with - CliOpFailed ls -> - log Log.Error "change_vm_state: VM suspend failed: cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - let (_: run_command_output) = run_command !Commands.list_domains in - raise (OpFailed "Failed to suspend VM") - end - | Reboot -> - (* All waiting for up etc moved to this func to allow it to try other methods if the first failed *) - reboot_phase1 cli vmid; - | Resume -> - begin - try - ignore (expect_success (fun () -> cli "vm-resume" params)); - ping (List.assoc vmid !ipmap) 0 - with - CliOpFailed ls -> - log Log.Error "change_vm_state: VM resume failed: cli reported:"; - List.iter (fun l -> log Log.Error "%s" l) ls; - raise (OpFailed "Failed to resume VM") - | GAFailed -> - log Log.Error "change_vm_state: VM resume failed: failed to contact guest agent"; - raise (OpFailed "Failed to resume VM") - end; + Start -> + begin + try + let (_: string list) = expect_success (fun () -> cli "vm-start" params) in + let domid = get_client_domid vmid in + log Log.Info "New domid: %d" domid; + log Log.Info "Waiting for VM to start..."; + wait_for_up cli vmid; + with + CliOpFailed ls -> + log Log.Error "change_vm_state: VM start failed: cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + raise (OpFailed "Failed to start VM") + end + | Shutdown -> + 
shutdown_phase1 cli vmid; + (try ipmap := List.remove_assoc vmid !ipmap with _ -> ()) + | Suspend -> + begin + try + ignore (expect_success (fun () -> cli "vm-suspend" params)) + with + CliOpFailed ls -> + log Log.Error "change_vm_state: VM suspend failed: cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + let (_: run_command_output) = run_command !Commands.list_domains in + raise (OpFailed "Failed to suspend VM") + end + | Reboot -> + (* All waiting for up etc moved to this func to allow it to try other methods if the first failed *) + reboot_phase1 cli vmid; + | Resume -> + begin + try + ignore (expect_success (fun () -> cli "vm-resume" params)); + ping (List.assoc vmid !ipmap) 0 + with + CliOpFailed ls -> + log Log.Error "change_vm_state: VM resume failed: cli reported:"; + List.iter (fun l -> log Log.Error "%s" l) ls; + raise (OpFailed "Failed to resume VM") + | GAFailed -> + log Log.Error "change_vm_state: VM resume failed: failed to contact guest agent"; + raise (OpFailed "Failed to resume VM") + end; end; log Log.Info "new state: %s " (get_state cli vmid); if domid>0 @@ -661,37 +661,37 @@ let change_vm_state cli vmid st = let rec ensure_vm_down cli vmid count = log Log.Info "Doing everything possible to turn off VM"; if count > 5 then raise (OpFailed "Poweroff operation failed!") else - let params = [("vm",vmid)] in - match get_state cli vmid with - "halted" -> () - | "paused" -> + let params = [("vm",vmid)] in + match get_state cli vmid with + "halted" -> () + | "paused" -> log Log.Warn "Host is currently paused! Unpausing and attempting shutdown"; - (try - ignore (expect_success (fun () -> cli "vm-unpause" params)); - change_vm_state cli vmid Shutdown; - with _ -> ()); + (try + ignore (expect_success (fun () -> cli "vm-unpause" params)); + change_vm_state cli vmid Shutdown; + with _ -> ()); ensure_vm_down cli vmid (count+1) - | "running" -> + | "running" -> log Log.Warn "Host is currently running! 
Shutting down"; (try - change_vm_state cli vmid Shutdown; - with _ -> ()); + change_vm_state cli vmid Shutdown; + with _ -> ()); ensure_vm_down cli vmid (count+1) - | "suspended" -> + | "suspended" -> log Log.Warn "Host is currently suspended! Resuming and shutting down!"; (try - ignore (expect_success (fun () -> cli "vm-resume" params)); - change_vm_state cli vmid Shutdown; - with _ -> ()); + ignore (expect_success (fun () -> cli "vm-resume" params)); + change_vm_state cli vmid Shutdown; + with _ -> ()); ensure_vm_down cli vmid (count+1) - | "shutting down" -> + | "shutting down" -> (try - change_vm_state cli vmid Shutdown; - with _ -> ()); + change_vm_state cli vmid Shutdown; + with _ -> ()); ensure_vm_down cli vmid (count+1) - | "migrating" -> + | "migrating" -> raise (OpFailed "Host is corrently migrating!") - | _ -> + | _ -> raise (OpFailed "Host is in an unknown state!") diff --git a/ocaml/xe-cli/rt/geneva/cli_test.ml b/ocaml/xe-cli/rt/geneva/cli_test.ml index 391395eefa3..57c6fd11387 100644 --- a/ocaml/xe-cli/rt/geneva/cli_test.ml +++ b/ocaml/xe-cli/rt/geneva/cli_test.ml @@ -19,44 +19,44 @@ open Cli_utils let exportdir = ref "" let bridge_name = ref "breth0" - + let cd_test cli vmid = cd_attach_remove cli vmid "w2k3sesp1.iso" None; cd_attach_remove cli vmid "winxpsp2.iso" None; -(* cd_attach_remove cli vmid "/dev/hda" None; *) + (* cd_attach_remove cli vmid "/dev/hda" None; *) cd_attach_remove cli vmid "xswindrivers.iso" None (* ------------------------------------------------------------------------- SCRIPT THE TESTS ------------------------------------------------------------------------- *) - + (* Definitions used in tests: *) let pv_guests = [("Debian Sarge 3.1","reg_pv1"); - ("Debian Sarge 3.1","reg_pv2"); - ("Debian Sarge 3.1","reg_pv3"); - ("Debian Sarge 3.1","reg_v4")] - + ("Debian Sarge 3.1","reg_pv2"); + ("Debian Sarge 3.1","reg_pv3"); + ("Debian Sarge 3.1","reg_v4")] + let pv_disks = [("sdc","512"); - ("sdd","1024"); - ("sde","2000"); - 
("sdf","512"); - ("sdg","900")] + ("sdd","1024"); + ("sde","2000"); + ("sdf","512"); + ("sdg","900")] let hvm_guests = [ - ("Windows XP SP2","reg_hvm1"); - ("Windows XP SP2","reg_hvm2"); - ("Windows XP SP2","reg_hvm3"); - ("Windows XP SP2","reg_hvm4") - ] + ("Windows XP SP2","reg_hvm1"); + ("Windows XP SP2","reg_hvm2"); + ("Windows XP SP2","reg_hvm3"); + ("Windows XP SP2","reg_hvm4") +] let hvm_disks = [("hdb","512"); - ("hdc","1024")] + ("hdc","1024")] let pv_nics () = [("eth5","00:00:00:00:55",!bridge_name); - ("eth6","00:00:00:00:66",!bridge_name)] - + ("eth6","00:00:00:00:66",!bridge_name)] + let hvm_nics () = [("nic5","00:00:00:00:55",!bridge_name); - ("nic6","00:00:00:00:66",!bridge_name)] + ("nic6","00:00:00:00:66",!bridge_name)] let vbridges = ["vbridge2";"vbridge3";"vbridge4"] @@ -85,7 +85,7 @@ let run_host_test_cycle cli vbridges = (* Generate host output report *) let _ = print_host_output cli in - + (* Remove vbridges from host and verify they're gone *) let _ = List.iter (sync_remove_vbridge cli) vbridges in @@ -93,7 +93,7 @@ let run_host_test_cycle cli vbridges = let existing_patches = get_patches cli in let _ = List.iter (sync_remove_patch cli) existing_patches in - print_line "\n---- HOST TEST CYCLE FINISHED ----" + print_line "\n---- HOST TEST CYCLE FINISHED ----" (* ------------------------------------------------------------------------- @@ -108,20 +108,20 @@ let run_vm_test_cycle is_hvm cli vms disks nics = "os";"vcpus";"memory_set";"auto_poweron";"force_hvm";"boot_params"] in let vm_set_params = [("name","testnameset"); - ("description","testdescriptionset"); - ("vcpus","1"); - ("memory_set","300"); - ("auto_poweron","false"); - ("auto_poweron","true"); -(* ("force_hvm","false"); - ("force_hvm","true");*) - ("boot_params","boottest"); - ("on_crash","destroy"); - ("on_crash","restart")] in - + ("description","testdescriptionset"); + ("vcpus","1"); + ("memory_set","300"); + ("auto_poweron","false"); + ("auto_poweron","true"); + (* 
("force_hvm","false"); + ("force_hvm","true");*) + ("boot_params","boottest"); + ("on_crash","destroy"); + ("on_crash","restart")] in + (* Force shutdown and uninstall all vms from host *) - let _ = uninstall_all_vms cli in - + let _ = uninstall_all_vms cli in + (* Install new debian guests and remember their uuids *) let uuids = List.map @@ -134,13 +134,13 @@ let run_vm_test_cycle is_hvm cli vms disks nics = let _ = List.iter (fun vmid -> - List.iter (sync_add_disk cli vmid) disks) uuids in + List.iter (sync_add_disk cli vmid) disks) uuids in (* Add nics to all VMs *) let _ = List.iter (fun vmid -> - List.iter (sync_add_nic cli vmid) nics) uuids in + List.iter (sync_add_nic cli vmid) nics) uuids in (* Add/Remove/List CDs if HVM *) let _ = @@ -154,24 +154,24 @@ let run_vm_test_cycle is_hvm cli vms disks nics = let _ = List.iter (fun vmid -> - List.iter (sync_remove_nic cli vmid) - (List.map (fun (n,_,_)->n) nics)) uuids in + List.iter (sync_remove_nic cli vmid) + (List.map (fun (n,_,_)->n) nics)) uuids in (* Resize disks on all Vms *) let _ = List.iter (fun (disk,size) -> - List.iter (fun vmid->resize_disk cli vmid disk size) uuids) + List.iter (fun vmid->resize_disk cli vmid disk size) uuids) (List.map - (fun (d,s)->(d,string_of_int ((int_of_string s)+500))) disks) in + (fun (d,s)->(d,string_of_int ((int_of_string s)+500))) disks) in (* Remove disks from all VMs *) let _ = List.iter (fun vmid -> - List.iter (sync_remove_disk cli vmid) - (List.map fst disks)) uuids in - + List.iter (sync_remove_disk cli vmid) + (List.map fst disks)) uuids in + (* Move VM between states in PV case *) let _ = if not is_hvm then state_test cli test_vm in @@ -183,7 +183,7 @@ let run_vm_test_cycle is_hvm cli vms disks nics = List.map (fun paramname -> getparam cli test_vm paramname) vm_get_params in - + (* Test set params *) let _ = List.iter @@ -191,13 +191,13 @@ let run_vm_test_cycle is_hvm cli vms disks nics = vm_set_params in (* Uninstall everything *) - let _ = uninstall_all_vms 
cli in - print_line "\n---- VM TEST CYCLE FINISHED ----" + let _ = uninstall_all_vms cli in + print_line "\n---- VM TEST CYCLE FINISHED ----" let installuninstall cli = let vm_uuid = install_guest cli (List.hd hvm_guests) in - sync_uninstall cli vm_uuid + sync_uninstall cli vm_uuid (* import/export test *) let export_checks cli = @@ -208,7 +208,7 @@ let export_checks cli = let _ = expect_success (fun()->cli "vm-export" params) in let _ = sync_uninstall cli vmid in let _ = expect_success (fun()->cli "vm-import" params) in - () + () (* ----------------------------------------------------------------------- ENTRY POINT: @@ -217,12 +217,12 @@ let export_checks cli = (* Read cmd-line args *) let _ = Arg.parse [ - "-host", Arg.Set_string host, "hostname of test XE host"; - "-xe", Arg.Set_string xe, "path to XE CLI executable"; - "-exportdir", Arg.Set_string exportdir, "path to export VM to"; - "-bridge", Arg.Set_string bridge_name, - Printf.sprintf "bridge to attach VIFs to (default %s)" !bridge_name; - ] + "-host", Arg.Set_string host, "hostname of test XE host"; + "-xe", Arg.Set_string xe, "path to XE CLI executable"; + "-exportdir", Arg.Set_string exportdir, "path to export VM to"; + "-bridge", Arg.Set_string bridge_name, + Printf.sprintf "bridge to attach VIFs to (default %s)" !bridge_name; + ] (fun x -> Printf.printf "Warning, ignoring unknown argument: %s" x) "Regression test for XE CLI" @@ -243,7 +243,7 @@ let cli = if !host="onhost" then (print_line "TEST RUNNING IN ONHOST MODE\n"; cli_onhost) else (print_line "TEST RUNNING IN OFFHOST MODE\n"; cli_offhost) - + (* Start by licensing server *) (* let _ = apply_license_to_server cli @@ -260,28 +260,28 @@ let _ = run_vm_test_cycle true cli hvm_guests hvm_disks (hvm_nics ()) (* Add some patches -- actually add the same one twice for now :) *) let _ = expect_success (fun()->cli "host-patch-upload" - ["patch-file",patchfilename]) + ["patch-file",patchfilename]) let _ = expect_success (fun()->cli "host-patch-upload" - 
["patch-file",patchfilename]) + ["patch-file",patchfilename]) let _ = expect_success (fun()-> cli "host-patch-apply" - ["patch-name",patchfilename]) + ["patch-name",patchfilename]) -let _ = if !exportdir<>"" then export_checks cli_offhost +let _ = if !exportdir<>"" then export_checks cli_offhost (* Off-host checks to run *) let offhost_checks() = - + (* Check unix password file by running through hvm install/uninstall *) let _ = installuninstall (cli_offhost_with_pwf pwf_unix) in - (* .. and same for windows password file [i.e. with CR/LFs] *) + (* .. and same for windows password file [i.e. with CR/LFs] *) let _ = installuninstall (cli_offhost_with_pwf pwf_windows) in (* Test setting/resetting host password *) let _ = test_password_set cli_offhost_with_pwd in - - () + + () (* Run off-host checks if off-host *) let _ = if !host<>"onhost" then offhost_checks() diff --git a/ocaml/xe-cli/rt/geneva/cli_utils.ml b/ocaml/xe-cli/rt/geneva/cli_utils.ml index 0f6e8110589..56c33d71b5e 100644 --- a/ocaml/xe-cli/rt/geneva/cli_utils.ml +++ b/ocaml/xe-cli/rt/geneva/cli_utils.ml @@ -45,35 +45,35 @@ let run_command cmd = let read_str () = try while true do - result := (!result) @ [(input_line ic)^"\n"]; + result := (!result) @ [(input_line ic)^"\n"]; done with _ -> () in let _ = read_str() in let rc = Unix.close_process_in ic in - (!result,rc) + (!result,rc) type pwspec = - | Password of string - | PasswordFile of string + | Password of string + | PasswordFile of string let cli_with_pwspec is_offhost cmd params pwspec = let rec mk_params l = match l with - [] -> "" - | ((k,v)::kvs) -> k^"=\""^v^"\""^" "^(mk_params kvs) in + [] -> "" + | ((k,v)::kvs) -> k^"=\""^v^"\""^" "^(mk_params kvs) in let param_str = mk_params params in let cli_base_string = (!xe)^" "^cmd ^(if is_offhost then - " -h "^(!host) - ^" " - ^(match pwspec with - Password s -> "-u "^user^" -pw "^s - | PasswordFile s -> "-pwf "^s) + " -h "^(!host) + ^" " + ^(match pwspec with + Password s -> "-u "^user^" -pw "^s + | 
PasswordFile s -> "-pwf "^s) else " -u "^user) ^" "^param_str in - print_line ("Executing: "^cli_base_string); - run_command cli_base_string + print_line ("Executing: "^cli_base_string); + run_command cli_base_string let cli_offhost_with_pwspec cmd params pwspec = cli_with_pwspec true cmd params pwspec @@ -81,7 +81,7 @@ let cli_offhost_with_pwspec cmd params pwspec = let cli_onhost cmd params = cli_with_pwspec false cmd params (Password "ignore") - + let cli_offhost_with_pwd pwd cmd params = cli_offhost_with_pwspec cmd params (Password pwd) @@ -101,41 +101,41 @@ let report_failure msg = let is_success rc s = match rc with - Unix.WEXITED i -> - if i<>0 then - report_failure ("Expected rc==0; actual rc=="^(string_of_int i) - ^". cmd returned: "^(String.concat "" s)) - | _ -> () - + Unix.WEXITED i -> + if i<>0 then + report_failure ("Expected rc==0; actual rc=="^(string_of_int i) + ^". cmd returned: "^(String.concat "" s)) + | _ -> () + let expect_success f = let (s,rc) = f() in - is_success rc s; s - + is_success rc s; s + let getval v line = let tks = tokenize line in let vs = tokenize v in let rec domatch tokens values = match (tokens,values) with - (t::_,[]) -> Some t - | (t::ts,v::vs) -> - if t=v then domatch ts vs - else None - | ([],_) -> None in - domatch tks vs - + (t::_,[]) -> Some t + | (t::ts,v::vs) -> + if t=v then domatch ts vs + else None + | ([],_) -> None in + domatch tks vs + let rec mapopt f l = match l with - [] -> [] - | (x::xs) -> - match (f x) with - None -> mapopt f xs - | (Some y) -> y::(mapopt f xs) + [] -> [] + | (x::xs) -> + match (f x) with + None -> mapopt f xs + | (Some y) -> y::(mapopt f xs) exception OptionFailure let rec getoptval x = match x with - None -> raise OptionFailure - | (Some x) -> x + None -> raise OptionFailure + | (Some x) -> x exception VMNotFound of string exception CLIOutputFormatError of string @@ -144,24 +144,24 @@ let rec getstate cli vmid = let lines = expect_success (fun()->cli "host-vm-list" []) in let rec 
findstate ls = match ls with - [] -> raise (VMNotFound vmid) - | (l::ls) -> - begin - match (ls,getval "uuid:" l) with - ([],None) -> raise (VMNotFound vmid) - | ([],Some _) -> raise (CLIOutputFormatError "host-vm-list") - | (_,None) -> findstate ls - | (nextline::_,Some v) -> - if v=vmid then - begin - let s = getval "state:" nextline in - if s=None then - raise (CLIOutputFormatError "host-vm-list") - else getoptval s - end - else findstate ls - end in - findstate lines + [] -> raise (VMNotFound vmid) + | (l::ls) -> + begin + match (ls,getval "uuid:" l) with + ([],None) -> raise (VMNotFound vmid) + | ([],Some _) -> raise (CLIOutputFormatError "host-vm-list") + | (_,None) -> findstate ls + | (nextline::_,Some v) -> + if v=vmid then + begin + let s = getval "state:" nextline in + if s=None then + raise (CLIOutputFormatError "host-vm-list") + else getoptval s + end + else findstate ls + end in + findstate lines exception TimeOut @@ -169,15 +169,15 @@ let poll f = let start_time = Unix.time() in let rec retry() = let current_time = Unix.time() in - if (current_time -. start_time > wait_timeout) then - raise TimeOut - else - if not (f()) then - begin - Unix.sleep poll_interval; - retry() - end in - retry() + if (current_time -. 
start_time > wait_timeout) then + raise TimeOut + else + if not (f()) then + begin + Unix.sleep poll_interval; + retry() + end in + retry() let startswith s1 s2 = (String.length s1)>=(String.length s2) && @@ -186,24 +186,24 @@ let startswith s1 s2 = exception Last let rec last l = match l with - [] -> raise Last - | [x] -> x - | (x::xs) -> last xs + [] -> raise Last + | [x] -> x + | (x::xs) -> last xs let read_end_from_output line_start lines = let new_uuids = mapopt - (fun l-> - if (startswith l line_start) then Some (last (tokenize l)) - else None) lines in - begin - match new_uuids with - [x] -> x - | _ -> - raise - (CLIOutputFormatError "can't find required parameter") - end - -(* Wait for specified vm to get into specified state *) + (fun l-> + if (startswith l line_start) then Some (last (tokenize l)) + else None) lines in + begin + match new_uuids with + [x] -> x + | _ -> + raise + (CLIOutputFormatError "can't find required parameter") + end + +(* Wait for specified vm to get into specified state *) let waitstate cli vmid state = print_line ("Waiting for vm "^vmid^" to get into state "^state); poll (fun ()->(getstate cli vmid)=state) @@ -211,22 +211,22 @@ let waitstate cli vmid state = (* Get all vm uuids on host *) let get_vm_uuids cli = let lines = expect_success (fun()->cli "host-vm-list" []) in - mapopt (getval "uuid:") lines + mapopt (getval "uuid:") lines (* Get all vm names on host *) let get_vm_names cli = let lines = expect_success (fun()->cli "host-vm-list" []) in - mapopt (getval "NAME:") lines + mapopt (getval "NAME:") lines (* Get all disk names for specified vm *) let get_disks cli vmid = let lines = expect_success (fun()->cli "vm-disk-list" [("vm-id",vmid)]) in - mapopt (getval "name:") lines + mapopt (getval "name:") lines (* Get all NICs for specified vm *) let get_nics cli vmid = let lines = expect_success (fun()->cli "vm-vif-list" [("vm-id",vmid)]) in - mapopt (getval "name:") lines + mapopt (getval "name:") lines (* Get all vbridges on 
host *) let get_vbridges cli = @@ -236,7 +236,7 @@ let get_vbridges cli = (* Get all patches on host *) let get_patches cli = let lines = expect_success (fun()->cli "host-patch-list" []) in - mapopt (getval "uuid:") lines + mapopt (getval "uuid:") lines (* Remove specified patch and check its gone *) let sync_remove_patch cli uuid = @@ -247,8 +247,8 @@ let sync_remove_patch cli uuid = let sync_shutdown cli force vmid = let params = if force then [("force","true")] else [] in let params = ("vm-id",vmid)::params in - ignore (cli "vm-shutdown" params); - waitstate cli vmid "DOWN" + ignore (cli "vm-shutdown" params); + waitstate cli vmid "DOWN" (* Uninstall VM, returning when VM removed from host-vm-list *) let sync_uninstall cli vmid = @@ -258,76 +258,76 @@ let sync_uninstall cli vmid = (* Uninstall all VMs from unknown state, shutting them down first *) let uninstall_all_vms cli = let uuids = get_vm_uuids cli in - List.iter (sync_shutdown cli true) uuids; - List.iter (sync_uninstall cli) uuids + List.iter (sync_shutdown cli true) uuids; + List.iter (sync_uninstall cli) uuids (* Install guest, using specified template and wait until state==down *) let install_guest cli (template, name) = let params = [("template-name",template); - ("name", name); - ("auto_poweron", "true"); - ("vcpus", "1"); - ("memory_set", "256")] in + ("name", name); + ("auto_poweron", "true"); + ("vcpus", "1"); + ("memory_set", "256")] in let lines = expect_success (fun ()->cli "vm-install" params) in let new_uuid = read_end_from_output "New VM" lines in - waitstate cli new_uuid "DOWN"; - new_uuid + waitstate cli new_uuid "DOWN"; + new_uuid (* Add disk to VM and wait until it appears in list *) let sync_add_disk cli vmid (diskname,disksize) = let params = [("vm-id",vmid); - ("disk-name",diskname); - ("disk-size",disksize)] in - ignore (expect_success (fun ()->cli "vm-disk-add" params)); - poll (fun ()-> - let vmdisks = get_disks cli vmid in - List.mem diskname vmdisks) + ("disk-name",diskname); + 
("disk-size",disksize)] in + ignore (expect_success (fun ()->cli "vm-disk-add" params)); + poll (fun ()-> + let vmdisks = get_disks cli vmid in + List.mem diskname vmdisks) (* Remove disk from VM and wait until its gone from list *) let sync_remove_disk cli vmid diskname = let params = [("vm-id",vmid); - ("disk-name",diskname)] in - ignore (expect_success (fun ()->cli "vm-disk-remove" params)); - poll (fun ()-> - let vmdisks = get_disks cli vmid in - not (List.mem diskname vmdisks)) + ("disk-name",diskname)] in + ignore (expect_success (fun ()->cli "vm-disk-remove" params)); + poll (fun ()-> + let vmdisks = get_disks cli vmid in + not (List.mem diskname vmdisks)) (* Add NIC to VM and wait until it appears in list *) let sync_add_nic cli vmid (nicname,mac,bridge) = let params = [("vm-id",vmid); - ("vif-name",nicname); - ("mac",mac); - ("bridge-name",bridge)] in - ignore (expect_success (fun ()->cli "vm-vif-add" params)); - poll (fun ()-> - let vmnics = get_nics cli vmid in - List.mem nicname vmnics) + ("vif-name",nicname); + ("mac",mac); + ("bridge-name",bridge)] in + ignore (expect_success (fun ()->cli "vm-vif-add" params)); + poll (fun ()-> + let vmnics = get_nics cli vmid in + List.mem nicname vmnics) (* Remove NIC from VM and wait until it has gone from list *) let sync_remove_nic cli vmid nicname = let params = [("vm-id",vmid); - ("vif-name",nicname)] in - ignore (expect_success (fun ()->cli "vm-vif-remove" params)); - poll (fun ()-> - let vmnics = get_nics cli vmid in - not (List.mem nicname vmnics)) + ("vif-name",nicname)] in + ignore (expect_success (fun ()->cli "vm-vif-remove" params)); + poll (fun ()-> + let vmnics = get_nics cli vmid in + not (List.mem nicname vmnics)) (* Add vbridge, returning when it appears in vbridge-list *) let sync_add_vbridge cli bridge = let params = [("vbridge-name",bridge); - ("auto-vm-add","false")] in - ignore (expect_success (fun ()->cli "host-vbridge-add" params)); - poll (fun ()-> - let vbridges = get_vbridges cli in - 
List.mem bridge vbridges) + ("auto-vm-add","false")] in + ignore (expect_success (fun ()->cli "host-vbridge-add" params)); + poll (fun ()-> + let vbridges = get_vbridges cli in + List.mem bridge vbridges) (* Remove vbridge, returning when it has gone from vbridge-list *) let sync_remove_vbridge cli bridge = let params = [("vbridge-name",bridge)] in - ignore (expect_success (fun ()->cli "host-vbridge-remove" params)); - poll (fun ()-> - let vbridges = get_vbridges cli in - not (List.mem bridge vbridges)) + ignore (expect_success (fun ()->cli "host-vbridge-remove" params)); + poll (fun ()-> + let vbridges = get_vbridges cli in + not (List.mem bridge vbridges)) (* Print report from *-list commands: *) @@ -349,7 +349,7 @@ let print_vm_output cli vmid = ("vifs",expect_success (fun ()->cli "vm-vif-list" param)); ("cds",expect_success (fun ()->cli "vm-cd-list" param)); ("params",expect_success (fun ()->cli "vm-param-list" param))] in - print_report_output output + print_report_output output (* Collate info from Host list commands: *) let print_host_output cli = @@ -365,7 +365,7 @@ let print_host_output cli = ("params",expect_success (fun ()->cli "host-param-list" [])); ("cds",expect_success (fun ()->cli "host-cd-list" [])); ("patches",expect_success (fun ()->cli "host-patch-list" []))] in - print_report_output output + print_report_output output let state_test cli vmid = let wait_print time = @@ -375,118 +375,118 @@ let state_test cli vmid = let move_state cmd rstate = ignore (expect_success (fun ()->cli cmd param)); waitstate cli vmid rstate in - begin - move_state "vm-start" "UP"; - move_state "vm-suspend" "SUSPENDED"; - move_state "vm-resume" "UP"; - wait_print 5; (* there is a window where a VM doesn't see the shutdown signal *) - move_state "vm-shutdown" "DOWN"; - move_state "vm-start" "UP"; - (* reboot and wait for VM to come back up *) - ignore (expect_success - (fun ()->cli "vm-reboot" param)); - waitstate cli vmid "UP"; - (* and try force shutdown... 
*) - ignore (expect_success - (fun ()->cli "vm-shutdown" (("force","true")::param))); - waitstate cli vmid "DOWN" - end + begin + move_state "vm-start" "UP"; + move_state "vm-suspend" "SUSPENDED"; + move_state "vm-resume" "UP"; + wait_print 5; (* there is a window where a VM doesn't see the shutdown signal *) + move_state "vm-shutdown" "DOWN"; + move_state "vm-start" "UP"; + (* reboot and wait for VM to come back up *) + ignore (expect_success + (fun ()->cli "vm-reboot" param)); + waitstate cli vmid "UP"; + (* and try force shutdown... *) + ignore (expect_success + (fun ()->cli "vm-shutdown" (("force","true")::param))); + waitstate cli vmid "DOWN" + end (* Clone VM and return new uuid *) let clone_test cli vmid new_name = let params = [("vm-id",vmid); - ("new-name",new_name); - ("new-description","cloned with CLI regression test")] in + ("new-name",new_name); + ("new-description","cloned with CLI regression test")] in let lines = expect_success (fun ()->cli "vm-clone" params) in let new_uuid = read_end_from_output "Cloned VM" lines in - waitstate cli new_uuid "DOWN"; - new_uuid + waitstate cli new_uuid "DOWN"; + new_uuid (* Check that loglevel calls succeed *) let loglevel_test cli = List.iter (fun x-> ignore (expect_success - (fun ()->cli "host-loglevel-set" [("log-level",x)]))) - ["1";"2";"3";"4";"2"] + (fun ()->cli "host-loglevel-set" [("log-level",x)]))) + ["1";"2";"3";"4";"2"] (* Read specified param from specified vm, returning it as string *) let getparam cli vmid param_name = let params = [("vm-id",vmid); - ("param-name",param_name)] in + ("param-name",param_name)] in let lines = expect_success (fun ()->cli "vm-param-get" params) in - match (mapopt (getval (param_name^":")) lines) with - [] -> raise (CLIOutputFormatError "param get failure") - | [x] -> x - | _ -> raise (CLIOutputFormatError "multiple param get candidates") + match (mapopt (getval (param_name^":")) lines) with + [] -> raise (CLIOutputFormatError "param get failure") + | [x] -> x + | _ -> 
raise (CLIOutputFormatError "multiple param get candidates") (* Set specified parameter, read back and check it was set *) let set_and_check_param cli vmid param_name param_value = let params = [("vm-id",vmid); - ("param-name",param_name); - ("param-value",param_value)] in + ("param-name",param_name); + ("param-value",param_value)] in let _ = ignore (expect_success (fun ()->cli "vm-param-set" params)) in - poll (fun ()->(getparam cli vmid param_name)=param_value) + poll (fun ()->(getparam cli vmid param_name)=param_value) (* Resize disks *) let resize_disk cli vmid disk_name new_size = let params = [("vm-id",vmid); - ("disk-name",disk_name); - ("disk-size",new_size)] in - ignore (expect_success (fun ()->cli "vm-disk-resize" params)) + ("disk-name",disk_name); + ("disk-size",new_size)] in + ignore (expect_success (fun ()->cli "vm-disk-resize" params)) (* Test host password *) let test_password_set pswdcli = let setpwd params password = ignore (expect_success - (fun()->pswdcli password "host-password-set" params)) in + (fun()->pswdcli password "host-password-set" params)) in let change_params = [("new-password","testpwd")] in let change_back_params = [("new-password",password)] in - setpwd change_params password; - setpwd change_back_params "testpwd" + setpwd change_params password; + setpwd change_back_params "testpwd" (* Build param string from vmid, cdnname, cdlocation *) let build_cd_params vmid cdname cdlocation = let params = [("vm-id",vmid); - ("cd-name",cdname)] in - match cdlocation with - (Some x) -> ("cd-location",x)::params - | None -> params + ("cd-name",cdname)] in + match cdlocation with + (Some x) -> ("cd-location",x)::params + | None -> params (* Attach CD and check its there *) let verify_attach_cd cli vmid cdname cdlocation = let params = build_cd_params vmid cdname cdlocation in let _ = expect_success (fun ()->cli "vm-cd-add" params) in - poll - (fun ()-> - let lines = expect_success (fun ()->cli "vm-cd-list" params) in - let name = mapopt (getval 
"name:") lines in - match name with - [] -> false - | [x] -> - print_line x; - if cdname="empty" then (x=" raise (CLIOutputFormatError "cd list") - ) + poll + (fun ()-> + let lines = expect_success (fun ()->cli "vm-cd-list" params) in + let name = mapopt (getval "name:") lines in + match name with + [] -> false + | [x] -> + print_line x; + if cdname="empty" then (x=" raise (CLIOutputFormatError "cd list") + ) (* Remove CD and check _NO CDs LEFT_ *) let verify_remove_cd cli vmid cdname cdlocation = let params = build_cd_params vmid cdname cdlocation in let _ = expect_success (fun ()->cli "vm-cd-remove" params) in - poll - (fun ()-> - let lines = expect_success (fun ()->cli "vm-cd-list" params) in - let names = mapopt (getval "name:") lines in - names=[]) + poll + (fun ()-> + let lines = expect_success (fun ()->cli "vm-cd-list" params) in + let names = mapopt (getval "name:") lines in + names=[]) (* CD: Attach, check, Remove, check *) let cd_attach_remove cli vmid cdname cdlocation = verify_attach_cd cli vmid cdname cdlocation; verify_remove_cd cli vmid cdname cdlocation - + let apply_license_to_server cli = expect_success (fun()->cli "host-license-add" - [("license-file",license_file)]) + [("license-file",license_file)]) diff --git a/ocaml/xe-cli/rt/geneva/sm_stress.ml b/ocaml/xe-cli/rt/geneva/sm_stress.ml index 9b5f6a72bf1..759f9d7d5ac 100644 --- a/ocaml/xe-cli/rt/geneva/sm_stress.ml +++ b/ocaml/xe-cli/rt/geneva/sm_stress.ml @@ -29,12 +29,12 @@ let size_inc = ref 0 (* Read cmd-line args *) let _ = Arg.parse [ -(* "-host", Arg.Set_string host, "hostname of test XE host"; *) - "-xe", Arg.Set_string xe, "path to XE CLI executable"; - "-base_size1", Arg.Set_int base_size1, "base-size1 for disk create"; - "-base_size2", Arg.Set_int base_size2, "base-size2 for disk create"; - "-size_inc", Arg.Set_int size_inc, "size-increment for disk resizing" - ] + (* "-host", Arg.Set_string host, "hostname of test XE host"; *) + "-xe", Arg.Set_string xe, "path to XE CLI executable"; + 
"-base_size1", Arg.Set_int base_size1, "base-size1 for disk create"; + "-base_size2", Arg.Set_int base_size2, "base-size2 for disk create"; + "-size_inc", Arg.Set_int size_inc, "size-increment for disk resizing" + ] (fun x -> Printf.printf "Warning, ignoring unknown argument: %s" x) "Regression test for XE CLI" @@ -66,11 +66,11 @@ let _ = uninstall_all_vms cli (* Install HVM guests *) let hvm_guests = [ - ("Windows XP Service Pack 2","sr_stress_hvm1"); - ("Windows XP Service Pack 2","sr_stress_hvm2"); - ("Windows XP Service Pack 2","sr_stress_hvm3"); - ("Windows XP Service Pack 2","sr_stress_hvm4") - ] + ("Windows XP Service Pack 2","sr_stress_hvm1"); + ("Windows XP Service Pack 2","sr_stress_hvm2"); + ("Windows XP Service Pack 2","sr_stress_hvm3"); + ("Windows XP Service Pack 2","sr_stress_hvm4") +] let uuids = List.map (install_guest cli) hvm_guests @@ -99,27 +99,27 @@ let _ = List.iter Thread.join threads (* Install some PV guests *) let pv_guests = [("Debian Sarge 3.1","reg_pv1"); - ("Debian Sarge 3.1","reg_pv2"); - ("Debian Sarge 3.1","reg_pv3"); - ("Debian Sarge 3.1","reg_v4")] + ("Debian Sarge 3.1","reg_pv2"); + ("Debian Sarge 3.1","reg_pv3"); + ("Debian Sarge 3.1","reg_v4")] let pv_uuids = List.map (install_guest cli) pv_guests (* Start them all up sequentially *) let sync_startup vmid = - let param = [("vm-id",vmid)] in - ignore (expect_success (fun ()->cli "vm-start" param)); - waitstate cli vmid "UP" + let param = [("vm-id",vmid)] in + ignore (expect_success (fun ()->cli "vm-start" param)); + waitstate cli vmid "UP" let _ = List.iter sync_startup pv_uuids (* And suspend them all concurrently *) let suspend vmid = let param = [("vm-id",vmid)] in - ignore (expect_success (fun ()->cli "vm-suspend" param)) + ignore (expect_success (fun ()->cli "vm-suspend" param)) let _ = List.iter suspend pv_uuids -(* +(* (* And then uninstall everything again *) let _ = uninstall_all_vms cli *) diff --git a/ocaml/xe-cli/rt/geneva/utils.ml b/ocaml/xe-cli/rt/geneva/utils.ml 
index 637f9031a73..2bcd80dad15 100644 --- a/ocaml/xe-cli/rt/geneva/utils.ml +++ b/ocaml/xe-cli/rt/geneva/utils.ml @@ -28,20 +28,20 @@ let rec optlistToList = function | [] -> [] | None::xs -> optlistToList xs | (Some x)::xs -> x::(optlistToList xs) - + (* bit o sets *) let rec remove fromset element = match fromset with -| [] -> [] -| (x::xs) -> - if x = element then xs - else x::(remove xs element) + | [] -> [] + | (x::xs) -> + if x = element then xs + else x::(remove xs element) (* this looks like a fold *) let rec subtract fromset elements = match elements with -| [] -> fromset -| (x::xs) -> subtract (remove fromset x) xs + | [] -> fromset + | (x::xs) -> subtract (remove fromset x) xs exception Empty_List @@ -54,9 +54,9 @@ let tl = function | (_::xs) -> xs -let rec member set x = match set with -| [] -> false -| (y::ys) -> if (x = y) then true else (member ys x) +let rec member set x = match set with + | [] -> false + | (y::ys) -> if (x = y) then true else (member ys x) let rec length = function @@ -64,11 +64,11 @@ let rec length = function | (_::xs) -> 1 + (length xs) -let rec explode s = +let rec explode s = if (String.length s = 0) then [] else (String.get s 0)::(explode (String.sub s 1 (String.length s - 1))) -let rec implode chars = +let rec implode chars = let s = String.create (length chars) in let rec setchar n = function | [] -> () @@ -78,14 +78,14 @@ let rec implode chars = let split s c = let rec search results acc tocome = match tocome with - | [] -> results @ [acc] - | (x::xs) -> if (c x) then search (results @ [acc]) [] xs + | [] -> results @ [acc] + | (x::xs) -> if (c x) then search (results @ [acc]) [] xs else search results (acc @ [x]) xs in List.map implode (search [] [] (explode s)) -let tokenize s = +let tokenize s = let whitespace = function | ' ' -> true | '\t' -> true diff --git a/ocaml/xe-cli/rt/getip.ml b/ocaml/xe-cli/rt/getip.ml index ec362482306..1e5ac50f736 100644 --- a/ocaml/xe-cli/rt/getip.ml +++ b/ocaml/xe-cli/rt/getip.ml @@ -35,21 
+35,21 @@ let _ = let mac = String.lowercase Sys.argv.(2) in let cmd = Printf.sprintf "tcpdump -lne -i %s arp or udp port bootps" iface in let ic = Unix.open_process_in cmd in - + let rec inner () = let line = String.lowercase (input_line ic) in - + (* Try first regexp *) let ip = getip1 mac line in - match ip with - Some x -> x - | None -> - let ip2 = getip2 mac line in - match ip2 with - Some x -> x - | None -> inner () - in + match ip with + Some x -> x + | None -> + let ip2 = getip2 mac line in + match ip2 with + Some x -> x + | None -> inner () + in let ip = inner () in Printf.printf "%s" ip - + diff --git a/ocaml/xe-cli/rt/gtclient.ml b/ocaml/xe-cli/rt/gtclient.ml index 081880a76e7..cd481738758 100644 --- a/ocaml/xe-cli/rt/gtclient.ml +++ b/ocaml/xe-cli/rt/gtclient.ml @@ -24,26 +24,26 @@ let rec nthtl l n = let _ = let addr = Sys.argv.(1) in - let msg = + let msg = match Sys.argv.(2) with "test" -> Test | "shutdown" -> Shutdown (int_of_string Sys.argv.(3)) | "reboot" -> Reboot (int_of_string Sys.argv.(3)) - | "crash" -> Crash - | "checkcd" -> CheckCD (nthtl (Array.to_list Sys.argv) 3) + | "crash" -> Crash + | "checkcd" -> CheckCD (nthtl (Array.to_list Sys.argv) 3) | "checkcdfail" -> CheckCDFail (nthtl (Array.to_list Sys.argv) 3) | "checkvif" -> CheckVIF Sys.argv.(3) | "checkdisks" -> CheckDisks (nthtl (Array.to_list Sys.argv) 3) | "checkmountdisk" -> CheckMountDisks (nthtl (Array.to_list Sys.argv) 3) | "setuptestdisk" -> SetupTestDisk Sys.argv.(3) | _ -> raise (Failure "Unknown command!") - in + in try let ans = Client.emit_answer addr 8085 msg in match ans with CmdResult str -> print_endline str | _ -> exit 0 with - exc -> - Printf.printf "Exception trapped: %s\n" (Printexc.to_string exc); - exit 1 + exc -> + Printf.printf "Exception trapped: %s\n" (Printexc.to_string exc); + exit 1 diff --git a/ocaml/xe-cli/rt/gtcomms.ml b/ocaml/xe-cli/rt/gtcomms.ml index 7d3e8c4984b..025d61c9cd7 100644 --- a/ocaml/xe-cli/rt/gtcomms.ml +++ b/ocaml/xe-cli/rt/gtcomms.ml @@ 
-12,104 +12,104 @@ * GNU Lesser General Public License for more details. *) module type PROTOCOL = - sig - type t - val to_string : t -> string - val of_string : string -> t - end +sig + type t + val to_string : t -> string + val of_string : string -> t +end module Make_Protocol = functor ( T : sig type t end ) -> - struct - type t=T.t - let to_string (x:t) = Marshal.to_string x [Marshal.Closures] - let of_string s = (Marshal.from_string s 0 : t) - end +struct + type t=T.t + let to_string (x:t) = Marshal.to_string x [Marshal.Closures] + let of_string s = (Marshal.from_string s 0 : t) +end let rec really_read s string off n = if n=0 then () else let m = Unix.read s string off n in (if m = 0 then raise End_of_file); really_read s string (off+m) (n-m) - + module Com = functor (P : PROTOCOL) -> - struct - let send fd m = - let mes = P.to_string m in - let l = (string_of_int (String.length mes)) in - let buffer = String.make 12 ' ' in - for i=0 to (String.length l)-1 do buffer.[i] <- l.[i] done; - ignore(Unix.write fd buffer 0 12); - ignore(Unix.write fd mes 0 (String.length mes)) - - let receive fd = - let buffer = String.make 12 ' ' +struct + let send fd m = + let mes = P.to_string m in + let l = (string_of_int (String.length mes)) in + let buffer = String.make 12 ' ' in + for i=0 to (String.length l)-1 do buffer.[i] <- l.[i] done; + ignore(Unix.write fd buffer 0 12); + ignore(Unix.write fd mes 0 (String.length mes)) + + let receive fd = + let buffer = String.make 12 ' ' + in + try + ignore (really_read fd buffer 0 12); + let l= + let i=ref 0 in + while(buffer.[!i]<>' ') do incr i done; + int_of_string (String.sub buffer 0 !i) + in + Printf.fprintf stderr "Length of message: %d\n" l; + let buffer = String.create l in - try - ignore (really_read fd buffer 0 12); - let l= - let i=ref 0 in - while(buffer.[!i]<>' ') do incr i done; - int_of_string (String.sub buffer 0 !i) - in - Printf.fprintf stderr "Length of message: %d\n" l; - let buffer = String.create l - in - (try 
- ignore (really_read fd buffer 0 l); - with - e -> - Printf.fprintf stderr "Exception: %s\nGot '%s' so far (%d bytes)" (Printexc.to_string e) buffer (String.length buffer); - failwith "error"); - P.of_string buffer - with e -> - Printf.fprintf stderr "%s\n" (Printexc.to_string e); - raise (Failure ("Problem interpreting response: got '"^buffer^"'")) - end + (try + ignore (really_read fd buffer 0 l); + with + e -> + Printf.fprintf stderr "Exception: %s\nGot '%s' so far (%d bytes)" (Printexc.to_string e) buffer (String.length buffer); + failwith "error"); + P.of_string buffer + with e -> + Printf.fprintf stderr "%s\n" (Printexc.to_string e); + raise (Failure ("Problem interpreting response: got '"^buffer^"'")) +end module Server = functor (P : PROTOCOL) -> - struct - module Com = Com(P) - - class virtual ['a] server p np = - object(s) - constraint 'a = P.t - val port_num = p - val nb_pending = np - val sock = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 +struct + module Com = Com(P) - method start = - Printf.printf "Starting...\n"; - let sock_addr = Unix.ADDR_INET(Unix.inet_addr_any,port_num) in - Unix.bind sock sock_addr; - Unix.listen sock nb_pending; - while true do - let (service_sock, client_sock_addr) = Unix.accept sock - in ignore (s#process service_sock) - done - method send = Com.send - method receive = Com.receive - method virtual process : Unix.file_descr -> unit - end - end + class virtual ['a] server p np = + object(s) + constraint 'a = P.t + val port_num = p + val nb_pending = np + val sock = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 + + method start = + Printf.printf "Starting...\n"; + let sock_addr = Unix.ADDR_INET(Unix.inet_addr_any,port_num) in + Unix.bind sock sock_addr; + Unix.listen sock nb_pending; + while true do + let (service_sock, client_sock_addr) = Unix.accept sock + in ignore (s#process service_sock) + done + method send = Com.send + method receive = Com.receive + method virtual process : Unix.file_descr -> unit + end +end module Client 
= functor (P : PROTOCOL) -> - struct - module Com = Com(P) - - let connect addr port = - let sock = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 - and in_addr = (Unix.gethostbyname addr).Unix.h_addr_list.(0) - in Unix.connect sock (Unix.ADDR_INET(in_addr, port)); - sock +struct + module Com = Com(P) + + let connect addr port = + let sock = Unix.socket Unix.PF_INET Unix.SOCK_STREAM 0 + and in_addr = (Unix.gethostbyname addr).Unix.h_addr_list.(0) + in Unix.connect sock (Unix.ADDR_INET(in_addr, port)); + sock + + let emit_simple addr port mes = + let sock = connect addr port + in Com.send sock mes; + Unix.close sock - let emit_simple addr port mes = - let sock = connect addr port - in Com.send sock mes; - Unix.close sock - - let emit_answer addr port mes = - let sock = connect addr port - in Com.send sock mes; - let res = Com.receive sock - in Unix.close sock; res - end + let emit_answer addr port mes = + let sock = connect addr port + in Com.send sock mes; + let res = Com.receive sock + in Unix.close sock; res +end diff --git a/ocaml/xe-cli/rt/gtlinuxops.ml b/ocaml/xe-cli/rt/gtlinuxops.ml index 9048c6fb0c3..a76b5697cde 100644 --- a/ocaml/xe-cli/rt/gtlinuxops.ml +++ b/ocaml/xe-cli/rt/gtlinuxops.ml @@ -12,10 +12,10 @@ * GNU Lesser General Public License for more details. 
*) -let rec read ic cur = +let rec read ic cur = try let line = input_line ic in - read ic (line::cur) + read ic (line::cur) with _ -> List.rev cur @@ -46,30 +46,30 @@ let trytwice fn = let checkdevice device = let cmd () = Sys.command ("test -e /dev/"^device) in trytwice cmd - + let checkcds devices checkdev = let is_pv = try ignore(Unix.stat "/proc/xen"); true with _ -> false in - let devmap = + let devmap = if is_pv then ["2","xvdc";"3","xvdd"] else ["2","hdc";"3","hdd"] - in - let results = List.map (fun dev -> - let ldevice = List.assoc dev devmap in - if checkdev then ignore(checkdevice ldevice); - ignore(Sys.command ("mkdir -p /mnt/cdrom0")); - ignore(Sys.command ("mount /dev/"^ldevice^" /mnt/cdrom0")); - let ic = Unix.open_process_in "ls /mnt/cdrom0" in - let result = read ic [] in - ignore(Sys.command "umount /mnt/cdrom0"); - dev ^ ":\n\n" ^ (String.concat "\n" result)) devices in + in + let results = List.map (fun dev -> + let ldevice = List.assoc dev devmap in + if checkdev then ignore(checkdevice ldevice); + ignore(Sys.command ("mkdir -p /mnt/cdrom0")); + ignore(Sys.command ("mount /dev/"^ldevice^" /mnt/cdrom0")); + let ic = Unix.open_process_in "ls /mnt/cdrom0" in + let result = read ic [] in + ignore(Sys.command "umount /mnt/cdrom0"); + dev ^ ":\n\n" ^ (String.concat "\n" result)) devices in Gtmessages.CmdResult (String.concat "\n" results) let checkdisks devices = - let results = List.map (fun dev -> - ignore(checkdevice dev); - let ldevice = dev in - let ic = Unix.open_process_in ("fdisk /dev/"^ldevice) in - let result = read ic [] in - String.concat "\n" result) devices in + let results = List.map (fun dev -> + ignore(checkdevice dev); + let ldevice = dev in + let ic = Unix.open_process_in ("fdisk /dev/"^ldevice) in + let result = read ic [] in + String.concat "\n" result) devices in Gtmessages.CmdResult (String.concat "\n" results) let setuptestdisk device = (* Device is in this case a full sda or hda or whatever *) @@ -85,7 +85,7 @@ let 
setuptestdisk device = (* Device is in this case a full sda or hda or whatev ignore(Sys.command ("touch /mnt/disk/testing")); ignore(Sys.command ("umount /mnt/disk")); Gtmessages.CmdResult "OK" - + let checkmountdisks devices = (* Don't know whether we're pv (sdx) or hvm (hdx), so try both *) let mapfn dev = @@ -112,5 +112,5 @@ let logerr msg = close_out oc - + diff --git a/ocaml/xe-cli/rt/gtmessages.ml b/ocaml/xe-cli/rt/gtmessages.ml index 67b1bc2341a..cddd826ebba 100644 --- a/ocaml/xe-cli/rt/gtmessages.ml +++ b/ocaml/xe-cli/rt/gtmessages.ml @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -type message = - (* Guest to Dom0 messages *) - | CmdResult of string +type message = + (* Guest to Dom0 messages *) + | CmdResult of string - (* Dom0 to guest messages *) + (* Dom0 to guest messages *) | CheckCD of string list (* Check we've got connected cds *) | CheckVIF of string (* Check one device exists *) | CheckDisks of string list diff --git a/ocaml/xe-cli/rt/gtserver_linux.ml b/ocaml/xe-cli/rt/gtserver_linux.ml index a80c59bf4dc..e20acc0e3dd 100644 --- a/ocaml/xe-cli/rt/gtserver_linux.ml +++ b/ocaml/xe-cli/rt/gtserver_linux.ml @@ -35,19 +35,19 @@ class server n np = method crash s = ignore(GuestOp.crash ()); self#send s (CmdResult "You might not get this...") - + method test s = self#send s (CmdResult "Test worked OK!") method checkcds s devices = self#send s (GuestOp.checkcds devices true) - + method checkcdsfail s devices = self#send s (GuestOp.checkcds devices false) method checkvif s device = - self#send s (GuestOp.checkvifs device) - + self#send s (GuestOp.checkvifs device) + method checkdisks s devices = self#send s (GuestOp.checkdisks devices) @@ -57,26 +57,26 @@ class server n np = method setuptestdisk s device = self#send s (GuestOp.setuptestdisk device) - method process s = + method process s = try - begin - match self#receive s with - Shutdown timeout -> 
self#shutdown s timeout - | Reboot timeout -> self#reboot s timeout - | Test -> self#test s - | Crash -> self#crash s - | CheckCD devs -> self#checkcds s devs - | CheckVIF dev -> self#checkvif s dev - | CheckDisks devs -> self#checkdisks s devs - | CheckMountDisks devs -> self#checkmountdisks s devs - | SetupTestDisk dev -> self#setuptestdisk s dev - | CheckCDFail devs -> self#checkcdsfail s devs - | _ -> () - end; - Unix.close s + begin + match self#receive s with + Shutdown timeout -> self#shutdown s timeout + | Reboot timeout -> self#reboot s timeout + | Test -> self#test s + | Crash -> self#crash s + | CheckCD devs -> self#checkcds s devs + | CheckVIF dev -> self#checkvif s dev + | CheckDisks devs -> self#checkdisks s devs + | CheckMountDisks devs -> self#checkmountdisks s devs + | SetupTestDisk dev -> self#setuptestdisk s dev + | CheckCDFail devs -> self#checkcdsfail s devs + | _ -> () + end; + Unix.close s with - e -> GuestOp.logerr (Printexc.to_string e) - + e -> GuestOp.logerr (Printexc.to_string e) + end let _ = diff --git a/ocaml/xe-cli/rt/gtserver_win.ml b/ocaml/xe-cli/rt/gtserver_win.ml index 0bc014fc4c9..23b6e179dad 100644 --- a/ocaml/xe-cli/rt/gtserver_win.ml +++ b/ocaml/xe-cli/rt/gtserver_win.ml @@ -35,7 +35,7 @@ class server n np = method crash s = ignore (GuestOp.crash ()); self#send s (CmdResult "You might not get this...") - + method test s = self#send s (CmdResult "Test worked OK!") @@ -51,26 +51,26 @@ class server n np = method checkmountdisks s devices = self#send s (GuestOp.checkmountdisks devices) - method process s = + method process s = try - begin - match self#receive s with - Shutdown timeout -> self#shutdown s timeout - | Reboot timeout -> self#reboot s timeout - | Test -> self#test s - | Crash -> self#crash s - | CheckCD devs -> self#checkcds s devs - | CheckVIF dev -> self#checkvif s dev - | CheckDisks devs -> self#checkdisks s devs - | CheckMountDisks devs -> self#checkmountdisks s devs - | CheckCDFail devs -> self#checkcds s devs - 
| _ -> () - end; - Unix.shutdown s Unix.SHUTDOWN_ALL + begin + match self#receive s with + Shutdown timeout -> self#shutdown s timeout + | Reboot timeout -> self#reboot s timeout + | Test -> self#test s + | Crash -> self#crash s + | CheckCD devs -> self#checkcds s devs + | CheckVIF dev -> self#checkvif s dev + | CheckDisks devs -> self#checkdisks s devs + | CheckMountDisks devs -> self#checkmountdisks s devs + | CheckCDFail devs -> self#checkcds s devs + | _ -> () + end; + Unix.shutdown s Unix.SHUTDOWN_ALL with - e -> GuestOp.logerr (Printexc.to_string e) + e -> GuestOp.logerr (Printexc.to_string e) + - end let _ = diff --git a/ocaml/xe-cli/rt/gtwindowsops.ml b/ocaml/xe-cli/rt/gtwindowsops.ml index de4c22f7b57..01dc9301b16 100644 --- a/ocaml/xe-cli/rt/gtwindowsops.ml +++ b/ocaml/xe-cli/rt/gtwindowsops.ml @@ -12,10 +12,10 @@ * GNU Lesser General Public License for more details. *) -let rec read ic cur = +let rec read ic cur = try let line = input_line ic in - read ic (line::cur) + read ic (line::cur) with _ -> List.rev cur @@ -31,8 +31,8 @@ let reboot timeout = (try close_in ic with _ -> ()); Gtmessages.CmdResult (String.concat "\n" result) - let crash () = - Gtmessages.CmdResult "Unimplemented in windows!" +let crash () = + Gtmessages.CmdResult "Unimplemented in windows!" let rec myfilter func l = match l with @@ -42,22 +42,22 @@ let rec myfilter func l = let parse_diskpart ls = Printf.printf "In parse_diskpart\n"; flush_all (); - let volumes = (myfilter (fun l -> try - let substr = String.lowercase (String.sub l 2 6) in - Printf.printf "1. substr='%s' comparing with '%s'\n" substr "volume"; - Printf.printf " result=%b\n" (substr="volume"); - flush_all (); - substr="volume" with _ -> false) ls) in + let volumes = (myfilter (fun l -> try + let substr = String.lowercase (String.sub l 2 6) in + Printf.printf "1. 
substr='%s' comparing with '%s'\n" substr "volume"; + Printf.printf " result=%b\n" (substr="volume"); + flush_all (); + substr="volume" with _ -> false) ls) in List.iter (fun l -> Printf.printf "%s\n" l) volumes; - let cds = List.filter (fun l -> try - Printf.printf "2. substr=%s\n" (String.sub l 30 4); - "CDFS"=String.sub l 32 4 with _ -> false) volumes in - let mapfn driveline = + let cds = List.filter (fun l -> try + Printf.printf "2. substr=%s\n" (String.sub l 30 4); + "CDFS"=String.sub l 32 4 with _ -> false) volumes in + let mapfn driveline = let drive = String.sub driveline 15 1 in drive in let cddrives = List.map mapfn cds in cddrives - + let checkcds devices _ = let (ic,oc) = Unix.open_process "DISKPART.EXE" in Printf.fprintf oc "list volume\nexit\n"; @@ -68,22 +68,22 @@ let checkcds devices _ = let drives = parse_diskpart result in let mapfn drive = let cmd = "DIR " ^ drive ^ ":\\" in - Printf.printf "cmd=%s\n" cmd; - flush_all (); - let ic = Unix.open_process_in cmd in - let result = read ic [] in - (try close_in ic with _ -> ()); - String.concat "\n" result - in + Printf.printf "cmd=%s\n" cmd; + flush_all (); + let ic = Unix.open_process_in cmd in + let result = read ic [] in + (try close_in ic with _ -> ()); + String.concat "\n" result + in Gtmessages.CmdResult (String.concat "\n" (List.map mapfn drives)) - + let checkdisks devices = let (ic,oc) = Unix.open_process "DISKPART.EXE" in Printf.fprintf oc "list disk\nexit\n"; flush_all (); let result = read ic [] in (try close_in ic with _ -> ()); - (try close_out oc with _ -> ()); + (try close_out oc with _ -> ()); Gtmessages.CmdResult (String.concat "\n" result) let checkmountdisks devices = @@ -98,7 +98,7 @@ let checkvifs vif = (try close_in ic with _ -> ()); Gtmessages.CmdResult (String.concat "\n" result) -let logerr msg = +let logerr msg = let oc = open_out "c:\\gtserver.log" in Printf.fprintf oc "%s\n" msg; close_out oc diff --git a/ocaml/xe-cli/rt/log.ml b/ocaml/xe-cli/rt/log.ml index 
364bdd27d3c..ddb9d981630 100644 --- a/ocaml/xe-cli/rt/log.ml +++ b/ocaml/xe-cli/rt/log.ml @@ -13,12 +13,12 @@ *) type level = -| Error -| Warn -| Debug -| Info + | Error + | Warn + | Debug + | Info module D=Debug.Make(struct let name="rt" end) let log (fmt: ('a, unit, string, 'b) format4) = - Printf.kprintf (fun s -> D.info "%s" s) fmt + Printf.kprintf (fun s -> D.info "%s" s) fmt diff --git a/ocaml/xe-cli/rt/networks.ml b/ocaml/xe-cli/rt/networks.ml index 8d8339ff85e..f19609d4a76 100644 --- a/ocaml/xe-cli/rt/networks.ml +++ b/ocaml/xe-cli/rt/networks.ml @@ -28,40 +28,40 @@ let log = Testlog.log let name_prefix = "testnetwork" (* Make a fresh name *) -let make_name = +let make_name = let counter = ref 0 in fun () -> let name = name_prefix ^ (string_of_int !counter) in incr counter; name -(** Ensure we don't have existing networks with 'name_prefix' becaues this - might confuse the tests *) -let delete_existing_networks cli = +(** Ensure we don't have existing networks with 'name_prefix' becaues this + might confuse the tests *) +let delete_existing_networks cli = let existing_networks = Cliops.get_networks cli in List.iter (fun name -> - if String.startswith name_prefix name then begin - log Info "Deleting network: %s" name; - ignore(Cliops.remove_network cli name) - end - ) existing_networks + if String.startswith name_prefix name then begin + log Info "Deleting network: %s" name; + ignore(Cliops.remove_network cli name) + end + ) existing_networks -let run_n_times n f cli = +let run_n_times n f cli = for i = 1 to n do f cli done (** Create a network, check a bridge has been created. Remove the network and check the bridge has gone. Must be run from dom0. 
*) -let network_create_destroy cli vmid = - let list_bridge () = +let network_create_destroy cli vmid = + let list_bridge () = let ifaces = List.filter Netdev.Link.is_up (Netdev.list ()) in List.filter (fun x -> Netdev.network.Netdev.exists x) ifaces in delete_existing_networks cli; - + (** Run (f x) and return (bridges created, bridges destroyed) *) - let bridge_change f x = + let bridge_change f x = let set_difference a b = List.filter (fun x -> not(List.mem x b)) a in let before = list_bridge () in let uuid = f x in @@ -73,23 +73,23 @@ let network_create_destroy cli vmid = let newvif = Cliops.add_nic cli vmid ("3","random",newnet) in ignore(Cliops.plug_nic cli newvif); (newnet,newvif) in - + let remove_network cli (netuuid,vifuuid) = let (_: string list) = Cliops.unplug_nic cli vifuuid in let (_: string list) = Cliops.remove_nic cli vifuuid in let (_: string list) = Cliops.remove_network cli netuuid in "" in - + (** Add a new network (with hopefully a fresh name) and return the name. Check that exactly one bridge is created. 
*) - let add () = + let add () = let name = make_name () in let created, destroyed, uuids = bridge_change (add_network cli) name in (* print_endline (Printf.sprintf "created = %s" (List.hd created)); *) if destroyed <> [] || (List.length created <> 1) then failwith (Printf.sprintf "Expected exactly one bridge to be created during a network-add; instead we got: created = [ %s ]; destroyed = [ %s ]" (String.concat "; " created) (String.concat "; " destroyed)); uuids in - let remove uuids = + let remove uuids = let created, destroyed,_ = bridge_change (remove_network cli) uuids in (* print_endline (Printf.sprintf "destroyed = %s" (List.hd destroyed)); *) if created <> [] || (List.length destroyed <> 1) @@ -100,14 +100,14 @@ let network_create_destroy cli vmid = let network_create_destroy n cli vmid = run_n_times n (network_create_destroy cli) vmid -let vlan_create_destroy cli = +let vlan_create_destroy cli = delete_existing_networks cli; try (* make a fresh network reference *) let net = make_name () in log Info "Adding network %s" net; let net_uuid = Cliops.add_network cli net in - + (* Choose an existing PIF (with no VLAN tag!) to use as a base device *) let pifs = Cliops.get_pifs cli in if pifs = [] then failwith "Couldn't find an existing PIF to create a VLAN on"; @@ -116,22 +116,22 @@ let vlan_create_destroy cli = let pif_device = Cliops.get_pif_device cli pif in let tag : string = string_of_int (Random.int 100) in - + log Info "Adding PIF with vlan tag %s" tag; let pif_uuid = Cliops.create_vlan cli pif tag net_uuid in - + let ifaces = List.filter Netdev.Link.is_up (Netdev.list ()) in let expected_iface = pif_device ^ "." 
^ tag in if not(List.mem expected_iface ifaces) then failwith (Printf.sprintf "Failed to find interface: %s" expected_iface); - + log Info "Deleting PIF with vlan tag %s" tag; let (_: string list) = Cliops.remove_pif cli pif_uuid in - + let ifaces = List.filter Netdev.Link.is_up (Netdev.list ()) in if List.mem expected_iface ifaces then failwith (Printf.sprintf "Failed to delete interface: %s" expected_iface); - + log Info "Deleting network %s" net; Cliops.remove_network cli net_uuid with e -> @@ -142,7 +142,7 @@ let vlan_create_destroy cli = let vlan_create_destroy n cli = run_n_times n vlan_create_destroy cli (* -let _ = +let _ = let cli = Util.cli_onhost_with_pwd "xenroot" in for i = 0 to 100 do network_create_destroy cli; diff --git a/ocaml/xe-cli/rt/parsers.ml b/ocaml/xe-cli/rt/parsers.ml index 86f5425388e..c126eeb74cd 100644 --- a/ocaml/xe-cli/rt/parsers.ml +++ b/ocaml/xe-cli/rt/parsers.ml @@ -20,7 +20,7 @@ let explode str c = try let i = String.index_from str start c in inner (i+1) ((String.sub str start (i-start))::cur) - with + with Not_found -> (String.sub str start (String.length str - start))::cur in List.rev (inner 0 []) @@ -41,27 +41,27 @@ let zap_whitespace s = (* Two functions to parse the output of 'xe'. 
Result is a list of association lists *) let parse_record ls = - let rec inner ls cur = - match ls with - ""::rest -> (cur,rest) - | l::ls -> - let colon = String.index l ':' in - let token = zap_whitespace (String.sub l 0 colon) in - let value = zap_whitespace (String.sub l (colon+1) (String.length l - colon - 1)) in - inner ls ((token,value)::cur) - | _ -> (cur,[]) + let rec inner ls cur = + match ls with + ""::rest -> (cur,rest) + | l::ls -> + let colon = String.index l ':' in + let token = zap_whitespace (String.sub l 0 colon) in + let value = zap_whitespace (String.sub l (colon+1) (String.length l - colon - 1)) in + inner ls ((token,value)::cur) + | _ -> (cur,[]) in inner ls [] -let parse ls = - let rec inner ls cur = - match ls with - [] -> cur - | records -> - let (record,rest) = parse_record ls in - inner rest (record::cur) - in - inner ls [] +let parse ls = + let rec inner ls cur = + match ls with + [] -> cur + | records -> + let (record,rest) = parse_record ls in + inner rest (record::cur) + in + inner ls [] (* Get the last domid from a run of 'list_domains' *) @@ -78,7 +78,7 @@ let parse_ld ls = match ls with [] -> cur | l::ls -> - inner ls ((parse_ld_line l)::cur) + inner ls ((parse_ld_line l)::cur) in inner (List.tl ls) [] @@ -90,7 +90,7 @@ let parse_ipconfig ls = let colon = String.index ipline ':' in let ipaddr = zap_whitespace (String.sub ipline (colon+1) (String.length ipline - colon - 1)) in ipaddr - + (* Extract the ip address from a linux guest - done on guest now*) let parse_ifconfig ls = @@ -98,15 +98,15 @@ let parse_ifconfig ls = zap_whitespace (List.hd ls) with _ -> raise (Failure "Failed to parse IP address") - + (* Try to parse it as a windows ipconfig output first, if not, a linux type *) let parse_ip ls = - try + try parse_ipconfig ls - with + with _ -> parse_ifconfig ls - + diff --git a/ocaml/xe-cli/rt/test_host.ml b/ocaml/xe-cli/rt/test_host.ml index b333ad2273f..65b06dd7496 100644 --- a/ocaml/xe-cli/rt/test_host.ml +++ 
b/ocaml/xe-cli/rt/test_host.ml @@ -14,37 +14,37 @@ (* Util to test various aspects of the new control stack *) (* Tests: - * - * powerstate - cycle through various power states of the vms + * + * powerstate - cycle through various power states of the vms * disk - add and remove disks lots while vm is down * vif - add and remove vifs lots while vm is down * param - set and reset parameters, making sure things are consistent - *) +*) let alltests = [ ("clone",Tests.clone_test); ("powerstate",Tests.powerstate); ("offlinedisk",Tests.offline_disk); ("diskguestverified",Tests.disk_guest_verified); -(* ("vif",Tests.vif); - ("onlinevif",Tests.online_vif); *) + (* ("vif",Tests.vif); + ("onlinevif",Tests.online_vif); *) ("importexport",Tests.importexport); ("param",Tests.param); ("cd",Tests.cd_guest_verified); -(* ("net", Tests.offline_network); *)] - + (* ("net", Tests.offline_network); *)] + let _ = let tests = ref "" in let vms = ref "" in let all = ref false in Arg.parse [ - ("-t",Arg.Set_string tests,"Comma separated lists of tests (no spaces!) tests are: "^(String.concat "," (List.map fst alltests))); - ("-v",Arg.Set_string vms,"Comma separated list of VMS to run the tests on (no spaces!)"); - ("-i",Arg.Set_string Cliops.iface,"Interface on which to listen for IPs"); - ("-a",Arg.Set all,"Run all the tests") ] + ("-t",Arg.Set_string tests,"Comma separated lists of tests (no spaces!) tests are: "^(String.concat "," (List.map fst alltests))); + ("-v",Arg.Set_string vms,"Comma separated list of VMS to run the tests on (no spaces!)"); + ("-i",Arg.Set_string Cliops.iface,"Interface on which to listen for IPs"); + ("-a",Arg.Set all,"Run all the tests") ] (fun _ -> raise (Failure "Invalid argument! 
(try -help for help)")) "VM testing utility"; - let cli : Util.t_cli = Util.cli_onhost in + let cli : Util.t_cli = Util.cli_onhost in let version = Cliops.get_version cli in let short_version = Cliops.get_short_version cli in let tests = Parsers.explode !tests ',' in @@ -53,16 +53,16 @@ let _ = let vms = Parsers.explode vms ',' in let vms = List.map (fun vm -> Cliops.get_uuid cli vm) vms in (try - List.iter - (fun test -> - let testfn = (List.assoc test alltests) cli in - List.iter (fun vm -> if not !Tests.fatal_error then testfn vm) vms) tests; - with _ -> ()); + List.iter + (fun test -> + let testfn = (List.assoc test alltests) cli in + List.iter (fun vm -> if not !Tests.fatal_error then testfn vm) vms) tests; + with _ -> ()); Testlog.output_html version "test_log.html"; Testlog.output_html version (Printf.sprintf "test_log_%s.html" short_version); Testlog.output_txt "test_log.txt"; Testlog.output_xenrt "test_log.xml" - - - + + + diff --git a/ocaml/xe-cli/rt/testlog.ml b/ocaml/xe-cli/rt/testlog.ml index 270dd46c9c7..6c73c124040 100644 --- a/ocaml/xe-cli/rt/testlog.ml +++ b/ocaml/xe-cli/rt/testlog.ml @@ -13,10 +13,10 @@ *) (* Log the tests status *) -type result = - | Success +type result = + | Success | Warning - | Fail + | Fail let string_of_result = function | Success -> "Success" @@ -45,7 +45,7 @@ let log level (fmt: ('a, unit, string, unit) format4) : 'a = | _ -> () end; Log.log fmt - + let set_ignore_errors b = ignore_errors := b (* Once a test is completed, it is registered here. 
A test_log *) @@ -53,8 +53,8 @@ let set_ignore_errors b = ignore_errors := b (* the code above over the course of the test *) type vm=string - -type test_type = + +type test_type = OfflineVM of vm | OnlineVM of vm | GuestVerified of vm @@ -69,16 +69,16 @@ let test_type_to_string = function type timestamp = string type test_info = { - test_result : result; - test_ts : timestamp; - test_type: test_type; - test_name: string; - test_class: string; - test_desc: string; - test_log: string list; - test_pic: string option; + test_result : result; + test_ts : timestamp; + test_type: test_type; + test_name: string; + test_class: string; + test_desc: string; + test_log: string list; + test_pic: string option; } - + let tests = ref ([] : test_info list) @@ -87,16 +87,16 @@ module StringSet = Set.Make(struct type t=string let compare=compare end) let get_all_classes tests = let foldfn set t = - StringSet.add t.test_class set + StringSet.add t.test_class set in - List.fold_left foldfn StringSet.empty tests + List.fold_left foldfn StringSet.empty tests let get_all_test_names tests = let foldfn set t = - let testname = t.test_name in - StringSet.add testname set + let testname = t.test_name in + StringSet.add testname set in - List.fold_left foldfn StringSet.empty tests + List.fold_left foldfn StringSet.empty tests let get_all_vms tests = let foldfn set t = @@ -104,7 +104,7 @@ let get_all_vms tests = OfflineVM vm -> StringSet.add vm set | OnlineVM vm -> StringSet.add vm set | GuestVerified vm -> StringSet.add vm set - | Other -> StringSet.add "none" set + | Other -> StringSet.add "none" set in List.fold_left foldfn StringSet.empty tests @@ -133,19 +133,19 @@ let get_combined_log t xapi_log = let comblog = zip xelog xapilog in comblog -let testloganchor test vm = test^vm -let testxapiloganchor test vm = "xapi"^test^vm -let testlogurl test vm = "#"^(testloganchor test vm) -let testxapilogurl test vm = (testxapiloganchor test vm)^".html" -let testpicurl test vm = (testxapiloganchor 
test vm)^".jpg" +let testloganchor test vm = test^vm +let testxapiloganchor test vm = "xapi"^test^vm +let testlogurl test vm = "#"^(testloganchor test vm) +let testxapilogurl test vm = (testxapiloganchor test vm)^".html" +let testpicurl test vm = (testxapiloganchor test vm)^".jpg" (* Big ugly function to output some HTML *) let output_html version fname = let oc = open_out fname in Printf.fprintf oc "%s" ("Test Results"^ - ""^ - ""^ - "

Test Results

\n"); + ""^ + ""^ + "

Test Results

\n"); Printf.fprintf oc "

Xapi version: %s

" version; @@ -155,7 +155,7 @@ let output_html version fname = Printf.fprintf oc "
"; let vms = get_all_vms tests in let classes = get_all_classes tests in - + Printf.fprintf oc "

%s

\n" test_type; Printf.fprintf oc "
"; let vm_func vm = @@ -163,50 +163,50 @@ let output_html version fname = in StringSet.iter vm_func vms; Printf.fprintf oc "\n"; - + let class_func classname = - let tests = List.filter - (fun t -> t.test_class=classname) tests in + let tests = List.filter + (fun t -> t.test_class=classname) tests in let testnames = get_all_test_names tests in - + Printf.fprintf oc "\n"; let test_func test = - let tests = List.filter (fun t -> t.test_name=test) tests in - let desc = (List.hd tests).test_desc in - Printf.fprintf oc "" test desc; - let vm_func vm = - begin - try - let t = List.find (fun t -> vm=get_vm_name t.test_type) tests in - let r = t.test_result in - Printf.fprintf oc "" - (string_of_result r) (string_of_result r) (testloganchor test vm) - (match (List.hd tests).test_pic with None -> "" | Some x -> "pic") (testxapilogurl test vm); - with - _ -> Printf.fprintf oc ""; - end; - Printf.fprintf oc ""; - in - StringSet.iter vm_func vms; - Printf.fprintf oc "\n" + let tests = List.filter (fun t -> t.test_name=test) tests in + let desc = (List.hd tests).test_desc in + Printf.fprintf oc "" test desc; + let vm_func vm = + begin + try + let t = List.find (fun t -> vm=get_vm_name t.test_type) tests in + let r = t.test_result in + Printf.fprintf oc "" + (string_of_result r) (string_of_result r) (testloganchor test vm) + (match (List.hd tests).test_pic with None -> "" | Some x -> "pic") (testxapilogurl test vm); + with + _ -> Printf.fprintf oc ""; + end; + Printf.fprintf oc ""; + in + StringSet.iter vm_func vms; + Printf.fprintf oc "\n" in StringSet.iter test_func testnames; Printf.fprintf oc "\n" - + in - + StringSet.iter class_func classes; - + Printf.fprintf oc "
Test nameDescription
%s%s%s
command log
%sxapi log
 
%s%s%s
command log
%sxapi log
 
 
\n"; Printf.fprintf oc "\n" - in + in let online_tests = List.filter (fun t -> match t.test_type with OnlineVM _ -> true | _ -> false) !tests in do_test_type_with_vm "Online tests (VM in running state)" online_tests; - + let offline_tests = List.filter (fun t -> match t.test_type with OfflineVM _ -> true | _ -> false) !tests in do_test_type_with_vm "Offline tests (VM in stopped state)" offline_tests; - + let verified_tests = List.filter (fun t -> match t.test_type with GuestVerified _ -> true | _ -> false) !tests in do_test_type_with_vm "Guest verified tests" verified_tests; @@ -216,7 +216,7 @@ let output_html version fname = Printf.fprintf oc "
 
\n"; (* Now do output the logs *) - + let dolog t = let vm=get_vm_name t.test_type in let anchor = testloganchor t.test_name vm in @@ -229,8 +229,8 @@ let output_html version fname = Printf.fprintf oc "

Test result: %s

" (string_of_result t.test_result); Printf.fprintf oc "

Timestamp: %s

" t.test_ts; Printf.fprintf oc "
";
-    List.iter (fun s -> 
-      Printf.fprintf oc "%s\n" s) t.test_log;
+    List.iter (fun s ->
+        Printf.fprintf oc "%s\n" s) t.test_log;
     Printf.fprintf oc "
"; Printf.fprintf oc "\n" in @@ -248,10 +248,10 @@ let output_xenrt_chan oc = let test_to_xml test = let vm_name = get_vm_name test.test_type in Xml.Element ("test",[],[ - Xml.Element ("name",[],[Xml.PCData (test.test_name^"_"^vm_name)]); - Xml.Element ("state",[],[Xml.PCData (string_of_result test.test_result)]); - Xml.Element ("log",[],[Xml.PCData (String.concat "\n" test.test_log)]); - ]) + Xml.Element ("name",[],[Xml.PCData (test.test_name^"_"^vm_name)]); + Xml.Element ("state",[],[Xml.PCData (string_of_result test.test_result)]); + Xml.Element ("log",[],[Xml.PCData (String.concat "\n" test.test_log)]); + ]) in Xml.Element("group",[],List.map test_to_xml tests) in @@ -270,26 +270,26 @@ let output_txt fname = Printf.fprintf oc "Test report\n"; Printf.fprintf oc "Time: %s\n" (Debug.gettimestring ()); Printf.fprintf oc "\n\n"; - + let printtest t = let vm = match t.test_type with - OfflineVM x -> x - | OnlineVM x -> x - | GuestVerified x-> x - | Other -> "none" in + OfflineVM x -> x + | OnlineVM x -> x + | GuestVerified x-> x + | Other -> "none" in Printf.fprintf oc "VM: %10s test:%40s result: %20s\n" vm t.test_name (string_of_result t.test_result) in - + List.iter printtest (List.rev !tests); Printf.fprintf oc "Detailed logs\n"; - + let printtest t = let vm = match t.test_type with - OfflineVM x -> x - | OnlineVM x -> x - | GuestVerified x -> x - | Other -> "none" in + OfflineVM x -> x + | OnlineVM x -> x + | GuestVerified x -> x + | Other -> "none" in Printf.fprintf oc "VM: %s\ntest: %s\ndescription: %s\nresult: %s\n" vm t.test_name t.test_desc (string_of_result t.test_result); Printf.fprintf oc "Log:\n"; List.iter (fun l -> Printf.fprintf oc "%s" l) (t.test_log) @@ -298,18 +298,18 @@ let output_txt fname = List.iter printtest (List.rev !tests); close_out oc - - + + let register_test name test_type class_name description xapi_log pic = let log = List.rev (get_log ()) in let timestamp = Debug.gettimestring () in let vm=get_vm_name test_type in let picurl = 
testpicurl name vm in - let pic = - (match pic with - None -> None - | Some x -> let (_: int) = Sys.command (Printf.sprintf "mv %s %s" x picurl) in Some picurl) in + let pic = + (match pic with + None -> None + | Some x -> let (_: int) = Sys.command (Printf.sprintf "mv %s %s" x picurl) in Some picurl) in let test_info = { test_result= !test_status_flag; test_ts=timestamp; @@ -326,10 +326,10 @@ let register_test name test_type class_name description xapi_log pic = let oc=open_out url in let comblog = get_combined_log t xapi_log in Printf.fprintf oc "
\n";
-  List.iter (fun (l,t) -> 
-    if t="xapi" 
-    then Printf.fprintf oc "      XAPI %s\n" l
-    else Printf.fprintf oc "%s\n" l) comblog;
+  List.iter (fun (l,t) ->
+      if t="xapi"
+      then Printf.fprintf oc "      XAPI %s\n" l
+      else Printf.fprintf oc "%s\n" l) comblog;
   Printf.fprintf oc "
\n"; close_out oc; output_html "" "test_in_progress.html" diff --git a/ocaml/xe-cli/rt/tests.ml b/ocaml/xe-cli/rt/tests.ml index e8bcade7a2d..90e69805bd5 100644 --- a/ocaml/xe-cli/rt/tests.ml +++ b/ocaml/xe-cli/rt/tests.ml @@ -26,24 +26,24 @@ let fatal_error = ref false let runtest (cli : Util.t_cli) test_type (name,func,clas,desc) = Testlog.reset_log (); reset_xapi_log cli; - let pic = + let pic = begin Printf.fprintf stderr "Running test %s\n" name; flush_all (); - try - func (); - if !Cliops.pic <> "" then Some !Cliops.pic else None - with - CliOpFailed cmdlog -> - log Error "Failure of cli command. Command output follows:"; - List.iter (fun s -> log Error "%s" s) cmdlog; - Some !Cliops.pic - | OpFailed msg -> - log Error "Operation failed! msg=%s" msg; - Some !Cliops.pic - | e -> - log Error "Uncaught exception! %s" (Printexc.to_string e); - Some !Cliops.pic + try + func (); + if !Cliops.pic <> "" then Some !Cliops.pic else None + with + CliOpFailed cmdlog -> + log Error "Failure of cli command. Command output follows:"; + List.iter (fun s -> log Error "%s" s) cmdlog; + Some !Cliops.pic + | OpFailed msg -> + log Error "Operation failed! msg=%s" msg; + Some !Cliops.pic + | e -> + log Error "Uncaught exception! %s" (Printexc.to_string e); + Some !Cliops.pic end in Cliops.pic := ""; @@ -56,17 +56,17 @@ let runtest (cli : Util.t_cli) test_type (name,func,clas,desc) = (* Power state checks * * Start and stop the VM, checking that it comes up correctly, - * and also make sure that inappropriate state changes are + * and also make sure that inappropriate state changes are * prevented by xapi *) let powerstate (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in -(* WARNING: very dumb thing here of checking whether a guest has PV drivers or not *) -(* by checking whether the VM name has 'pv' in it! *) + (* WARNING: very dumb thing here of checking whether a guest has PV drivers or not *) + (* by checking whether the VM name has 'pv' in it! 
*) let is_pv = String.endswith "pv" domainname in - + let ps_init () = log Info "Test: VM NAME='%s'" domainname; log Info "Test: Initial powerstate test"; @@ -81,13 +81,13 @@ let powerstate (cli : Util.t_cli) vmid = change_vm_state_fail cli vmid Resume; change_vm_state_fail cli vmid Suspend; in - + let ps_start () = log Info "Starting VM"; fail_if_state_isnt cli vmid "halted"; change_vm_state cli vmid Start; in - + let ps_started_statechange_failures () = log Info "Checking for failure of some powerstate operations"; fail_if_state_isnt cli vmid "running"; @@ -98,7 +98,7 @@ let powerstate (cli : Util.t_cli) vmid = let ps_suspend () = log Info "Suspending VM"; fail_if_state_isnt cli vmid "running"; - change_vm_state cli vmid Suspend + change_vm_state cli vmid Suspend in let ps_suspended_statechange_failures () = @@ -118,7 +118,7 @@ let powerstate (cli : Util.t_cli) vmid = let ps_reboot () = log Info "Rebooting VM"; fail_if_state_isnt cli vmid "running"; - change_vm_state cli vmid Reboot; + change_vm_state cli vmid Reboot; in let ps_shutdown () = @@ -135,13 +135,13 @@ let powerstate (cli : Util.t_cli) vmid = try change_vm_state cli vmid Shutdown; Cliops.use_gt := true - with e -> + with e -> Cliops.use_gt := true; (* Make sure that whatever happens, we reset this flag *) - raise e + raise e in - let start_tests = + let start_tests = [("ps_init__________________________",ps_init,"powerstate", "Initial setup of the powerstate tests. The VM's status \ is queried and everything possible is done to make sure it's down. The \ @@ -167,24 +167,24 @@ let powerstate (cli : Util.t_cli) vmid = ("ps_resume________________________",ps_resume,"powerstate-nonhvm","Resume the VM")] in - let end_tests = + let end_tests = [("ps_reboot________________________",ps_reboot,"powerstate","Reboot the VM. This test uses the guest agent"); ("ps_shutdown______________________",ps_shutdown,"powerstate","Shutdown the VM. 
This test uses the guest agent")] in - + let noagent_tests = [("ps_shutdown_no_agent_____________",ps_shutdown_no_agent,"powerstate","Startup and shutdown the VM purely using the cli")] in - let tests = - if is_suspendable cli vmid + let tests = + if is_suspendable cli vmid then start_tests @ suspend_tests @ end_tests else start_tests @ end_tests in - let tests = + let tests = if is_pv then tests @ noagent_tests else tests in - + List.iter (runtest cli (Testlog.OnlineVM domainname)) tests let clone_test (cli : Util.t_cli) vmid = @@ -193,9 +193,9 @@ let clone_test (cli : Util.t_cli) vmid = log Info "Test: Testing clone operation"; log Info "Uninstalling any clones"; (try - let cloneuuid = get_uuid cli "clone" in - ignore(uninstall cli cloneuuid) - with _ -> ()); + let cloneuuid = get_uuid cli "clone" in + ignore(uninstall cli cloneuuid) + with _ -> ()); ensure_vm_down cli vmid 0; let runclone () = log Info "Cloning VM"; @@ -204,20 +204,20 @@ let clone_test (cli : Util.t_cli) vmid = log Info "Starting VM"; begin try - change_vm_state cli newvmid Start; - ignore (get_client_ip newvmid) (* Will throw exception if the VM has failed to register *) - with _ -> - log Error "VM failed to start correctly!" + change_vm_state cli newvmid Start; + ignore (get_client_ip newvmid) (* Will throw exception if the VM has failed to register *) + with _ -> + log Error "VM failed to start correctly!" 
end; log Info "Shutting down VM"; set_ignore_errors true; (* Error if the VM fails to start *) begin try - change_vm_state cli newvmid Shutdown; - set_ignore_errors false + change_vm_state cli newvmid Shutdown; + set_ignore_errors false with e -> - set_ignore_errors false; - raise e + set_ignore_errors false; + raise e end; log Info "Uninstalling VM"; ignore(uninstall cli newvmid) @@ -226,7 +226,7 @@ let clone_test (cli : Util.t_cli) vmid = let tests = [("vm_clone_________________________",runclone,"clone","Clone the VM and ensure it still starts")] in - List.iter (runtest cli (Testlog.GuestVerified domainname)) tests + List.iter (runtest cli (Testlog.GuestVerified domainname)) tests let cd_guest_verified (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in @@ -265,11 +265,11 @@ let cd_guest_verified (cli : Util.t_cli) vmid = let lines = List.map String.lowercase lines in let results = List.map (fun (a,b,c,d) -> grep lines d) cdset in let results = List.flatten results in - if List.length results <> List.length cdset + if List.length results <> List.length cdset then begin - log Error "CD test failed"; - log Error "Expected results containing: %s" (String.concat "," (List.map (fun (a,b,c,d) -> d) cdset)); + log Error "CD test failed"; + log Error "Expected results containing: %s" (String.concat "," (List.map (fun (a,b,c,d) -> d) cdset)); end; log Info "Stopping VM and detaching all cds"; set_ignore_errors true; @@ -277,7 +277,7 @@ let cd_guest_verified (cli : Util.t_cli) vmid = set_ignore_errors false; remove_all_cds cli vmid in - + let cd_hotplug cdset = log Info "Starting CD hotplug test"; checkcdset cdset; @@ -290,36 +290,36 @@ let cd_guest_verified (cli : Util.t_cli) vmid = (* set_ignore_errors false;*) try for i = 1 to 100 do - List.iter cdattach cdset; - log Info "Attached cd(s)"; - let cduuids = List.map (fun (a,b,c,d) -> b) cdset in - check_attached_cds cli vmid cduuids; - log Info "Attached cd(s) verified by xapi"; - let cddevices 
= List.map (fun (a,b,c,d) -> c) cdset in - let gacmd = "checkcd " ^ (String.concat " " cddevices) in - let gacmdfail = "checkcdfail " ^ (String.concat " " cddevices) in - let ip = get_client_ip vmid in - let lines = run_ga_command ip gacmd in - let lines = List.map String.lowercase lines in - let results = List.map (fun (a,b,c,d) -> grep lines d) cdset in - let results = List.flatten results in - if List.length results <> List.length cdset - then - begin - log Error "CD test failed"; - log Error "Expected results containing: %s" (String.concat "," (List.map (fun (a,b,c,d) -> d) cdset)); - end; - log Info "Test succeeded! Detaching all cds"; - remove_all_cds cli vmid; - let lines = run_ga_command ip gacmdfail in - let lines = List.map String.lowercase lines in - let results = List.map (fun (a,b,c,d) -> grep lines d) cdset in - let results = List.flatten results in - if List.length results <> 0 then - begin - log Error "CD test failed"; - log Error "Found non-null results when nothing was expected!" - end + List.iter cdattach cdset; + log Info "Attached cd(s)"; + let cduuids = List.map (fun (a,b,c,d) -> b) cdset in + check_attached_cds cli vmid cduuids; + log Info "Attached cd(s) verified by xapi"; + let cddevices = List.map (fun (a,b,c,d) -> c) cdset in + let gacmd = "checkcd " ^ (String.concat " " cddevices) in + let gacmdfail = "checkcdfail " ^ (String.concat " " cddevices) in + let ip = get_client_ip vmid in + let lines = run_ga_command ip gacmd in + let lines = List.map String.lowercase lines in + let results = List.map (fun (a,b,c,d) -> grep lines d) cdset in + let results = List.flatten results in + if List.length results <> List.length cdset + then + begin + log Error "CD test failed"; + log Error "Expected results containing: %s" (String.concat "," (List.map (fun (a,b,c,d) -> d) cdset)); + end; + log Info "Test succeeded! 
Detaching all cds"; + remove_all_cds cli vmid; + let lines = run_ga_command ip gacmdfail in + let lines = List.map String.lowercase lines in + let results = List.map (fun (a,b,c,d) -> grep lines d) cdset in + let results = List.flatten results in + if List.length results <> 0 then + begin + log Error "CD test failed"; + log Error "Found non-null results when nothing was expected!" + end done; remove_all_cds cli vmid; log Info "Test succeeded! Shutting VM down"; @@ -328,35 +328,35 @@ let cd_guest_verified (cli : Util.t_cli) vmid = set_ignore_errors false with e -> remove_all_cds cli vmid; raise e in - - let tests = + + let tests = [("cd_test_3________________________",(fun () -> cd_test [cd1]),"cd", - "Attaching cd to device 3 while VM is stopped, booting, verifying the guest can read it, shutting down "^ - "and detaching the cd"); -(* ("cd_test_3_4",(fun () -> cd_test [cd1;cd2]),"cd", - "Attaching cds to devices 3 and 4 while VM is stopped, booting, verifying the guest can read it, shutting down "^ - "and detaching the cds"); -*) + "Attaching cd to device 3 while VM is stopped, booting, verifying the guest can read it, shutting down "^ + "and detaching the cd"); + (* ("cd_test_3_4",(fun () -> cd_test [cd1;cd2]),"cd", + "Attaching cds to devices 3 and 4 while VM is stopped, booting, verifying the guest can read it, shutting down "^ + "and detaching the cds"); + *) ("cd_test_4________________________",(fun () -> cd_test [cd2]),"cd", - "Attaching cd to device 4 while VM is stopped, booting, verifying the guest can read it, shutting down "^ - "and detaching the cd") -] in + "Attaching cd to device 4 while VM is stopped, booting, verifying the guest can read it, shutting down "^ + "and detaching the cd") + ] in let hotplug_tests = [("cd_hotplug_3_____________________",(fun () -> cd_hotplug [cd1]),"cd", - "Attaching cd to device 3 while VM is started then verifying the guest can read it, repeated 100 times. 
"); + "Attaching cd to device 3 while VM is started then verifying the guest can read it, repeated 100 times. "); (* ("cd_hotplug_3_4",(fun () -> cd_hotplug [cd1;cd2]),"cd", "Attaching cd to device 3 while VM is started then verifying the guest can read it, repeated 100 times. "); *) ("cd_hotplug_4_____________________",(fun () -> cd_hotplug [cd2]),"cd", - "Attaching cd to device 3 while VM is started then verifying the guest can read it, repeated 100 times. ") -] in - - let tests = + "Attaching cd to device 3 while VM is started then verifying the guest can read it, repeated 100 times. ") + ] in + + let tests = if is_suspendable cli vmid (* Only PV Linux can hotplug cds *) then tests @ hotplug_tests else tests in - + List.iter (runtest cli (Testlog.GuestVerified domainname)) tests @@ -378,21 +378,21 @@ let importexport (cli : Util.t_cli) vmid = let newvmid=get_uuid cli newname in change_vm_state cli newvmid Start; begin - try - ignore(get_client_ip newvmid) (* Will throw exception if the VM has failed to register *) - with - Not_found -> - log Error "VM Failed to start correctly" + try + ignore(get_client_ip newvmid) (* Will throw exception if the VM has failed to register *) + with + Not_found -> + log Error "VM Failed to start correctly" end; log Info "Shutting down VM"; set_ignore_errors true; (* Don't error if the VM fails to stop *) begin - try - change_vm_state cli newvmid Shutdown; - set_ignore_errors false - with e -> - set_ignore_errors false; - raise e + try + change_vm_state cli newvmid Shutdown; + set_ignore_errors false + with e -> + set_ignore_errors false; + raise e end; log Info "Uninstalling VM"; ignore(uninstall cli newvmid); @@ -401,14 +401,14 @@ let importexport (cli : Util.t_cli) vmid = let (_: string list) = set_param cli vmid "name-label" domainname in raise e in - + let tests = [("vm_importexport__________________",test,"importexport","Export the VM, import it, and check that it still starts")] in - + List.iter (runtest cli 
(Testlog.GuestVerified domainname)) tests - + (* NB. this test requires a vm called 'debian-pv' which has dosfstools installed *) let disk_guest_verified (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in @@ -417,11 +417,11 @@ let disk_guest_verified (cli : Util.t_cli) vmid = log Info "Test: Testing disk operations (guest verified)"; ensure_vm_down cli vmid 0; ensure_vm_down cli debianuuid 0; - + let disk_test () = log Info "Setting up test disk"; let (vdi_uuid,vbd_uuid) = add_disk cli debianuuid ("hdd","20MiB") in - + log Info "Disk added. Starting debian-pv"; change_vm_state cli debianuuid Start; let (_: string list) = run_ga_command (get_client_ip debianuuid) "setuptestdisk hdd" in @@ -440,33 +440,33 @@ let disk_guest_verified (cli : Util.t_cli) vmid = log Info "Checking readability of attached disk"; let rec get_err_string err n = if n>30 then err else begin - Unix.sleep 1; - let gacmd = "checkmountdisk hdd1" in - let ip = get_client_ip vmid in - let lines = run_ga_command ip gacmd in - let lines = List.map String.lowercase lines in - let results = grep lines "testing" in - if List.length results <> 1 then - get_err_string (Some lines) (n+1) - else - None + Unix.sleep 1; + let gacmd = "checkmountdisk hdd1" in + let ip = get_client_ip vmid in + let lines = run_ga_command ip gacmd in + let lines = List.map String.lowercase lines in + let results = grep lines "testing" in + if List.length results <> 1 then + get_err_string (Some lines) (n+1) + else + None end in - match get_err_string None 0 with - | Some lines -> - log Error "Disk test failed!"; - log Error "Expected results containing: 'testing'"; - log Error "Returned strings:"; - List.iter (fun line -> log Error "%s" line) lines; - fatal_error:=true; - | None -> - log Info "Stopping VM and detaching disk"; - set_ignore_errors true; - change_vm_state cli vmid Shutdown; - set_ignore_errors false; - ignore(destroy_disk cli (vdi_uuid,newvbd)) + match get_err_string None 0 with + | Some 
lines -> + log Error "Disk test failed!"; + log Error "Expected results containing: 'testing'"; + log Error "Returned strings:"; + List.iter (fun line -> log Error "%s" line) lines; + fatal_error:=true; + | None -> + log Info "Stopping VM and detaching disk"; + set_ignore_errors true; + change_vm_state cli vmid Shutdown; + set_ignore_errors false; + ignore(destroy_disk cli (vdi_uuid,newvbd)) in - + (* let disk_hotplug cdset = log Info "Starting CD hotplug test"; @@ -478,7 +478,7 @@ let disk_guest_verified (cli : Util.t_cli) vmid = change_vm_state cli vmid Start; (* set_ignore_errors false;*) let rec doit n = - if n=0 then () + if n=0 then () else begin List.iter cdattach cdset; log Info "Attached cd(s)"; @@ -492,7 +492,7 @@ let disk_guest_verified (cli : Util.t_cli) vmid = let lines = List.map String.lowercase lines in let results = List.map (fun (a,b,c) -> grep lines c) cdset in let results = List.flatten results in - if List.length results <> List.length cdset + if List.length results <> List.length cdset then begin log Error "CD test failed"; @@ -517,24 +517,24 @@ let disk_guest_verified (cli : Util.t_cli) vmid = set_ignore_errors false in *) - - let tests = + + let tests = [("disk_test________________________",disk_test,"disk", - "Attaches a formatted disk to a VM and checks that it can be read")] in - + "Attaches a formatted disk to a VM and checks that it can be read")] in + List.iter (runtest cli (Testlog.GuestVerified domainname)) tests - - + + let offline_disk (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in let test () = let disks = [("2","1"); (* CA-25864: trigger the backend to always round up *) - ("3","41943040"); - ("4","83886080")] in + ("3","41943040"); + ("4","83886080")] in let disks = List.filter (fun (d,_) -> (check_disk_ok cli vmid d)) disks in log Info "Test: VM NAME='%s'" domainname; log Info "Test: Adding/removing disks from stopped VM"; - log Info "Disk list: %s" (String.concat "," (List.map fst disks)); + log 
Info "Disk list: %s" (String.concat "," (List.map fst disks)); let vdivbds = List.map (fun d -> add_disk cli vmid d) disks in List.iter (fun (vdi,vbd) -> log Info "Added vdi uuid=%s vbd uuid=%s" vdi vbd) vdivbds; let results = List.map (check_disk_size cli vmid) disks in @@ -544,26 +544,26 @@ let offline_disk (cli : Util.t_cli) vmid = List.iter (fun d -> ignore (destroy_disk cli d)) vdivbds; let results = List.map (check_disk_size cli vmid) disks in let allfalse = not (List.fold_left (fun a b -> a || b) false results) in - if not allfalse then + if not allfalse then log Error "Error removing disks!: %s" (String.concat " " (List.map (fun b -> if b then "t" else "f") results)) else log Info "Disk test succeeded!" - in - let tests = + in + let tests = [("offline_disk_____________________",test,"disk", - "Attaches up to 4 disks to the VM while offline, checks that they're reported to be attached, then removes them")] in + "Attaches up to 4 disks to the VM while offline, checks that they're reported to be attached, then removes them")] in List.iter (runtest cli (Testlog.OfflineVM domainname)) tests - + let vif (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in log Info "Test: VM NAME='%s'" domainname; log Info "Test: Adding/removing VIFs from stopped VM"; let nets = Cliops.get_networks cli in - let test_net net = + let test_net net = log Info "Testing network %s" net; let vifs = [("1","11:22:33:44:55:66",net); - ("2","12:34:56:78:9A:BC",net); - ("3","98:76:54:32:10:00",net)] in + ("2","12:34:56:78:9A:BC",net); + ("3","98:76:54:32:10:00",net)] in let testfunc (name,mac,network) = log Info "Adding VIF to VM"; let vifuuid = add_nic cli vmid (name,mac,network) in @@ -574,9 +574,9 @@ let vif (cli : Util.t_cli) vmid = (try let (_: string * string * string) = get_nic_params cli vifuuid in raise (Failure ("VIF "^name^" still present!")) with Failure x -> raise (Failure x)); in List.iter testfunc vifs - in + in List.iter test_net nets; - log Info "VIF 
test succeeded!" + log Info "VIF test succeeded!" let online_vif (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in @@ -586,18 +586,18 @@ let online_vif (cli : Util.t_cli) vmid = let test_net net = log Info "Testing network %s" net; let vifs = [("1","11:22:33:44:55:66",net); - ("2","12:34:56:78:9A:BC",net); - ("3","98:76:54:32:10:00",net)] in + ("2","12:34:56:78:9A:BC",net); + ("3","98:76:54:32:10:00",net)] in let testfunc (name,mac,net) = log Info "Making sure VM is currently down"; ensure_vm_down cli vmid 0; log Info "Adding VIF"; let vifuuid = add_nic cli vmid (name,mac,net) in log Info "Powering up VM"; - change_vm_state cli vmid Start; + change_vm_state cli vmid Start; let result = run_ga_command (get_client_ip vmid) "checkvif" in let lines = grep result mac in - if List.length lines <> 1 + if List.length lines <> 1 then raise (Failure ("Error, MAC not found in result! result was:"^(String.concat "\n" result))); change_vm_state cli vmid Shutdown; log Info "Removing VIF"; @@ -608,30 +608,30 @@ let online_vif (cli : Util.t_cli) vmid = List.iter test_net nets; log Info "Online VIF test succeeded!" -let offline_network (cli : Util.t_cli) vmid = +let offline_network (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in if domainname="debian-pv" then begin log Info "Beginning network tests using debian-pv"; - + Cliops.change_vm_state cli vmid Start; - - let network_create_destroy _ = - log Info "Test: Offline network test"; - Networks.network_create_destroy 100 cli vmid; - log Info "Offline Network test succeeded!" in - let vlan_create_destroy _ = - log Info "Test: Offline VLAN test"; - Networks.vlan_create_destroy 100 cli; - log Info "Offline VLAN test succeeded!" 
in - - let tests = - [("net_create_destroy",network_create_destroy,"net", - "Repeatedly creates and destroys networks, checking that bridges are created and destroyed in dom0"); - ("vlan_create_destroy", vlan_create_destroy, "net", - "Repeatedly creates and destroys PIFs with VLAN tags, checking that the right interfaces are being created and destroyed in dom0"); - ] in - + + let network_create_destroy _ = + log Info "Test: Offline network test"; + Networks.network_create_destroy 100 cli vmid; + log Info "Offline Network test succeeded!" in + let vlan_create_destroy _ = + log Info "Test: Offline VLAN test"; + Networks.vlan_create_destroy 100 cli; + log Info "Offline VLAN test succeeded!" in + + let tests = + [("net_create_destroy",network_create_destroy,"net", + "Repeatedly creates and destroys networks, checking that bridges are created and destroyed in dom0"); + ("vlan_create_destroy", vlan_create_destroy, "net", + "Repeatedly creates and destroys PIFs with VLAN tags, checking that the right interfaces are being created and destroyed in dom0"); + ] in + List.iter (runtest cli Testlog.Other) tests end @@ -640,13 +640,13 @@ let param (cli : Util.t_cli) vmid = let domainname = get_param cli vmid "name-label" in let test () = let params = [("name-description","Testing testing!"); - ("user-version","100"); - ("VCPUs-max","2"); - ("PV-kernel","whatever"); - ] in + ("user-version","100"); + ("VCPUs-max","2"); + ("PV-kernel","whatever"); + ] in log Info "Test: VM NAME='%s'" domainname; log Info "Testing setting/resetting parameters on stopped VM"; - let testfunc (param,value) = + let testfunc (param,value) = let before = get_param cli vmid param in let (_: string list) = set_param cli vmid param value in let after = get_param cli vmid param in @@ -658,7 +658,7 @@ let param (cli : Util.t_cli) vmid = List.iter testfunc params; log Info "Parameter test succeeded!" 
in - let tests = + let tests = [("offline_param____________________",test,"param", "Reads, sets, checks, resets, and verifies VM parameters")] in diff --git a/ocaml/xe-cli/rt/util.ml b/ocaml/xe-cli/rt/util.ml index 5acdb02522d..b1c75158370 100644 --- a/ocaml/xe-cli/rt/util.ml +++ b/ocaml/xe-cli/rt/util.ml @@ -35,14 +35,14 @@ let run_command ?(dolog=true) cmd = let read_str () = try while true do - result := (input_line ic)::(!result); + result := (input_line ic)::(!result); done with _ -> () in let _ = read_str() in result := List.rev !result; if dolog then List.iter (fun l -> log Log.Debug "%s" l) !result; let rc = Unix.close_process_in ic in - (!result,rc) + (!result,rc) let run_command_with_timeout cmd timeout = @@ -50,7 +50,7 @@ let run_command_with_timeout cmd timeout = let ic = Unix.open_process_in cmd in let f = Unix.descr_of_in_channel ic in let (a,b,c) = (Unix.select [f] [] [] timeout) in - if List.length a = 0 + if List.length a = 0 then (log Log.Debug "Command timed out"; ignore_int (Sys.command "killall nc"); @@ -59,9 +59,9 @@ let run_command_with_timeout cmd timeout = let result : (string list) ref = ref [] in let read_str () = try - while true do - result := (input_line ic) :: (!result) - done + while true do + result := (input_line ic) :: (!result) + done with _ -> () in let _ = read_str() in result := List.rev (!result); @@ -70,27 +70,27 @@ let run_command_with_timeout cmd timeout = Some (!result,rc) type pwspec = - | NoPassword - | Password of string - | PasswordFile of string + | NoPassword + | Password of string + | PasswordFile of string let cli_with_pwspec ?(dolog=true) is_offhost cmd params pwspec = let rec mk_params l = match l with - [] -> "" - | ((k,v)::kvs) -> k^"=\""^v^"\""^" "^(mk_params kvs) in + [] -> "" + | ((k,v)::kvs) -> k^"=\""^v^"\""^" "^(mk_params kvs) in let param_str = mk_params params in let cli_base_string = (!Commands.xe)^" "^cmd ^(if is_offhost then - " -h "^(!host) else "") + " -h "^(!host) else "") ^" " ^(match pwspec 
with - | NoPassword -> "" - | Password s -> "-u "^user^" -pw "^s - | PasswordFile s -> "-pwf "^s) + | NoPassword -> "" + | Password s -> "-u "^user^" -pw "^s + | PasswordFile s -> "-pwf "^s) ^" "^param_str in - run_command ~dolog cli_base_string + run_command ~dolog cli_base_string let cli_offhost_with_pwspec ?dolog cmd params pwspec = cli_with_pwspec ?dolog true cmd params pwspec @@ -110,7 +110,7 @@ let cli_offhost_with_pwf ?dolog pwf cmd params = let cli_offhost ?dolog cmd params = cli_offhost_with_pwd ?dolog password cmd params - + (* Misc util funcs *) @@ -119,7 +119,7 @@ let grep lines patt = List.filter (fun l -> try ignore(Str.search_forward regex l 0); true with _ -> false) lines - - + + diff --git a/ocaml/xsh/xsh.ml b/ocaml/xsh/xsh.ml index 27f53a482d8..71a5cb66873 100644 --- a/ocaml/xsh/xsh.ml +++ b/ocaml/xsh/xsh.ml @@ -15,68 +15,68 @@ type endpoint = { fdin: Unix.file_descr; fdout: Unix.file_descr; mutable buffer: string; mutable buffer_len: int } let make_endpoint fdin fdout = { - fdin = fdin; - fdout = fdout; - buffer = String.make 4096 '\000'; - buffer_len = 0 + fdin = fdin; + fdout = fdout; + buffer = String.make 4096 '\000'; + buffer_len = 0 } let proxy (ain: Unix.file_descr) (aout: Unix.file_descr) (bin: Unix.file_descr) (bout: Unix.file_descr) = - let a' = make_endpoint ain aout and b' = make_endpoint bin bout in - Unix.set_nonblock ain; - Unix.set_nonblock aout; - Unix.set_nonblock bin; - Unix.set_nonblock bout; + let a' = make_endpoint ain aout and b' = make_endpoint bin bout in + Unix.set_nonblock ain; + Unix.set_nonblock aout; + Unix.set_nonblock bin; + Unix.set_nonblock bout; - let can_read x = - x.buffer_len < (String.length x.buffer - 1) in - let can_write x = - x.buffer_len > 0 in - let write_from x y = - let written = Unix.single_write y.fdout x.buffer 0 x.buffer_len in - String.blit x.buffer written x.buffer 0 (x.buffer_len - written); - x.buffer_len <- x.buffer_len - written in - let read_into x = - let read = Unix.read x.fdin x.buffer 
x.buffer_len (String.length x.buffer - x.buffer_len) in - if read = 0 then raise End_of_file; - x.buffer_len <- x.buffer_len + read in + let can_read x = + x.buffer_len < (String.length x.buffer - 1) in + let can_write x = + x.buffer_len > 0 in + let write_from x y = + let written = Unix.single_write y.fdout x.buffer 0 x.buffer_len in + String.blit x.buffer written x.buffer 0 (x.buffer_len - written); + x.buffer_len <- x.buffer_len - written in + let read_into x = + let read = Unix.read x.fdin x.buffer x.buffer_len (String.length x.buffer - x.buffer_len) in + if read = 0 then raise End_of_file; + x.buffer_len <- x.buffer_len + read in - try - while true do - let r = (if can_read a' then [ ain ] else []) @ (if can_read b' then [ bin ] else []) in - let w = (if can_write a' then [ bout ] else []) @ (if can_write b' then [ aout ] else []) in + try + while true do + let r = (if can_read a' then [ ain ] else []) @ (if can_read b' then [ bin ] else []) in + let w = (if can_write a' then [ bout ] else []) @ (if can_write b' then [ aout ] else []) in - let r, w, _ = Unix.select r w [] (-1.0) in - (* Do the writing before the reading *) - List.iter (fun fd -> if aout = fd then write_from b' a' else write_from a' b') w; - List.iter (fun fd -> if ain = fd then read_into a' else read_into b') r - done - with _ -> - (try Unix.clear_nonblock ain with _ -> ()); - (try Unix.clear_nonblock bin with _ -> ()); - (try Unix.clear_nonblock aout with _ -> ()); - (try Unix.clear_nonblock bout with _ -> ()); - (try Unix.close ain with _ -> ()); - (try Unix.close bin with _ -> ()); - (try Unix.close aout with _ -> ()); - (try Unix.close bout with _ -> ()) + let r, w, _ = Unix.select r w [] (-1.0) in + (* Do the writing before the reading *) + List.iter (fun fd -> if aout = fd then write_from b' a' else write_from a' b') w; + List.iter (fun fd -> if ain = fd then read_into a' else read_into b') r + done + with _ -> + (try Unix.clear_nonblock ain with _ -> ()); + (try Unix.clear_nonblock bin 
with _ -> ()); + (try Unix.clear_nonblock aout with _ -> ()); + (try Unix.clear_nonblock bout with _ -> ()); + (try Unix.close ain with _ -> ()); + (try Unix.close bin with _ -> ()); + (try Unix.close aout with _ -> ()); + (try Unix.close bout with _ -> ()) -let open_tcp_ssl server = +let open_tcp_ssl server = let port = 443 in (* We don't bother closing fds since this requires our close_and_exec wrapper *) - let x = Stunnel.connect ~use_fork_exec_helper:false - ~write_to_log:(fun _ -> ()) server port in + let x = Stunnel.connect ~use_fork_exec_helper:false + ~write_to_log:(fun _ -> ()) server port in x.Stunnel.fd let _ = let host = Sys.argv.(1) in let cmd = Sys.argv.(2) in Stunnel.set_legacy_protocol_and_ciphersuites_allowed - (try bool_of_string (Sys.getenv "XSH_SSL_LEGACY") with _ -> failwith "ssl_legacy not specified"); + (try bool_of_string (Sys.getenv "XSH_SSL_LEGACY") with _ -> failwith "ssl_legacy not specified"); Stunnel.set_good_ciphersuites - (try Sys.getenv "XSH_GOOD_CIPHERSUITES" with _ -> failwith "Good ciphersuites not specified"); + (try Sys.getenv "XSH_GOOD_CIPHERSUITES" with _ -> failwith "Good ciphersuites not specified"); Stunnel.set_legacy_ciphersuites - (try Sys.getenv "XSH_LEGACY_CIPHERSUITES" with _ -> ""); + (try Sys.getenv "XSH_LEGACY_CIPHERSUITES" with _ -> ""); let session = try Sys.getenv "XSH_SESSION" with _ -> failwith "Session not provided" in let args = List.map (fun arg -> "&arg="^arg) (List.tl (List.tl (List.tl (Array.to_list Sys.argv)))) in let req = Printf.sprintf "CONNECT /remotecmd?session_id=%s&cmd=%s%s http/1.0\r\n\r\n" session cmd (String.concat "" args) in diff --git a/ocaml/xstest/bench.ml b/ocaml/xstest/bench.ml index 74590f86ebd..e43eb0e73d6 100644 --- a/ocaml/xstest/bench.ml +++ b/ocaml/xstest/bench.ml @@ -12,131 +12,131 @@ * GNU Lesser General Public License for more details. *) let avg_fct arr = - let sum = ref 0. in - Array.iter (fun v -> sum := !sum +. v) arr; - !sum /. 
(float_of_int (Array.length arr)) + let sum = ref 0. in + Array.iter (fun v -> sum := !sum +. v) arr; + !sum /. (float_of_int (Array.length arr)) let min_fct arr = - let min = ref 10000. in - Array.iter (fun v -> if !min > v then min := v else ()) arr; - !min + let min = ref 10000. in + Array.iter (fun v -> if !min > v then min := v else ()) arr; + !min let max_fct arr = - let max = ref 0. in - Array.iter (fun v -> if !max < v then max := v else ()) arr; - !max + let max = ref 0. in + Array.iter (fun v -> if !max < v then max := v else ()) arr; + !max let variance_fct arr = - let n = ref 0 and mean = ref 0. and s = ref 0. in - Array.iter (fun x -> - n := !n + 1; - let delta = x -. !mean in - mean := !mean +. delta /. (float_of_int !n); - s := !s +. delta *. (x -. !mean)) arr; - !s /. (float_of_int (!n - 1)) + let n = ref 0 and mean = ref 0. and s = ref 0. in + Array.iter (fun x -> + n := !n + 1; + let delta = x -. !mean in + mean := !mean +. delta /. (float_of_int !n); + s := !s +. delta *. (x -. !mean)) arr; + !s /. (float_of_int (!n - 1)) let output chan name (avg, min, max, var, o, all) = - let s = Printf.sprintf "[%s] = avg: %.6f | min: %.6f | max: %.6f | V: %.3f [%d]\n%!" - name avg min max (var *. 1000000.) o in - output_string chan s; - flush chan + let s = Printf.sprintf "[%s] = avg: %.6f | min: %.6f | max: %.6f | V: %.3f [%d]\n%!" + name avg min max (var *. 1000000.) o in + output_string chan s; + flush chan let once (f: unit -> unit) = - let before = Unix.gettimeofday () in - f (); - let after = Unix.gettimeofday () in - after -. before + let before = Unix.gettimeofday () in + f (); + let after = Unix.gettimeofday () in + after -. before let multi ?times:(times=100000) (f: unit -> unit): float * float * float * float * int * float array = - let ring = Ring.make times 0. in - for i = 0 to times - do - try - let x = once f in - Ring.push ring x - with exn -> - Printf.printf "bench exception: %s\n%!" 
(Printexc.to_string exn) - done; + let ring = Ring.make times 0. in + for i = 0 to times + do + try + let x = once f in + Ring.push ring x + with exn -> + Printf.printf "bench exception: %s\n%!" (Printexc.to_string exn) + done; - let arr = Ring.get ring in + let arr = Ring.get ring in - let avg = avg_fct arr - and min = min_fct arr - and max = max_fct arr - and var = variance_fct arr in + let avg = avg_fct arr + and min = min_fct arr + and max = max_fct arr + and var = variance_fct arr in - avg, min, max, var, 0, arr + avg, min, max, var, 0, arr let multithread ?times:(times=100000) testname fcts = - let len = Array.length fcts in - let start = ref false in - let stopped = ref 0 in - - let avgs = Array.make len 0. - and maxs = Array.make len 0. - and mins = Array.make len 0. - and vars = Array.make len 0. - and alls = Array.make len 0. - and others = Array.make len 0 in - - ignore alls; - - let start_on i f = - while not !start - do - Thread.yield () - done; - let (avg, min, max, var, r, all) = f () in - avgs.(i) <- avg; - mins.(i) <- min; - maxs.(i) <- max; - vars.(i) <- var; - others.(i) <- r; - - incr stopped - in - - Array.iteri (fun i f -> - ignore (Thread.create (fun () -> start_on i f) ())) fcts; - start := true; - - while !stopped < len do Thread.yield () done; - - for i = 0 to len - 1 - do - output stdout testname (avgs.(i), mins.(i), maxs.(i), vars.(i), others.(i), alls.(i)) - done; - () + let len = Array.length fcts in + let start = ref false in + let stopped = ref 0 in + + let avgs = Array.make len 0. + and maxs = Array.make len 0. + and mins = Array.make len 0. + and vars = Array.make len 0. + and alls = Array.make len 0. 
+ and others = Array.make len 0 in + + ignore alls; + + let start_on i f = + while not !start + do + Thread.yield () + done; + let (avg, min, max, var, r, all) = f () in + avgs.(i) <- avg; + mins.(i) <- min; + maxs.(i) <- max; + vars.(i) <- var; + others.(i) <- r; + + incr stopped + in + + Array.iteri (fun i f -> + ignore (Thread.create (fun () -> start_on i f) ())) fcts; + start := true; + + while !stopped < len do Thread.yield () done; + + for i = 0 to len - 1 + do + output stdout testname (avgs.(i), mins.(i), maxs.(i), vars.(i), others.(i), alls.(i)) + done; + () let multifork ?times:(times=100000) testname fcts = - let len = Array.length fcts in - let pids = Array.make len 0 in - - let file = "/tmp/start" in - Unix.close (Unix.openfile file [ Unix.O_CREAT ] 0o777); - - Array.iteri (fun i f -> - let pid = Unix.fork () in - if pid = 0 then ( - try - while true do - ignore(Unix.stat file); - Thread.delay 0.00001 - done - with _ -> (); - output stdout testname (f ()) ; - (exit 0) - ) else - pids.(i) <- pid - ) fcts; - (* start signal *) - (try Unix.unlink file with _ -> ()); - - let childdead = ref 0 in - while !childdead < len - do - match Unix.waitpid [] (-1) with - | pid, Unix.WEXITED ret -> incr childdead - | _ -> Printf.printf "hum\n%!" - done; - (*Printf.printf "out !\n%!";*) + let len = Array.length fcts in + let pids = Array.make len 0 in + + let file = "/tmp/start" in + Unix.close (Unix.openfile file [ Unix.O_CREAT ] 0o777); + + Array.iteri (fun i f -> + let pid = Unix.fork () in + if pid = 0 then ( + try + while true do + ignore(Unix.stat file); + Thread.delay 0.00001 + done + with _ -> (); + output stdout testname (f ()) ; + (exit 0) + ) else + pids.(i) <- pid + ) fcts; + (* start signal *) + (try Unix.unlink file with _ -> ()); + + let childdead = ref 0 in + while !childdead < len + do + match Unix.waitpid [] (-1) with + | pid, Unix.WEXITED ret -> incr childdead + | _ -> Printf.printf "hum\n%!" 
+ done; + (*Printf.printf "out !\n%!";*) diff --git a/ocaml/xstest/bm.ml b/ocaml/xstest/bm.ml index 47ea513dd64..60d060a5e85 100644 --- a/ocaml/xstest/bm.ml +++ b/ocaml/xstest/bm.ml @@ -16,147 +16,147 @@ open Printf open Xenstore let fill_store () = - let xsh = Xs.daemon_open () in - List.iter (fun (a,b) -> xsh.Xs.write a b) - [ "/benchs", ""; - "/benchs/ooo/x", "1"; - "/benchs/xxx", "xxx"; - "/benchs/xxx/qwr", "206"; - "/benchs/yyy", "yyy"; - "/benchs/yyy/www", "2901"; - "/benchs/zzz", "zzz"; - "/benchs/zzz/xyz", "zzz"; - "/benchs/1", ""; - "/benchs/1/www", "abc"; - "/benchs/2", ""; - "/benchs/2/qwr", "avds"; - "/benchs/3", ""; - "/benchs/3/x", "xxx"; - "/benchs/4", ""; - "/benchs/4/xyz", "abc"; ]; - Xs.close xsh + let xsh = Xs.daemon_open () in + List.iter (fun (a,b) -> xsh.Xs.write a b) + [ "/benchs", ""; + "/benchs/ooo/x", "1"; + "/benchs/xxx", "xxx"; + "/benchs/xxx/qwr", "206"; + "/benchs/yyy", "yyy"; + "/benchs/yyy/www", "2901"; + "/benchs/zzz", "zzz"; + "/benchs/zzz/xyz", "zzz"; + "/benchs/1", ""; + "/benchs/1/www", "abc"; + "/benchs/2", ""; + "/benchs/2/qwr", "avds"; + "/benchs/3", ""; + "/benchs/3/x", "xxx"; + "/benchs/4", ""; + "/benchs/4/xyz", "abc"; ]; + Xs.close xsh let bench_single () = - let xsh = Xs.daemon_open () in - Bench.output stdout "single read" (Bench.multi ~times:1000 (fun () -> - ignore (xsh.Xs.read "/benchs/xxx"))); - Xs.close xsh + let xsh = Xs.daemon_open () in + Bench.output stdout "single read" (Bench.multi ~times:1000 (fun () -> + ignore (xsh.Xs.read "/benchs/xxx"))); + Xs.close xsh let bench_single_wr () = - let xsh = Xs.daemon_open () in - Bench.output stdout "single write" (Bench.multi ~times:1000 (fun () -> - xsh.Xs.write "/benchs/zzz/123" "abc")); - Xs.close xsh + let xsh = Xs.daemon_open () in + Bench.output stdout "single write" (Bench.multi ~times:1000 (fun () -> + xsh.Xs.write "/benchs/zzz/123" "abc")); + Xs.close xsh let really_maybe_yield () = - if Random.bool () then Thread.delay 0.00000001 + if Random.bool () then 
Thread.delay 0.00000001 let maybe_yield () = - (* if Random.bool () then Thread.yield () *) - if true then Thread.delay 0.00000001 + (* if Random.bool () then Thread.yield () *) + if true then Thread.delay 0.00000001 let bench_transaction ?(single_read_path=false) ~test_name ~times tests = - let doit ~read_path ~write_path name xsh = - try - let i = ref 0 in - Xs.transaction xsh (fun t -> - incr i; - let v = - try t.Xst.read (read_path ^ "/www") - with _ -> - t.Xst.write (read_path ^ "/www") "2901"; - "2901" - in - t.Xst.write (write_path ^ "/abc") v; - t.Xst.write (write_path ^ "/abc2") (v ^ "abc2"); - ); - !i - with exn -> - printf "%s raise an exception %s\n%!" name (Printexc.to_string exn); - raise exn - in - - let fct fct_transact () = - let xsh = Xs.daemon_open () in - let eagain = ref 0 in - - let a1, a2, a3, a4, a5, a6 = Bench.multi ~times (fun () -> - try - let i = (fct_transact xsh) in - if i > 1 then - eagain := !eagain + i - 1 - with exn -> - printf "X exception: %s\n%!" (Printexc.to_string exn)) in - Xs.close xsh; - a1, a2, a3, a4, !eagain, a6 in - - let generate offset i = - Array.init i (fun i -> - let i1 = i + 1 in - let write_path = sprintf "/benchs/%d" (offset + i1) in - let read_path = if single_read_path then "/benchs/0" else write_path in - let name = sprintf "%d" i1 in - fct (doit ~read_path ~write_path name) - ) - in - - let rec aux last_n = function - | n::t -> Bench.multifork (Printf.sprintf "%s%i" test_name n) (generate last_n n); aux n t - | [] -> () - in - - aux 0 tests + let doit ~read_path ~write_path name xsh = + try + let i = ref 0 in + Xs.transaction xsh (fun t -> + incr i; + let v = + try t.Xst.read (read_path ^ "/www") + with _ -> + t.Xst.write (read_path ^ "/www") "2901"; + "2901" + in + t.Xst.write (write_path ^ "/abc") v; + t.Xst.write (write_path ^ "/abc2") (v ^ "abc2"); + ); + !i + with exn -> + printf "%s raise an exception %s\n%!" 
name (Printexc.to_string exn); + raise exn + in + + let fct fct_transact () = + let xsh = Xs.daemon_open () in + let eagain = ref 0 in + + let a1, a2, a3, a4, a5, a6 = Bench.multi ~times (fun () -> + try + let i = (fct_transact xsh) in + if i > 1 then + eagain := !eagain + i - 1 + with exn -> + printf "X exception: %s\n%!" (Printexc.to_string exn)) in + Xs.close xsh; + a1, a2, a3, a4, !eagain, a6 in + + let generate offset i = + Array.init i (fun i -> + let i1 = i + 1 in + let write_path = sprintf "/benchs/%d" (offset + i1) in + let read_path = if single_read_path then "/benchs/0" else write_path in + let name = sprintf "%d" i1 in + fct (doit ~read_path ~write_path name) + ) + in + + let rec aux last_n = function + | n::t -> Bench.multifork (Printf.sprintf "%s%i" test_name n) (generate last_n n); aux n t + | [] -> () + in + + aux 0 tests let bench_slowfast () = - let ft1 xsh = - let i = ref 0 in - ignore (Xs.transaction xsh (fun t -> - incr i; - let v = t.Xst.read "/benchs/1/www" in - List.iter (fun path -> - t.Xst.write ("/benchs/1/" ^ path) v; - really_maybe_yield ()) - [ "a"; "b"; "c"; "d"; "e"; "f"; "g"; "h"; "i" ]; - maybe_yield (); - )); !i - and ft2 xsh = - let i = ref 0 in - ignore (Xs.transaction xsh (fun t -> - incr i; - ignore (t.Xst.write "/benchs/2/www" "x"); - maybe_yield (); - )); !i - and ft3 xsh = - let i = ref 0 in - ignore (Xs.transaction xsh (fun t -> - incr i; - ignore (t.Xst.write "/benchs/3/www" "x"); - maybe_yield (); - )); !i - and ft4 xsh = - let i = ref 0 in - ignore (Xs.transaction xsh (fun t -> - incr i; - ignore (t.Xst.write "/benchs/4/www" "x"); - maybe_yield (); - )); !i - in - let fct fct_transact () = - let xsh = Xs.daemon_open () in - let eagain = ref 0 in - - let a1, a2, a3, a4, a5, a6 = Bench.multi ~times:10000 (fun () -> - try - let i = (fct_transact xsh) in - if i > 1 then - eagain := !eagain + i - 1 - with exn -> - printf "X exception: %s\n%!" 
(Printexc.to_string exn)) in - Xs.close xsh; - a1, a2, a3, a4, !eagain, a6 in - - Bench.multithread "sf2" [| (fct ft1); (fct ft2); |]; - Bench.multithread "sf4" [| (fct ft1); (fct ft2); (fct ft3); (fct ft4); |] + let ft1 xsh = + let i = ref 0 in + ignore (Xs.transaction xsh (fun t -> + incr i; + let v = t.Xst.read "/benchs/1/www" in + List.iter (fun path -> + t.Xst.write ("/benchs/1/" ^ path) v; + really_maybe_yield ()) + [ "a"; "b"; "c"; "d"; "e"; "f"; "g"; "h"; "i" ]; + maybe_yield (); + )); !i + and ft2 xsh = + let i = ref 0 in + ignore (Xs.transaction xsh (fun t -> + incr i; + ignore (t.Xst.write "/benchs/2/www" "x"); + maybe_yield (); + )); !i + and ft3 xsh = + let i = ref 0 in + ignore (Xs.transaction xsh (fun t -> + incr i; + ignore (t.Xst.write "/benchs/3/www" "x"); + maybe_yield (); + )); !i + and ft4 xsh = + let i = ref 0 in + ignore (Xs.transaction xsh (fun t -> + incr i; + ignore (t.Xst.write "/benchs/4/www" "x"); + maybe_yield (); + )); !i + in + let fct fct_transact () = + let xsh = Xs.daemon_open () in + let eagain = ref 0 in + + let a1, a2, a3, a4, a5, a6 = Bench.multi ~times:10000 (fun () -> + try + let i = (fct_transact xsh) in + if i > 1 then + eagain := !eagain + i - 1 + with exn -> + printf "X exception: %s\n%!" 
(Printexc.to_string exn)) in + Xs.close xsh; + a1, a2, a3, a4, !eagain, a6 in + + Bench.multithread "sf2" [| (fct ft1); (fct ft2); |]; + Bench.multithread "sf4" [| (fct ft1); (fct ft2); (fct ft3); (fct ft4); |] let single = ref false let single_wr = ref false @@ -168,26 +168,26 @@ type transaction_test = { times: int; name: string; tests: int list } let t1 = { times=1000; name="t"; tests=[2;4;8;16;32] } let t2 = { times=50; name="twr"; tests=[128] } -let classic () = - single := true; - single_wr := true; - transaction := true +let classic () = + single := true; + single_wr := true; + transaction := true let _ = Arg.parse (Arg.align - [ "--classic", Arg.Unit classic, " perform the 'single', 'single_wr' and 'transaction' tests"; - "--single", Arg.Set single, " perform the 'single' tests"; - "--single-wr", Arg.Set single_wr, " perform the 'single_wr' tests"; - "--transaction", Arg.Set transaction, " perform the 'transaction' tests"; - "--transaction_wr", Arg.Set transaction_wr, " perform the 'transaction_wr' tests"; - "--slowfast", Arg.Set slowfast, " perform the 'slowfast' tests"]) - (fun _ -> ()) - ("usage:"); - - (try fill_store (); with exn -> printf "fill_store exn\n%!"; raise exn); - if !single then (try bench_single (); with exn -> printf "bench_single exn\n%!"; raise exn); - if !single_wr then (try bench_single_wr (); with exn -> printf "bench_single_wr exn\n%!"; raise exn); - if !transaction then (try bench_transaction ~test_name:t1.name ~times:t1.times t1.tests; with exn -> printf "bench_trans exn\n%!"; raise exn); - if !transaction_wr then (try bench_transaction ~single_read_path:true ~test_name:t2.name ~times:t2.times t2.tests; with exn -> printf "bench_trans exn\n%!"; raise exn); - if !slowfast then (try bench_slowfast (); with exn -> printf "bench_slowfast exn\n%!"; raise exn); - () + [ "--classic", Arg.Unit classic, " perform the 'single', 'single_wr' and 'transaction' tests"; + "--single", Arg.Set single, " perform the 'single' tests"; + 
"--single-wr", Arg.Set single_wr, " perform the 'single_wr' tests"; + "--transaction", Arg.Set transaction, " perform the 'transaction' tests"; + "--transaction_wr", Arg.Set transaction_wr, " perform the 'transaction_wr' tests"; + "--slowfast", Arg.Set slowfast, " perform the 'slowfast' tests"]) + (fun _ -> ()) + ("usage:"); + + (try fill_store (); with exn -> printf "fill_store exn\n%!"; raise exn); + if !single then (try bench_single (); with exn -> printf "bench_single exn\n%!"; raise exn); + if !single_wr then (try bench_single_wr (); with exn -> printf "bench_single_wr exn\n%!"; raise exn); + if !transaction then (try bench_transaction ~test_name:t1.name ~times:t1.times t1.tests; with exn -> printf "bench_trans exn\n%!"; raise exn); + if !transaction_wr then (try bench_transaction ~single_read_path:true ~test_name:t2.name ~times:t2.times t2.tests; with exn -> printf "bench_trans exn\n%!"; raise exn); + if !slowfast then (try bench_slowfast (); with exn -> printf "bench_slowfast exn\n%!"; raise exn); + () diff --git a/ocaml/xstest/common.ml b/ocaml/xstest/common.ml index 2f5b6b95423..a3072c86c8c 100644 --- a/ocaml/xstest/common.ml +++ b/ocaml/xstest/common.ml @@ -21,38 +21,38 @@ open Xstringext open Xenbus let pkt_recv con = - let workdone = ref false in - while not !workdone - do - workdone := Xb.input con - done; - Xb.get_in_packet con + let workdone = ref false in + while not !workdone + do + workdone := Xb.input con + done; + Xb.get_in_packet con let pkt_send con = - if Xb.has_old_output con then - raise X; - let workdone = ref false in - while not !workdone - do - workdone := Xb.output con - done - -let send_packet con tid rid ty data = - Xb.queue con (Xb.Packet.create tid rid ty data); - pkt_send con; - () + if Xb.has_old_output con then + raise X; + let workdone = ref false in + while not !workdone + do + workdone := Xb.output con + done + +let send_packet con tid rid ty data = + Xb.queue con (Xb.Packet.create tid rid ty data); + pkt_send con; + () let 
recv_packet con = Xb.Packet.unpack (pkt_recv con) let daemon_socket = "/var/run/xenstored/socket" let open_xb () = - let sockaddr = Unix.ADDR_UNIX daemon_socket in - let sock = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in - Unix.connect sock sockaddr; - Unix.set_close_on_exec sock; - Xb.open_fd sock + let sockaddr = Unix.ADDR_UNIX daemon_socket in + let sock = Unix.socket Unix.PF_UNIX Unix.SOCK_STREAM 0 in + Unix.connect sock sockaddr; + Unix.set_close_on_exec sock; + Xb.open_fd sock let close_xb xb = - Xb.close xb + Xb.close xb diff --git a/ocaml/xstest/perms.ml b/ocaml/xstest/perms.ml index 019ec311e93..937ba658228 100644 --- a/ocaml/xstest/perms.ml +++ b/ocaml/xstest/perms.ml @@ -17,252 +17,252 @@ open Xenbus module Perm = struct - type access = NONE | READ | WRITE | RDWR - - let string_of_access = function - | NONE -> "n" - | READ -> "r" - | WRITE -> "w" - | RDWR -> "b" - - let make_access () = [|NONE; READ; WRITE; RDWR|].(Random.int 4) - - type t = - { owner : int; - other : access; - ack : (int * access) list } - - let string_of_ack = function - | [] -> "" - | ack -> String.concat "\000" (List.map (fun (i,p) -> sprintf "%s%i" (string_of_access p) i) ack) ^ "\000" - - let pretty_string_of_ack ack = - String.concat "," (List.map (fun (i,p) -> sprintf "%s%i" (string_of_access p) i) ack) - - let make_ack n = - let rec aux accu i = - let accu = - if Random.int 3 = 0 - then (i, make_access ()) :: accu - else accu - in - if i >= n - then accu - else aux accu (i+1) - in - aux [] 0 - - let to_string p = - sprintf "%s%i\000%s" (string_of_access p.other) p.owner (string_of_ack p.ack) - - let to_pretty_string p = - sprintf "%s%i,%s" (string_of_access p.other) p.owner (pretty_string_of_ack p.ack) - - let make n = - let o = Random.int (n+1) in - { owner = o; - other = make_access (); - ack = make_ack (n+1) } - - let write a = - a = WRITE || a = RDWR - - let read a = - a = READ || a = RDWR - - let can fn domain perm = - perm.owner = domain || - if List.mem_assoc domain 
perm.ack - then List.exists (fun (d,a) -> d=domain && fn a) perm.ack - else fn perm.other - - - let can_write = can write - let can_read = can read + type access = NONE | READ | WRITE | RDWR + + let string_of_access = function + | NONE -> "n" + | READ -> "r" + | WRITE -> "w" + | RDWR -> "b" + + let make_access () = [|NONE; READ; WRITE; RDWR|].(Random.int 4) + + type t = + { owner : int; + other : access; + ack : (int * access) list } + + let string_of_ack = function + | [] -> "" + | ack -> String.concat "\000" (List.map (fun (i,p) -> sprintf "%s%i" (string_of_access p) i) ack) ^ "\000" + + let pretty_string_of_ack ack = + String.concat "," (List.map (fun (i,p) -> sprintf "%s%i" (string_of_access p) i) ack) + + let make_ack n = + let rec aux accu i = + let accu = + if Random.int 3 = 0 + then (i, make_access ()) :: accu + else accu + in + if i >= n + then accu + else aux accu (i+1) + in + aux [] 0 + + let to_string p = + sprintf "%s%i\000%s" (string_of_access p.other) p.owner (string_of_ack p.ack) + + let to_pretty_string p = + sprintf "%s%i,%s" (string_of_access p.other) p.owner (pretty_string_of_ack p.ack) + + let make n = + let o = Random.int (n+1) in + { owner = o; + other = make_access (); + ack = make_ack (n+1) } + + let write a = + a = WRITE || a = RDWR + + let read a = + a = READ || a = RDWR + + let can fn domain perm = + perm.owner = domain || + if List.mem_assoc domain perm.ack + then List.exists (fun (d,a) -> d=domain && fn a) perm.ack + else fn perm.other + + + let can_write = can write + let can_read = can read end - + module Tree = struct - type node = - { name : string; - perm : Perm.t } - - let node_ref = ref 0 - - let make_name () = - incr node_ref; - sprintf "n%i" (!node_ref) - - let make_node nb_domains = - { name = make_name (); - perm = Perm.make nb_domains } - - type t = Node of node * t list | Leaf - - let rec make nb_domains nb_nodes = - let rec make_children nb_nodes_to_create = - match nb_nodes_to_create with - | 0 -> [] - | 1 -> [make 
nb_domains 1] - | _ -> - let nb_new_nodes = Random.int nb_nodes_to_create in - let nb_nodes_to_create = nb_nodes_to_create - nb_new_nodes in - make nb_domains nb_new_nodes :: make_children nb_nodes_to_create - in - if nb_nodes = 0 - then Leaf - else Node (make_node nb_domains, make_children (nb_nodes-1)) - - let iter fn tree = - let rec aux path = function - | Node (node, children) -> - let path = sprintf "%s/%s" path node.name in - fn path node; - List.iter (aux path) children - | Leaf -> () - in - aux "" tree + type node = + { name : string; + perm : Perm.t } + + let node_ref = ref 0 + + let make_name () = + incr node_ref; + sprintf "n%i" (!node_ref) + + let make_node nb_domains = + { name = make_name (); + perm = Perm.make nb_domains } + + type t = Node of node * t list | Leaf + + let rec make nb_domains nb_nodes = + let rec make_children nb_nodes_to_create = + match nb_nodes_to_create with + | 0 -> [] + | 1 -> [make nb_domains 1] + | _ -> + let nb_new_nodes = Random.int nb_nodes_to_create in + let nb_nodes_to_create = nb_nodes_to_create - nb_new_nodes in + make nb_domains nb_new_nodes :: make_children nb_nodes_to_create + in + if nb_nodes = 0 + then Leaf + else Node (make_node nb_domains, make_children (nb_nodes-1)) + + let iter fn tree = + let rec aux path = function + | Node (node, children) -> + let path = sprintf "%s/%s" path node.name in + fn path node; + List.iter (aux path) children + | Leaf -> () + in + aux "" tree end module Xenstore = struct - open Xb - open Tree - - type connection = - { domain: int; xb: Xb.t; mutable req_id: int } - - let make_connection n = - { domain = n; xb = open_xb (); req_id = 0 } - - let close_connection con = - close_xb con.xb - - let rec read_packet con = - let packet = pkt_recv con.xb in - if Packet.get_rid packet <> con.req_id - then read_packet con - else packet - - let write_packet con = - send_packet con.xb 0 con.req_id - - let ok = "OK" - let invalid_perms = "EACCES" - - let process op con data = - write_packet con op 
data; - invalid_perms <> Packet.get_data (read_packet con) - - let write = process Op.Write - let setperms = process Op.Setperms - let rm = process Op.Rm - let restrict = process Op.Restrict - let read = process Op.Read - - let ignore_bool (b:bool) = ignore b - - let with_connection n f = - let con = make_connection n in - if n <> 0 - then ignore_bool (restrict con (sprintf "%i\000" n)); - Pervasiveext.finally - (fun () -> f con) - (fun () -> close_connection con) - - let make tree = - with_connection 0 - (fun con -> iter - (fun path node -> - (* printf "create node '%s' with permissions {%s}\n" path (Perm.to_pretty_string node.perm); *) - ignore_bool (write con (sprintf "%s\000%s" path node.name)); - ignore_bool (setperms con (sprintf "%s\000%s" path (Perm.to_string node.perm)))) - tree) - - let clean = function - | Leaf -> () - | Node (node,_) -> with_connection 0 (fun con -> ignore_bool (rm con (sprintf "/%s\000" node.name))) + open Xb + open Tree + + type connection = + { domain: int; xb: Xb.t; mutable req_id: int } + + let make_connection n = + { domain = n; xb = open_xb (); req_id = 0 } + + let close_connection con = + close_xb con.xb + + let rec read_packet con = + let packet = pkt_recv con.xb in + if Packet.get_rid packet <> con.req_id + then read_packet con + else packet + + let write_packet con = + send_packet con.xb 0 con.req_id + + let ok = "OK" + let invalid_perms = "EACCES" + + let process op con data = + write_packet con op data; + invalid_perms <> Packet.get_data (read_packet con) + + let write = process Op.Write + let setperms = process Op.Setperms + let rm = process Op.Rm + let restrict = process Op.Restrict + let read = process Op.Read + + let ignore_bool (b:bool) = ignore b + + let with_connection n f = + let con = make_connection n in + if n <> 0 + then ignore_bool (restrict con (sprintf "%i\000" n)); + Pervasiveext.finally + (fun () -> f con) + (fun () -> close_connection con) + + let make tree = + with_connection 0 + (fun con -> iter + (fun 
path node -> + (* printf "create node '%s' with permissions {%s}\n" path (Perm.to_pretty_string node.perm); *) + ignore_bool (write con (sprintf "%s\000%s" path node.name)); + ignore_bool (setperms con (sprintf "%s\000%s" path (Perm.to_string node.perm)))) + tree) + + let clean = function + | Leaf -> () + | Node (node,_) -> with_connection 0 (fun con -> ignore_bool (rm con (sprintf "/%s\000" node.name))) end module Test = struct - open Xb - open Tree - open Xenstore - - let align tab s = - let n = String.length s in - let t = String.make tab ' ' in - String.blit s 0 t 0 n; - t - - let print_result node dom perm wanted succeed = - let aux b = - if b - then "success" - else "failure" - in - if wanted <> succeed - then begin - let s = - sprintf "<%s>, dom%i, %s: #%s (should be #%s)" node dom (Perm.to_pretty_string perm) (aux succeed) (aux wanted) - in - printf "%s[ERROR]\n%!" (align 80 s) - end - - let test test_fn apply_fn n tree = - with_connection n - (fun con -> - Tree.iter - (fun path node -> - con.req_id <- con.req_id + 1; - let result = apply_fn con path in - print_result node.name con.domain node.perm (test_fn con.domain node.perm) result) - tree) - - let write = test Perm.can_write (fun con path -> Xenstore.write con (sprintf "%s\000%i" path con.req_id)) - let read = test Perm.can_read (fun con path -> Xenstore.read con (sprintf "%s\000" path)) + open Xb + open Tree + open Xenstore + + let align tab s = + let n = String.length s in + let t = String.make tab ' ' in + String.blit s 0 t 0 n; + t + + let print_result node dom perm wanted succeed = + let aux b = + if b + then "success" + else "failure" + in + if wanted <> succeed + then begin + let s = + sprintf "<%s>, dom%i, %s: #%s (should be #%s)" node dom (Perm.to_pretty_string perm) (aux succeed) (aux wanted) + in + printf "%s[ERROR]\n%!" 
(align 80 s) + end + + let test test_fn apply_fn n tree = + with_connection n + (fun con -> + Tree.iter + (fun path node -> + con.req_id <- con.req_id + 1; + let result = apply_fn con path in + print_result node.name con.domain node.perm (test_fn con.domain node.perm) result) + tree) + + let write = test Perm.can_write (fun con path -> Xenstore.write con (sprintf "%s\000%i" path con.req_id)) + let read = test Perm.can_read (fun con path -> Xenstore.read con (sprintf "%s\000" path)) end - + let make nb_connections nb_nodes = - let test_write = ref true in - let test_read = ref true in - let clean = ref true in - - printf "=== creating the internal tree===\n%!"; - let tree = Tree.make nb_connections nb_nodes in - - printf "\n=== populating xenstore ===\n%!"; - Xenstore.make tree; - - if !test_write - then begin - printf "\n=== testing write permisssions ===\n%!"; - for i = 1 to nb_connections do - printf "testing connection %i\n%!" i; - Test.write i tree; - done; - end; - - if !test_read - then begin - printf "\n=== testing read permisssions ===\n%!"; - for i = 1 to nb_connections do - printf "testing connection %i\n%!" i; - Test.read i tree; - done; - end; - - if !clean - then begin - printf "\n=== cleaning xenstore ===\n%!"; - Xenstore.clean tree - end + let test_write = ref true in + let test_read = ref true in + let clean = ref true in + + printf "=== creating the internal tree===\n%!"; + let tree = Tree.make nb_connections nb_nodes in + + printf "\n=== populating xenstore ===\n%!"; + Xenstore.make tree; + + if !test_write + then begin + printf "\n=== testing write permisssions ===\n%!"; + for i = 1 to nb_connections do + printf "testing connection %i\n%!" i; + Test.write i tree; + done; + end; + + if !test_read + then begin + printf "\n=== testing read permisssions ===\n%!"; + for i = 1 to nb_connections do + printf "testing connection %i\n%!" 
i; + Test.read i tree; + done; + end; + + if !clean + then begin + printf "\n=== cleaning xenstore ===\n%!"; + Xenstore.clean tree + end let _ = - Random.self_init (); - make 10 500 + Random.self_init (); + make 10 500 diff --git a/ocaml/xstest/xstest.ml b/ocaml/xstest/xstest.ml index 24c0d95fed5..aa08a4059d2 100644 --- a/ocaml/xstest/xstest.ml +++ b/ocaml/xstest/xstest.ml @@ -20,113 +20,113 @@ open Common open Xenbus let tnb data = - try int_of_string (List.hd (String.split '\000' data)) - with _ -> 0 + try int_of_string (List.hd (String.split '\000' data)) + with _ -> 0 let do_test xb = - let i = ref 0 in - let disp op pkt = - let tid, rid, ty, data = pkt in - printf "%3x %s %d(%d) %s \"%s\".\n%!" !i op tid rid (Xb.Op.to_string ty) (String.escaped data); - in - let y (tid, ty, data) = - let spkt = (tid, !i, ty, data) in - disp "S: " spkt; - send_packet xb tid !i ty data; - let rpkt = recv_packet xb in - disp "R: " rpkt; - incr i; - let (_, _, _, data) = rpkt in - data - in - let x spkt = ignore (y spkt) in - (* no arguments *) - x (0, Xb.Op.Read, "\000"); - x (0, Xb.Op.Write, "\000"); - x (0, Xb.Op.Getdomainpath, "\000"); - x (0, Xb.Op.Directory, "\000"); - x (0, Xb.Op.Mkdir, "\000"); - x (0, Xb.Op.Getperms, "\000"); - x (0, Xb.Op.Setperms, "\000"); + let i = ref 0 in + let disp op pkt = + let tid, rid, ty, data = pkt in + printf "%3x %s %d(%d) %s \"%s\".\n%!" 
!i op tid rid (Xb.Op.to_string ty) (String.escaped data); + in + let y (tid, ty, data) = + let spkt = (tid, !i, ty, data) in + disp "S: " spkt; + send_packet xb tid !i ty data; + let rpkt = recv_packet xb in + disp "R: " rpkt; + incr i; + let (_, _, _, data) = rpkt in + data + in + let x spkt = ignore (y spkt) in + (* no arguments *) + x (0, Xb.Op.Read, "\000"); + x (0, Xb.Op.Write, "\000"); + x (0, Xb.Op.Getdomainpath, "\000"); + x (0, Xb.Op.Directory, "\000"); + x (0, Xb.Op.Mkdir, "\000"); + x (0, Xb.Op.Getperms, "\000"); + x (0, Xb.Op.Setperms, "\000"); - (* too many arguments *) - x (0, Xb.Op.Write, "/test\000xxx\000yyy\000\000zzz"); - x (0, Xb.Op.Read, "/test"); - x (0, Xb.Op.Read, "/test\000"); - x (0, Xb.Op.Read, "/test\000some\000otherargs"); - x (0, Xb.Op.Directory, "/test\000spurious\000abc"); - x (0, Xb.Op.Directory, "/test\000spurious"); - x (0, Xb.Op.Getperms, "/test\000someotherargs"); - x (0, Xb.Op.Getperms, "/test\000someotherargs\000others"); + (* too many arguments *) + x (0, Xb.Op.Write, "/test\000xxx\000yyy\000\000zzz"); + x (0, Xb.Op.Read, "/test"); + x (0, Xb.Op.Read, "/test\000"); + x (0, Xb.Op.Read, "/test\000some\000otherargs"); + x (0, Xb.Op.Directory, "/test\000spurious\000abc"); + x (0, Xb.Op.Directory, "/test\000spurious"); + x (0, Xb.Op.Getperms, "/test\000someotherargs"); + x (0, Xb.Op.Getperms, "/test\000someotherargs\000others"); - (* others *) - x (0, Xb.Op.Write, "/test\000"); - x (0, Xb.Op.Write, "/test\000abc"); - x (0, Xb.Op.Read, "/test"); - x (0, Xb.Op.Read, "/test\000"); - x (0, Xb.Op.Read, "/test\000\000"); - x (0, Xb.Op.Write, "/test"); - x (0, Xb.Op.Read, "/test"); - x (0, Xb.Op.Read, "/test\000"); - x (0, Xb.Op.Read, "/test\000\000"); + (* others *) + x (0, Xb.Op.Write, "/test\000"); + x (0, Xb.Op.Write, "/test\000abc"); + x (0, Xb.Op.Read, "/test"); + x (0, Xb.Op.Read, "/test\000"); + x (0, Xb.Op.Read, "/test\000\000"); + x (0, Xb.Op.Write, "/test"); + x (0, Xb.Op.Read, "/test"); + x (0, Xb.Op.Read, "/test\000"); + x 
(0, Xb.Op.Read, "/test\000\000"); - x (0, Xb.Op.Write, "\000/"); + x (0, Xb.Op.Write, "\000/"); - x (0, Xb.Op.Directory, "/test"); - x (0, Xb.Op.Directory, "/test\000"); - x (0, Xb.Op.Directory, "/test\000\000"); + x (0, Xb.Op.Directory, "/test"); + x (0, Xb.Op.Directory, "/test\000"); + x (0, Xb.Op.Directory, "/test\000\000"); - x (0, Xb.Op.Rm, "/test"); - x (0, Xb.Op.Directory, "/test"); - x (0, Xb.Op.Directory, "/test\000"); - x (0, Xb.Op.Directory, "/test\000\000"); + x (0, Xb.Op.Rm, "/test"); + x (0, Xb.Op.Directory, "/test"); + x (0, Xb.Op.Directory, "/test\000"); + x (0, Xb.Op.Directory, "/test\000\000"); - x (0, Xb.Op.Write, "/test/abc\000x1"); - x (0, Xb.Op.Write, "/test/def\000x2"); - x (0, Xb.Op.Write, "/test/xyz\000x3"); - x (0, Xb.Op.Write, "/test/xyz\000x4"); + x (0, Xb.Op.Write, "/test/abc\000x1"); + x (0, Xb.Op.Write, "/test/def\000x2"); + x (0, Xb.Op.Write, "/test/xyz\000x3"); + x (0, Xb.Op.Write, "/test/xyz\000x4"); - x (0, Xb.Op.Directory, "/test"); - x (0, Xb.Op.Directory, "/test\000"); - x (0, Xb.Op.Directory, "/test\000spurious\000"); + x (0, Xb.Op.Directory, "/test"); + x (0, Xb.Op.Directory, "/test\000"); + x (0, Xb.Op.Directory, "/test\000spurious\000"); - x (0, Xb.Op.Getperms, "/test"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Getperms, "/test\000spurious\000"); + x (0, Xb.Op.Getperms, "/test"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Getperms, "/test\000spurious\000"); - (* setperms getperms *) - x (0, Xb.Op.Setperms, "/test"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000\000"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000n0\000"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000n0"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000n0\000r1"); - x (0, Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000n0\000r1\000"); - x (0, 
Xb.Op.Getperms, "/test\000"); - x (0, Xb.Op.Setperms, "/test\000n0r2\000spurious"); - x (0, Xb.Op.Getperms, "/test\000"); + (* setperms getperms *) + x (0, Xb.Op.Setperms, "/test"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000\000"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000n0\000"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000n0"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000n0\000r1"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000n0\000r1\000"); + x (0, Xb.Op.Getperms, "/test\000"); + x (0, Xb.Op.Setperms, "/test\000n0r2\000spurious"); + x (0, Xb.Op.Getperms, "/test\000"); - (* get domain path *) - x (0, Xb.Op.Getdomainpath, "3\000"); - x (0, Xb.Op.Getdomainpath, "10\000"); - x (0, Xb.Op.Getdomainpath, "13 \000"); - x (0, Xb.Op.Getdomainpath, " 17\000"); - x (0, Xb.Op.Getdomainpath, "45\000spurious"); - x (0, Xb.Op.Getdomainpath, "45\000really\000"); + (* get domain path *) + x (0, Xb.Op.Getdomainpath, "3\000"); + x (0, Xb.Op.Getdomainpath, "10\000"); + x (0, Xb.Op.Getdomainpath, "13 \000"); + x (0, Xb.Op.Getdomainpath, " 17\000"); + x (0, Xb.Op.Getdomainpath, "45\000spurious"); + x (0, Xb.Op.Getdomainpath, "45\000really\000"); - let t1 = tnb (y (0, Xb.Op.Transaction_start, "\000")) in - x (t1, Xb.Op.Transaction_end, "\000"); - let t2 = tnb (y (0, Xb.Op.Transaction_start, "\000")) in - x (t2, Xb.Op.Transaction_end, string_of_int t2 ^ "\000"); - let t3 = tnb (y (0, Xb.Op.Transaction_start, "\000\000")) in - x (t3, Xb.Op.Transaction_end, "0\000\000"); - () + let t1 = tnb (y (0, Xb.Op.Transaction_start, "\000")) in + x (t1, Xb.Op.Transaction_end, "\000"); + let t2 = tnb (y (0, Xb.Op.Transaction_start, "\000")) in + x (t2, Xb.Op.Transaction_end, string_of_int t2 ^ "\000"); + let t3 = tnb (y (0, Xb.Op.Transaction_start, "\000\000")) in + x (t3, 
Xb.Op.Transaction_end, "0\000\000"); + () (*************************************************************************** * Main @@ -134,5 +134,5 @@ let do_test xb = open Pervasiveext let () = - let xb = open_xb () in - finally (fun () -> do_test xb) (fun () -> close_xb xb); + let xb = open_xb () in + finally (fun () -> do_test xb) (fun () -> close_xb xb); diff --git a/ocaml/xva/xva.ml b/ocaml/xva/xva.ml index 9e1873cfea5..7ce3447ad54 100644 --- a/ocaml/xva/xva.ml +++ b/ocaml/xva/xva.ml @@ -34,211 +34,211 @@ let variety_of_string x = match (String.lowercase x) with | x -> raise (Parse_failure (Printf.sprintf "Unknown variety: %s" x)) let string_of_variety = function | `system -> "system" | `ephemeral -> "ephemeral" | `user -> "user" | `suspend -> "suspend" | `crashdump -> "crashdump" | `ha_statefile -> "ha_statefile" | `metadata -> "metadata" - + type vdi = { vdi_name: string; - size: int64; - source: string; - ty: string; - variety: variety } - + size: int64; + source: string; + ty: string; + variety: variety } + type funct = Root | Unknown let funct_of_string x = match (String.lowercase x) with | "root" -> Root | _ -> Unknown let string_of_funct = function | Root -> "root" | _ -> "unknown" - + type mode = [ `RO | `RW ] let mode_of_string x = match (String.lowercase x) with | "rw" | "w" -> `RW | "r" -> `RO | x -> raise (Parse_failure (Printf.sprintf "Unknown mode: %s" x)) let string_of_mode = function | `RW -> "rw" | `RO -> "r" - + type vbd = { device: string; - funct: funct; - mode: mode; - vdi: vdi } - + funct: funct; + mode: mode; + vdi: vdi } + type vm = { vm_name: string; - description: string; - memory: int64; - vcpus: int; - is_hvm: bool; - kernel_boot_cmdline: string; - distrib: string option; - distrib_version: string option; - vbds: vbd list } - + description: string; + memory: int64; + vcpus: int; + is_hvm: bool; + kernel_boot_cmdline: string; + distrib: string option; + distrib_version: string option; + vbds: vbd list } + let total_size_of_disks vdis = 
List.fold_left Int64.add 0L (List.map (fun vdi -> vdi.size) vdis) (* convert a vms, vdis representation list into xml *) let to_xml (vms, vdis) = - let xml_of_vdi vdi = - let attrs = [ "name", vdi.vdi_name; - "size", Int64.to_string vdi.size; - "source", vdi.source; - "type", vdi.ty; - "variety", string_of_variety vdi.variety ] in - Xml.Element("vdi", attrs, []) - in - let vdis = List.map xml_of_vdi vdis in - - let xml_of_vbd vbd = - let attrs = [ "device", vbd.device; - "function", string_of_funct vbd.funct; - "mode", string_of_mode vbd.mode; - "vdi", vbd.vdi.vdi_name ] in - Xml.Element("vbd", attrs, []) - in - let xml_of_vm vm = - let label = Xml.Element("label", [], [ Xml.PCData vm.vm_name ]) in - let description = Xml.Element("shortdesc", [], [ Xml.PCData vm.description ]) in - let config = Xml.Element("config", [ "mem_set", Int64.to_string vm.memory; - "vcpus", string_of_int vm.vcpus ], []) in - let hacks = Xml.Element("hacks", [ "is_hvm", string_of_bool vm.is_hvm; - "kernel_boot_cmdline", vm.kernel_boot_cmdline ], []) in - let vbds = List.map xml_of_vbd vm.vbds in - Xml.Element("vm", [ "name", vm.vm_name ], - [ label; description; config; hacks ] @ vbds) - in - let vms = List.map xml_of_vm vms in - Xml.Element("appliance", [ "version", "0.1" ], vms @ vdis) + let xml_of_vdi vdi = + let attrs = [ "name", vdi.vdi_name; + "size", Int64.to_string vdi.size; + "source", vdi.source; + "type", vdi.ty; + "variety", string_of_variety vdi.variety ] in + Xml.Element("vdi", attrs, []) + in + let vdis = List.map xml_of_vdi vdis in + + let xml_of_vbd vbd = + let attrs = [ "device", vbd.device; + "function", string_of_funct vbd.funct; + "mode", string_of_mode vbd.mode; + "vdi", vbd.vdi.vdi_name ] in + Xml.Element("vbd", attrs, []) + in + let xml_of_vm vm = + let label = Xml.Element("label", [], [ Xml.PCData vm.vm_name ]) in + let description = Xml.Element("shortdesc", [], [ Xml.PCData vm.description ]) in + let config = Xml.Element("config", [ "mem_set", Int64.to_string 
vm.memory; + "vcpus", string_of_int vm.vcpus ], []) in + let hacks = Xml.Element("hacks", [ "is_hvm", string_of_bool vm.is_hvm; + "kernel_boot_cmdline", vm.kernel_boot_cmdline ], []) in + let vbds = List.map xml_of_vbd vm.vbds in + Xml.Element("vm", [ "name", vm.vm_name ], + [ label; description; config; hacks ] @ vbds) + in + let vms = List.map xml_of_vm vms in + Xml.Element("appliance", [ "version", "0.1" ], vms @ vdis) let parse_appliance attrs children = - let version = assoc "version" attrs in - if version <> "0.1" then - raise Version_mismatch; - - let find_all name xs = - let f x = - match x with - | Xml.Element(name', _, _) as e when name = name' -> - [ e ] - | _ -> - [] - in - List.concat (List.map f xs) - in - let vdis = List.map (fun node -> - match node with - | Xml.Element("vdi", attrs, _) -> - let name = assoc "name" attrs - and size = Int64.of_string (assoc "size" attrs) - and source = assoc "source" attrs - and ty = assoc "type" attrs - and variety = variety_of_string (assoc "variety" attrs) in - { vdi_name = name; size = size; source = source; ty = ty; variety = variety } - | _ -> raise (Parse_failure "expected VDI")) - (find_all "vdi" children) in - - (* make an assocation list of vdi names -> vdis *) - let vdi_table = List.map (fun vdi -> vdi.vdi_name, vdi) vdis in - - (* then VMs *) - let find_element name xs = - match (find_all name xs) with - | element :: _ -> element - | [] -> raise (Parse_failure (Printf.sprintf "Failed to find element: %s" name)) in - let child_string node = - match node with - | Xml.Element(_, _, [ Xml.PCData x ]) -> String.strip String.isspace x - | Xml.Element(_, _, []) -> "" - | _ -> raise (Parse_failure (Printf.sprintf "Failed to find PCData within element")) in - - let vmconfig_of_xml node = - match node with - | Xml.Element("vm", _, children) -> - let name = child_string(find_element "label" children) in - let description = child_string(find_element "shortdesc" children) in - let memory, vcpus, distrib, 
distrib_version = match find_element "config" children with - | Xml.Element(_, attrs, _) -> - assoc "mem_set" attrs, assoc "vcpus" attrs, - (try Some (assoc "distrib" attrs) with _ -> None), - (try Some (assoc "distrib_version" attrs) with _ -> None) - | _ -> raise (Parse_failure "Failed to find element: config") in - let default_assoc default key pairs = try List.assoc key pairs with Not_found -> default in - (* make HVM the default if nothing is specified *) - let is_hvm, cmdline = match find_element "hacks" children with - | Xml.Element(_, attrs, _) -> - default_assoc "true" "is_hvm" attrs, - default_assoc "" "kernel_boot_cmdline" attrs - | _ -> "true", "" in - - let vbds = find_all "vbd" children in - - let vbdconfig_of_xml node = - match node with - | Xml.Element("vbd", attrs, _) -> - let device = assoc "device" attrs - and funct = funct_of_string (assoc "function" attrs) - and mode = mode_of_string (assoc "mode" attrs) - and vdi = assoc "vdi" attrs in - let vdi = assoc vdi vdi_table in - { - device = device; - funct = funct; - mode = mode; - vdi = vdi - } - | _ -> raise (Parse_failure "expected VBD") - in - - let vbds = List.map vbdconfig_of_xml vbds in - { - vm_name = name; - description = description; - memory = Int64.of_string memory; - vcpus = int_of_string vcpus; - is_hvm = (String.lowercase is_hvm) = "true"; - kernel_boot_cmdline = cmdline; - vbds = vbds; - distrib = distrib; - distrib_version = distrib_version - } - | _ -> raise (Parse_failure "expected VM") - in - - let vms = List.map vmconfig_of_xml (find_all "vm" children) in - vms, vdis + let version = assoc "version" attrs in + if version <> "0.1" then + raise Version_mismatch; + + let find_all name xs = + let f x = + match x with + | Xml.Element(name', _, _) as e when name = name' -> + [ e ] + | _ -> + [] + in + List.concat (List.map f xs) + in + let vdis = List.map (fun node -> + match node with + | Xml.Element("vdi", attrs, _) -> + let name = assoc "name" attrs + and size = Int64.of_string 
(assoc "size" attrs) + and source = assoc "source" attrs + and ty = assoc "type" attrs + and variety = variety_of_string (assoc "variety" attrs) in + { vdi_name = name; size = size; source = source; ty = ty; variety = variety } + | _ -> raise (Parse_failure "expected VDI")) + (find_all "vdi" children) in + + (* make an assocation list of vdi names -> vdis *) + let vdi_table = List.map (fun vdi -> vdi.vdi_name, vdi) vdis in + + (* then VMs *) + let find_element name xs = + match (find_all name xs) with + | element :: _ -> element + | [] -> raise (Parse_failure (Printf.sprintf "Failed to find element: %s" name)) in + let child_string node = + match node with + | Xml.Element(_, _, [ Xml.PCData x ]) -> String.strip String.isspace x + | Xml.Element(_, _, []) -> "" + | _ -> raise (Parse_failure (Printf.sprintf "Failed to find PCData within element")) in + + let vmconfig_of_xml node = + match node with + | Xml.Element("vm", _, children) -> + let name = child_string(find_element "label" children) in + let description = child_string(find_element "shortdesc" children) in + let memory, vcpus, distrib, distrib_version = match find_element "config" children with + | Xml.Element(_, attrs, _) -> + assoc "mem_set" attrs, assoc "vcpus" attrs, + (try Some (assoc "distrib" attrs) with _ -> None), + (try Some (assoc "distrib_version" attrs) with _ -> None) + | _ -> raise (Parse_failure "Failed to find element: config") in + let default_assoc default key pairs = try List.assoc key pairs with Not_found -> default in + (* make HVM the default if nothing is specified *) + let is_hvm, cmdline = match find_element "hacks" children with + | Xml.Element(_, attrs, _) -> + default_assoc "true" "is_hvm" attrs, + default_assoc "" "kernel_boot_cmdline" attrs + | _ -> "true", "" in + + let vbds = find_all "vbd" children in + + let vbdconfig_of_xml node = + match node with + | Xml.Element("vbd", attrs, _) -> + let device = assoc "device" attrs + and funct = funct_of_string (assoc "function" attrs) + 
and mode = mode_of_string (assoc "mode" attrs) + and vdi = assoc "vdi" attrs in + let vdi = assoc vdi vdi_table in + { + device = device; + funct = funct; + mode = mode; + vdi = vdi + } + | _ -> raise (Parse_failure "expected VBD") + in + + let vbds = List.map vbdconfig_of_xml vbds in + { + vm_name = name; + description = description; + memory = Int64.of_string memory; + vcpus = int_of_string vcpus; + is_hvm = (String.lowercase is_hvm) = "true"; + kernel_boot_cmdline = cmdline; + vbds = vbds; + distrib = distrib; + distrib_version = distrib_version + } + | _ -> raise (Parse_failure "expected VM") + in + + let vms = List.map vmconfig_of_xml (find_all "vm" children) in + vms, vdis (* convert xml to a vm/vdi config representation *) let of_xml node = - match node with - | Xml.Element("appliance", attrs, children) -> - parse_appliance attrs children - | _ -> raise (Parse_failure "expected appliance or vm") + match node with + | Xml.Element("appliance", attrs, children) -> + parse_appliance attrs children + | _ -> raise (Parse_failure "expected appliance or vm") (** Return true if looks like a Zurich/Geneva style XVA *) -let is_valid path = +let is_valid path = let stats = Unix.LargeFile.stat path in if stats.Unix.LargeFile.st_kind <> Unix.S_DIR then false else begin - let meta_path = Filename.concat path xml_filename in - let stats = Unix.stat meta_path in - if stats.Unix.st_kind <> Unix.S_REG then false - else begin - try - let xml = Xml.parse_file meta_path in - ignore(of_xml xml); - true - with _ -> false - end + let meta_path = Filename.concat path xml_filename in + let stats = Unix.stat meta_path in + if stats.Unix.st_kind <> Unix.S_REG then false + else begin + try + let xml = Xml.parse_file meta_path in + ignore(of_xml xml); + true + with _ -> false end + end (** Transmit a Zurich/Geneva style XVA at to the server *) -let send path fd = +let send path fd = let is_dir path = let stat = Unix.stat path in stat.Unix.st_kind = Unix.S_DIR in - let add path (* actual 
path *) filename (* for tar header *) = + let add path (* actual path *) filename (* for tar header *) = debug "Attempting to add %s (%s)\n" path filename; let hdr = Tar_unix.Header.of_file path in let hdr = { hdr with Tar_unix.Header.file_name = filename } in - debug "file_size = %Ld\n" (hdr.Tar_unix.Header.file_size); - Tar_unix.write_block hdr + debug "file_size = %Ld\n" (hdr.Tar_unix.Header.file_size); + Tar_unix.write_block hdr (fun ofd -> - let ifd = Unix.openfile path [Unix.O_RDONLY] 0o644 in - Stdext.Pervasiveext.finally (fun () -> Tar_unix.Archive.copy_n ifd ofd hdr.Tar_unix.Header.file_size) - (fun () -> Unix.close ifd)) fd in + let ifd = Unix.openfile path [Unix.O_RDONLY] 0o644 in + Stdext.Pervasiveext.finally (fun () -> Tar_unix.Archive.copy_n ifd ofd hdr.Tar_unix.Header.file_size) + (fun () -> Unix.close ifd)) fd in - let add_disk path = + let add_disk path = let chunks = List.filter (fun x -> String.endswith ".gz" x) (Array.to_list (Sys.readdir path)) in let chunks = List.sort compare chunks in List.iter (fun chunk -> add (Filename.concat path chunk) (path ^ "/" ^ chunk)) chunks in @@ -251,4 +251,4 @@ let send path fd = List.iter add_disk disks; Tar_unix.write_end fd - +