diff --git a/.vscode/launch.json b/.vscode/launch.json index 96d0adec2..31b0247d2 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -21,7 +21,7 @@ }, { // Update this with whatever VMs you want to debug. - "text": "add-symbol-file ${workspaceRoot}/out/reference/qemu_aarch64_clang/obj/test/vmapi/primary_with_secondaries/services/service_vm2.elf 0x43d00000+0xc4", + "text": "add-symbol-file ${workspaceRoot}/out/reference/qemu_aarch64_vm_clang/obj/test/vmapi/primary_with_secondaries/services/service_vm2.elf 0x43d00000+0xc4", } ] }, diff --git a/Hafnium-README.md b/Hafnium-README.md index a85d3bd41..f0d266dae 100644 --- a/Hafnium-README.md +++ b/Hafnium-README.md @@ -7,8 +7,18 @@ Get in touch and keep up-to-date at ## Getting started -To jump in and build Hafnium, follow the [getting -started](docs/GettingStarted.md) instructions. +To jump in and build Hafnium, follow the +[getting started](docs/GettingStarted.md) instructions. -If you want to contribute to the project, see details of [how we accept -contributions](CONTRIBUTING.md). +If you want to contribute to the project, see details of +[how we accept contributions](CONTRIBUTING.md). + +## Documentation + +More documentation is available on: + +* [Hafnium test infrastructure](docs/Testing.md) +* [Running Hafnium under the Arm Fixed Virtual Platform](docs/FVP.md) +* [How to build a RAM disk containing VMs for Hafnium to run](docs/HafniumRamDisk.md) +* [Building Hafnium hermetically with Docker](docs/HermeticBuild.md) +* [The interface Hafnium provides to VMs](docs/VmInterface.md) diff --git a/Makefile b/Makefile index 76ff72619..3fd7252e8 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. +# If HAFNIUM_HERMETIC_BUILD is "true" (not default), invoke `make` inside +# a container. The 'run_in_container.sh' script will set the variable value to +# 'inside' to avoid recursion. 
+ifeq ($(HAFNIUM_HERMETIC_BUILD),true) + +# TODO: This is not ideal as (a) we invoke the container once per command-line +# target, and (b) we cannot pass `make` arguments to the script. We could +# consider creating a bash alias for `make` to invoke the script directly. + +# Need to define at least one non-default target. +all: + @$(PWD)/build/run_in_container.sh make $@ + +# Catch-all target. +.DEFAULT: + @$(PWD)/build/run_in_container.sh make $@ + +else # HAFNIUM_HERMETIC_BUILD + # Set path to prebuilts used in the build. UNNAME_S := $(shell uname -s | tr '[:upper:]' '[:lower:]') PREBUILTS := $(PWD)/prebuilts/$(UNNAME_S)-x64 @@ -19,8 +38,14 @@ GN ?= $(PREBUILTS)/gn/gn NINJA ?= $(PREBUILTS)/ninja/ninja export PATH := $(PREBUILTS)/clang/bin:$(PATH) + CHECKPATCH := $(PWD)/third_party/linux/scripts/checkpatch.pl \ - --ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME --quiet + --ignore BRACES,SPDX_LICENSE_TAG,VOLATILE,SPLIT_STRING,AVOID_EXTERNS,USE_SPINLOCK_T,NEW_TYPEDEFS,INITIALISED_STATIC,FILE_PATH_CHANGES,EMBEDDED_FUNCTION_NAME,SINGLE_STATEMENT_DO_WHILE_MACRO,MACRO_WITH_FLOW_CONTROL --quiet + +# Specifies the grep pattern for ignoring specific files in checkpatch. +# Separate the different items in the list with a grep or (\|). +# debug_el1.c : uses XMACROS, which checkpatch doesn't understand. +CHECKPATCH_IGNORE := "src/arch/aarch64/hypervisor/debug_el1.c" # Select the project to build. PROJECT ?= reference @@ -62,18 +87,18 @@ clobber: .PHONY: format format: @echo "Formatting..." 
- @find src/ -name \*.c -o -name \*.cc -o -name \*.h | xargs clang-format -style file -i - @find inc/ -name \*.c -o -name \*.cc -o -name \*.h | xargs clang-format -style file -i - @find test/ -name \*.c -o -name \*.cc -o -name \*.h | xargs clang-format -style file -i - @find project/ -name \*.c -o -name \*.cc -o -name \*.h | xargs clang-format -style file -i + @find src/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i + @find inc/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i + @find test/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i + @find project/ -name \*.c -o -name \*.cc -o -name \*.h | xargs -r clang-format -style file -i @find . \( -name \*.gn -o -name \*.gni \) | xargs -n1 $(GN) format .PHONY: checkpatch checkpatch: - @find src/ -name \*.c -o -name \*.h | xargs $(CHECKPATCH) -f - @find inc/ -name \*.c -o -name \*.h | xargs $(CHECKPATCH) -f + @find src/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f + @find inc/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f # TODO: enable for test/ - @find project/ -name \*.c -o -name \*.h | xargs $(CHECKPATCH) -f + @find project/ -name \*.c -o -name \*.h | grep -v $(CHECKPATCH_IGNORE) | xargs $(CHECKPATCH) -f # see .clang-tidy. 
.PHONY: tidy @@ -95,9 +120,9 @@ check: $(OUT_DIR)/build.ninja .PHONY: license license: - @find src/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h | xargs -n1 python build/license.py --style c - @find inc/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h | xargs -n1 python build/license.py --style c - @find test/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h | xargs -n1 python build/license.py --style c + @find src/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c + @find inc/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c + @find test/ -name \*.S -o -name \*.c -o -name \*.cc -o -name \*.h -o -name \*.dts | xargs -n1 python build/license.py --style c @find build/ -name \*.py| xargs -n1 python build/license.py --style hash @find test/ -name \*.py| xargs -n1 python build/license.py --style hash @find . \( -name \*.gn -o -name \*.gni \) | xargs -n1 python build/license.py --style hash @@ -108,3 +133,5 @@ update-prebuilts: prebuilts/linux-aarch64/linux/vmlinuz prebuilts/linux-aarch64/linux/vmlinuz: $(OUT_DIR)/build.ninja @$(NINJA) -C $(OUT_DIR) "third_party:linux" cp out/reference/obj/third_party/linux.bin $@ + +endif # HAFNIUM_HERMETIC_BUILD diff --git a/build/docker/Dockerfile b/build/docker/Dockerfile new file mode 100644 index 000000000..0361e9c7d --- /dev/null +++ b/build/docker/Dockerfile @@ -0,0 +1,44 @@ +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Base container image to be uploaded to Google Cloud Platform as +# "eu.gcr.io/hafnium-build/hafnium_ci". Each user derives their own container +# with local user permissions from this base image. It should contain everything +# needed to build and test Hafnium. +# +FROM launcher.gcr.io/google/ubuntu1804 +MAINTAINER Hafnium Team + +# Install dependencies. Clear APT cache at the end to save space. +ENV DEBIAN_FRONTEND=noninteractive +RUN apt-get update \ + && apt-get install -y \ + bc `# for Linux headers` \ + binutils-aarch64-linux-gnu \ + bison \ + build-essential \ + cpio \ + device-tree-compiler \ + flex \ + git \ + libpixman-1-0 `# for QEMU` \ + libsdl2-2.0-0 `# for QEMU` \ + libglib2.0 `# for QEMU` \ + libssl-dev `# for Linux headers` \ + python \ + python-git `# for Linux checkpatch` \ + python-ply `# for Linux checkpatch` \ + strace `# for strace_open.sh` \ + && rm -rf /var/lib/apt/lists/* diff --git a/build/docker/Dockerfile.local b/build/docker/Dockerfile.local new file mode 100644 index 000000000..67eb92f2b --- /dev/null +++ b/build/docker/Dockerfile.local @@ -0,0 +1,35 @@ +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Container derived from the base image hosted on Google Cloud Platform. 
+# It sets up a user with the same UID/GID as the local user, so that generated +# files can be accessed by the host. +# Please keep the diff between base and local images as small as possible. +# +FROM eu.gcr.io/hafnium-build/hafnium_ci +ARG LOCAL_UID=1000 +ARG LOCAL_GID=1000 + +RUN addgroup \ + --gid "${LOCAL_GID}" \ + hafnium \ + && adduser \ + --disabled-password \ + --gecos "" \ + --uid "${LOCAL_UID}" \ + --shell "/bin/bash" \ + --ingroup hafnium \ + hafnium +USER hafnium \ No newline at end of file diff --git a/build/docker/build.sh b/build/docker/build.sh new file mode 100755 index 000000000..597203806 --- /dev/null +++ b/build/docker/build.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -euo pipefail + +SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +source "${SCRIPT_DIR}/common.inc" + +${DOCKER} build \ + --pull \ + -f "${SCRIPT_DIR}/Dockerfile" \ + -t "${CONTAINER_TAG}" \ + "${SCRIPT_DIR}" diff --git a/build/docker/common.inc b/build/docker/common.inc new file mode 100644 index 000000000..0d1e1db99 --- /dev/null +++ b/build/docker/common.inc @@ -0,0 +1,21 @@ +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +CONTAINER_TAG="eu.gcr.io/hafnium-build/hafnium_ci" + +if [[ ! -v DOCKER ]] +then + DOCKER="$(which docker)" \ + || (echo "ERROR: Could not find Docker binary" 1>&2; exit 1) +fi \ No newline at end of file diff --git a/build/docker/publish.sh b/build/docker/publish.sh new file mode 100755 index 000000000..96ec2f11a --- /dev/null +++ b/build/docker/publish.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -euo pipefail + +SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +source "${SCRIPT_DIR}/common.inc" + +# Requires for the user to be an owner of the GCP 'hafnium-build' project and +# have gcloud SDK installed and authenticated. + +${DOCKER} push "${CONTAINER_TAG}" diff --git a/build/image/dtc.py b/build/image/dtc.py new file mode 100755 index 000000000..2ae8efe42 --- /dev/null +++ b/build/image/dtc.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# +# Copyright 2019 The Hafnium Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Wrapper around Device Tree Compiler (dtc)""" + +import argparse +import os +import subprocess +import sys + +HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) +DTC_ROOT = os.path.join(HF_ROOT, "prebuilts", "linux-x64", "dtc") +DTC = os.path.join(DTC_ROOT, "dtc") +FDTOVERLAY = os.path.join(DTC_ROOT, "fdtoverlay") + +def cmd_compile(args): + exec_args = [ + DTC, + "-I", "dts", "-O", "dtb", + "--out-version", "17", + ] + + if args.output_file: + exec_args += [ "-o", args.output_file ] + if args.input_file: + exec_args += [ args.input_file ] + + return subprocess.call(exec_args) + +def cmd_overlay(args): + exec_args = [ + FDTOVERLAY, + "-i", args.base_dtb, + "-o", args.output_dtb, + ] + args.overlay_dtb + return subprocess.call(exec_args) + +def main(): + parser = argparse.ArgumentParser() + subparsers = parser.add_subparsers(dest="command") + + parser_compile = subparsers.add_parser("compile", help="compile DTS to DTB") + parser_compile.add_argument("-i", "--input-file") + parser_compile.add_argument("-o", "--output-file") + + parser_overlay = subparsers.add_parser("overlay", help="merge DTBs") + parser_overlay.add_argument("output_dtb") + parser_overlay.add_argument("base_dtb") + parser_overlay.add_argument("overlay_dtb", nargs='*') + + args = parser.parse_args() + + if args.command == "compile": + return cmd_compile(args) + elif args.command == "overlay": + return cmd_overlay(args) + else: + 
raise Exception("Unknown command: {}".format(args.command)) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/build/image/generate_initrd.py b/build/image/generate_initrd.py index 6fb76de2f..38ce19c5a 100644 --- a/build/image/generate_initrd.py +++ b/build/image/generate_initrd.py @@ -34,12 +34,17 @@ def Main(): parser.add_argument( "--secondary_vm", action="append", - nargs=4, - metavar=("MEMORY", "CORES", "NAME", "IMAGE")) + nargs=2, + metavar=("NAME", "IMAGE")) parser.add_argument("--staging", required=True) parser.add_argument("--output", required=True) args = parser.parse_args() staged_files = ["vmlinuz", "initrd.img"] + + # Create staging folder if needed. + if not os.path.isdir(args.staging): + os.makedirs(args.staging) + # Prepare the primary VM image. shutil.copyfile(args.primary_vm, os.path.join(args.staging, "vmlinuz")) # Prepare the primary VM's initrd. @@ -48,14 +53,11 @@ def Main(): else: open(os.path.join(args.staging, "initrd.img"), "w").close() # Prepare the secondary VMs. - with open(os.path.join(args.staging, "vms.txt"), "w") as vms_txt: - staged_files.append("vms.txt") - if args.secondary_vm: - for vm in args.secondary_vm: - (vm_memory, vm_cores, vm_name, vm_image) = vm - staged_files.append(vm_name) - shutil.copy(vm_image, os.path.join(args.staging, vm_name)) - vms_txt.write("{} {} {}\n".format(vm_memory, vm_cores, vm_name)) + if args.secondary_vm: + for vm in args.secondary_vm: + (vm_name, vm_image) = vm + staged_files.append(vm_name) + shutil.copy(vm_image, os.path.join(args.staging, vm_name)) # Package files into an initial RAM disk.
with open(args.output, "w") as initrd: # Move into the staging directory so the file names taken by cpio don't diff --git a/build/image/image.gni b/build/image/image.gni index 61a407a00..dd08dfcf3 100644 --- a/build/image/image.gni +++ b/build/image/image.gni @@ -33,6 +33,7 @@ template("image_binary") { "cflags_c", "defines", "deps", + "libs", "include_dirs", "public_configs", "public_deps", @@ -84,6 +85,7 @@ template("hypervisor") { "cflags_c", "defines", "deps", + "libs", "public_deps", "sources", "testonly", @@ -148,38 +150,69 @@ template("linux_initrd") { } } +template("device_tree") { + action_foreach(target_name) { + forward_variables_from(invoker, + [ + "testonly", + "sources", + "deps", + ]) + script = "//build/image/dtc.py" + + outputs = [ + invoker.output_pattern, + ] + args = [ + "compile", + "-i", + "{{source}}", + "-o", + rebase_path(invoker.output_pattern), + ] + } +} + # Build the initial RAM disk for the hypervisor. template("initrd") { - assert( - defined(invoker.primary_vm) || defined(invoker.primary_vm_prebuilt), - "initrd() must specify a \"primary_vm\" or \"primary_vm_prebuilt\" value") + assert(defined(invoker.primary_vm), + "initrd() must specify a \"primary_vm\" value") + + manifest_target = "${target_name}__manifest" + base_out_dir = "${target_out_dir}/${target_name}" + + # Generate manifest.dtbo + device_tree(manifest_target) { + sources = [ + invoker.manifest, + ] + output_pattern = "${base_out_dir}/{{source_name_part}}.dtbo" + } action(target_name) { forward_variables_from(invoker, [ "testonly" ]) script = "//build/image/generate_initrd.py" - initrd_base = "${target_out_dir}/${target_name}/initrd" - initrd_file = "${initrd_base}.img" - initrd_staging = "${initrd_base}" + initrd_file = "${base_out_dir}/initrd.img" + initrd_staging = "${base_out_dir}/initrd" - deps = [] + # Cannot get target outputs here as they are defined in a different file. 
+ primary_vm_image = get_label_info(invoker.primary_vm, "target_out_dir") + + "/" + get_label_info(invoker.primary_vm, "name") + ".bin" - if (defined(invoker.primary_vm_prebuilt)) { - primary_vm_output = invoker.primary_vm_prebuilt - } else { - primary_vm_output = - get_label_info(invoker.primary_vm, "target_out_dir") + "/" + - get_label_info(invoker.primary_vm, "name") + ".bin" - deps += [ invoker.primary_vm ] - } + deps = [ + ":${manifest_target}", + invoker.primary_vm, + ] args = [ "--primary_vm", - rebase_path(primary_vm_output), + rebase_path(primary_vm_image), "--staging", rebase_path(initrd_staging), "--output", rebase_path(initrd_file), ] + if (defined(invoker.primary_initrd)) { deps += [ invoker.primary_initrd ] primary_initrd_outputs = get_target_outputs(invoker.primary_initrd) @@ -192,27 +225,22 @@ template("initrd") { # Add the info about the secondary VMs. The information about the VMs is # encoded in lists with the following elements: # - # 1. Memory in bytes. - # 2. Number of cores. - # 3. File name for the VM image. - # 4. Build target for the VM. + # 1. File name for the VM image. + # 2. Build target for the VM. if (defined(invoker.secondary_vms)) { foreach(vm, invoker.secondary_vms) { - deps += [ vm[3] ] + deps += [ vm[1] ] args += [ "--secondary_vm", vm[0], - vm[1], - vm[2], - rebase_path(get_label_info(vm[3], "target_out_dir") + "/" + - get_label_info(vm[3], "name") + ".bin"), + rebase_path(get_label_info(vm[1], "target_out_dir") + "/" + + get_label_info(vm[1], "name") + ".bin"), ] } } outputs = [ initrd_file, - "${initrd_staging}/vms.txt", ] } } diff --git a/build/linux/copy_dirs.py b/build/linux/copy_dirs.py new file mode 100644 index 000000000..e44128e74 --- /dev/null +++ b/build/linux/copy_dirs.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env python +"""Copies all files inside one folder to another, preserving subfolders.""" + +import argparse +import os +import shutil +import sys + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("source_folder", + help="directory to be copied from") + parser.add_argument("destination_folder", + help="directory to be copied into") + parser.add_argument("stamp_file", + help="stamp file to be touched") + args = parser.parse_args() + + # Walk the subfolders of the source directory and copy individual files. + # Not using shutil.copytree() because it never overwrites files. + for root, _, files in os.walk(args.source_folder): + for f in files: + abs_src_path = os.path.join(root, f) + rel_path = os.path.relpath(abs_src_path, args.source_folder) + abs_dst_path = os.path.join(args.destination_folder, rel_path) + abs_dst_folder = os.path.dirname(abs_dst_path) + if not os.path.isdir(abs_dst_folder): + os.makedirs(abs_dst_folder) + shutil.copyfile(abs_src_path, abs_dst_path) + + # Touch `stamp_file`. + with open(args.stamp_file, "w"): + pass + +if __name__ == "__main__": + sys.exit(main()) diff --git a/build/linux/gen_depfile.py b/build/linux/gen_depfile.py new file mode 100755 index 000000000..72908403e --- /dev/null +++ b/build/linux/gen_depfile.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/usr/bin/env python +"""Generate a depfile for a folder.""" + +import argparse +import os +import sys + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("root_dir", help="input directory") + parser.add_argument("stamp_file", help="stamp file to be touched") + parser.add_argument("dep_file", help="depfile to be written") + args = parser.parse_args() + + # Compile list of all files in the folder, relative to `root_dir`. + sources = [] + for root, _, files in os.walk(args.root_dir): + sources.extend([ os.path.join(root, f) for f in files ]) + sources = sorted(sources) + + # Write `dep_file` as a Makefile rule for `stamp_file`. + with open(args.dep_file, "w") as f: + f.write(args.stamp_file) + f.write(":") + for source in sources: + f.write(' '); + f.write(source) + f.write(os.linesep) + + # Touch `stamp_file`. + with open(args.stamp_file, "w"): + pass + +if __name__ == "__main__": + sys.exit(main()) diff --git a/build/linux/linux.gni b/build/linux/linux.gni index 0f096ebcd..de0f8a387 100644 --- a/build/linux/linux.gni +++ b/build/linux/linux.gni @@ -12,8 +12,49 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+template("source_dir") { + action("${target_name}") { + depfile = "${target_out_dir}/${target_name}.d" + outputs = [ + "$target_out_dir/${target_name}.script.stamp", + ] + + script = "//build/linux/gen_depfile.py" + args = [ + rebase_path(invoker.path, root_build_dir), + rebase_path(outputs[0], root_build_dir), + rebase_path(depfile, root_build_dir), + ] + } +} + +template("source_dir_copy") { + source_dir_target = "${target_name}__source_dir" + + source_dir(source_dir_target) { + path = invoker.path + } + + action("${target_name}") { + script = "//build/linux/copy_dirs.py" + outputs = [ + "$target_out_dir/${target_name}.script.stamp", + ] + args = [ + rebase_path(invoker.path), + rebase_path(target_out_dir), + rebase_path(outputs[0]), + ] + deps = [ + ":${source_dir_target}", + ] + } +} + template("linux_kernel") { - # TODO: target has no "sources" + source_target = "${target_name}__source" + defconfig_target = "${target_name}__defconfig" + prebuilt_target = "${target_name}__prebuilt" # Args to build/make.py to start the Linux build. shared_args = [ @@ -32,9 +73,15 @@ template("linux_kernel") { "-j24", ] + # Subtarget which generates a depfile with all files in the Linux tree + # and gets invalidated if any of them change. + source_dir(source_target) { + path = invoker.kernel_dir + } + # Subtarget which runs `defconfig` and `modules_prepare`. Used by targets # which do not require the whole kernel to have been built. - action("${target_name}__defconfig") { + action(defconfig_target) { script = "//build/make.py" args = shared_args + [ "defconfig", @@ -46,6 +93,9 @@ template("linux_kernel") { outputs = [ "${target_out_dir}/.config", ] + deps = [ + ":${source_target}", + ] } action(target_name) { @@ -60,24 +110,33 @@ template("linux_kernel") { output_file, ] deps = [ - ":${target_name}__defconfig", + ":${defconfig_target}", + ":${source_target}", ] } + + # Subtarget for a prebuilt image, if defined. 
+ if (defined(invoker.prebuilt)) { + copy(prebuilt_target) { + sources = [ + invoker.prebuilt, + ] + outputs = [ + "${target_out_dir}/${prebuilt_target}.bin", + ] + } + } } template("linux_kernel_module") { # Out-of-tree modules cannot be built outside of their directory. # So as to avoid parallel builds under different toolchains clashing, # work around by copying source files to `target_out_dir`. - copy("${target_name}__copy_source") { - forward_variables_from(invoker, - [ - "sources", - "testonly", - ]) - outputs = [ - "${target_out_dir}/{{source_file_part}}", - ] + + source_target = "${target_name}__source" + + source_dir_copy(source_target) { + path = invoker.module_dir } action(target_name) { @@ -87,7 +146,7 @@ template("linux_kernel_module") { "--directory", rebase_path(target_out_dir), "HAFNIUM_PATH=" + rebase_path("//"), - "KERNEL_PATH=" + rebase_path(invoker.kernel_src_dir), + "KERNEL_PATH=" + rebase_path(invoker.kernel_dir), "O=" + rebase_path(get_label_info(invoker.kernel_target, "target_out_dir")), "CC=" + rebase_path("//prebuilts/linux-x64/clang/bin/clang"), @@ -98,7 +157,7 @@ template("linux_kernel_module") { "${target_out_dir}/${invoker.module_name}.ko", ] deps = [ - ":${target_name}__copy_source", + ":${source_target}", "${invoker.kernel_target}__defconfig", ] } diff --git a/build/parse_strace_open.py b/build/parse_strace_open.py new file mode 100755 index 000000000..5dd878bc1 --- /dev/null +++ b/build/parse_strace_open.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Script which parses the output of `strace` and dumps a list of files +that were touched by the traced processes outside of whitelisted folders. +It assumes that strace was invoked with the following arguments: + -e trace=%file,chdir,%process record required syscalls + -qq silence 'exit code' records + -o output format is different when writing + to a file from printing to the console +""" + +import argparse +import os +import sys + +FORK_SYSCALLS = [ + "clone", + "fork", + "vfork", + ] +OPEN_SYSCALLS = [ + "access", + "creat", + "lstat", + "mkdir", + "open", + "openat", + "readlink", + "stat", + ] + +def get_unfinished(line): + pos = line.find("<unfinished ...>") + if pos < 0: + return None + else: + return line[:pos] + +def get_resumed(line): + pos = line.find(" resumed>") + if pos < 0: + return None + else: + return line[pos + len(" resumed>"):] + +def merge_unfinished_lines(lines): + """Process input lines and merge those split by an interrupting syscall.""" + # Lines in the order they were started being written. + finished = [] + + # Pending unfinished lines. Map from PID to index in `finished`. + cursor = {} + + for line in lines: + pid = int(line.split()[0]) + + resumed = get_resumed(line) + if resumed is not None: + assert(pid in cursor) + unfinished = get_unfinished(resumed) + if unfinished is not None: + finished[cursor[pid]] += unfinished + else: + finished[cursor[pid]] += resumed + del(cursor[pid]) + else: + assert(pid not in cursor) + unfinished = get_unfinished(line) + if unfinished is not None: + # Line is unfinished. Store its location to `cursor`. + cursor[pid] = len(finished) + finished += [ unfinished ] + else: + finished += [ line ] + return finished + +def abs_path(cwd, path): + """If `path` is relative, resolve it against the current working directory.
+ Also normalize the resulting path.""" + if path[0] != '/': + path = os.path.join(cwd, path) + path = os.path.abspath(path) + # while '//' in path: + # path = path.replace('//', '/') + path = os.path.realpath(path) + return path + +def get_touched_files(lines, orig_cwd): + """Parse strace output and return all files that an open()-like syscall was + called on.""" + files = set() + + # Map from PID to the current working directory. + cwd = {} + + # Map from PID to executable name + executable = {} + + # Map from PID to the PID of the process which forked it. + fork_of = {} + + first_pid = True + for line in lines: + # Split line: + line = line.split() + pid = int(line[0]) + syscall = " ".join(line[1:]) + + # If seeing a PID for the first time, derive its working directory + # from its parent. + if pid not in cwd: + if first_pid: + # Very first line of strace output. Set working directory from + # command line arguments (should match cwd of strace). + first_pid = False + cwd[pid] = orig_cwd + else: + # There should have been a fork/clone syscall which spawned this + # process. Inherit its working directory. + cwd[pid] = cwd[fork_of[pid]] + + # We are looking for lines which match: + # name(arg1, arg2, ..., argN) = result + left_bracket = syscall.find("(") + right_bracket = syscall.rfind(")") + assign_sign = syscall.rfind("=") + if left_bracket < 0 or right_bracket < 0 or assign_sign < right_bracket: + continue + + syscall_name = syscall[:left_bracket] + syscall_result = syscall[assign_sign+2:] + + syscall_args = syscall[left_bracket+1:right_bracket].split(",") + syscall_args = list(map(lambda x: x.strip(), syscall_args)) + + if syscall_name in FORK_SYSCALLS: + # If this is a fork, keep track of the parent-child relationship. + # The child's PID is the syscall's return code. + new_pid = int(syscall_result) + fork_of[new_pid] = pid + executable[new_pid] = executable[pid] + elif syscall_name == "chdir": + # If this is a change of working directory, keep track of it. 
+ # It is in the first argument in quotes. + new_dir = syscall_args[0][1:-1] + cwd[pid] = abs_path(cwd[pid], new_dir) + elif syscall_name == "execve": + # If this is executing a new program, record its name. + # It is in the first argument in quotes. + binary_name = syscall_args[0][1:-1] + executable[pid] = binary_name + elif syscall_name in OPEN_SYSCALLS: + # If this is a syscall touching a file, record the path. + # We ignore the result code, i.e. record the path even if the + # syscall failed to open it. + arg_idx = 0 + if syscall_name == "openat": + # openat() can open a file (second arg) relative to a given + # folder (first arg). We only support passing AT_FDCWD, ie. + # resolve against the current working directory. + arg_idx = 1 + assert(syscall_args[0] == "AT_FDCWD") + fname = abs_path(cwd[pid], syscall_args[arg_idx][1:-1]) + # Record the file and the name of the program which touched it. + files.add((fname, executable[pid])) + return files + +def filter_results(files, root_dir): + """Remove paths which are whitelisted from the results.""" + # Anything in the Hafnium directory is allowed. + files = filter(lambda x: not x[0].startswith(root_dir + "/"), files) + # Clang puts intermediate files in /tmp. 
+ files = filter(lambda x: not x[0].startswith("/tmp/"), files) + return list(files) + +def main(args): + parser = argparse.ArgumentParser() + parser.add_argument("root_dir", + help="Root directory of Hafnium, cwd of strace") + args, make_args = parser.parse_known_args() + + stdin = map(lambda x: x.strip(), sys.stdin.readlines()) + stdin = merge_unfinished_lines(stdin) + files = get_touched_files(stdin, args.root_dir) + files = filter_results(files, args.root_dir) + files = sorted(list(files)) + + print("\n".join(map(lambda x: "{} ({})".format(x[0], x[1]), files))) + +if __name__ == "__main__": + main(sys.argv) diff --git a/build/run_in_container.sh b/build/run_in_container.sh new file mode 100755 index 000000000..ae0850ea1 --- /dev/null +++ b/build/run_in_container.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -euo pipefail + +SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")" +ROOT_DIR="$(realpath ${SCRIPT_DIR}/..)" + +source "${SCRIPT_DIR}/docker/common.inc" + +if [ "${HAFNIUM_HERMETIC_BUILD:-}" == "inside" ] +then + echo "ERROR: Invoked $0 recursively" 1>&2 + exit 1 +fi + +# Set up a temp directory and register a cleanup function on exit. +TMP_DIR="$(mktemp -d)" +function cleanup() { + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +# Build local image and write its hash to a temporary file. 
+IID_FILE="${TMP_DIR}/imgid.txt"
+"${DOCKER}" build \
+ --build-arg LOCAL_UID="$(id -u)" \
+ --build-arg LOCAL_GID="$(id -g)" \
+ --iidfile="${IID_FILE}" \
+ -f "${SCRIPT_DIR}/docker/Dockerfile.local" \
+ "${SCRIPT_DIR}/docker"
+IMAGE_ID="$(cat ${IID_FILE})"
+
+# Parse command line arguments
+INTERACTIVE=false
+ALLOW_PTRACE=false
+while true
+do
+ case "${1:-}" in
+ -i)
+ INTERACTIVE=true
+ shift
+ ;;
+ -p)
+ ALLOW_PTRACE=true
+ shift
+ ;;
+ -*)
+ echo "ERROR: Unknown command line flag: $1" 1>&2
+ echo "Usage: $0 [-i] [-p] <command>"
+ exit 1
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+ARGS=()
+# Run with a pseudo-TTY for nicer logging.
+ARGS+=(-t)
+# Run interactive if this script was invoked with '-i'.
+if [ "${INTERACTIVE}" == "true" ]
+then
+ ARGS+=(-i)
+fi
+# Allow ptrace() syscall if invoked with '-p'.
+if [ "${ALLOW_PTRACE}" == "true" ]
+then
+ echo "WARNING: Docker seccomp profile is disabled!" 1>&2
+ ARGS+=(--cap-add=SYS_PTRACE --security-opt seccomp=unconfined)
+fi
+# Set environment variable informing the build that we are running inside
+# a container.
+ARGS+=(-e HAFNIUM_HERMETIC_BUILD=inside)
+# Bind-mount the Hafnium root directory. We mount it at the same absolute
+# location so that all paths match across the host and guest.
+ARGS+=(-v "${ROOT_DIR}":"${ROOT_DIR}")
+# Make all files outside of the Hafnium directory read-only to ensure that all
+# generated files are written there.
+ARGS+=(--read-only)
+# Mount a writable /tmp folder. Required by LLVM/Clang for intermediate files.
+ARGS+=(--tmpfs /tmp)
+# Set working directory.
+ARGS+=(-w "${ROOT_DIR}")
+
+echo "Running in container: $*" 1>&2
+${DOCKER} run \
+ ${ARGS[@]} \
+ "${IMAGE_ID}" \
+ /bin/bash -c "$*" \ No newline at end of file
diff --git a/build/strace_open.sh b/build/strace_open.sh
new file mode 100755
index 000000000..e960dafa0
--- /dev/null
+++ b/build/strace_open.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+# Copyright 2019 The Hafnium Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -euxo pipefail
+
+SCRIPT_NAME="$(realpath "${BASH_SOURCE[0]}")"
+SCRIPT_DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
+ROOT_DIR="$(realpath ${SCRIPT_DIR}/..)"
+
+if [ "${HAFNIUM_HERMETIC_BUILD:-}" == "true" ]
+then
+ exec "${ROOT_DIR}/build/run_in_container.sh" -p ${SCRIPT_NAME} $@
+fi
+
+if [ $# != 1 ]
+then
+ echo "Usage: $0 <output_file>" 1>&2
+ exit 1
+fi
+
+MAKE="$(which make)"
+STRACE="$(which strace)"
+
+# Set up a temp directory and register a cleanup function on exit.
+TMP_DIR="$(mktemp -d)" +function cleanup() { + rm -rf "${TMP_DIR}" +} +trap cleanup EXIT + +STRACE_LOG="${TMP_DIR}/strace.log" + +echo "Building with strace" +pushd ${ROOT_DIR} +${MAKE} clobber +${STRACE} \ + -o "${STRACE_LOG}" \ + -f \ + -qq \ + -e trace=%file,chdir,%process \ + ${MAKE} +popd + +echo "Processing strace output" +"${SCRIPT_DIR}/parse_strace_open.py" ${ROOT_DIR} < "${STRACE_LOG}" > "$1" diff --git a/build/toolchain/BUILD.gn b/build/toolchain/BUILD.gn index f7caa6c1b..c152b39ac 100644 --- a/build/toolchain/BUILD.gn +++ b/build/toolchain/BUILD.gn @@ -39,6 +39,7 @@ embedded_clang_toolchain("aarch64_linux_clang") { toolchain_args = { use_platform = true plat_arch = "fake" + plat_boot_flow = "//src/arch/fake:boot_flow" plat_console = "//src/arch/fake:console" } } diff --git a/build/toolchain/embedded.gni b/build/toolchain/embedded.gni index 3a02530ad..166e7e01c 100644 --- a/build/toolchain/embedded.gni +++ b/build/toolchain/embedded.gni @@ -320,6 +320,7 @@ template("aarch64_common_toolchain") { } toolchain_args = { + plat_boot_flow = invoker.boot_flow plat_console = invoker.console forward_variables_from(invoker.toolchain_args, "*") } @@ -340,6 +341,7 @@ template("aarch64_toolchains") { forward_variables_from(invoker, [ "origin_address", + "boot_flow", "console", "gic_version", "gicd_base_address", @@ -350,6 +352,9 @@ template("aarch64_toolchains") { "toolchain_args", ]) cpu = "${invoker.cpu}+nofp" + + # Add a macro so files can tell whether they are not being built for a VM. + extra_defines = " -DVM_TOOLCHAIN=0" } # Toolchain for building test VMs which run under Hafnium. @@ -364,10 +369,14 @@ template("aarch64_toolchains") { "toolchain_args", ]) cpu = "${invoker.cpu}+fp" + boot_flow = "//src/arch/fake:boot_flow" console = "//src/arch/aarch64/hftest:console" # Nonsense values because they are required but shouldn't be used. heap_pages = 0 - max_vms = 1 + max_vms = 0 + + # Add a macro so files can tell whether they are being built for a VM. 
+ extra_defines = " -DVM_TOOLCHAIN=1" } } diff --git a/build/toolchain/host.gni b/build/toolchain/host.gni index 5b646b660..4d2dfa3be 100644 --- a/build/toolchain/host.gni +++ b/build/toolchain/host.gni @@ -150,6 +150,7 @@ template("host_toolchain") { # When building for the ${target_name}, use the fake architecture to make things # testable. plat_arch = "fake" + plat_boot_flow = "//src/arch/fake:boot_flow" plat_console = "//src/arch/fake:console" plat_heap_pages = invoker.heap_pages plat_max_cpus = invoker.max_cpus diff --git a/build/toolchain/platform.gni b/build/toolchain/platform.gni index 8e9feead6..b99ca64e0 100644 --- a/build/toolchain/platform.gni +++ b/build/toolchain/platform.gni @@ -19,6 +19,11 @@ declare_args() { # The architecture of the platform. plat_arch = "" + + # Boot flow driver to be used by the platform, specified as build target. + plat_boot_flow = "" + + # Console driver to be used for the platform, specified as build target. plat_console = "" # The number of pages to allocate for the hypervisor heap. @@ -28,5 +33,5 @@ declare_args() { plat_max_cpus = 1 # The maximum number of VMs required for the platform. - plat_max_vms = 1 + plat_max_vms = 0 } diff --git a/docs/FVP.md b/docs/FVP.md new file mode 100644 index 000000000..16997bc58 --- /dev/null +++ b/docs/FVP.md @@ -0,0 +1,40 @@ +# FVP + +Arm offers a series of emulators known as Fixed Virtual Platforms (FVPs), which +simulate various processors. They are generally more accurate to the hardware +than QEMU, at the cost of being considerably slower. We support running +[tests](Testing.md) on the FVP as well as QEMU. + +## Set up + +1. Download the + [Armv8-A Base Platform FVP](https://developer.arm.com/products/system-design/fixed-virtual-platforms) + from Arm. +1. Unzip it to a directory called `fvp` alongside the root directory of your + Hafnium checkout. 
+ +## Running tests + +To run tests with the FVP instead of QEMU, from the root directory of your +Hafnium checkout: + +```shell +$ make && kokoro/ubuntu/test.sh --fvp +``` + +See the `fvp` function in +[`hftest.py`](http://cs/hafnium/test/hftest/hftest.py?q=symbol:fvp) for details +on how this works. + +## Other resources + +When running tests under the FVP we also use a prebuilt version of TF-A, which +is checked in under +[`prebuilts/linux-aarch64/arm-trusted-firmware/`](https://hafnium.googlesource.com/hafnium/prebuilts/+/refs/heads/master/linux-aarch64/arm-trusted-firmware/). +The +[README](https://hafnium.googlesource.com/hafnium/prebuilts/+/refs/heads/master/linux-aarch64/arm-trusted-firmware/README.md) +there has details on how it was built. The source code is available from the +[Arm Trusted Firmware site](https://developer.trustedfirmware.org/dashboard/view/6/). + +Documentation of the FVP (including memory maps) is +[available from Arm](https://static.docs.arm.com/100966/1101/fast_models_fvp_rg_100966_1101_00_en.pdf). diff --git a/docs/GettingStarted.md b/docs/GettingStarted.md index 98c61968f..19898559b 100644 --- a/docs/GettingStarted.md +++ b/docs/GettingStarted.md @@ -2,18 +2,18 @@ ## Getting the source code -``` shell +```shell git clone --recurse-submodules https://hafnium.googlesource.com/hafnium && (cd hafnium && f=`git rev-parse --git-dir`/hooks/commit-msg ; curl -Lo $f https://gerrit-review.googlesource.com/tools/hooks/commit-msg ; chmod +x $f) ``` To upload a commit for review: -``` shell +```shell git push origin HEAD:refs/for/master ``` -Browse source at https://hafnium.googlesource.com/hafnium. -Review CLs at https://hafnium-review.googlesource.com/. +Browse source at https://hafnium.googlesource.com/hafnium. Review CLs at +https://hafnium-review.googlesource.com/. See details of [how to contribute](../CONTRIBUTING.md). @@ -21,8 +21,8 @@ See details of [how to contribute](../CONTRIBUTING.md). 
Install prerequisites:
 
-``` shell
-sudo apt install make binutils-aarch64-linux-gnu device-tree-compiler libssl-dev flex bison
+```shell
+sudo apt install make binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu device-tree-compiler libssl-dev flex bison
 ```
 
 By default, the hypervisor is built with clang for a few target platforms along
@@ -31,19 +31,19 @@ configurations of the build. Adding a project is the preferred way to extend
 support to new platforms. The target project that is built is selected by the
 `PROJECT` make variable, the default project is 'reference'.
 
-``` shell
+```shell
 make PROJECT=<project_name>
 ```
 
-The compiled image can be found under `out/<project>`, for example the QEMU image is at
-at `out/reference/qemu_aarch64_clang/hafnium.bin`.
+The compiled image can be found under `out/<project>`, for example the QEMU
+image is at `out/reference/qemu_aarch64_clang/hafnium.bin`.
 
 ## Running on QEMU
 
 You will need at least version 2.9 for QEMU. The following command line can be
 used to run Hafnium on it:
 
-``` shell
+```shell
 qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin
 ```
 
@@ -51,18 +51,39 @@ Though it is admittedly not very useful because it doesn't have any virtual
 machines to run. Follow the [Hafnium RAM disk](HafniumRamDisk.md) instructions
 to create an initial RAM disk for Hafnium with Linux as the primary VM.
 
+Next, you need to create a manifest which will describe the VM to Hafnium.
+Follow the [Manifest](Manifest.md) instructions and build a DTBO with: +``` +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + vm1 { + debug_name = "Linux VM"; + }; + }; +}; +``` + +Dump the DTB used by QEMU: +```shell +qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin -initrd initrd.img -append "rdinit=/sbin/init" -machine dumpdtb=qemu.dtb +``` +and follow instructions in [Manifest](Manifest.md) to overlay it with the manifest. + The following command line will run Hafnium, with the RAM disk just created, which will then boot into the primary Linux VM: -``` shell -qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin -initrd initrd.img -append "rdinit=/sbin/init" +```shell +qemu-system-aarch64 -M virt,gic_version=3 -cpu cortex-a57 -nographic -machine virtualization=true -kernel out/reference/qemu_aarch64_clang/hafnium.bin -initrd initrd.img -append "rdinit=/sbin/init" -dtb=qemu_with_manifest.dtb ``` ## Running tests After building, presubmit tests can be run with the following command line: -``` shell +```shell ./kokoro/ubuntu/test.sh ``` diff --git a/docs/HafniumRamDisk.md b/docs/HafniumRamDisk.md index 59925ef79..c09568d3e 100644 --- a/docs/HafniumRamDisk.md +++ b/docs/HafniumRamDisk.md @@ -3,30 +3,13 @@ Hafnium expects to find the following files in the root directory of its RAM disk: - * `vmlinuz` -- the kernel of the primary VM. - * `initrd.img` -- the initial ramdisk of the primary VM. - * `vms.txt` -- optionally describes the secondary VMs. - * kernels for the secondary VMs, whose names are described in `vms.txt`. +* `vmlinuz` -- the kernel of the primary VM. +* `initrd.img` -- the initial ramdisk of the primary VM. +* kernels for the secondary VMs, whose names are described in the manifest. 
Follow the [preparing Linux](PreparingLinux.md) instructions to produce `vmlinuz` and `initrd.img` for a basic Linux primary VM. -## Format of `vms.txt` file -The format is currently one line per secondary VM, with the following format: - -``` shell - -``` - -For example, the following defines two secondary VMs, the first one with 1MB of -memory, 2 CPUs and kernel image called `kernel0`, while the second one has 2MB -of memory, 4 CPUs and a kernel image called `kernel1`. - -``` shell -1048576 2 kernel0 -2097152 4 kernel1 -``` - ## Create a RAM disk for Hafnium Assuming that a subdirectory called `initrd` contains the files listed in the diff --git a/docs/HermeticBuild.md b/docs/HermeticBuild.md new file mode 100644 index 000000000..f30de144f --- /dev/null +++ b/docs/HermeticBuild.md @@ -0,0 +1,93 @@ +# Hermetic build + +Hafnium build is not hermetic as it uses some system tools and libraries, e.g. +`bison` and `libssl`. To ensure consistency and repeatability, the team +maintains and periodically publishes a container image as the reference build +environment. The image is hosted on Google Cloud Platform as +`eu.gcr.io/hafnium-build/hafnium_ci`. + +Building inside a container is always enabled only for Kokoro pre-submit tests +but can be enabled for local builds too. It is disabled by default as it +requires the use of Docker which currently supports rootless containers only in +nightly builds. As rootless container tools mature, Hafnium may change the +default settings. For now, running the hermetic build locally is intended +primarily to reproduce issues in pre-submit tests. + +## Installing Docker + +### Stable + +If you don't mind running a Docker daemon with root privileges on your system, +you can follow the [official guide](https://docs.docker.com/install/) to install +Docker, or [go/installdocker](https://goto.google.com/installdocker) if you are +a Googler. + +Because the daemon runs as root, files generated by the container are owned by +root as well. 
To work around this, the build will automatically derive a local +container image from the base container, adding user `hafnium` with the same +UID/GID as the local user. + +### Nightly with rootless + +The latest nightly version of Docker has support for running containers with +user namespaces, thus eliminating the need for a daemon with root privileges. It +can be installed into the local user's `bin` directory with a script: + +```shell +curl -fsSL https://get.docker.com/rootless -o get-docker.sh +sh get-docker.sh +``` + +The script will also walk you through the installation of dependencies, changes +to system configuration files and environment variable values needed by the +client to discover the rootless daemon. + +## Enabling for local builds + +Hermetic builds are controlled by the `HAFNIUM_HERMETIC_BUILD` environment +variable. Setting it to `true` instructs the build to run commands inside the +container. Any other value disables the feature. + +To always enable hermetic builds, put this line in your `~/.bashrc`: + +```shell +export HAFNIUM_HERMETIC_BUILD=true +``` + +When you now run `make`, you should see the following line: + +```shell +$ make +Running in container: make all +... +``` + +## Running commands inside the container + +An arbitrary command can be executed inside the container with +`build/run_in_container.sh [-i] ...`. This is done automatically +inside `Makefile` and `kokoro/ubuntu/build.sh` which detect whether they are +already running inside the container and respawn themselves using +`run_in_container.sh` if not. 
+ +For example, you can spawn a shell with: + +```shell +./build/run_in_container.sh -i bash +``` + +## Building container image + +The container image is defined in `build/docker/Dockerfile` and can be built +locally: + +```shell +./build/docker/build.sh +``` + +Owners of the `hafnium-build` GCP repository can publish the new image (requires +[go/cloud-sdk](https://goto.google.com/cloud-sdk) installed and authenticated): + +```shell +./build/docker/publish.sh +``` diff --git a/docs/Manifest.md b/docs/Manifest.md new file mode 100644 index 000000000..58a560094 --- /dev/null +++ b/docs/Manifest.md @@ -0,0 +1,79 @@ +# Hafnium Manifest + +## Format + +The format of the manifest is a simple DeviceTree overlay: + +``` +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + vm1 { + debug_name = "name"; + }; + + vm2 { + debug_name = "name"; + kernel_filename = "filename"; + vcpu_count = ; + mem_size = ; + }; + ... + }; +}; +``` + +Note: `&{/}` is a syntactic sugar expanded by the DTC compiler. Make sure to +use the DTC in `prebuilts/` as the version packaged with your OS may not support +it yet. + +## Example + +The following manifest defines two secondary VMs, the first one with 1MB of +memory, 2 CPUs and kernel image called `kernel0` (matches filename in Hafnium's +[ramdisk](HafniumRamDisk.md)), while the second one has 2MB of memory, 4 CPUs +and a kernel image called `kernel1`. + +``` +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + vm1 { + debug_name = "primary VM"; + }; + + vm2 { + debug_name = "secondary VM 1"; + kernel_filename = "kernel0"; + vcpu_count = <2>; + mem_size = <0x100000>; + }; + + vm3 { + debug_name = "secondary VM 2"; + kernel_filename = "kernel1"; + vcpu_count = <4>; + mem_size = <0x200000>; + }; + }; +}; +``` + +## Compiling + +Hafnium expects the manifest as part of the board FDT, i.e. DeviceTree in binary +format (DTB). 
+
+First, compile the manifest into a DTBO (binary overlay):
+```shell
+prebuilts/linux-x64/dtc/dtc -I dts -O dtb --out-version 17 -o manifest.dtbo <manifest.dts>
+```
+
+Then overlay it with the DTB of your board:
+```shell
+prebuilts/linux-x64/dtc/fdtoverlay -i <base.dtb> -o <output.dtb> manifest.dtbo
+```
diff --git a/docs/PreparingLinux.md b/docs/PreparingLinux.md
index 283b9ce40..96549604c 100644
--- a/docs/PreparingLinux.md
+++ b/docs/PreparingLinux.md
@@ -8,7 +8,7 @@ To boot Linux, a kernel image (`vmlinuz`) and a suitable initial RAM disk
 The Linux kernel for the primary VM can be built using the following
 command-line:
 
-``` shell
+```shell
 git clone https://github.com/torvalds/linux.git
 cd linux
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig
@@ -24,7 +24,7 @@ From the Hafnium root directory, the following commands can be used to compile
 the kernel module, replacing `<kernel_path>` with the path to the kernel checked
 out in the previous section:
 
-``` shell
+```shell
 cd hafnium/driver/linux/
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- KERNEL_PATH=<kernel_path> make
 ```
@@ -38,7 +38,7 @@ To make Linux useful, it needs a shell. These following instructions will
 construct a file system for the Linux RAM disk with the BusyBox shell as the
 init process.
 
-``` shell
+```shell
 git clone git://busybox.net/busybox.git
 cd busybox
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make defconfig
@@ -48,7 +48,7 @@ ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make menuconfig
 At this point you should ensure that the option `Settings > Build static binary
 (no shared libs)` is selected. Then you can proceed with the following commands:
 
-``` shell
+```shell
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make -j24
 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make install
 cd _install
@@ -71,13 +71,13 @@ want in the RAM disk, for example, the kernel module built in the previous
 section.
Assuming the BusyBox root directory is in the same parent directory as the Hafnium root directory: -``` shell +```shell cp ../../hafnium/driver/linux/hafnium.ko . ``` Then run the following commands: -``` shell +```shell find . | cpio -o -H newc | gzip > ../initrd.img cd .. ``` diff --git a/docs/Testing.md b/docs/Testing.md index 1b66e8307..b7b8c931a 100644 --- a/docs/Testing.md +++ b/docs/Testing.md @@ -1,37 +1,69 @@ # Testing -Testing of Hafnium is currently evolving. There are basic tests running on QEMU -but we want more and more kinds of tests e.g. unit tests. +## Overview + +Hafnium has 4 main kinds of tests: + +1. Host tests + * Unit tests of core functionality, e.g. page table manipulation. + * Source in `src/*_test.cc`. + * Using the [Google Test](https://github.com/google/googletest) framework, + built against 'fake' architecture (`src/arch/fake`). +1. Arch tests + * Architecture-specific unit tests, e.g. MMU setup. + * Source under `test/arch`. + * Using our own _hftest_ framework, with `standalone_main.c`. + * Build own hypervisor image, run in EL2. +1. VM API tests + * Exercise hypervisor API from both primary and secondary VMs. + * Source under `test/vmapi`. + * Tests are run from the primary VM under a normal build of the Hafnium + hypervisor, possibly communicating with a helper service in one or more + secondary VMs. + * Using our own _hftest_ framework, with `standalone_main.c` for the + primary VM and `hftest_service.c` for secondary VMs. + * Build own primary and secondary VMs, run in EL1 under actual Hafnium + image. +1. Linux tests + * Exercise the Hafnium Linux kernel module. + * Source under `test/linux`. + * Tests are run from userspace (PID 1) under Linux in the primary VM under + Hafnium, possibly with other secondary VMs. + * Using our own _hftest_ framework, with `linux_main.c`. + +Host tests run directly on the host machine where they are built, whereas the +other 3 types can run under an emulator such as QEMU, or on real hardware. 
## Presubmit -Presubmit builds everything, runs all tests and checks the source for -formatting and lint errors. This can be run locally with: +Presubmit builds everything, runs all tests and checks the source for formatting +and lint errors. This can be run locally with: -``` shell +```shell ./kokoro/ubuntu/build.sh ``` Or to just run the tests after having built everything manually run: -``` shell +```shell ./kokoro/ubuntu/test.sh ``` ## QEMU tests These tests boot Hafnium on QEMU and the VMs make calls to Hafnium to test its -behaviour. +behaviour. They can also be run on the Arm [FVP](FVP.md) and in some cases on +real hardware. -### `hftest` +### hftest -Having a framework for tests makes them easier to read and write. `hftest` is a +Having a framework for tests makes them easier to read and write. _hftest_ is a framework to meet the needs of VM based tests for Hafnium. It consists of: - * assertions - * test declarations - * base VM image - * driver script +* assertions +* test declarations +* base VM image +* driver script Assertions should be familiar from other testing libraries. They make use of C11's `_Generic` expressions for type genericity. diff --git a/docs/VmInterface.md b/docs/VmInterface.md new file mode 100644 index 000000000..24644931a --- /dev/null +++ b/docs/VmInterface.md @@ -0,0 +1,167 @@ +# VM interface + +This page provides an overview of the interface Hafnium provides to VMs. Hafnium +makes a distinction between the 'primary VM', which controls scheduling and has +more direct access to some hardware, and 'secondary VMs' which exist mostly to +provide services to the primary VM, and have a more paravirtualised interface. +The intention is that the primary VM can run a mostly unmodified operating +system (such as Linux) with the addition of a Hafnium driver, while secondary +VMs will run more specialised trusted OSes or bare-metal code which is designed +with Hafnium in mind. 
+ +The interface documented here is what is planned for the first release of +Hafnium, not necessarily what is currently implemented. + +## CPU scheduling + +The primary VM will have one vCPU for each physical CPU, and control the +scheduling. + +Secondary VMs will have a configurable number of vCPUs, scheduled on arbitrary +physical CPUs at the whims of the primary VM scheduler. + +All VMs will start with a single active vCPU. Subsequent vCPUs can be started +through PSCI. + +## PSCI + +The primary VM will be able to control the physical CPUs through the following +PSCI 1.1 calls, which will be forwarded to the underlying implementation in EL3: + +* PSCI_VERSION +* PSCI_FEATURES +* PSCI_SYSTEM_OFF +* PSCI_SYSTEM_RESET +* PSCI_AFFINITY_INFO +* PSCI_CPU_SUSPEND +* PSCI_CPU_OFF +* PSCI_CPU_ON + +All other PSCI calls are unsupported. + +Secondary VMs will be able to control their vCPUs through the following PSCI 1.1 +calls, which will be implemented by Hafnium: + +* PSCI_VERSION +* PSCI_FEATURES +* PSCI_AFFINITY_INFO +* PSCI_CPU_SUSPEND +* PSCI_CPU_OFF +* PSCI_CPU_ON + +All other PSCI calls are unsupported. + +## Hardware timers + +The primary VM will have access to both the physical and virtual EL1 timers +through the usual control registers (`CNT[PV]_TVAL_EL0` and `CNT[PV]_CTL_EL0`). + +Secondary VMs will have access to the virtual timer only, which will be emulated +with help from the kernel driver in the primary VM. + +## Interrupts + +The primary VM will have direct access to control the physical GIC, and receive +all interrupts (other than anything already trapped by TrustZone). It will be +responsible for forwarding any necessary interrupts to secondary VMs. The +Interrupt Translation Service (ITS) will be disabled by Hafnium so that it +cannot be used to circumvent access controls. 
+ +Secondary VMs will have access to a simple paravirtualized interrupt controller +through two hypercalls: one to enable or disable a given virtual interrupt ID, +and one to get and acknowledge the next pending interrupt. There is no concept +of interrupt priorities or a distinction between edge and level triggered +interrupts. Secondary VMs may also inject interrupts into their own vCPUs. + +## Performance counters + +VMs will be blocked from accessing performance counter registers (for the +performance monitor extensions described in chapter D5 of the ARMv8-A reference +manual) in production, to prevent them from being used as a side channel to leak +data between VMs. + +Hafnium may allow VMs to use them in debug builds. + +## Debug registers + +VMs will be blocked from accessing debug registers in production builds, to +prevent them from being used to circumvent access controls. + +Hafnium may allow VMs to use these registers in debug builds. + +## RAS Extension registers + +VMs will be blocked from using registers associated with the RAS Extension. + +## Asynchronous message passing + +VMs will be able to send messages of up to 4 KiB to each other asynchronously, +with no queueing, as specified by SPCI. + +## Memory + +VMs will statically be given access to mutually-exclusive regions of the +physical address space at boot. This includes MMIO space for controlling +devices, plus a fixed amount of RAM for secondaries, and all remaining address +space to the primary. Note that this means that only one VM can control any +given page of MMIO registers for a device. + +VMs may choose to donate or share their memory with other VMs at runtime. Any +given page may be shared with at most 2 VMs at once (including the original +owning VM). Memory which has been donated or shared may not be forcefully +reclaimed, but the VM with which it was shared may choose to return it. + +## Logging + +VMs may send a character to a shared log by means of a hypercall or SMC call. 
+These log messages will be buffered per VM to make complete lines, then output +to a Hafnium-owned UART and saved in a shared ring buffer which may be extracted +from RAM dumps. VM IDs will be prepended to these logs. + +This log API is intended for use in early bringup and low-level debugging. No +sensitive data should be logged through it. Higher level logs can be sent to the +primary VM through the asynchronous message passing mechanism described above, +or through shared memory. + +## Configuration + +Hafnium will read configuration from a flattened device tree blob (FDT). This +may either be the same device tree used for the other details of the system or a +separate minimal one just for Hafnium. This will include at least: + +* The available RAM. +* The number of secondary VMs, how many vCPUs each should have, how much + memory to assign to each of them, and where to load their initial images. + (Most likely the initial image will be a minimal loader supplied with + Hafnium which will validate and load the rest of the image from the primary + later on.) +* Which devices exist on the system, their details (MMIO regions, interrupts + and SYSMMU details), and which VM each is assigned to. + * A single physical device may be split into multiple logical ‘devices’ + from Hafnium’s point of view if necessary to have different VMs own + different parts of it. +* A whitelist of which SMC calls each VM is allowed to make. + +## Failure handling + +If a secondary VM tries to do something it shouldn't, Hafnium will either inject +a fault or kill it and inform the primary VM. The primary VM may choose to +restart the system or to continue without the secondary VM. + +If the primary VM tries to do something it shouldn't, Hafnium will either inject +a fault or restart the system. 
+ +## TrustZone communication + +The primary VM will be able to communicate with a TEE running in TrustZone +either through SPCI messages or through whitelisted SMC calls, and through +shared memory. + +## Other SMC calls + +Other than the PSCI calls described above and those used to communicate with +Hafnium, all other SMC calls will be blocked by default. Hafnium will allow SMC +calls to be whitelisted on a per-VM, per-function ID basis, as part of the +static configuration described above. These whitelisted SMC calls will be +forwarded to the EL3 handler with the client ID (as described by the SMCCC) set +to the calling VM's ID. diff --git a/driver/BUILD.gn b/driver/BUILD.gn index 0fae976f2..2cb214a0f 100644 --- a/driver/BUILD.gn +++ b/driver/BUILD.gn @@ -16,11 +16,7 @@ import("//build/linux/linux.gni") linux_kernel_module("linux") { module_name = "hafnium" + module_dir = "./linux" kernel_target = "//third_party:linux" - kernel_src_dir = "//third_party/linux" - sources = [ - "linux/Makefile", - "linux/hf_call.S", - "linux/main.c", - ] + kernel_dir = "//third_party/linux" } diff --git a/driver/linux b/driver/linux index 4c96d0c02..474c4396e 160000 --- a/driver/linux +++ b/driver/linux @@ -1 +1 @@ -Subproject commit 4c96d0c02815e07a54a52e46db472cfa5af57941 +Subproject commit 474c4396e72c33692b0cd40eeb0f3ac7c76fe0f7 diff --git a/hfo2/Cargo.lock b/hfo2/Cargo.lock index 21562cf98..31f916c0f 100644 --- a/hfo2/Cargo.lock +++ b/hfo2/Cargo.lock @@ -2,32 +2,32 @@ # It is not intended for manual editing. 
[[package]] name = "arrayvec" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bitflags" -version = "1.0.4" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "hfo2" version = "0.1.0" dependencies = [ - "arrayvec 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", "reduce 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "static_assertions 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "memoffset" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -35,7 +35,7 @@ dependencies = [ [[package]] name = "nodrop" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -71,17 +71,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "static_assertions" -version = "0.3.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" [metadata] -"checksum arrayvec 0.4.10 
(registry+https://github.com/rust-lang/crates.io-index)" = "92c7fb76bc8826a8b33b4ee5bb07a247a81e76764ab4d55e8f73e3a4d8808c71" -"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" -"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" -"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" +"checksum arrayvec 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" +"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" +"checksum nodrop 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" "checksum reduce 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "75b1fa5668b02f2a69746bba558f8f98cc087b123a587fd959122872ad9a3f3c" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum static_assertions 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "ec3c2cbd19f2033005f463529553273cdcb3e91b3281346007adc7967b6789af" +"checksum static_assertions 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" diff --git a/hfo2/Cargo.toml b/hfo2/Cargo.toml index 9ceab54c0..ddc7a02c1 100644 --- a/hfo2/Cargo.toml +++ b/hfo2/Cargo.toml @@ -20,7 +20,7 @@ lto = true [dependencies] bitflags = "1.0" -static_assertions = "0.3.2" +static_assertions = "1.1.0" reduce = "0.1" arrayvec = { version = "0.4", default-features = false } memoffset = "0.5.1" diff --git a/hfo2/src/api.rs b/hfo2/src/api.rs index 1162a4f50..add04af6f 100644 --- a/hfo2/src/api.rs +++ b/hfo2/src/api.rs @@ -35,7 +35,7 @@ use crate::types::*; // Currently, a page is mapped for the send and receive buffers so the maximum request is the size // of a page. -const_assert_eq!(hf_mailbox_size; HF_MAILBOX_SIZE, PAGE_SIZE); +const_assert_eq!(HF_MAILBOX_SIZE, PAGE_SIZE); /// Returns to the primary vm and signals that the vcpu still has work to do so. #[no_mangle] diff --git a/hfo2/src/arch/aarch64.rs b/hfo2/src/arch/aarch64.rs index f43b15b4e..fc47ff318 100644 --- a/hfo2/src/arch/aarch64.rs +++ b/hfo2/src/arch/aarch64.rs @@ -50,7 +50,7 @@ struct float_reg { high: u64, } -const_assert_eq!(float_reg_size; mem::size_of::(), FLOAT_REG_BYTES); +const_assert_eq!(mem::size_of::(), FLOAT_REG_BYTES); /// Arch-specific information about a VM. 
#[repr(C)] @@ -93,7 +93,7 @@ const CPU_ID: usize = 0; const CPU_STACK_BOTTOM: usize = 8; const VCPU_REGS: usize = 32; const REGS_LAZY: usize = 264; -const REGS_FREGS: usize = REGS_LAZY + 232; +const REGS_FREGS: usize = REGS_LAZY + 248; //#[cfg(any(feature = "GIC_VERSION=3", feature = "GIC_VERSION=4"))] const REGS_GIC: usize = REGS_FREGS + 528; @@ -149,6 +149,7 @@ pub struct ArchSysRegs { cptr_el2: uintreg_t, cnthctl_el2: uintreg_t, vttbr_el2: uintreg_t, + mdcr_el2: uintreg_t, } #[repr(C)] diff --git a/hfo2/src/boot_flow.rs b/hfo2/src/boot_flow.rs new file mode 100644 index 000000000..6c153d24d --- /dev/null +++ b/hfo2/src/boot_flow.rs @@ -0,0 +1,95 @@ +/* + * Copyright 2019 Sanguk Park + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use crate::addr::*; +use crate::arch::*; +use crate::boot_params::*; +use crate::fdt::*; +use crate::fdt_handler::*; +use crate::manifest::*; +use crate::mm::*; +use crate::mpool::*; + +// from "inc/hf/plat/boot_flow.h" +extern "C" { + fn plat_get_fdt_addr() -> paddr_t; + fn plat_get_kernel_arg() -> uintreg_t; + fn plat_get_initrd_range( + fdt_root: *const fdt_node, + begin: *mut paddr_t, + end: *mut paddr_t, + ) -> bool; +} + +pub mod plat { + use super::*; + + pub fn get_fdt_addr() -> paddr_t { + unsafe { plat_get_fdt_addr() } + } + + pub fn get_kernel_arg() -> uintreg_t { + unsafe { plat_get_kernel_arg() } + } + + pub fn get_initrd_range<'a>(fdt_root: &FdtNode<'a>) -> Result<(paddr_t, paddr_t), ()> { + let fdt_root = fdt_root.clone().into(); + let (mut begin, mut end) = (pa_init(0), pa_init(0)); + + if unsafe { plat_get_initrd_range(&fdt_root, &mut begin, &mut end) } { + Ok((begin, end)) + } else { + Err(()) + } + } +} + +/// Parse information from FDT needed to initialize Hafnium. +/// FDT is mapped at the beginning and unmapped before exiting the function. +pub fn boot_flow_init( + ptable: &mut PageTable, + manifest: &mut Manifest, + boot_params: &mut BootParams, + ppool: &MPool, +) -> Result<(), ()> { + // Get the memory map from the FDT. 
+ let mut fdt_root = unsafe { map(ptable, plat::get_fdt_addr(), ppool) }.ok_or_else(|| { + dlog!("Unable to map FDT.\n"); + })?; + + let ret = try { + fdt_root.find_child("\0".as_ptr()).ok_or_else(|| { + dlog!("Unable to find FDT root node.\n"); + })?; + + manifest.init(&fdt_root).map_err(|e| { + dlog!( + "Could not parse manifest: {}.\n", + >::into(e) + ); + })?; + + boot_params.init(&fdt_root).map_err(|_| { + dlog!("Could not parse boot params.\n"); + })?; + }; + + unsafe { unmap(ptable, pa_addr(plat::get_fdt_addr()) as _, ppool) }.map_err(|_| { + dlog!("Unable to unmap FDT.\n"); + })?; + + ret +} diff --git a/hfo2/src/boot_params.rs b/hfo2/src/boot_params.rs index e7731f99e..0f0be6115 100644 --- a/hfo2/src/boot_params.rs +++ b/hfo2/src/boot_params.rs @@ -14,13 +14,13 @@ * limitations under the License. */ -use core::mem::MaybeUninit; - use crate::addr::*; use crate::arch::*; +use crate::boot_flow::*; +use crate::fdt::*; +use crate::fdt_handler::*; use crate::mm::*; use crate::mpool::*; -use crate::spinlock::*; use crate::types::*; pub const MAX_MEM_RANGES: usize = 20; @@ -68,46 +68,33 @@ impl BootParamsUpdate { } } -/// TODO(HfO2): `plat.c`, containing those functions are not ported into Rust. -/// It's because functions in `plat.c` are denoted by `#pragma weak` which is -/// not supported in Rust yet. (#47.) -extern "C" { - fn plat_get_boot_params( - stage1_locked: mm_stage1_locked, - p: *mut BootParams, - ppool: *const MPool, - ) -> bool; +impl BootParams { + /// Extract the boot parameters from the FDT and the boot-flow driver. + pub fn init<'a>(&mut self, fdt_root: &FdtNode<'a>) -> Result<(), ()> { + self.mem_ranges_count = 0; + self.kernel_arg = plat::get_kernel_arg(); - fn plat_update_boot_params( - stage1_locked: mm_stage1_locked, - p: *mut BootParamsUpdate, - ppool: *const MPool, - ) -> bool; -} + let (begin, end) = plat::get_initrd_range(fdt_root)?; + self.initrd_begin = begin; + self.initrd_end = end; -/// Reads platform-specific boot parameters. 
-pub fn boot_params_get( - ptable: &mut SpinLockGuard>, - ppool: &MPool, -) -> Option { - let mut p: MaybeUninit = MaybeUninit::uninit(); + self.cpu_count = fdt_root.find_cpus(&mut self.cpu_ids).ok_or(())?; + fdt_root.find_memory_ranges(self).ok_or(())?; - if unsafe { plat_get_boot_params(mm_stage1_locked::from_ref(ptable), p.get_mut(), ppool) } { - Some(unsafe { p.assume_init() }) - } else { - None + Ok(()) } } -/// Updates boot parameters for primary VM to read. -pub fn boot_params_update( - ptable: &mut SpinLockGuard>, - p: &mut BootParamsUpdate, - ppool: &MPool, +/// Updates the FDT before being passed to the primary VM's kernel. +/// +/// TODO: in future, each VM will declare whether it expects an argument passed and that will be +/// static data e.g. it will provide its own FDT so there will be no FDT modification. This is +/// done because each VM has a very different view of the system and we don't want to force VMs +/// to require loader code when another loader can load the data for it. +pub fn boot_params_patch_fdt( + ptable: &mut PageTable, + p: &BootParamsUpdate, + mpool: &MPool, ) -> Result<(), ()> { - if unsafe { plat_update_boot_params(mm_stage1_locked::from_ref(ptable), p, ppool) } { - Ok(()) - } else { - Err(()) - } + unsafe { patch(ptable, plat::get_fdt_addr(), p, mpool) } } diff --git a/hfo2/src/cpio.rs b/hfo2/src/cpio.rs index f8ce85f6a..adb9e35e9 100644 --- a/hfo2/src/cpio.rs +++ b/hfo2/src/cpio.rs @@ -73,8 +73,10 @@ pub fn parse_cpio(it: &mut MemIter) -> Option { /// Looks for a file in the given cpio archive. The filename is not null-terminated, so we use a /// memory iterator to represent it. The file, if found, is returned in the `it` argument. 
-pub fn find_file_memiter(cpio: &mut MemIter, filename: &MemIter) -> Option { - while let Some(result) = parse_cpio(cpio) { +pub fn find_file_memiter(cpio: &MemIter, filename: &MemIter) -> Option { + let mut iter = cpio.clone(); + + while let Some(result) = parse_cpio(&mut iter) { if unsafe { filename.iseq(result.name) } { return Some(unsafe { MemIter::from_raw(result.contents, result.size) }); } @@ -85,8 +87,10 @@ pub fn find_file_memiter(cpio: &mut MemIter, filename: &MemIter) -> Option Option { - while let Some(result) = parse_cpio(cpio) { +pub unsafe fn find_file(cpio: &MemIter, filename: *const u8) -> Option { + let mut iter = cpio.clone(); + + while let Some(result) = parse_cpio(&mut iter) { if strcmp(filename, result.name) == 0 { return Some(MemIter::from_raw(result.contents, result.size)); } diff --git a/hfo2/src/cpu.rs b/hfo2/src/cpu.rs index 2c90cf439..8ca7d2cae 100644 --- a/hfo2/src/cpu.rs +++ b/hfo2/src/cpu.rs @@ -14,7 +14,7 @@ * limitations under the License. */ -use core::mem::{self, ManuallyDrop}; +use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ops::Deref; use core::ptr; @@ -173,8 +173,12 @@ impl Interrupts { Ok(()) } + /// Checks whether the vCPU's attempt to block for a message has already been interrupted or + /// whether it is allowed to block. #[inline] pub fn is_interrupted(&self) -> bool { + // Don't block if there are enabled and pending interrupts, to match behaviour of + // wait_for_interrupt. self.enabled_and_pending_count > 0 } @@ -382,6 +386,21 @@ impl Cpu { } } +pub unsafe fn cpu_get_buffer(cpu_id: cpu_id_t) -> &'static mut RawPage { + /// Internal buffer used to store SPCI messages from a VM Tx. Its usage prevents TOCTOU issues + /// while Hafnium performs actions on information that would otherwise be re-writable by the VM. + /// + /// Each buffer is owned by a single cpu. The buffer can only be used for `spci_msg_send`. 
The + /// information stored in the buffer is only valid during the `spci_msg_send` request is + /// performed. + /// + /// TODO(HfO2): Can we safely model this like `std::thread_local`? + static mut MESSAGE_BUFFER: MaybeUninit<[RawPage; MAX_CPUS]> = MaybeUninit::uninit(); + assert!(cpu_id < MAX_CPUS as _); + + &mut MESSAGE_BUFFER.get_mut()[cpu_id as usize] +} + pub struct CpuManager { /// State of all supported CPUs. cpus: ArrayVec<[Cpu; MAX_CPUS]>, @@ -536,6 +555,11 @@ pub unsafe extern "C" fn vcpu_get_interrupts(vcpu: *const VCpu) -> *mut Interrup (*vcpu).interrupts.get_mut_unchecked() } +#[no_mangle] +pub unsafe extern "C" fn vcpu_is_interrupted(vcpu: *const VCpu) -> bool { + (*vcpu).interrupts.lock().is_interrupted() +} + /// Check whether the given vcpu_inner is an off state, for the purpose of /// turning vCPUs on and off. Note that aborted still counts as on in this /// context. diff --git a/hfo2/src/dlog.rs b/hfo2/src/dlog.rs index 357ec3be4..4c07034af 100644 --- a/hfo2/src/dlog.rs +++ b/hfo2/src/dlog.rs @@ -35,7 +35,7 @@ impl fmt::Write for Writer { fn write_str(&mut self, s: &str) -> fmt::Result { for byte in s.bytes() { unsafe { - plat_console_putchar(byte); + dlog_putchar(byte); } } Ok(()) @@ -76,7 +76,19 @@ pub unsafe extern "C" fn dlog_unlock() { } } +const DLOG_BUFFER_SIZE: usize = 8192; + +// These global variables for the log buffer are public because a test needs to access them +// directly. 
+#[no_mangle] +pub static mut dlog_buffer_offset: usize = 0; + +#[no_mangle] +pub static mut dlog_buffer: [u8; DLOG_BUFFER_SIZE] = [0; DLOG_BUFFER_SIZE]; + #[no_mangle] pub unsafe extern "C" fn dlog_putchar(c: u8) { + dlog_buffer[dlog_buffer_offset] = c; + dlog_buffer_offset = (dlog_buffer_offset + 1) % DLOG_BUFFER_SIZE; plat_console_putchar(c); } diff --git a/hfo2/src/fdt.rs b/hfo2/src/fdt.rs index c1d2cd020..a1dd58d22 100644 --- a/hfo2/src/fdt.rs +++ b/hfo2/src/fdt.rs @@ -20,6 +20,8 @@ use core::ptr; use core::slice; use core::str; +use crate::std::*; + use scopeguard::guard; /// TODO(HfO2): port this function into `std.rs` (#48.) @@ -124,6 +126,37 @@ const FDT_VERSION: u32 = 17; const FDT_MAGIC: u32 = 0xd00d_feed; const FDT_TOKEN_ALIGNMENT: usize = mem::size_of::(); +/// Helper method for parsing 32/64-bit units from FDT data. +pub fn fdt_parse_number(data: &[u8]) -> Option { + #[repr(C, align(8))] + struct T { + a: [u8; 8], + } + + // FDT values should be aligned to 32-bit boundary. + assert!(is_aligned(data.as_ptr() as _, FDT_TOKEN_ALIGNMENT)); + + let ret = match data.len() { + 4 => { + // Assert that `data` is already sufficiently aligned to dereference as u32. + const_assert!(mem::align_of::() <= FDT_TOKEN_ALIGNMENT); + unsafe { u32::from_be(*(data.as_ptr() as *const u32)) as u64 } + } + 8 => { + // ARMv8 requires `data` to be realigned to 64-bit boundary to dereferences as u64. + // May not be needed on other architectures. 
+ let mut t = T { + a: Default::default(), + }; + t.a.copy_from_slice(data); + u64::from_be(unsafe { mem::transmute(t) }) + } + _ => return None, + }; + + Some(ret) +} + impl<'a> FdtTokenizer<'a> { fn new(cur: &'a [u8], strs: &'a [u8]) -> Self { Self { cur, strs } @@ -149,34 +182,11 @@ impl<'a> FdtTokenizer<'a> { Some(first) } - fn bytes_filter(&mut self, size: usize, pred: F) -> Option<&'a [u8]> - where - F: FnOnce(&'a [u8]) -> bool, - { - if self.cur.len() < size { - return None; - } - - let (first, rest) = self.cur.split_at(size); - if !pred(first) { - return None; - } - self.cur = rest; - self.align(); - - Some(first) - } - fn u32(&mut self) -> Option { let bytes = self.bytes(mem::size_of::())?; Some(u32::from_be_bytes(bytes.try_into().unwrap())) } - fn u32_expect(&mut self, expect: u32) -> Option { - let bytes = self.bytes_filter(mem::size_of::(), |b| b == expect.to_be_bytes())?; - Some(u32::from_be_bytes(bytes.try_into().unwrap())) - } - fn token(&mut self) -> Option { while let Some(v) = self.u32() { let token = v.try_into().unwrap(); @@ -189,14 +199,23 @@ impl<'a> FdtTokenizer<'a> { } fn token_expect(&mut self, expect: FdtToken) -> Option { - while let Some(v) = self.u32_expect(expect as u32) { - let token = v.try_into().unwrap(); - if token != FdtToken::Nop { - return Some(token); + let token = self.token()?; + + if token != expect { + unsafe { + self.rewind(); } + return None; } - None + Some(token) + } + + unsafe fn rewind(&mut self) { + self.cur = slice::from_raw_parts( + self.cur.as_ptr().sub(FDT_TOKEN_ALIGNMENT), + self.cur.len() + FDT_TOKEN_ALIGNMENT, + ); } fn str(&mut self) -> Option<&'a [u8]> { @@ -352,7 +371,7 @@ impl<'a> FdtNode<'a> { impl FdtHeader { pub fn dump(&self) { unsafe fn asciz_to_utf8(ptr: *const u8) -> &'static str { - let len = (0..).find(|i| *ptr.add(*i) != 0).unwrap(); + let len = (0..).find(|i| *ptr.add(*i) == 0).unwrap(); let bytes = slice::from_raw_parts(ptr, len); str::from_utf8_unchecked(bytes) } @@ -383,7 +402,7 @@ impl 
FdtHeader { } } - if t.token().filter(|t| *t != FdtToken::EndNode).is_none() { + if t.token().filter(|t| *t == FdtToken::EndNode).is_none() { return; } @@ -405,7 +424,7 @@ impl FdtHeader { unsafe { while (*entry).address != 0 || (*entry).size != 0 { dlog!( - "Entry: {:p} (0x{:x} bytes)\n", + "Entry: {:p} ({:#x} bytes)\n", u64::from_be((*entry).address) as *const u8, u64::from_be((*entry).size) ); diff --git a/hfo2/src/fdt_handler.rs b/hfo2/src/fdt_handler.rs index 18bcf7db9..7872dfdb2 100644 --- a/hfo2/src/fdt_handler.rs +++ b/hfo2/src/fdt_handler.rs @@ -14,7 +14,6 @@ * limitations under the License. */ -use core::convert::TryInto; use core::mem; use core::ptr; use core::slice; @@ -31,21 +30,11 @@ use crate::types::*; use scopeguard::{guard, ScopeGuard}; -fn convert_number(data: &[u8]) -> Option { - let ret = match data.len() { - 4 => u64::from(u32::from_be_bytes(data.try_into().unwrap())), - 8 => u64::from_be_bytes(data.try_into().unwrap()), - _ => return None, - }; - - Some(ret) -} - impl<'a> FdtNode<'a> { fn read_number(&self, name: *const u8) -> Result { let data = self.read_property(name)?; - convert_number(data).ok_or(()) + fdt_parse_number(data).ok_or(()) } unsafe fn write_number(&mut self, name: *const u8, value: u64) -> Result<(), ()> { @@ -67,18 +56,19 @@ impl<'a> FdtNode<'a> { /// Finds the memory region where initrd is stored, and updates the fdt node /// cursor to the node called "chosen". 
- pub fn find_initrd(&mut self) -> Option<(paddr_t, paddr_t)> { - if self.find_child("chosen\0".as_ptr()).is_none() { + pub fn find_initrd(&self) -> Option<(paddr_t, paddr_t)> { + let mut node = self.clone(); + if node.find_child("chosen\0".as_ptr()).is_none() { dlog!("Unable to find 'chosen'\n"); return None; } - let initrd_begin = ok_or!(self.read_number("linux,initrd-start\0".as_ptr()), { + let initrd_begin = ok_or!(node.read_number("linux,initrd-start\0".as_ptr()), { dlog!("Unable to read linux,initrd-start\n"); return None; }); - let initrd_end = ok_or!(self.read_number("linux,initrd-end\0".as_ptr()), { + let initrd_end = ok_or!(node.read_number("linux,initrd-end\0".as_ptr()), { dlog!("Unable to read linux,initrd-end\n"); return None; }); @@ -135,7 +125,10 @@ impl<'a> FdtNode<'a> { return None; } - cpu_ids[cpu_count] = convert_number(&data[..address_size]).unwrap() as cpu_id_t; + cpu_ids[cpu_count] = some_or!(fdt_parse_number(&data[..address_size]), { + dlog!("Could not parse CPU id\n"); + return None; + }) as cpu_id_t; cpu_count += 1; data = &data[address_size..]; @@ -193,8 +186,8 @@ impl<'a> FdtNode<'a> { // Traverse all memory ranges within this node. 
while data.len() >= entry_size { - let addr = convert_number(&data[..address_size]).unwrap() as usize; - let len = convert_number(&data[address_size..entry_size]).unwrap() as usize; + let addr = fdt_parse_number(&data[..address_size]).unwrap() as usize; + let len = fdt_parse_number(&data[address_size..entry_size]).unwrap() as usize; if mem_range_index < MAX_MEM_RANGES { p.mem_ranges[mem_range_index].begin = pa_init(addr); @@ -271,12 +264,16 @@ pub unsafe fn map( pub unsafe fn unmap( stage1_ptable: &mut PageTable, - fdt: &FdtHeader, + fdt: *const FdtHeader, ppool: &MPool, ) -> Result<(), ()> { - let fdt_addr = pa_init(fdt as *const _ as usize); + let fdt_addr = pa_init(fdt as usize); - stage1_ptable.unmap(fdt_addr, pa_add(fdt_addr, fdt.total_size() as usize), ppool) + stage1_ptable.unmap( + fdt_addr, + pa_add(fdt_addr, (*fdt).total_size() as usize), + ppool, + ) } pub unsafe fn patch( @@ -434,7 +431,7 @@ pub unsafe extern "C" fn fdt_unmap( fdt: *const FdtHeader, ppool: *const MPool, ) -> bool { - unmap(&mut stage1_locked, &*fdt, &*ppool).is_ok() + unmap(&mut stage1_locked, fdt, &*ppool).is_ok() } #[no_mangle] @@ -457,15 +454,14 @@ pub unsafe extern "C" fn fdt_find_memory_ranges(root: *const fdt_node, p: *mut B #[no_mangle] pub unsafe extern "C" fn fdt_find_initrd( - n: *mut fdt_node, + n: *const fdt_node, begin: *mut paddr_t, end: *mut paddr_t, ) -> bool { - let mut node = FdtNode::from((*n).clone()); + let node = FdtNode::from((*n).clone()); let (b, e) = some_or!(node.find_initrd(), return false); ptr::write(begin, b); ptr::write(end, e); - ptr::write(n, node.into()); true } diff --git a/hfo2/src/hypervisor.rs b/hfo2/src/hypervisor.rs index b10f87c74..aac2b0d36 100644 --- a/hfo2/src/hypervisor.rs +++ b/hfo2/src/hypervisor.rs @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -use core::mem::{self, MaybeUninit}; +use core::mem; use core::ops::Deref; use core::ptr; use core::sync::atomic::Ordering; @@ -507,10 +507,7 @@ impl Hypervisor { } else { // Buffer holding the internal copy of the shared memory regions. // TODO: Buffer is temporarily in the stack. - let mut message_buffer: [u8; mem::size_of::() - + mem::size_of::() - + mem::size_of::()] = - unsafe { MaybeUninit::uninit().assume_init() }; + let message_buffer = &mut unsafe { cpu_get_buffer((*current.get_inner().cpu).id) }; let architected_header = from_msg.get_architected_message_header(); @@ -775,7 +772,7 @@ impl Hypervisor { unsafe { ptr::write_bytes(region as *mut u8, 0, size); - arch_mm_write_back_dcache(region as usize, size); + arch_mm_flush_dcache(region as usize, size); } hypervisor_ptable.unmap(begin, end, ppool).unwrap(); diff --git a/hfo2/src/init.rs b/hfo2/src/init.rs index e089d647e..d05db1593 100644 --- a/hfo2/src/init.rs +++ b/hfo2/src/init.rs @@ -19,10 +19,12 @@ use core::ptr; use crate::addr::*; use crate::arch::*; +use crate::boot_flow::*; use crate::boot_params::*; use crate::cpu::*; use crate::hypervisor::*; use crate::load::*; +use crate::manifest::*; use crate::memiter::*; use crate::mm::*; use crate::mpool::*; @@ -96,13 +98,25 @@ unsafe extern "C" fn one_time_init(c: *const Cpu) -> *const Cpu { let mm = MemoryManager::new(&ppool).expect("mm_init failed"); + mm.cpu_init(); + // Enable locks now that mm is initialised. dlog_enable_lock(); mpool_enable_locks(); + /// Note(HfO2): This variable was originally local, but now is static to prevent stack overflow. 
+ static mut MANIFEST: MaybeUninit = MaybeUninit::uninit(); + let mut manifest = MANIFEST.get_mut(); + let mut params: BootParams = MaybeUninit::uninit().assume_init(); + // TODO(HfO2): doesn't need to lock, actually - let params = boot_params_get(&mut mm.hypervisor_ptable.lock(), &ppool) - .expect("unable to retrieve boot params"); + boot_flow_init( + &mut mm.hypervisor_ptable.lock(), + &mut manifest, + &mut params, + &ppool, + ) + .expect("Could not parse data from FDT."); let cpum = CpuManager::new( ¶ms.cpu_ids[..params.cpu_count], @@ -118,14 +132,14 @@ unsafe extern "C" fn one_time_init(c: *const Cpu) -> *const Cpu { for i in 0..params.mem_ranges_count { dlog!( - "Memory range: 0x{:x} - 0x{:x}\n", + "Memory range: {:#x} - {:#x}\n", pa_addr(params.mem_ranges[i].begin), pa_addr(params.mem_ranges[i].end) - 1 ); } dlog!( - "Ramdisk range: 0x{:x} - 0x{:x}\n", + "Ramdisk range: {:#x} - {:#x}\n", pa_addr(params.initrd_begin), pa_addr(params.initrd_end) - 1 ); @@ -169,6 +183,7 @@ unsafe extern "C" fn one_time_init(c: *const Cpu) -> *const Cpu { load_secondary( &mut HYPERVISOR.get_mut().vm_manager, &mut hypervisor_ptable, + &mut manifest, &cpio, ¶ms, &mut update, @@ -177,7 +192,7 @@ unsafe extern "C" fn one_time_init(c: *const Cpu) -> *const Cpu { .expect("unable to load secondary VMs"); // Prepare to run by updating bootparams as seen by primary VM. - boot_params_update(&mut hypervisor_ptable, &mut update, &hypervisor().mpool) + boot_params_patch_fdt(&mut hypervisor_ptable, &mut update, &hypervisor().mpool) .expect("plat_update_boot_params failed"); hypervisor_ptable.defrag(&hypervisor().mpool); @@ -203,8 +218,9 @@ pub fn hypervisor() -> &'static Hypervisor { // all state and return the first vCPU to run. 
#[no_mangle] pub unsafe extern "C" fn cpu_main(c: *const Cpu) -> *const VCpu { - let raw_ptable = hypervisor().memory_manager.get_raw_ptable(); - MemoryManager::cpu_init(raw_ptable).expect("mm_cpu_init failed"); + if hypervisor().cpu_manager.index_of(c) != 0 { + hypervisor().memory_manager.cpu_init(); + } let primary = hypervisor().vm_manager.get_primary(); let vcpu = &primary.vcpus[hypervisor().cpu_manager.index_of(c)]; diff --git a/hfo2/src/lib.rs b/hfo2/src/lib.rs index 658c0d800..ca858b3b6 100644 --- a/hfo2/src/lib.rs +++ b/hfo2/src/lib.rs @@ -21,8 +21,11 @@ #![feature(maybe_uninit_ref)] #![feature(ptr_offset_from)] #![feature(const_raw_ptr_to_usize_cast)] -#![feature(bind_by_move_pattern_guards)] #![feature(ptr_wrapping_offset_from)] +#![feature(slice_from_raw_parts)] +#![feature(linkage)] +#![feature(track_caller)] +#![feature(try_blocks)] #[macro_use] extern crate bitflags; @@ -45,6 +48,7 @@ mod abi; mod addr; mod api; mod arch; +mod boot_flow; mod boot_params; mod cpu; mod fdt; @@ -53,6 +57,7 @@ mod hypervisor; mod init; mod layout; mod load; +mod manifest; mod memiter; mod mm; mod mpool; diff --git a/hfo2/src/load.rs b/hfo2/src/load.rs index e8720bb0b..46ed9d89f 100644 --- a/hfo2/src/load.rs +++ b/hfo2/src/load.rs @@ -24,6 +24,7 @@ use crate::boot_params::*; use crate::cpio::*; use crate::cpu::*; use crate::layout::*; +use crate::manifest::*; use crate::memiter::*; use crate::mm::*; use crate::mpool::*; @@ -43,10 +44,11 @@ use arrayvec::ArrayVec; unsafe fn copy_to_unmapped( hypervisor_ptable: &mut PageTable, to: paddr_t, - from: *const c_void, - size: usize, + from_it: &MemIter, ppool: &MPool, ) -> bool { + let from = from_it.get_next(); + let size = from_it.len(); let to_end = pa_add(to, size); if hypervisor_ptable @@ -57,7 +59,7 @@ unsafe fn copy_to_unmapped( } ptr::copy_nonoverlapping(from, pa_addr(to) as *mut _, size); - arch_mm_write_back_dcache(pa_addr(to), size); + arch_mm_flush_dcache(pa_addr(to), size); hypervisor_ptable.unmap(to, to_end, 
ppool).unwrap(); @@ -74,7 +76,7 @@ pub unsafe fn load_primary( ) -> Result { let primary_begin = layout_primary_begin(); - let it = some_or!(find_file(&mut cpio.clone(), "vmlinuz\0".as_ptr()), { + let it = some_or!(find_file(cpio, "vmlinuz\0".as_ptr()), { dlog!("Unable to find vmlinuz\n"); return Err(()); }); @@ -84,18 +86,12 @@ pub unsafe fn load_primary( pa_addr(primary_begin) as *const u8 ); - if !copy_to_unmapped( - hypervisor_ptable, - primary_begin, - it.get_next() as usize as *mut _, - it.len(), - ppool, - ) { + if !copy_to_unmapped(hypervisor_ptable, primary_begin, &it, ppool) { dlog!("Unable to relocate kernel for primary vm.\n"); return Err(()); } - let initrd = some_or!(find_file(&mut cpio.clone(), "initrd.img\0".as_ptr()), { + let initrd = some_or!(find_file(cpio, "initrd.img\0".as_ptr()), { dlog!("Unable to find initrd.img\n"); return Err(()); }); @@ -184,6 +180,17 @@ fn update_reserved_ranges( return Err(()); } + update.reserved_ranges[update.reserved_ranges_count].begin = before.begin; + update.reserved_ranges[update.reserved_ranges_count].end = after.begin; + update.reserved_ranges_count += 1; + } + + if pa_addr(after.end) < pa_addr(before.end) { + if update.reserved_ranges_count >= MAX_MEM_RANGES { + dlog!("Too many reserved ranges after loading secondary VMs.\n"); + return Err(()); + } + update.reserved_ranges[update.reserved_ranges_count].begin = after.end; update.reserved_ranges[update.reserved_ranges_count].end = before.end; update.reserved_ranges_count += 1; @@ -198,6 +205,7 @@ fn update_reserved_ranges( pub unsafe fn load_secondary( vm_manager: &mut VmManager, hypervisor_ptable: &mut PageTable, + manifest: &mut Manifest, cpio: &MemIter, params: &BootParams, update: &mut BootParamsUpdate, @@ -213,54 +221,49 @@ pub unsafe fn load_secondary( mem_ranges_available.clone_from_slice(¶ms.mem_ranges); mem_ranges_available.truncate(params.mem_ranges_count); - let mut it = some_or!(find_file(&mut cpio.clone(), "vms.txt\0".as_ptr()), { - dlog!("vms.txt 
is missing\n"); - return Ok(()); - }); - // Round the last addresses down to the page size. for mem_range in mem_ranges_available.iter_mut() { mem_range.end = pa_init(round_down(pa_addr(mem_range.end), PAGE_SIZE)); } - loop { - // Note(HfO2): There is `while let (Some(x), Some(y)) = (...) {}` but it - // is not short-circuiting. - let mut mem = some_or!(it.parse_uint(), break); - let cpu = some_or!(it.parse_uint(), break); - let name = some_or!(it.parse_str(), break); - let name_str = str::from_utf8_unchecked(name.as_slice()); + for (i, manifest_vm) in manifest.vms.iter_mut().enumerate() { + let vm_id = HF_VM_ID_OFFSET + i as spci_vm_id_t; + if vm_id == HF_PRIMARY_VM_ID { + continue; + } + + dlog!( + "Loading VM{}: {}.\n", + vm_id, + str::from_utf8(as_asciz(&manifest_vm.debug_name)).unwrap(), + ); - dlog!("Loading {}\n", name_str); + let kernel_filename = MemIter::from_raw( + manifest_vm.kernel_filename.as_ptr(), + as_asciz(&manifest_vm.kernel_filename).len(), + ); - let kernel = some_or!(find_file_memiter(&mut cpio.clone(), &name), { - dlog!("Unable to load kernel\n"); + let kernel = some_or!(find_file_memiter(cpio, &kernel_filename), { + dlog!( + "Could not find kernel file \"{}\".", + str::from_utf8(as_asciz(&manifest_vm.kernel_filename)).unwrap(), + ); continue; }); - // Round up to page size. 
- mem = (mem + PAGE_SIZE as u64 - 1) & !(PAGE_SIZE as u64 - 1); - - if mem < kernel.len() as u64 { + let mem_size = round_up(manifest_vm.mem_size as usize, PAGE_SIZE) as u64; + if mem_size < kernel.len() as u64 { dlog!("Kernel is larger than available memory\n"); continue; } - let (secondary_mem_begin, secondary_mem_end) = ok_or!( - carve_out_mem_range(&mut mem_ranges_available, mem as u64), - { - dlog!("Not enough memory ({} bytes)\n", mem); + let (secondary_mem_begin, secondary_mem_end) = + ok_or!(carve_out_mem_range(&mut mem_ranges_available, mem_size), { + dlog!("Not enough memory ({} bytes)\n", mem_size); continue; - } - ); + }); - if !copy_to_unmapped( - hypervisor_ptable, - secondary_mem_begin, - kernel.get_next() as usize as *const _, - kernel.len(), - ppool, - ) { + if !copy_to_unmapped(hypervisor_ptable, secondary_mem_begin, &kernel, ppool) { dlog!("Unable to copy kernel\n"); continue; } @@ -279,7 +282,7 @@ pub unsafe fn load_secondary( return Err(()); } - let vm = some_or!(vm_manager.new_vm(cpu as spci_vcpu_count_t, ppool), { + let vm = some_or!(vm_manager.new_vm(manifest_vm.vcpu_count, ppool), { dlog!("Unable to initialise VM\n"); continue; }); @@ -303,7 +306,7 @@ pub unsafe fn load_secondary( dlog!( "Loaded with {} vcpus, entry at 0x{:x}\n", - cpu, + manifest_vm.vcpu_count, pa_addr(secondary_mem_begin) ); diff --git a/hfo2/src/manifest.rs b/hfo2/src/manifest.rs new file mode 100644 index 000000000..0cbcd0dad --- /dev/null +++ b/hfo2/src/manifest.rs @@ -0,0 +1,614 @@ +/* + * Copyright 2019 Sanguk Park + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +use core::convert::TryInto; +use core::fmt::{self, Write}; + +use crate::fdt::*; +use crate::memiter::*; +use crate::types::*; + +use arrayvec::ArrayVec; + +const VM_NAME_BUF_SIZE: usize = 2 + 5 + 1; // "vm" + number + null terminator +const_assert!(MAX_VMS <= 99999); + +#[derive(PartialEq, Debug)] +pub enum Error { + NoHypervisorFdtNode, + NotCompatible, + ReservedVmId, + NoPrimaryVm, + TooManyVms, + PropertyNotFound, + MalformedString, + StringTooLong, + MalformedStringList, + MalformedInteger, + IntegerOverflow, +} + +impl Into<&'static str> for Error { + fn into(self) -> &'static str { + use Error::*; + match self { + NoHypervisorFdtNode => "Could not find \"hypervisor\" node in manifest", + NotCompatible => "Hypervisor manifest entry not compatible with Hafnium", + ReservedVmId => "Manifest defines a VM with a reserved ID", + NoPrimaryVm => "Manifest does not contain a primary VM entry", + TooManyVms => { + "Manifest specifies more VMs than Hafnium has statically allocated space for" + } + PropertyNotFound => "Property not found", + MalformedString => "Malformed string property", + StringTooLong => "String too long", + MalformedStringList => "Malformed string list property", + MalformedInteger => "Malformed integer property", + IntegerOverflow => "Integer overflow", + } + } +} + +/// Maximum length of a string parsed from the FDT, including NULL terminator. +const MANIFEST_MAX_STRING_LENGTH: usize = 32; + +/// Holds information about one of the VMs described in the manifest. +#[derive(Debug)] +pub struct ManifestVm { + // Properties defined for both primary and secondary VMs. + pub debug_name: [u8; MANIFEST_MAX_STRING_LENGTH], + + // Properties specific to secondary VMs. + pub kernel_filename: [u8; MANIFEST_MAX_STRING_LENGTH], + pub mem_size: u64, + pub vcpu_count: spci_vcpu_count_t, +} + +/// Hafnium manifest parsed from FDT. 
+#[derive(Debug)] +pub struct Manifest { + pub vms: ArrayVec<[ManifestVm; MAX_VMS]>, +} + +/// Generates a string with the two letters "vm" followed by an integer. +fn generate_vm_node_name<'a>( + buf: &'a mut [u8; VM_NAME_BUF_SIZE], + vm_id: spci_vm_id_t, +) -> &'a mut [u8] { + struct BufWrite<'a> { + buf: &'a mut [u8; VM_NAME_BUF_SIZE], + size: usize, + } + + impl<'a> Write for BufWrite<'a> { + fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { + let dest = self + .buf + .get_mut(self.size..(self.size + s.len())) + .ok_or(fmt::Error)?; + dest.copy_from_slice(s.as_bytes()); + self.size += s.len(); + + Ok(()) + } + } + + let mut buf = BufWrite { buf, size: 0 }; + write!(buf, "vm{}\0", vm_id).unwrap(); + &mut buf.buf[..buf.size] +} + +impl<'a> FdtNode<'a> { + /// TODO(HfO2): This function is marked `inline(never)`, to prevent stack overflow. It is still + /// mysterious why inlining this function into ManifestVm::new makes stack overflow. + #[inline(never)] + fn read_string(&self, property: *const u8, out: &mut [u8]) -> Result<(), Error> { + let data = self + .read_property(property) + .map_err(|_| Error::PropertyNotFound)?; + + // Require that the value contains exactly one NULL character and that it is the last byte. + if data.iter().position(|&c| c == b'\0') != Some(data.len() - 1) { + return Err(Error::MalformedString); + } + + // Check that the string fits into the buffer. 
+ if data.len() > out.len() { + return Err(Error::StringTooLong); + } + + out[..data.len()].copy_from_slice(data); + Ok(()) + } + + #[inline(never)] + fn read_u64(&self, property: *const u8) -> Result { + let data = self + .read_property(property) + .map_err(|_| Error::PropertyNotFound)?; + + fdt_parse_number(data).ok_or(Error::MalformedInteger) + } + + #[inline(never)] + fn read_u16(&self, property: *const u8) -> Result { + let value = self.read_u64(property)?; + + value.try_into().map_err(|_| Error::IntegerOverflow) + } +} + +/// Represents the value of property whose type is a list of strings. These are encoded as one +/// contiguous byte buffer with NULL-separated entries. +#[derive(Clone)] +struct StringList { + mem_it: MemIter, +} + +impl StringList { + fn read_from<'a>(node: &FdtNode<'a>, property: *const u8) -> Result { + let data = node + .read_property(property) + .map_err(|_| Error::PropertyNotFound)?; + + // Require that the value ends with a NULL terminator. Other NULL characters separate the + // string list entries. + if *data.last().unwrap() != b'\0' { + return Err(Error::MalformedStringList); + } + + Ok(Self { + mem_it: unsafe { MemIter::from_raw(data.as_ptr(), data.len() - 1) }, + }) + } + + fn has_next(&self) -> bool { + self.mem_it.len() > 0 + } + + fn get_next(&mut self) -> MemIter { + assert!(self.has_next()); + + let null_term = unsafe { self.mem_it.as_slice() } + .iter() + .position(|&c| c == b'\0'); + if let Some(pos) = null_term { + // Found NULL terminator. Set entry memiter to byte range [base, null) and move list + // memiter past the terminator. + let ret = unsafe { MemIter::from_raw(self.mem_it.get_next(), pos) }; + self.mem_it.advance(pos + 1).unwrap(); + ret + } else { + // NULL terminator not found, this is the last entry. + // Set entry memiter to the entire byte range and advance list memiter to the end of + // the byte range. 
+ let ret = self.mem_it.clone(); + self.mem_it.advance(self.mem_it.len()).unwrap(); + ret + } + } + + fn contains(&self, asciz: &[u8]) -> bool { + let mut it = self.clone(); + + while it.has_next() { + let entry = it.get_next(); + if unsafe { entry.iseq(asciz.as_ptr()) } { + return true; + } + } + + false + } +} + +impl ManifestVm { + fn new<'a>(node: &FdtNode<'a>, vm_id: spci_vm_id_t) -> Result { + let mut debug_name: [u8; MANIFEST_MAX_STRING_LENGTH] = Default::default(); + node.read_string("debug_name\0".as_ptr(), &mut debug_name)?; + + let mut kernel_filename: [u8; MANIFEST_MAX_STRING_LENGTH] = Default::default(); + + let (mem_size, vcpu_count) = if vm_id != HF_PRIMARY_VM_ID { + node.read_string("kernel_filename\0".as_ptr(), &mut kernel_filename)?; + ( + node.read_u64("mem_size\0".as_ptr())?, + node.read_u16("vcpu_count\0".as_ptr())?, + ) + } else { + (0, 0) + }; + + Ok(Self { + debug_name, + kernel_filename, + mem_size, + vcpu_count, + }) + } +} + +impl Manifest { + /// Parse manifest from FDT. + pub fn init<'a>(&mut self, fdt: &FdtNode<'a>) -> Result<(), Error> { + let mut vm_name_buf = Default::default(); + let mut found_primary_vm = false; + unsafe { + self.vms.set_len(0); + } + + // Find hypervisor node. + let mut hyp_node = fdt.clone(); + hyp_node + .find_child("hypervisor\0".as_ptr()) + .ok_or(Error::NoHypervisorFdtNode)?; + + // Check "compatible" property. + let compatible_list = StringList::read_from(&hyp_node, "compatible\0".as_ptr())?; + if !compatible_list.contains(b"hafnium,hafnium\0") { + return Err(Error::NotCompatible); + } + + // Iterate over reserved VM IDs and check no such nodes exist. + for vm_id in 0..HF_VM_ID_OFFSET { + let mut vm_node = hyp_node.clone(); + let vm_name = generate_vm_node_name(&mut vm_name_buf, vm_id); + + if vm_node.find_child(vm_name.as_ptr()).is_some() { + return Err(Error::ReservedVmId); + } + } + + // Iterate over VM nodes until we find one that does not exist. 
+ for i in 0..=MAX_VMS as spci_vm_id_t { + let vm_id = HF_VM_ID_OFFSET + i; + let mut vm_node = hyp_node.clone(); + let vm_name = generate_vm_node_name(&mut vm_name_buf, vm_id); + + if vm_node.find_child(vm_name.as_ptr()).is_none() { + break; + } + + if i == MAX_VMS as spci_vm_id_t { + return Err(Error::TooManyVms); + } + + if vm_id == HF_PRIMARY_VM_ID { + assert!(found_primary_vm == false); // sanity check + found_primary_vm = true; + } + + self.vms.push(ManifestVm::new(&vm_node, vm_id)?); + } + + if !found_primary_vm { + Err(Error::NoPrimaryVm) + } else { + Ok(()) + } + } +} + +#[cfg(test)] +mod test { + extern crate std; + use std::fmt::Write as _; + use std::io::Write; + use std::mem::MaybeUninit; + use std::process::*; + use std::string::String; + use std::vec::Vec; + + use super::*; + use crate::utils::*; + + /// Class for programatically building a Device Tree. + /// + /// # Usage + /// ``` + /// let dtb = ManifestDtBuilder::new() + /// .Command1() + /// .Command2() + /// ... + /// .CommandN() + /// .Build(); + /// ``` + struct ManifestDtBuilder { + dts: String, + } + + impl ManifestDtBuilder { + fn new() -> Self { + let mut builder = Self { dts: String::new() }; + builder.dts.push_str("/dts-v1/;\n"); + builder.dts.push_str("\n"); + + // Start root node. 
+ builder.start_child("/"); + builder + } + + fn build(&mut self) -> Vec { + self.end_child(); + + let mut child = Command::new("../build/image/dtc.py") + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .args(&["compile"]) + .spawn() + .unwrap(); + + child + .stdin + .as_mut() + .unwrap() + .write_all(self.dts.as_bytes()) + .unwrap(); + + child.wait_with_output().unwrap().stdout + } + + fn start_child(&mut self, name: &str) -> &mut Self { + self.dts.push_str(name); + self.dts.push_str(" {\n"); + self + } + + fn end_child(&mut self) -> &mut Self { + self.dts.push_str("};\n"); + self + } + + fn compatible(&mut self, value: &[&str]) -> &mut Self { + self.string_list_property("compatible", value) + } + + fn compatible_hafnium(&mut self) -> &mut Self { + self.compatible(&["hafnium,hafnium"]) + } + + fn debug_name(&mut self, value: &str) -> &mut Self { + self.string_property("debug_name", value) + } + + fn kernel_filename(&mut self, value: &str) -> &mut Self { + self.string_property("kernel_filename", value) + } + + fn vcpu_count(&mut self, value: u64) -> &mut Self { + self.integer_property("vcpu_count", value) + } + + fn mem_size(&mut self, value: u64) -> &mut Self { + self.integer_property("mem_size", value) + } + + fn string_property(&mut self, name: &str, value: &str) -> &mut Self { + write!(self.dts, "{} = \"{}\";\n", name, value).unwrap(); + self + } + + fn string_list_property(&mut self, name: &str, value: &[&str]) -> &mut Self { + write!(self.dts, "{} = \"", name).unwrap(); + self.dts.push_str(&value.join("\", \"")); + self.dts.push_str("\";\n"); + self + } + + fn integer_property(&mut self, name: &str, value: u64) -> &mut Self { + write!(self.dts, "{} = <{}>;\n", name, value).unwrap(); + self + } + } + + fn get_fdt_root<'a>(dtb: &'a [u8]) -> Option> { + let fdt_header = unsafe { &*(dtb.as_ptr() as *const FdtHeader) }; + + let mut node = FdtNode::new_root(fdt_header)?; + node.find_child("\0".as_ptr()); + Some(node) + } + + #[test] + fn no_hypervisor_node() { + 
let dtb = ManifestDtBuilder::new().build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::NoHypervisorFdtNode); + } + + #[test] + fn no_compatible_property() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::PropertyNotFound); + } + + #[test] + fn not_compatible() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible(&["foo,bar"]) + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::NotCompatible); + } + + #[test] + fn compatible_one_of_many() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible(&["foo,bar", "hafnium,hafnium"]) + .start_child("vm1") + .debug_name("primary") + .end_child() + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + m.init(&fdt_root).unwrap(); + } + + #[test] + fn no_vm_nodes() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible_hafnium() + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::NoPrimaryVm); + } + + #[test] + fn long_string() { + fn gen_long_string_dtb(valid: bool) -> Vec { + const LAST_VALID: &'static str = "1234567890123456789012345678901"; + const FIRST_INVALID: &'static str = "12345678901234567890123456789012"; + assert_eq!(LAST_VALID.len() + 1, MANIFEST_MAX_STRING_LENGTH); + assert_eq!(FIRST_INVALID.len() + 1, 
MANIFEST_MAX_STRING_LENGTH + 1); + + ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible_hafnium() + .start_child("vm1") + .debug_name(if valid { LAST_VALID } else { FIRST_INVALID }) + .end_child() + .end_child() + .build() + } + + let dtb_last_valid = gen_long_string_dtb(true); + let dtb_first_invalid = gen_long_string_dtb(false); + + let fdt_root = get_fdt_root(&dtb_last_valid).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + m.init(&fdt_root).unwrap(); + + let fdt_root = get_fdt_root(&dtb_first_invalid).unwrap(); + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::StringTooLong); + } + + #[test] + fn reserved_vm_id() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible_hafnium() + .start_child("vm1") + .debug_name("primary_vm") + .end_child() + .start_child("vm0") + .debug_name("reserved_vm") + .vcpu_count(1) + .mem_size(0x1000) + .kernel_filename("kernel") + .end_child() + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::ReservedVmId); + } + + #[test] + fn vcpu_count_limit() { + fn gen_vcpu_count_limit_dtb(vcpu_count: u64) -> Vec { + ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible_hafnium() + .start_child("vm1") + .debug_name("primary_vm") + .end_child() + .start_child("vm2") + .debug_name("secondary_vm") + .vcpu_count(vcpu_count) + .mem_size(0x1000) + .kernel_filename("kernel") + .end_child() + .end_child() + .build() + } + + let dtb_last_valid = gen_vcpu_count_limit_dtb(u16::max_value() as u64); + let dtb_first_invalid = gen_vcpu_count_limit_dtb(u16::max_value() as u64 + 1); + + let fdt_root = get_fdt_root(&dtb_last_valid).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + m.init(&fdt_root).unwrap(); + assert_eq!(m.vms.len(), 2); + assert_eq!(m.vms[1].vcpu_count, u16::max_value()); + 
+ let fdt_root = get_fdt_root(&dtb_first_invalid).unwrap(); + assert_eq!(m.init(&fdt_root).unwrap_err(), Error::IntegerOverflow); + } + + #[test] + fn valid() { + let dtb = ManifestDtBuilder::new() + .start_child("hypervisor") + .compatible_hafnium() + .start_child("vm1") + .debug_name("primary_vm") + .end_child() + .start_child("vm3") + .debug_name("second_secondary_vm") + .vcpu_count(43) + .mem_size(0x12345) + .kernel_filename("second_kernel") + .end_child() + .start_child("vm2") + .debug_name("first_secondary_vm") + .vcpu_count(42) + .mem_size(12345) + .kernel_filename("first_kernel") + .end_child() + .end_child() + .build(); + + let fdt_root = get_fdt_root(&dtb).unwrap(); + let mut m: Manifest = unsafe { MaybeUninit::uninit().assume_init() }; + m.init(&fdt_root).unwrap(); + assert_eq!(m.vms.len(), 3); + + let vm = &m.vms[0]; + assert_eq!(as_asciz(&vm.debug_name), b"primary_vm"); + + let vm = &m.vms[1]; + assert_eq!(as_asciz(&vm.debug_name), b"first_secondary_vm"); + assert_eq!(vm.vcpu_count, 42); + assert_eq!(vm.mem_size, 12345); + assert_eq!(as_asciz(&vm.kernel_filename), b"first_kernel"); + + let vm = &m.vms[2]; + assert_eq!(as_asciz(&vm.debug_name), b"second_secondary_vm"); + assert_eq!(vm.vcpu_count, 43); + assert_eq!(vm.mem_size, 0x12345); + assert_eq!(as_asciz(&vm.kernel_filename), b"second_kernel"); + } +} diff --git a/hfo2/src/memiter.rs b/hfo2/src/memiter.rs index 9f0e1adb5..0b3c779d1 100644 --- a/hfo2/src/memiter.rs +++ b/hfo2/src/memiter.rs @@ -16,12 +16,13 @@ use core::ptr; use core::slice; +use core::str; use crate::std::*; use crate::types::*; #[repr(C)] -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct MemIter { next: *const u8, limit: *const u8, @@ -159,6 +160,10 @@ impl MemIter { slice::from_raw_parts(self.next, self.limit.offset_from(self.next) as usize) } + pub unsafe fn as_str(&self) -> &str { + str::from_utf8_unchecked(self.as_slice()) + } + pub fn len(&self) -> usize { unsafe { self.limit.offset_from(self.next) as usize } } diff --git 
a/hfo2/src/mm.rs b/hfo2/src/mm.rs index 1238978e4..cdf2d2719 100644 --- a/hfo2/src/mm.rs +++ b/hfo2/src/mm.rs @@ -68,7 +68,7 @@ extern "C" { fn arch_mm_stage2_attrs_to_mode(attrs: u64) -> c_int; - pub fn arch_mm_write_back_dcache(base: usize, size: size_t); + pub fn arch_mm_flush_dcache(base: usize, size: size_t); fn arch_mm_stage1_max_level() -> u8; fn arch_mm_stage2_max_level() -> u8; @@ -76,7 +76,9 @@ extern "C" { fn arch_mm_stage1_root_table_count() -> u8; fn arch_mm_stage2_root_table_count() -> u8; - fn arch_mm_init(table: paddr_t, first: bool) -> bool; + fn arch_mm_init() -> bool; + + fn arch_mm_enable(table: paddr_t); fn arch_mm_combine_table_entry_attrs(table_attrs: u64, block_attrs: u64) -> u64; @@ -149,10 +151,14 @@ bitflags! { /// Flags for memory management operations. struct Flags: u32 { /// Commit - const COMMIT = 0b01; + const COMMIT = 0b001; /// Unmap - const UNMAP = 0b10; + const UNMAP = 0b010; + + /// Stage 1 + /// Note(HfO2): This flag is not used in HfO2; only exists for FFI. + const STAGE1 = 0b100; } } @@ -161,7 +167,7 @@ bitflags! { type ptable_addr_t = uintvaddr_t; // For stage 2, the input is an intermediate physical addresses rather than a virtual address so: -const_assert_eq!(addr_size_eq; mem::size_of::(), mem::size_of::()); +assert_eq_size!(ptable_addr_t, uintpaddr_t); /// Utility functions for address manipulation. mod addr { @@ -222,6 +228,13 @@ pub trait Stage { /// Converts the attributes back to the corresponding mode. fn attrs_to_mode(attrs: u64) -> Mode; + + /// Returns the first address which cannot be encoded in page tables given the stage. It is the + /// exclusive end of the address space created by the tables. + fn ptable_addr_space_end() -> ptable_addr_t { + let root_level = Self::max_level() + 1; + Self::root_table_count() as usize * addr::entry_size(root_level) + } } /// The page table stage for the hypervisor. 
@@ -309,14 +322,13 @@ impl PageTableEntry { unsafe { Self::from_raw(arch_mm_block_pte(level, begin, attrs)) } } - /// # Safety - /// - /// `page` should be a proper page table. - unsafe fn table(level: u8, page: Page) -> Self { - Self::from_raw(arch_mm_table_pte( - level, - pa_init(page.into_raw() as uintpaddr_t), - )) + fn table(level: u8, node: PageTableNode) -> Self { + unsafe { + Self::from_raw(arch_mm_table_pte( + level, + pa_init(node.into_page() as uintpaddr_t), + )) + } } fn is_present(&self, level: u8) -> bool { @@ -360,30 +372,33 @@ impl PageTableEntry { } fn as_table_mut(&mut self, level: u8) -> Result<&mut RawPageTable, ()> { - unsafe { - if arch_mm_pte_is_table(self.inner, level) { - Ok(&mut *(pa_addr(arch_mm_table_from_pte(self.inner, level)) as *mut _)) - } else { - Err(()) - } + if self.is_table(level) { + unsafe { Ok(&mut *(pa_addr(arch_mm_table_from_pte(self.inner, level)) as *mut _)) } + } else { + Err(()) } } - /// Frees all page-table-related memory associated with the given pte at the given level, - /// including any subtables recursively. - /// - /// # Safety - /// - /// After a page table entry is freed, it's value is undefined. - unsafe fn free(&mut self, level: u8, mpool: &MPool) { - if let Ok(table) = self.as_table_mut(level) { - // Recursively free any subtables. - for pte in table.iter_mut() { - pte.free(level - 1, mpool); + fn into_table(self, level: u8) -> Result { + let ret = if self.is_table(level) { + unsafe { + Ok(PageTableNode::from_raw( + pa_addr(arch_mm_table_from_pte(self.inner, level)) as *mut _, + )) } + } else { + Err(()) + }; - // Free the table itself. - mpool.free(Page::from_raw(table as *mut _ as *mut _)); + mem::forget(self); + ret + } + + /// Frees all page-table-related memory associated with the given pte at the given level, + /// including any subtables recursively. 
+ fn drop(self, level: u8, mpool: &MPool) { + if let Ok(table) = self.into_table(level) { + table.drop(level - 1, mpool); } } @@ -399,8 +414,6 @@ impl PageTableEntry { level: u8, mpool: &MPool, ) { - let inner = self.inner; - // We need to do the break-before-make sequence if both values are present and the TLB is // being invalidated. if self.is_valid(level) && new_pte.is_valid(level) { @@ -409,16 +422,8 @@ impl PageTableEntry { } // Assign the new pte. - unsafe { - ptr::write(self, new_pte); - } - - // Free pages that aren't in use anymore. - unsafe { - let mut old_pte = Self::from_raw(inner); - old_pte.free(level, mpool); - mem::forget(old_pte); - } + let old_pte = mem::replace(self, new_pte); + old_pte.drop(level, mpool); } /// Populates the provided page table entry with a reference to another table if needed, that @@ -437,35 +442,26 @@ impl PageTableEntry { } // Allocate a new table. - let mut page = mpool + let page = mpool .alloc() .map_err(|_| dlog!("Failed to allocate memory for page table\n"))?; - let table = unsafe { RawPageTable::deref_mut_page(&mut page) }; - // Initialise entries in the new table. let level_below = level - 1; - if self.is_block(level) { + let table = if self.is_block(level) { let attrs = self.attrs(level); let entry_size = addr::entry_size(level_below); - for (i, pte) in table.iter_mut().enumerate() { - unsafe { - ptr::write( - pte, - Self::block( - level_below, - pa_init(self.inner as usize + i * entry_size), - attrs, - ), - ); - } - } + PageTableNode::new(page, |i| { + Self::block( + level_below, + pa_init(self.inner as usize + i * entry_size), + attrs, + ) + }) } else { - for pte in table.iter_mut() { - unsafe { ptr::write(pte, Self::absent(level_below)) }; - } - } + PageTableNode::new(page, |_| Self::absent(level_below)) + }; // Ensure initialisation is visible before updating the pte. // @@ -473,7 +469,7 @@ impl PageTableEntry { fence(Ordering::Release); // Replace the pte entry, doing a break-before-make if needed. 
- let table = unsafe { Self::table(level, page) }; + let table = Self::table(level, table); self.replace::(table, begin, level, mpool); Ok(()) @@ -573,8 +569,8 @@ struct RawPageTable { entries: [PageTableEntry; PTE_PER_PAGE], } -const_assert!(raw_page_table_align; mem::align_of::() == PAGE_SIZE); -const_assert!(raw_page_table_size; mem::size_of::() == PAGE_SIZE); +const_assert_eq!(mem::align_of::(), PAGE_SIZE); +const_assert_eq!(mem::size_of::(), PAGE_SIZE); impl Deref for RawPageTable { type Target = [PageTableEntry; PTE_PER_PAGE]; @@ -591,22 +587,6 @@ impl DerefMut for RawPageTable { } impl RawPageTable { - unsafe fn deref_page(page: &Page) -> &Self { - Self::deref_raw_page(page) - } - - unsafe fn deref_mut_page(page: &mut Page) -> &mut Self { - Self::deref_mut_raw_page(page) - } - - unsafe fn deref_raw_page(page: &RawPage) -> &Self { - &*(page as *const _ as *const _) - } - - unsafe fn deref_mut_raw_page(page: &mut RawPage) -> &mut Self { - &mut *(page as *mut _ as *mut _) - } - /// Returns whether all entries in this table are absent. fn is_empty(&self, level: u8) -> bool { self.iter().all(|pte| !pte.is_present(level)) @@ -743,9 +723,89 @@ impl RawPageTable { } } } + + /// Recursively free any subtables. + /// + /// # Safety + /// + /// After calling this function, the inner value of the RawPageTable is invalid. + /// + /// FIXME(HfO2): We currently cannot drop the table by move, because move forwarding is not + /// working as well as expected. If possible, fix this consume value, and remove unsafe on the + /// function declaration. Note that, we should use by-value iterator if this takes a value, + /// which is also a problem. Use by-value iterator for arrays, ever since array::IntoIter + /// doesn't require [T; N]: LengthAtMost32. If so, this code will have no unsafe. 
(See + /// https://github.com/rust-lang/rust/issues/25725) + unsafe fn drop(&mut self, level: u8, mpool: &MPool) { + for pte in self.entries.iter() { + ptr::read(pte).drop(level, mpool); + } + + mem::forget(self); + } +} + +struct PageTableNode { + ptr: *mut RawPageTable, +} + +impl PageTableNode { + fn new(page: Page, pte_init: F) -> Self + where + F: Fn(usize) -> PageTableEntry, + { + let ptes = unsafe { &mut *(page.into_raw() as *mut [PageTableEntry; PTE_PER_PAGE]) }; + for (i, pte) in ptes.iter_mut().enumerate() { + mem::forget(mem::replace(pte, pte_init(i))); + } + + Self { + ptr: ptes as *mut _ as *mut RawPageTable, + } + } + + unsafe fn from_raw(ptr: *mut RawPageTable) -> Self { + Self { ptr } + } + + fn into_page(self) -> *mut RawPageTable { + let ret = self.ptr; + mem::forget(self); + ret + } + + fn drop(mut self, level: u8, mpool: &MPool) { + unsafe { + self.deref_mut().drop(level, mpool); + } + + // Free the table itself. + mpool.free(unsafe { Page::from_raw(self.ptr as *mut _) }); + mem::forget(self); + } +} + +impl Deref for PageTableNode { + type Target = RawPageTable; + fn deref(&self) -> &Self::Target { + unsafe { &*self.ptr } + } +} + +impl DerefMut for PageTableNode { + fn deref_mut(&mut self) -> &mut Self::Target { + unsafe { &mut *self.ptr } + } +} + +impl Drop for PageTableNode { + fn drop(&mut self) { + panic!("`PageTableNode` should not be dropped."); + } } /// Page table. 
+#[repr(C)] pub struct PageTable { root: paddr_t, _marker: PhantomData, @@ -768,12 +828,11 @@ impl PageTable { let root_table_count = S::root_table_count(); let mut pages = mpool.alloc_pages(root_table_count as usize, root_table_count as usize)?; - for page in pages.iter_mut() { - let table = unsafe { RawPageTable::deref_mut_raw_page(page) }; + for raw_page in pages.iter_mut() { + let page = unsafe { Page::from_raw(raw_page) }; + let table = PageTableNode::new(page, |_| PageTableEntry::absent(S::max_level())); - for pte in table.iter_mut() { - unsafe { ptr::write(pte, PageTableEntry::absent(S::max_level())) }; - } + mem::forget(table); } // TODO: halloc could return a virtual or physical address if mm not enabled? @@ -788,10 +847,8 @@ impl PageTable { let level = S::max_level(); for page_table in self.deref_mut().iter_mut() { - for pte in page_table.iter_mut() { - unsafe { - pte.free(level, mpool); - } + unsafe { + page_table.drop(level, mpool); } } @@ -863,7 +920,7 @@ impl PageTable { mpool: &MPool, ) -> Result<(), ()> { let root_level = S::max_level() + 1; - let ptable_end = S::root_table_count() as usize * addr::entry_size(root_level); + let ptable_end = S::ptable_addr_space_end(); let end = cmp::min(addr::round_up_to_page(pa_addr(end)), ptable_end); let begin = pa_addr(unsafe { arch_mm_clear_pa(begin) }); @@ -1058,7 +1115,7 @@ impl MemoryManager { // other similar structures (VmLocked, VCpuExecutionLocked etc.) In that // case we can delay creating a SpinLock here, and more directly show // the meaning of unlocked but safe exclusive access of the page table. 
- let page_table = SpinLock::new(page_table); + let mut page_table = SpinLock::new(page_table); let stage1_locked = mm_stage1_locked { plock: &page_table as *const _ as usize, }; @@ -1068,15 +1125,15 @@ impl MemoryManager { plat_console_mm_init(stage1_locked, mpool); page_table - .get_mut_unchecked() + .get_mut() .identity_map(layout_text_begin(), layout_text_end(), Mode::X, mpool) .unwrap(); page_table - .get_mut_unchecked() + .get_mut() .identity_map(layout_rodata_begin(), layout_rodata_end(), Mode::R, mpool) .unwrap(); page_table - .get_mut_unchecked() + .get_mut() .identity_map( layout_data_begin(), layout_data_end(), @@ -1085,7 +1142,7 @@ impl MemoryManager { ) .unwrap(); - if !arch_mm_init(page_table.get_unchecked().root, true) { + if !arch_mm_init() { return None; } } @@ -1100,16 +1157,12 @@ impl MemoryManager { /// `self.inner` because the value of `ptable.as_raw()` doesn't change after `ptable` is /// initialized. Of course, actual page table may vary during running. That's why this function /// returns `paddr_t` rather than `&[RawPageTable]`. 
- pub fn get_raw_ptable(&self) -> paddr_t { + fn get_raw_ptable(&self) -> paddr_t { unsafe { self.hypervisor_ptable.get_unchecked().as_raw() } } - pub fn cpu_init(raw_ptable: paddr_t) -> Result<(), ()> { - if unsafe { arch_mm_init(raw_ptable, false) } { - Ok(()) - } else { - Err(()) - } + pub unsafe fn cpu_init(&self) { + arch_mm_enable(self.get_raw_ptable()) } pub fn vm_unmap_hypervisor(ptable: &mut PageTable, mpool: &MPool) -> Result<(), ()> { @@ -1224,6 +1277,28 @@ pub unsafe extern "C" fn mm_vm_get_mode( t.get_mode(begin, end).map(|m| *mode = m).is_ok() } +#[no_mangle] +pub extern "C" fn mm_ptable_addr_space_end(flags: u32) -> ptable_addr_t { + if Flags::from_bits_truncate(flags).contains(Flags::STAGE1) { + Stage1::ptable_addr_space_end() + } else { + Stage2::ptable_addr_space_end() + } +} + +#[no_mangle] +pub unsafe extern "C" fn mm_ptable_init( + t: *mut PageTable, + flags: u32, + ppool: *mut MPool, +) -> bool { + let ppool = &*ppool; + assert!(Flags::from_bits_truncate(flags).contains(Flags::STAGE1)); + + ptr::write(t, ok_or!(PageTable::::new(ppool), return false)); + true +} + #[no_mangle] pub unsafe extern "C" fn mm_identity_map( mut stage1_locked: mm_stage1_locked, @@ -1239,6 +1314,21 @@ pub unsafe extern "C" fn mm_identity_map( .unwrap_or(ptr::null_mut()) } +#[no_mangle] +pub unsafe extern "C" fn mm_identity_map_nolock( + stage1: *mut PageTable, + begin: paddr_t, + end: paddr_t, + mode: Mode, + mpool: *const MPool, +) -> *mut usize { + let mpool = &*mpool; + (*stage1) + .identity_map(begin, end, mode, mpool) + .map(|_| pa_addr(begin) as *mut _) + .unwrap_or(ptr::null_mut()) +} + #[no_mangle] pub unsafe extern "C" fn mm_defrag(mut stage1_locked: mm_stage1_locked, mpool: *const MPool) { let mpool = &*mpool; diff --git a/hfo2/src/mpool.rs b/hfo2/src/mpool.rs index 8845cd707..94d836dd9 100644 --- a/hfo2/src/mpool.rs +++ b/hfo2/src/mpool.rs @@ -29,15 +29,15 @@ struct Chunk { entry: ListEntry, size: usize, } +const_assert!(mem::size_of::() <= 
mem::size_of::()); #[repr(C)] struct Entry { entry: ListEntry, } +const_assert!(mem::size_of::() <= mem::size_of::()); impl Chunk { - const_assert!(chunk_size; mem::size_of::() <= mem::size_of::()); - pub const fn new(size: usize) -> Self { Self { entry: ListEntry::new(), @@ -47,8 +47,6 @@ impl Chunk { } impl Entry { - const_assert!(entry_size; mem::size_of::() <= mem::size_of::()); - pub const fn new() -> Self { Self { entry: ListEntry::new(), @@ -94,23 +92,19 @@ impl Pool { /// Allocates a page. pub fn alloc(&mut self) -> Result { - if let Some(entry) = unsafe { self.entry_list.pop() } { + if let Some(entry) = self.entry_list.pop() { #[allow(clippy::cast_ptr_alignment)] return Ok(unsafe { Page::from_raw(entry as *mut RawPage) }); } - let chunk = unsafe { self.chunk_list.pop().ok_or(())? }; + let chunk = self.chunk_list.pop().ok_or(())?; let size = unsafe { (*chunk).size }; - assert_ne!(size, 0); + debug_assert_ne!(size, 0); + #[allow(clippy::cast_ptr_alignment)] let page = unsafe { Page::from_raw(chunk as *mut RawPage) }; - if size == 2 { - let entry = unsafe { &mut *((chunk as usize + PAGE_SIZE) as *mut Entry) }; - unsafe { - self.entry_list.push(entry); - } - } else if size > 2 { + if size > 1 { let new_chunk = unsafe { &mut *((chunk as usize + PAGE_SIZE) as *mut Chunk) }; new_chunk.size = size - 1; unsafe { self.chunk_list.push(new_chunk) }; diff --git a/hfo2/src/page.rs b/hfo2/src/page.rs index f4e0a30a6..b39d626e0 100644 --- a/hfo2/src/page.rs +++ b/hfo2/src/page.rs @@ -16,7 +16,7 @@ use core::mem; use core::ops::*; -use core::slice; +use core::ptr; use crate::utils::*; @@ -29,8 +29,8 @@ pub struct RawPage { inner: [u8; PAGE_SIZE], } -const_assert!(raw_page_align; mem::align_of::() == PAGE_SIZE); -const_assert!(raw_page_size; mem::size_of::() == PAGE_SIZE); +const_assert_eq!(mem::align_of::(), PAGE_SIZE); +const_assert_eq!(mem::size_of::(), PAGE_SIZE); impl RawPage { pub const fn new() -> Self { @@ -97,13 +97,14 @@ impl DerefMut for Page { } pub struct Pages 
{ - ptr: *mut RawPage, - size: usize, + ptr: *mut [RawPage], } impl Pages { - pub unsafe fn from_raw(ptr: *mut RawPage, size: usize) -> Self { - Self { ptr, size } + pub unsafe fn from_raw(raw: *mut RawPage, size: usize) -> Self { + Self { + ptr: ptr::slice_from_raw_parts_mut(raw, size), + } } pub unsafe fn from_raw_u8(ptr: *mut u8, size: usize) -> Result { @@ -116,16 +117,16 @@ impl Pages { return Err(()); } - Ok(Self { - ptr: new_begin as *mut RawPage, - size: (new_end - new_begin) / PAGE_SIZE, - }) + Ok(Self::from_raw( + new_begin as *mut RawPage, + (new_end - new_begin) / PAGE_SIZE, + )) } pub fn into_raw(self) -> *mut RawPage { let ptr = self.ptr; mem::forget(self); - ptr + ptr as *mut _ } pub fn clear(&mut self) { @@ -145,12 +146,12 @@ impl Deref for Pages { type Target = [RawPage]; fn deref(&self) -> &Self::Target { - unsafe { slice::from_raw_parts(self.ptr as *const RawPage, self.size) } + unsafe { &*self.ptr } } } impl DerefMut for Pages { fn deref_mut(&mut self) -> &mut Self::Target { - unsafe { slice::from_raw_parts_mut(self.ptr as *mut RawPage, self.size) } + unsafe { &mut *self.ptr } } } diff --git a/hfo2/src/panic.rs b/hfo2/src/panic.rs index 7d1f0f550..899b28755 100644 --- a/hfo2/src/panic.rs +++ b/hfo2/src/panic.rs @@ -25,7 +25,10 @@ fn abort_impl() -> ! { /// system in an inconsistent state. /// /// TODO: Should this also reset the system? +/// TODO(HfO2): This function needs to be weakly linked because some tests have custom `abort` +/// function but still need HfO2. Dividing HfO2 into many libraries may resolve this. #[cfg(not(feature = "test"))] +#[linkage = "weak"] #[no_mangle] pub extern "C" fn abort() -> ! { abort_impl() diff --git a/hfo2/src/slist.rs b/hfo2/src/slist.rs index 0b9ae9cbd..d13f54095 100644 --- a/hfo2/src/slist.rs +++ b/hfo2/src/slist.rs @@ -19,7 +19,6 @@ use core::marker::PhantomData; use core::ptr; /// An entry in an intrusive linked list. 
-#[derive(Debug)] #[repr(C)] pub struct ListEntry { /// The next entry in the linked list. @@ -85,12 +84,11 @@ pub trait IsElement { unsafe fn element_of(entry: &ListEntry) -> &T; } -/// A lock-free, intrusive linked list of type `T`. -#[derive(Debug)] +/// A intrusive linked list of type `T`. #[repr(C)] pub struct List = T> { /// The head of the linked list. - pub(crate) head: ListEntry, + head: ListEntry, /// The phantom data for using `T` and `C`. _marker: PhantomData<(T, C)>, @@ -110,7 +108,7 @@ impl ListEntry { /// You should guarantee that: /// /// - `container` is not null - /// - `container` is immovable, e.g. inside an `Owned` + /// - `container` is immovable, e.g. inside a `Page` /// - the same `ListEntry` is not inserted more than once /// - the inserted object will be removed before the list is dropped pub unsafe fn push>(&self, element: &T) { @@ -143,22 +141,22 @@ impl> List { /// You should guarantee that: /// /// - `container` is not null - /// - `container` is immovable, e.g. inside an `Owned` + /// - `container` is immovable, e.g. 
inside a `Page` /// - the same `ListEntry` is not inserted more than once /// - the inserted object will be removed before the list is dropped pub unsafe fn push(&mut self, element: &T) { self.head.push::(element); } - pub unsafe fn pop(&mut self) -> Option<*mut T> { + pub fn pop(&mut self) -> Option<*mut T> { let head = self.head.next.get(); if head.is_null() { return None; } - let next = (*head).next.get(); + let next = unsafe { (*head).next.get() }; self.head.next.set(next); - Some(C::element_of(&*head) as *const _ as *mut _) + Some(unsafe { C::element_of(&*head) } as *const _ as *mut _) } pub unsafe fn pop_if_some(&mut self, cond: F) -> Option<(*mut T, R)> diff --git a/hfo2/src/spci.rs b/hfo2/src/spci.rs index 4563f2b35..9c1c7da24 100644 --- a/hfo2/src/spci.rs +++ b/hfo2/src/spci.rs @@ -43,6 +43,8 @@ pub enum SpciReturn { #[repr(u16)] #[derive(Clone, Copy, PartialEq)] pub enum SpciMemoryShare { + Lend = 0x0, + Relinquish = 0x1, Donate = 0x2, } @@ -168,3 +170,46 @@ pub struct SpciMemTransitions { pub from_mode: Mode, pub to_mode: Mode, } + +#[allow(non_camel_case_types)] +pub enum SpciLendAccess { + RO_NX, + RO_X, + RW_NX, + RW_X, +} + +#[repr(C)] +pub struct SpciMemoryLend { + pub flags: u16, + pub borrower_attributes: u16, + + _reserved: u32, + pub payload: [u8; 0], +} + +const SPCI_LEND_ACCESS_OFFSET: u16 = 0x7; +const SPCI_LEND_ACCESS_MASK: u16 = 0x3 << SPCI_LEND_ACCESS_OFFSET; + +fn spci_get_lend_access_attr(lend_attr: u16) -> SpciLendAccess { + let access_attr = (lend_attr & SPCI_LEND_ACCESS_MASK) >> SPCI_LEND_ACCESS_OFFSET; + + match access_attr { + 0 => SpciLendAccess::RO_NX, + 1 => SpciLendAccess::RO_X, + 2 => SpciLendAccess::RW_NX, + 3 => SpciLendAccess::RW_X, + _ => unreachable!(), + } +} + +pub fn spci_memory_attrs_to_mode(memory_attributes: u32) -> Mode { + let attr_value = spci_get_lend_access_attr(memory_attributes as _); + + match attr_value { + SpciLendAccess::RO_NX => Mode::R, + SpciLendAccess::RO_X => Mode::R | Mode::X, + SpciLendAccess::RW_NX 
=> Mode::R | Mode::W, + SpciLendAccess::RW_X => Mode::R | Mode::W | Mode::X, + } +} diff --git a/hfo2/src/spci_architected_message.rs b/hfo2/src/spci_architected_message.rs index da2408ca1..afbf3fb72 100644 --- a/hfo2/src/spci_architected_message.rs +++ b/hfo2/src/spci_architected_message.rs @@ -25,35 +25,20 @@ use crate::spci::*; use crate::std::*; use crate::vm::*; -/// Performs initial architected message information parsing. Calls the corresponding api functions -/// implementing the functionality requested in the architected message. -pub fn spci_msg_handle_architected_message( +/// Check if the message length and the number of memory region constituents match, if the check is +/// correct call the memory sharing routine. +fn spci_validate_call_share_memory( to_inner: &mut VmInner, from_inner: &mut VmInner, - architected_message_replica: &SpciArchitectedMessageHeader, - from_msg_replica: &SpciMessage, - to_msg: &mut SpciMessage, + memory_region: &SpciMemoryRegion, + memory_share_size: usize, + memory_to_attributes: Mode, + share: SpciMemoryShare, fallback: &MPool, ) -> SpciReturn { - let from_msg_payload_length = from_msg_replica.length as usize; - - let message_type = architected_message_replica.r#type; - if message_type != SpciMemoryShare::Donate { - dlog!("Invalid memory sharing message."); - return SpciReturn::InvalidParameters; - } - - #[allow(clippy::cast_ptr_alignment)] - let memory_region = - unsafe { &*(architected_message_replica.payload.as_ptr() as *const SpciMemoryRegion) }; - let memory_share_size = - from_msg_payload_length - mem::size_of::(); - // TODO: Add memory attributes. - let to_mode = Mode::R | Mode::W | Mode::X; + let max_count = memory_region.count as usize; - // Check if the message length and the number of memory region constituents match. // Ensure the number of constituents are within the memory bounds. 
- let max_count = memory_region.count as usize; if memory_share_size != mem::size_of::() + mem::size_of::() * max_count @@ -61,15 +46,100 @@ pub fn spci_msg_handle_architected_message( return SpciReturn::InvalidParameters; } - // Call the memory sharing routine. - let ret = spci_share_memory( + spci_share_memory( to_inner, from_inner, memory_region, - to_mode, - message_type, + memory_to_attributes, + share, fallback, - ); + ) +} + +/// Performs initial architected message information parsing. Calls the corresponding api functions +/// implementing the functionality requested in the architected message. +pub fn spci_msg_handle_architected_message( + to_inner: &mut VmInner, + from_inner: &mut VmInner, + architected_message_replica: &SpciArchitectedMessageHeader, + from_msg_replica: &SpciMessage, + to_msg: &mut SpciMessage, + fallback: &MPool, +) -> SpciReturn { + let from_msg_payload_length = from_msg_replica.length as usize; + + let message_type = architected_message_replica.r#type; + let ret = match message_type { + SpciMemoryShare::Donate => { + #[allow(clippy::cast_ptr_alignment)] + let memory_region = unsafe { + &*(architected_message_replica.payload.as_ptr() as *const SpciMemoryRegion) + }; + let memory_share_size = + from_msg_payload_length - mem::size_of::(); + + // TODO: Add memory attributes. 
+ let to_mode = Mode::R | Mode::W | Mode::X; + + spci_validate_call_share_memory( + to_inner, + from_inner, + memory_region, + memory_share_size, + to_mode, + message_type, + fallback, + ) + } + SpciMemoryShare::Relinquish => { + #[allow(clippy::cast_ptr_alignment)] + let memory_region = unsafe { + &*(architected_message_replica.payload.as_ptr() as *const SpciMemoryRegion) + }; + let memory_share_size = + from_msg_payload_length - mem::size_of::(); + + let to_mode = Mode::R | Mode::W | Mode::X; + + spci_validate_call_share_memory( + to_inner, + from_inner, + memory_region, + memory_share_size, + to_mode, + message_type, + fallback, + ) + } + SpciMemoryShare::Lend => { + // TODO: Add support for lend exclusive. + #[allow(clippy::cast_ptr_alignment)] + let lend_descriptor = unsafe { + &*(architected_message_replica.payload.as_ptr() as *const SpciMemoryLend) + }; + + let borrower_attributes = lend_descriptor.borrower_attributes; + + let memory_region = + unsafe { &*(lend_descriptor.payload.as_ptr() as *const SpciMemoryRegion) }; + + let memory_share_size = from_msg_payload_length + - mem::size_of::() + - mem::size_of::(); + + let to_mode = spci_memory_attrs_to_mode(borrower_attributes as _); + + spci_validate_call_share_memory( + to_inner, + from_inner, + memory_region, + memory_share_size, + to_mode, + message_type, + fallback, + ) + } + }; // Copy data to the destination Rx. // @@ -96,16 +166,12 @@ pub fn spci_msg_handle_architected_message( } /// Obtain the next mode to apply to the two VMs. -/// -/// # Returns -/// -/// The error code -1 indicates that a state transition was not found. Success is indicated by 0. 
fn spci_msg_get_next_state( transitions: &[SpciMemTransitions], memory_to_attributes: Mode, orig_from_mode: Mode, orig_to_mode: Mode, -) -> Result<(Mode, Mode, Mode), ()> { +) -> Result<(Mode, Mode), ()> { let state_mask = Mode::INVALID | Mode::UNOWNED | Mode::SHARED; let orig_from_state = orig_from_mode & state_mask; let orig_to_state = orig_to_mode & state_mask; @@ -116,9 +182,7 @@ fn spci_msg_get_next_state( if orig_from_state == table_orig_from_mode && orig_to_state == table_orig_to_mode { return Ok(( - orig_from_mode, - // TODO: Change access permission assignment to cater for the lend case. - transition.from_mode, + transition.from_mode | (!state_mask & orig_from_mode), transition.to_mode | memory_to_attributes, )); } @@ -182,6 +246,45 @@ pub fn spci_msg_check_transition( }, ]; + let relinquish_transitions: [SpciMemTransitions; 2] = [ + // 1) {!O-EA, O-NA} -> {!O-NA, O-EA} + SpciMemTransitions { + orig_from_mode: Mode::UNOWNED, + orig_to_mode: Mode::INVALID, + from_mode: Mode::INVALID | Mode::UNOWNED | Mode::SHARED, + to_mode: Mode::empty(), + }, + // 2) {!O-SA, O-SA} -> {!O-NA, O-EA} + SpciMemTransitions { + orig_from_mode: Mode::UNOWNED | Mode::SHARED, + orig_to_mode: Mode::SHARED, + from_mode: Mode::INVALID | Mode::UNOWNED | Mode::SHARED, + to_mode: Mode::empty(), + }, + ]; + + // This data structure holds the allowed state transitions for the "lend with shared access" + // state machine. In this state machine the owner keeps the lent pages mapped on its stage2 + // table and keeps access as well. 
+ let shared_lend_transitions: [SpciMemTransitions; 2] = [ + // 1) {O-EA, !O-NA} -> {O-SA, !O-SA} + SpciMemTransitions { + orig_from_mode: Mode::empty(), + orig_to_mode: Mode::INVALID | Mode::UNOWNED | Mode::SHARED, + from_mode: Mode::SHARED, + to_mode: Mode::UNOWNED | Mode::SHARED, + }, + // Duplicate of 1) in order to cater for an alternative representation of !O-NA: + // (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED) are both alternative representations + // of !O-NA. + SpciMemTransitions { + orig_from_mode: Mode::empty(), + orig_to_mode: Mode::INVALID | Mode::UNOWNED, + from_mode: Mode::SHARED, + to_mode: Mode::UNOWNED | Mode::SHARED, + }, + ]; + // Fail if addresses are not page-aligned. if !is_aligned(ipa_addr(begin), PAGE_SIZE) || !is_aligned(ipa_addr(end), PAGE_SIZE) { return Err(()); @@ -191,16 +294,20 @@ pub fn spci_msg_check_transition( let orig_from_mode = from_inner.ptable.get_mode(begin, end)?; let orig_to_mode = to_inner.ptable.get_mode(begin, end)?; - if share != SpciMemoryShare::Donate { - return Err(()); - } + let mem_transition_table: &[SpciMemTransitions] = match share { + SpciMemoryShare::Donate => &donate_transitions, + SpciMemoryShare::Relinquish => &relinquish_transitions, + SpciMemoryShare::Lend => &shared_lend_transitions, + }; - spci_msg_get_next_state( - &donate_transitions, + let (from_mode, to_mode) = spci_msg_get_next_state( + &mem_transition_table, memory_to_attributes, orig_from_mode, orig_to_mode, - ) + )?; + + Ok((orig_from_mode, from_mode, to_mode)) } /// Shares memory from the calling VM with another. The memory can be shared in different modes. diff --git a/hfo2/src/std.rs b/hfo2/src/std.rs index cf62499d8..ad593604e 100644 --- a/hfo2/src/std.rs +++ b/hfo2/src/std.rs @@ -14,7 +14,6 @@ * limitations under the License. 
*/ -use core::mem; use core::ptr; use crate::types::*; @@ -25,11 +24,30 @@ pub fn is_aligned(v: usize, a: usize) -> bool { (v & (a - 1)) == 0 } +/// As per the C11 specification, mem*_s() operations fill the destination buffer if runtime +/// constraint validation fails, assuming that `dest` and `destsz` are both valid. +#[track_caller] +unsafe fn check_or_fill(cond: bool, dest: *const c_void, destsz: size_t, ch: i32, condmsg: &str) { + if !cond { + if !dest.is_null() && destsz <= RSIZE_MAX { + memset_s(dest, destsz, ch, destsz); + } + panic!("failed: {}", condmsg); + } +} + +#[track_caller] +unsafe fn check_or_fill_zero(cond: bool, dest: *const c_void, destsz: size_t, condmsg: &str) { + check_or_fill(cond, dest, destsz, 0, condmsg) +} + #[no_mangle] pub unsafe extern "C" fn memset_s(dest: *const c_void, destsz: size_t, ch: c_int, count: size_t) { - if dest.is_null() || destsz > RSIZE_MAX || count > RSIZE_MAX || count > destsz { - panic!("memset_s failure"); - } + check_or_fill(!dest.is_null(), dest, destsz, ch, "!dest.is_null()"); + + // Check count <= destsz <= RSIZE_MAX. + check_or_fill(destsz <= RSIZE_MAX, dest, destsz, ch, "destsz <= RSIZE_MAX"); + check_or_fill(count <= destsz, dest, destsz, ch, "count <= destsz"); ptr::write_bytes(dest as *mut u8, ch as u8, count); } @@ -44,18 +62,29 @@ pub unsafe extern "C" fn memcpy_s( let d = dest as usize; let s = src as usize; - if dest.is_null() || src.is_null() - || destsz > RSIZE_MAX || count > RSIZE_MAX - || count > destsz - // Destination overlaps the end of source. - || (d > s && d < s + count) - // Source overlaps the end of destination. - || (s > d && s < d + destsz) - { - panic!("memcpy_s failure"); - } - - // TODO: consider wrapping? + check_or_fill_zero(!dest.is_null(), dest, destsz, "!dest.is_null()"); + check_or_fill_zero(!src.is_null(), dest, destsz, "!src.is_null()"); + + // Check count <= destsz <= RSIZE_MAX. 
+ check_or_fill_zero(destsz <= RSIZE_MAX, dest, destsz, "destsz <= RSIZE_MAX"); + check_or_fill_zero(count <= destsz, dest, destsz, "count <= destsz"); + + // Buffer overlap test. + // case a) `d < s` impiles `s >= d+count` + // case b) `d > s` impiles `d >= s+count` + check_or_fill_zero(d != s, dest, destsz, "d != s"); + check_or_fill_zero( + d < s || d >= (s + count), + dest, + destsz, + "d < s || d >= (s + count)", + ); + check_or_fill_zero( + d > s || s >= (d + count), + dest, + destsz, + "d > s || s >= (d + count)", + ); ptr::copy_nonoverlapping(src as *const u8, dest as *mut u8, count); } @@ -67,27 +96,34 @@ pub unsafe extern "C" fn memmove_s( src: *const c_void, count: size_t, ) { - if dest.is_null() || src.is_null() || destsz > RSIZE_MAX || count > RSIZE_MAX || count > destsz - { - panic!("memmove_s failure"); - } + check_or_fill_zero(!dest.is_null(), dest, destsz, "!dest.is_null()"); + check_or_fill_zero(!src.is_null(), dest, destsz, "!src.is_null()"); + + // Check count <= destsz <= RSIZE_MAX. + check_or_fill_zero(destsz <= RSIZE_MAX, dest, destsz, "destsz <= RSIZE_MAX"); + check_or_fill_zero(count <= destsz, dest, destsz, "count <= destsz"); ptr::copy(src as *const u8, dest as *mut u8, count); } +/// Returns the length of the null-terminated byte string `str`, examining at most `strsz` bytes. +/// +/// If `str` is a NULL pointer, it returns zero. +/// If a NULL character is not found, it returns `strsz`. #[no_mangle] -pub unsafe extern "C" fn strnlen_s(str: *const c_char, mut strsz: size_t) -> size_t { +pub unsafe extern "C" fn strnlen_s(str: *const c_char, strsz: size_t) -> size_t { if str.is_null() { return 0; } - let mut p = str; - while strsz > 0 && *p != 0 { - strsz -= 1; - p = p.add(1); + for i in 0..strsz { + if *str.add(i) == b'\0' { + return i; + } } - ((p as usize) - (str as usize)) / mem::size_of::() + // NULL character not found. 
+ strsz } pub(crate) unsafe fn memcmp_rs(a: *const c_void, b: *const c_void, mut n: size_t) -> c_int { diff --git a/hfo2/src/types.rs b/hfo2/src/types.rs index d7db3dea0..f667554a5 100644 --- a/hfo2/src/types.rs +++ b/hfo2/src/types.rs @@ -73,8 +73,15 @@ pub const MAX_VMS: usize = 6; #[cfg(target_arch = "aarch64")] pub const MAX_VMS: usize = 16; +/// An offset to use when assigning VM IDs. +/// The offset is needed because VM ID 0 is reserved. +pub const HF_VM_ID_OFFSET: spci_vm_id_t = 1; + /// The ID of the primary VM which is responsible for scheduling. -pub const HF_PRIMARY_VM_ID: spci_vm_id_t = 0; +/// +/// Starts at the offset because ID 0 is reserved for the hypervisor itself. +/// All other VM IDs come after the primary. +pub const HF_PRIMARY_VM_ID: spci_vm_id_t = HF_VM_ID_OFFSET; /// The amount of data that can be sent to a mailbox. pub const HF_MAILBOX_SIZE: usize = PAGE_SIZE; diff --git a/hfo2/src/utils.rs b/hfo2/src/utils.rs index bbc7b257d..8dae5ce03 100644 --- a/hfo2/src/utils.rs +++ b/hfo2/src/utils.rs @@ -42,6 +42,17 @@ pub fn spin_loop() -> ! { } } +pub fn as_asciz(bytes: &[u8]) -> &[u8] { + bytes + .split_at( + bytes + .iter() + .position(|&c| c == b'\0') + .unwrap_or(bytes.len()), + ) + .0 +} + #[inline] pub fn div_ceil(a: usize, b: usize) -> usize { (a + b - 1) / b diff --git a/hfo2/src/vm.rs b/hfo2/src/vm.rs index 3d6072554..01a87e974 100644 --- a/hfo2/src/vm.rs +++ b/hfo2/src/vm.rs @@ -457,13 +457,17 @@ impl VmInner { } pub fn debug_log(&mut self, id: spci_vm_id_t, c: c_char) { - if c == '\n' as u32 as u8 || c == '\0' as u32 as u8 || self.log_buffer.is_full() { - // flush the buffer. 
+ let flush = if c == b'\n' || c == b'\0' { + true + } else { + self.log_buffer.push(c); + self.log_buffer.is_full() + }; + + if flush { let log = str::from_utf8(&self.log_buffer).unwrap_or("non-UTF8 bytes"); dlog!("VM {}: {}\n", id, log); self.log_buffer.clear(); - } else { - self.log_buffer.push(c); } } } @@ -547,31 +551,39 @@ impl VmManager { return None; } - let id = self.vms.len(); - let vm = unsafe { self.vms.get_unchecked_mut(id) }; + let idx = self.vms.len(); + + // Generate IDs based on an offset, as low IDs e.g., 0, are reserved + let id = idx + HF_VM_ID_OFFSET as usize; + let vm = unsafe { self.vms.get_unchecked_mut(idx) }; vm.init(id as u16, vcpu_count, ppool).ok()?; unsafe { - self.vms.set_len(id + 1); + self.vms.set_len(idx + 1); } - Some(&mut self.vms[id]) + Some(&mut self.vms[idx]) + } + + fn get_vm_index(vm_id: spci_vm_id_t) -> usize { + assert!(vm_id >= HF_VM_ID_OFFSET); + (vm_id - HF_VM_ID_OFFSET) as _ } pub fn get(&self, id: spci_vm_id_t) -> Option<&Vm> { - self.vms.get(id as usize) + self.vms.get(Self::get_vm_index(id)) } pub fn get_mut(&mut self, id: spci_vm_id_t) -> Option<&mut Vm> { - self.vms.get_mut(id as usize) + self.vms.get_mut(Self::get_vm_index(id)) } pub fn get_primary(&self) -> &Vm { // # Safety // // Primary VM always exists. - unsafe { self.vms.get_unchecked(HF_PRIMARY_VM_ID as usize) } + unsafe { self.vms.get_unchecked(Self::get_vm_index(HF_PRIMARY_VM_ID)) } } pub fn len(&self) -> spci_vm_count_t { diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h index d43f4b920..d470d084a 100644 --- a/inc/hf/arch/mm.h +++ b/inc/hf/arch/mm.h @@ -112,10 +112,11 @@ void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end); void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end); /** - * Writes the given range of virtual memory back to the point of unification so - * all cores and devices will see the updated values. 
+ * Writes back the given range of virtual memory to such a point that all cores + * and devices will see the updated values. The corresponding cache lines are + * also invalidated. */ -void arch_mm_write_back_dcache(void *base, size_t size); +void arch_mm_flush_dcache(void *base, size_t size); /** * Gets the maximum level allowed in the page table for stage-1. @@ -158,6 +159,11 @@ uint64_t arch_mm_mode_to_stage2_attrs(int mode); int arch_mm_stage2_attrs_to_mode(uint64_t attrs); /** - * Initializes the arch specific memory management state. + * Initializes the arch specific memory management. */ -bool arch_mm_init(paddr_t table, bool first); +bool arch_mm_init(void); + +/** + * Enables the current CPU with arch specific memory management state. + */ +void arch_mm_enable(paddr_t table); diff --git a/inc/hf/boot_params.h b/inc/hf/boot_params.h index ee4677760..59b62d508 100644 --- a/inc/hf/boot_params.h +++ b/inc/hf/boot_params.h @@ -20,6 +20,7 @@ #include "hf/arch/cpu.h" +#include "hf/fdt.h" #include "hf/mm.h" #include "hf/mpool.h" @@ -47,7 +48,6 @@ struct boot_params_update { paddr_t initrd_end; }; -bool plat_get_boot_params(struct mm_stage1_locked stage1_locked, - struct boot_params *p, struct mpool *ppool); -bool plat_update_boot_params(struct mm_stage1_locked stage1_locked, - struct boot_params_update *p, struct mpool *ppool); +bool boot_params_init(struct boot_params *p, const struct fdt_node *fdt_root); +bool boot_params_patch_fdt(struct mm_stage1_locked stage1_locked, + struct boot_params_update *p, struct mpool *ppool); diff --git a/inc/hf/cpu.h b/inc/hf/cpu.h index dcfb8f155..9084856a7 100644 --- a/inc/hf/cpu.h +++ b/inc/hf/cpu.h @@ -110,7 +110,7 @@ struct arch_regs *vcpu_get_regs(struct vcpu *vcpu); const struct arch_regs *vcpu_get_regs_const(const struct vcpu *vcpu); struct vm *vcpu_get_vm(struct vcpu *vcpu); struct cpu *vcpu_get_cpu(struct vcpu *vcpu); -struct interrupts *vcpu_get_interrupts(struct vcpu *vcpu); +bool vcpu_is_interrupted(struct vcpu 
*vcpu); bool vcpu_is_off(struct vcpu_execution_locked vcpu); bool vcpu_secondary_reset_and_start(struct vcpu *vcpu, ipaddr_t entry, uintreg_t arg); diff --git a/inc/hf/dlog.h b/inc/hf/dlog.h index 01dddd990..f8c0080de 100644 --- a/inc/hf/dlog.h +++ b/inc/hf/dlog.h @@ -17,8 +17,14 @@ #pragma once #include +#include -#include "vm.h" +#include "hf/spci.h" + +#define DLOG_BUFFER_SIZE 8192 + +extern size_t dlog_buffer_offset; +extern char dlog_buffer[]; #if DEBUG void dlog_enable_lock(void); @@ -30,4 +36,4 @@ void vdlog(const char *fmt, va_list args); #define vdlog(fmt, args) #endif -void dlog_flush_vm_buffer(struct vm_locked vm); +void dlog_flush_vm_buffer(spci_vm_id_t id, char buffer[], size_t length); diff --git a/inc/hf/fdt_handler.h b/inc/hf/fdt_handler.h index 8adee755b..7c54061a0 100644 --- a/inc/hf/fdt_handler.h +++ b/inc/hf/fdt_handler.h @@ -26,10 +26,10 @@ struct fdt_header *fdt_map(struct mm_stage1_locked stage1_locked, struct mpool *ppool); bool fdt_unmap(struct mm_stage1_locked stage1_locked, struct fdt_header *fdt, struct mpool *ppool); -void fdt_find_cpus(const struct fdt_node *root, cpu_id_t *cpu_ids, +bool fdt_find_cpus(const struct fdt_node *root, cpu_id_t *cpu_ids, size_t *cpu_count); -void fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p); -bool fdt_find_initrd(struct fdt_node *n, paddr_t *begin, paddr_t *end); +bool fdt_find_memory_ranges(const struct fdt_node *root, struct boot_params *p); +bool fdt_find_initrd(const struct fdt_node *root, paddr_t *begin, paddr_t *end); /** Apply an update to the FDT. 
*/ bool fdt_patch(struct mm_stage1_locked stage1_locked, paddr_t fdt_addr, diff --git a/inc/hf/io.h b/inc/hf/io.h index fbafaf8b5..f4a80a43c 100644 --- a/inc/hf/io.h +++ b/inc/hf/io.h @@ -133,7 +133,7 @@ static inline uint8_t io_read8_mb(io8_t io) { uint8_t v = io_read8(io); - dsb(); + data_sync_barrier(); return v; } @@ -141,7 +141,7 @@ static inline uint16_t io_read16_mb(io16_t io) { uint16_t v = io_read16(io); - dsb(); + data_sync_barrier(); return v; } @@ -149,7 +149,7 @@ static inline uint32_t io_read32_mb(io32_t io) { uint32_t v = io_read32(io); - dsb(); + data_sync_barrier(); return v; } @@ -157,7 +157,7 @@ static inline uint64_t io_read64_mb(io64_t io) { uint64_t v = io_read64(io); - dsb(); + data_sync_barrier(); return v; } @@ -165,7 +165,7 @@ static inline uint8_t io_read8_array_mb(io8_array_t io, size_t n) { uint8_t v = io_read8_array(io, n); - dsb(); + data_sync_barrier(); return v; } @@ -173,7 +173,7 @@ static inline uint16_t io_read16_array_mb(io16_array_t io, size_t n) { uint16_t v = io_read16_array(io, n); - dsb(); + data_sync_barrier(); return v; } @@ -181,7 +181,7 @@ static inline uint32_t io_read32_array_mb(io32_array_t io, size_t n) { uint32_t v = io_read32_array(io, n); - dsb(); + data_sync_barrier(); return v; } @@ -189,7 +189,7 @@ static inline uint64_t io_read64_array_mb(io64_array_t io, size_t n) { uint64_t v = io_read64_array(io, n); - dsb(); + data_sync_barrier(); return v; } @@ -247,48 +247,48 @@ static inline void io_write64_array(io64_array_t io, size_t n, uint64_t v) static inline void io_write8_mb(io8_t io, uint8_t v) { - dsb(); + data_sync_barrier(); io_write8(io, v); } static inline void io_write16_mb(io16_t io, uint16_t v) { - dsb(); + data_sync_barrier(); io_write16(io, v); } static inline void io_write32_mb(io32_t io, uint32_t v) { - dsb(); + data_sync_barrier(); io_write32(io, v); } static inline void io_write64_mb(io64_t io, uint64_t v) { - dsb(); + data_sync_barrier(); io_write64(io, v); } static inline void 
io_write8_array_mb(io8_array_t io, size_t n, uint8_t v) { - dsb(); + data_sync_barrier(); io_write8_array(io, n, v); } static inline void io_write16_array_mb(io16_array_t io, size_t n, uint16_t v) { - dsb(); + data_sync_barrier(); io_write16_array(io, n, v); } static inline void io_write32_array_mb(io32_array_t io, size_t n, uint32_t v) { - dsb(); + data_sync_barrier(); io_write32_array(io, n, v); } static inline void io_write64_array_mb(io64_array_t io, size_t n, uint64_t v) { - dsb(); + data_sync_barrier(); io_write64_array(io, n, v); } diff --git a/inc/hf/mm.h b/inc/hf/mm.h index 180f603c4..dfde6b2de 100644 --- a/inc/hf/mm.h +++ b/inc/hf/mm.h @@ -32,7 +32,6 @@ #define PAGE_SIZE (1 << PAGE_BITS) #define MM_PTE_PER_PAGE (PAGE_SIZE / sizeof(pte_t)) - /* The following are arch-independent page mapping modes. */ #define MM_MODE_R 0x0001 /* read */ #define MM_MODE_W 0x0002 /* write */ @@ -71,6 +70,10 @@ #define MM_MODE_UNOWNED 0x0020 #define MM_MODE_SHARED 0x0040 +#define MM_FLAG_COMMIT 0x01 +#define MM_FLAG_UNMAP 0x02 +#define MM_FLAG_STAGE1 0x04 + /* clang-format on */ struct mm_page_table { @@ -86,11 +89,17 @@ struct mm_ptable { paddr_t root; }; +/** The type of addresses stored in the page table. */ +typedef uintvaddr_t ptable_addr_t; + /** Represents the currently locked stage-1 page table of the hypervisor. 
*/ struct mm_stage1_locked { struct mm_ptable *ptable; }; +bool mm_ptable_init(struct mm_ptable *t, int flags, struct mpool *ppool); +ptable_addr_t mm_ptable_addr_space_end(int flags); + bool mm_vm_init(struct mm_ptable *t, struct mpool *ppool); void mm_vm_fini(struct mm_ptable *t, struct mpool *ppool); bool mm_vm_identity_map(struct mm_ptable *t, paddr_t begin, paddr_t end, @@ -106,6 +115,8 @@ struct mm_stage1_locked mm_lock_stage1(void); void mm_unlock_stage1(struct mm_stage1_locked *lock); void *mm_identity_map(struct mm_stage1_locked stage1_locked, paddr_t begin, paddr_t end, int mode, struct mpool *ppool); +void *mm_identity_map_nolock(struct mm_stage1_locked stage1_locked, paddr_t begin, + paddr_t end, int mode, struct mpool *ppool); void mm_defrag(struct mm_stage1_locked stage1_locked, struct mpool *ppool); bool mm_init(struct mpool *ppool); diff --git a/inc/hf/plat/boot_flow.h b/inc/hf/plat/boot_flow.h new file mode 100644 index 000000000..daaf5a684 --- /dev/null +++ b/inc/hf/plat/boot_flow.h @@ -0,0 +1,25 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "hf/addr.h" +#include "hf/fdt.h" + +paddr_t plat_get_fdt_addr(void); +uintreg_t plat_get_kernel_arg(void); +bool plat_get_initrd_range(const struct fdt_node *fdt_root, paddr_t *begin, + paddr_t *end); diff --git a/inc/hf/std.h b/inc/hf/std.h index be3b168bc..9c6b49420 100644 --- a/inc/hf/std.h +++ b/inc/hf/std.h @@ -31,11 +31,13 @@ typedef size_t rsize_t; * Only the safer versions of these functions are exposed to reduce the chance * of misusing the versions without bounds checking or null pointer checks. * - * These functions don't return errno_t as per the specification and implicity + * These functions don't return errno_t as per the specification and implicitly * have a constraint handler that panics. */ void memset_s(void *dest, rsize_t destsz, int ch, rsize_t count); void memcpy_s(void *dest, rsize_t destsz, const void *src, rsize_t count); void memmove_s(void *dest, rsize_t destsz, const void *src, rsize_t count); +void *memchr(const void *ptr, int ch, size_t count); + size_t strnlen_s(const char *str, size_t strsz); diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h index d04b91448..67c8e352c 100644 --- a/inc/vmapi/hf/call.h +++ b/inc/vmapi/hf/call.h @@ -46,7 +46,7 @@ * This function must be implemented to trigger the architecture specific * mechanism to call to the hypervisor. */ -int64_t hf_call(size_t arg0, size_t arg1, size_t arg2, size_t arg3); +int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3); /** * Returns the VM's own ID. diff --git a/inc/vmapi/hf/spci.h b/inc/vmapi/hf/spci.h index cad5e356a..98d4155da 100644 --- a/inc/vmapi/hf/spci.h +++ b/inc/vmapi/hf/spci.h @@ -48,6 +48,8 @@ /* Architected memory sharing message IDs. */ enum spci_memory_share { + SPCI_MEMORY_LEND = 0x0, + SPCI_MEMORY_RELINQUISH = 0x1, SPCI_MEMORY_DONATE = 0x2, }; @@ -65,9 +67,93 @@ enum spci_memory_share { /* The maximum length possible for a single message. 
*/ #define SPCI_MSG_PAYLOAD_MAX (HF_MAILBOX_SIZE - sizeof(struct spci_message)) +#define spci_get_lend_descriptor(message)\ + ((struct spci_memory_lend *)(((uint8_t *) message)\ + + sizeof(struct spci_message)\ + + sizeof(struct spci_architected_message_header))) + +enum spci_lend_access { + SPCI_LEND_RO_NX, + SPCI_LEND_RO_X, + SPCI_LEND_RW_NX, + SPCI_LEND_RW_X, +}; + +enum spci_lend_type { + SPCI_LEND_NORMAL_MEM, + SPCI_LEND_DEV_NGNRNE, + SPCI_LEND_DEV_NGNRE, + SPCI_LEND_DEV_NGRE, + SPCI_LEND_DEV_GRE, +}; + +enum spci_lend_cacheability { + SPCI_LEND_CACHE_NON_CACHEABLE, + SPCI_LEND_CACHE_WRITE_THROUGH, + SPCI_LEND_CACHE_WRITE_BACK, +}; + +enum spci_lend_shareability { + SPCI_LEND_SHARE_NON_SHAREABLE, + SPCI_LEND_RESERVED, + SPCI_LEND_OUTER_SHAREABLE, + SPCI_LEND_INNER_SHAREABLE, +}; + +#define SPCI_LEND_ACCESS_OFFSET (0x7U) +#define SPCI_LEND_ACCESS_MASK ((0x3U) << SPCI_LEND_ACCESS_OFFSET) + +#define SPCI_LEND_TYPE_OFFSET (0x4U) +#define SPCI_LEND_TYPE_MASK ((0x7U) << SPCI_LEND_TYPE_OFFSET) + +#define SPCI_LEND_CACHEABILITY_OFFSET (0x2U) +#define SPCI_LEND_CACHEABILITY_MASK ((0x3U) <<\ + SPCI_LEND_CACHEABILITY_OFFSET) + +#define SPCI_LEND_SHAREABILITY_OFFSET (0x0U) +#define SPCI_LEND_SHAREABILITY_MASK ((0x3U) <<\ + SPCI_LEND_SHAREABILITY_OFFSET) + +#define LEND_ATTR_FUNCTION_SET(name, offset, mask) \ +static inline void spci_set_lend_##name##_attr(uint16_t *lend_attr,\ + const enum spci_lend_##name perm)\ +{\ + *lend_attr = (*lend_attr & ~(mask)) | ((perm << offset) & mask);\ +} + +#define LEND_ATTR_FUNCTION_GET(name, offset, mask) \ +static inline enum spci_lend_##name spci_get_lend_##name##_attr(\ + uint16_t lend_attr)\ +{\ + return (enum spci_lend_##name)((lend_attr & mask) >> offset);\ +} + +LEND_ATTR_FUNCTION_SET(access, SPCI_LEND_ACCESS_OFFSET, SPCI_LEND_ACCESS_MASK) +LEND_ATTR_FUNCTION_GET(access, SPCI_LEND_ACCESS_OFFSET, SPCI_LEND_ACCESS_MASK) + +LEND_ATTR_FUNCTION_SET(type, SPCI_LEND_TYPE_OFFSET, SPCI_LEND_TYPE_MASK) +LEND_ATTR_FUNCTION_GET(type, 
SPCI_LEND_TYPE_OFFSET, SPCI_LEND_TYPE_MASK) + +LEND_ATTR_FUNCTION_SET(cacheability, SPCI_LEND_CACHEABILITY_OFFSET, + SPCI_LEND_CACHEABILITY_MASK) + +LEND_ATTR_FUNCTION_GET(cacheability, SPCI_LEND_CACHEABILITY_OFFSET, + SPCI_LEND_CACHEABILITY_MASK) + +LEND_ATTR_FUNCTION_SET(shareability, SPCI_LEND_SHAREABILITY_OFFSET, + SPCI_LEND_SHAREABILITY_MASK) + +LEND_ATTR_FUNCTION_GET(shareability, SPCI_LEND_SHAREABILITY_OFFSET, + SPCI_LEND_SHAREABILITY_MASK) + +enum spci_lend_flags { + SPCI_LEND_KEEP_MAPPED = 0x0, + SPCI_LEND_UNMAP = 0x1 +}; + /* clang-format on */ -/** The ID of a VM. These are assigned sequentially. */ +/** The ID of a VM. These are assigned sequentially starting with an offset. */ typedef uint16_t spci_vm_id_t; typedef uint32_t spci_memory_handle_t; @@ -153,6 +239,15 @@ struct spci_memory_region { struct spci_memory_region_constituent constituents[]; }; +struct spci_memory_lend { + uint16_t flags; + uint16_t borrower_attributes; + + uint32_t reserved; + + uint8_t payload[]; +}; + /* TODO: Move all the functions below this line to a support library. */ /** * Fill all the fields, except for the flags, in the SPCI message common header. @@ -237,17 +332,16 @@ static inline struct spci_memory_region *spci_get_donated_memory_region( } /** - * Add a memory region to the current message. - * A memory region is composed of one or more constituents. + * Helper function that copies the memory constituents and the handle + * information onto the address pointed to by memory_region. + * The function returns the length in bytes occupied by the data copied to + * memory_region (constituents and memory region header size). 
*/ -static inline void spci_memory_region_add( - struct spci_message *message, spci_memory_handle_t handle, +static inline uint32_t spci_memory_region_add( + struct spci_memory_region *memory_region, spci_memory_handle_t handle, const struct spci_memory_region_constituent constituents[], uint32_t num_constituents) { - struct spci_memory_region *memory_region = - spci_get_donated_memory_region(message); - uint32_t constituents_length = num_constituents * sizeof(struct spci_memory_region_constituent); @@ -265,8 +359,8 @@ static inline void spci_memory_region_add( * TODO: Add assert ensuring that the specified message * length is not greater than SPCI_MSG_PAYLOAD_MAX. */ - message->length += - sizeof(struct spci_memory_region) + constituents_length; + + return sizeof(struct spci_memory_region) + constituents_length; } /** Construct the SPCI donate memory region message. */ @@ -277,6 +371,8 @@ static inline void spci_memory_donate( uint32_t num_elements, uint32_t handle) { int32_t message_length; + struct spci_memory_region *memory_region = + spci_get_donated_memory_region(message); message_length = sizeof(struct spci_architected_message_header); @@ -285,6 +381,77 @@ static inline void spci_memory_donate( source_vm_id, SPCI_MEMORY_DONATE); /* Create single memory region. */ - spci_memory_region_add(message, handle, region_constituents, - num_elements); + message->length += spci_memory_region_add( + memory_region, handle, region_constituents, num_elements); +} + +/** + * Construct the SPCI memory region relinquish message. + * A set of memory regions can be given back to the owner. 
+ */
+static inline void spci_memory_relinquish(
+	struct spci_message *message, spci_vm_id_t target_vm_id,
+	spci_vm_id_t source_vm_id,
+	struct spci_memory_region_constituent *region_constituents,
+	uint64_t num_elements, uint32_t handle)
+{
+	int32_t message_length;
+	struct spci_memory_region *memory_region =
+		spci_get_donated_memory_region(message);
+
+	message_length = sizeof(struct spci_architected_message_header);
+
+	/* Fill in the details on the common message header. */
+	spci_architected_message_init(message, message_length, target_vm_id,
+				      source_vm_id, SPCI_MEMORY_RELINQUISH);
+
+	/* Create single memory region. */
+	message->length += spci_memory_region_add(
+		memory_region, handle, region_constituents, num_elements);
+}
+
+/**
+ * Construct the SPCI memory region lend message.
+ */
+static inline void spci_memory_lend(
+	struct spci_message *message, spci_vm_id_t target_vm_id,
+	spci_vm_id_t source_vm_id,
+	struct spci_memory_region_constituent *region_constituents,
+	uint64_t num_elements, uint32_t handle, enum spci_lend_access access,
+	enum spci_lend_type type, enum spci_lend_cacheability cacheability,
+	enum spci_lend_shareability shareability)
+{
+	int32_t message_length;
+	struct spci_memory_region *memory_region;
+
+	const struct spci_memory_lend lend_init = {0};
+
+	struct spci_memory_lend *lend_descriptor =
+		spci_get_lend_descriptor(message);
+	memory_region = (struct spci_memory_region *)lend_descriptor->payload;
+
+	/* Initialise all struct elements to zero. */
+	*lend_descriptor = lend_init;
+
+	message_length = sizeof(struct spci_architected_message_header) +
+			 sizeof(struct spci_memory_lend);
+
+	/* Fill in the details on the common message header. */
+	spci_architected_message_init(message, message_length, target_vm_id,
+				      source_vm_id, SPCI_MEMORY_LEND);
+
+	lend_descriptor->flags = SPCI_LEND_KEEP_MAPPED;
+
+	/* Set memory region's page attributes.
*/ + spci_set_lend_access_attr(&lend_descriptor->borrower_attributes, + access); + spci_set_lend_type_attr(&lend_descriptor->borrower_attributes, type); + spci_set_lend_cacheability_attr(&lend_descriptor->borrower_attributes, + cacheability); + spci_set_lend_shareability_attr(&lend_descriptor->borrower_attributes, + shareability); + + /* Create single memory region. */ + message->length += spci_memory_region_add( + memory_region, handle, region_constituents, num_elements); } diff --git a/inc/hf/arch/barriers.h b/inc/vmapi/hf/transport.h similarity index 58% rename from inc/hf/arch/barriers.h rename to inc/vmapi/hf/transport.h index 1ff43e6f4..686143c4d 100644 --- a/inc/hf/arch/barriers.h +++ b/inc/vmapi/hf/transport.h @@ -1,5 +1,5 @@ /* - * Copyright 2018 The Hafnium Authors. + * Copyright 2019 The Hafnium Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,19 +17,12 @@ #pragma once /** - * Ensures all explicit memory accesses before this point are completed before - * any later memory accesses are performed. - */ -void dmb(void); - -/** - * Ensures all explicit memory access and management instructions have completed - * before continuing. - */ -void dsb(void); - -/** - * Flushes the instruction pipeline so that instructions are fetched from - * memory. + * Header for Hafnium messages + * + * NOTE: This is a work in progress. The final form of a Hafnium message header + * is likely to change. */ -void isb(void); +struct hf_msg_hdr { + uint64_t src_port; + uint64_t dst_port; +}; diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h index 04790120e..f54d7d70a 100644 --- a/inc/vmapi/hf/types.h +++ b/inc/vmapi/hf/types.h @@ -33,10 +33,21 @@ typedef uintptr_t hf_ipaddr_t; #endif -/** The ID of the primary VM which is responsible for scheduling. */ -#define HF_PRIMARY_VM_ID 0 +/** + * An offset to use when assigning VM IDs. 
+ * The offset is needed because VM ID 0 is reserved. + */ +#define HF_VM_ID_OFFSET 1 + +/** + * The ID of the primary VM, which is responsible for scheduling. + * + * Starts at the offset because ID 0 is reserved for the hypervisor itself. + * All other VM IDs come after the primary. + */ +#define HF_PRIMARY_VM_ID HF_VM_ID_OFFSET -/* Sleep value for an indefinite period of time. */ +/** Sleep value for an indefinite period of time. */ #define HF_SLEEP_INDEFINITE 0xffffffffffffff /** The amount of data that can be sent to a mailbox. */ diff --git a/kokoro/ubuntu/build.sh b/kokoro/ubuntu/build.sh index 42bd69393..662fe98ee 100755 --- a/kokoro/ubuntu/build.sh +++ b/kokoro/ubuntu/build.sh @@ -14,6 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +SCRIPT_NAME="$(realpath "${BASH_SOURCE[0]}")" +ROOT_DIR="$(realpath $(dirname "${SCRIPT_NAME}")/../..)" + # Fail on any error. set -e # Fail on any part of a pipeline failing. @@ -23,6 +26,20 @@ set -u # Display commands being run. set -x +# Default value of HAFNIUM_HERMETIC_BUILD is "true" for Kokoro builds. +if [ -v KOKORO_JOB_NAME -a ! -v HAFNIUM_HERMETIC_BUILD ] +then + HAFNIUM_HERMETIC_BUILD=true +fi + +# If HAFNIUM_HERMETIC_BUILD is "true" (not default), relaunch this script inside +# a container. The 'run_in_container.sh' script will set the variable value to +# 'inside' to avoid recursion. +if [ "${HAFNIUM_HERMETIC_BUILD:-}" == "true" ] +then + exec "${ROOT_DIR}/build/run_in_container.sh" ${SCRIPT_NAME} $@ +fi + USE_FVP=0 while test $# -gt 0 @@ -43,9 +60,6 @@ if [ -v KOKORO_JOB_NAME ] then # Server cd git/hafnium -else - # Local - echo "Testing kokoro build locally..." 
fi CLANG=${PWD}/prebuilts/linux-x64/clang/bin/clang diff --git a/prebuilts b/prebuilts index 16b7382f3..111f71d30 160000 --- a/prebuilts +++ b/prebuilts @@ -1 +1 @@ -Subproject commit 16b7382f3c536e24326e716bde183501a698a78f +Subproject commit 111f71d306f6c2fbfa075376524976305d9c93c4 diff --git a/project/reference b/project/reference index 105d51940..653261a75 160000 --- a/project/reference +++ b/project/reference @@ -1 +1 @@ -Subproject commit 105d51940c5068f7f07ccecd96885b4572589396 +Subproject commit 653261a75f4ef3f5a5335660b2d59df23b88d600 diff --git a/rust-toolchain b/rust-toolchain index 5e0fdfa1a..1a9a5fb78 100644 --- a/rust-toolchain +++ b/rust-toolchain @@ -1 +1 @@ -nightly-2019-08-28 +nightly-2019-12-20 diff --git a/src/BUILD.gn b/src/BUILD.gn index f2e33e1d2..771de1357 100644 --- a/src/BUILD.gn +++ b/src/BUILD.gn @@ -17,10 +17,9 @@ import("//build/toolchain/platform.gni") # The hypervisor image. hypervisor("hafnium") { - sources = [ - "layout.c", - ] + libs = ["//hfo2/target/aarch64-hfo2/release/libhfo2.a"] deps = [ + ":layout", ":src_not_testable_yet", ] } @@ -29,12 +28,11 @@ hypervisor("hafnium") { # src targets will merge! source_set("src_not_testable_yet") { sources = [ - "plat.c", ] - libs = ["//hfo2/target/aarch64-hfo2/release/libhfo2.a"] deps = [ ":src_testable", "//project/${project}/${plat_name}", + plat_boot_flow, plat_console, ] } @@ -53,8 +51,10 @@ source_set("src_testable") { ":fdt", ":fdt_handler", ":memiter", + ":mm", ":std", "//src/arch/${plat_arch}/hypervisor", + plat_boot_flow, plat_console, ] @@ -63,6 +63,15 @@ source_set("src_testable") { } } +source_set("layout") { + sources = [ + "layout.c", + ] +} + +source_set("mm") { +} + # Standard library functions. 
source_set("std") { deps = [ @@ -123,7 +132,6 @@ static_library("fake_arch") { complete_static_lib = true sources = [ "layout_fake.c", - "plat.c", ] cflags = [ "-fPIC" ] deps = [ diff --git a/src/arch/aarch64/BUILD.gn b/src/arch/aarch64/BUILD.gn index 9f29b720d..f9cbc5aab 100644 --- a/src/arch/aarch64/BUILD.gn +++ b/src/arch/aarch64/BUILD.gn @@ -19,7 +19,6 @@ config("config") { # Implementation of the arch interface for aarch64. source_set("arch") { sources = [ - "barriers.c", "cpu.c", "mm.c", "timer.c", @@ -44,6 +43,5 @@ source_set("entry") { source_set("smc") { sources = [ "smc.c", - "smc_internal.S", ] } diff --git a/src/arch/aarch64/boot_flow/BUILD.gn b/src/arch/aarch64/boot_flow/BUILD.gn new file mode 100644 index 000000000..d709a9e07 --- /dev/null +++ b/src/arch/aarch64/boot_flow/BUILD.gn @@ -0,0 +1,25 @@ +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source_set("android") { + sources = [ + "android.S", + ] +} + +source_set("linux") { + sources = [ + "linux.S", + ] +} diff --git a/src/arch/aarch64/smc_internal.S b/src/arch/aarch64/boot_flow/android.S similarity index 79% rename from src/arch/aarch64/smc_internal.S rename to src/arch/aarch64/boot_flow/android.S index 9350fd190..13f0530d6 100644 --- a/src/arch/aarch64/smc_internal.S +++ b/src/arch/aarch64/boot_flow/android.S @@ -1,5 +1,5 @@ /* - * Copyright 2018 The Hafnium Authors. + * Copyright 2019 The Hafnium Authors. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,10 +14,8 @@ * limitations under the License. */ -.section .text.smc, "ax" -.global smc32 -.global smc64_internal -smc32: -smc64_internal: - smc #0 +.section .init.plat_boot_flow_hook, "ax" +.global plat_boot_flow_hook +plat_boot_flow_hook: + /* Do nothing. */ ret diff --git a/src/arch/aarch64/boot_flow/linux.S b/src/arch/aarch64/boot_flow/linux.S new file mode 100644 index 000000000..581a65674 --- /dev/null +++ b/src/arch/aarch64/boot_flow/linux.S @@ -0,0 +1,24 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +.section .init.plat_boot_flow_hook, "ax" +.global plat_boot_flow_hook +plat_boot_flow_hook: + /* Save the FDT pointer to a global variable. 
*/ + adrp x25, plat_fdt_addr + add x25, x25, :lo12:plat_fdt_addr + str x0, [x25] + ret diff --git a/src/arch/aarch64/cpu.c b/src/arch/aarch64/cpu.c index 5993841e9..5881dd47c 100644 --- a/src/arch/aarch64/cpu.c +++ b/src/arch/aarch64/cpu.c @@ -23,6 +23,8 @@ #include "hf/addr.h" #include "hf/std.h" +#include "hypervisor/debug_el1.h" + void arch_irq_disable(void) { __asm__ volatile("msr DAIFSet, #0xf"); @@ -37,13 +39,19 @@ static void gic_regs_reset(struct arch_regs *r, bool is_primary) { #if GIC_VERSION == 3 || GIC_VERSION == 4 uint32_t ich_hcr = 0; + uint32_t icc_sre_el2 = + (1u << 0) | /* SRE, enable ICH_* and ICC_* at EL2. */ + (0x3 << 1); /* DIB and DFB, disable IRQ/FIQ bypass. */ - if (!is_primary) { + if (is_primary) { + icc_sre_el2 |= 1u << 3; /* Enable EL1 access to ICC_SRE_EL1. */ + } else { /* Trap EL1 access to GICv3 system registers. */ ich_hcr = (0x1fu << 10); /* TDIR, TSEI, TALL1, TALL0, TC bits. */ } r->gic.ich_hcr_el2 = ich_hcr; + r->gic.icc_sre_el2 = icc_sre_el2; #endif } @@ -96,6 +104,18 @@ void arch_regs_reset(struct arch_regs *r, bool is_primary, spci_vm_id_t vm_id, r->spsr = 5 | /* M bits, set to EL1h. */ (0xf << 6); /* DAIF bits set; disable interrupts. */ + r->lazy.mdcr_el2 = get_mdcr_el2_value(vm_id); + + /* + * NOTE: It is important that MDSCR_EL1.MDE (bit 15) is set to 0 for + * secondary VMs as long as Hafnium does not support debug register + * access for secondary VMs. If adding Hafnium support for secondary VM + * debug register accesses, then on context switches Hafnium needs to + * save/restore EL1 debug register state that either might change, or + * that needs to be protected. + */ + r->lazy.mdscr_el1 = 0x0u & ~(0x1u << 15); + gic_regs_reset(r, is_primary); } diff --git a/src/arch/aarch64/hftest/BUILD.gn b/src/arch/aarch64/hftest/BUILD.gn index 4c5dace64..78d89c735 100644 --- a/src/arch/aarch64/hftest/BUILD.gn +++ b/src/arch/aarch64/hftest/BUILD.gn @@ -24,7 +24,7 @@ source_set("entry") { # Make a call to the hypervisor from a VM. 
source_set("hf_call") { sources = [ - "hf_call.S", + "hf_call.c", ] } @@ -79,3 +79,9 @@ source_set("console") { "//src/arch/aarch64/hftest:hf_call", ] } + +source_set("mm") { + sources = [ + "mm.c", + ] +} diff --git a/src/arch/aarch64/hftest/cpu_entry.S b/src/arch/aarch64/hftest/cpu_entry.S index 702e84b20..726b04225 100644 --- a/src/arch/aarch64/hftest/cpu_entry.S +++ b/src/arch/aarch64/hftest/cpu_entry.S @@ -14,16 +14,23 @@ * limitations under the License. */ -.global vm_cpu_entry_raw -vm_cpu_entry_raw: - /* Initialise stack from the cpu_start_state struct. */ - ldr x1, [x0] - mov sp, x1 - +.global vm_cpu_entry +vm_cpu_entry: /* Disable trapping floating point access in EL1. */ mov x1, #(0x3 << 20) msr cpacr_el1, x1 isb - /* Jump to C entry point. */ - b vm_cpu_entry + /* Initialise stack from the cpu_start_state struct. */ + ldr x1, [x0] + mov sp, x1 + + /* Load entry function pointer and its argument. */ + ldr x1, [x0, 8] + ldr x0, [x0, 16] + + /* Branch to entry function. */ + blr x1 + + /* Entry function should not return, but if it does, spin. */ + b . diff --git a/src/arch/aarch64/hftest/hf_call.c b/src/arch/aarch64/hftest/hf_call.c new file mode 100644 index 000000000..3085ff10c --- /dev/null +++ b/src/arch/aarch64/hftest/hf_call.c @@ -0,0 +1,42 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hf/call.h" +#include "hf/types.h" + +int64_t hf_call(uint64_t arg0, uint64_t arg1, uint64_t arg2, uint64_t arg3) +{ + register uint64_t r0 __asm__("x0") = arg0; + register uint64_t r1 __asm__("x1") = arg1; + register uint64_t r2 __asm__("x2") = arg2; + register uint64_t r3 __asm__("x3") = arg3; + + /* + * We currently implement SMCCC 1.0, which specifies that the callee can + * use x4–x17 as scratch registers. If we move to SMCCC 1.1 then this + * will change. + */ + __asm__ volatile( + "hvc #0" + : /* Output registers, also used as inputs ('+' constraint). */ + "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3) + : + : /* Clobber registers. */ + "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", + "x14", "x15", "x16", "x17"); + + return r0; +} diff --git a/src/arch/aarch64/hftest/interrupts_gicv3.c b/src/arch/aarch64/hftest/interrupts_gicv3.c index 80b67e252..e41854687 100644 --- a/src/arch/aarch64/hftest/interrupts_gicv3.c +++ b/src/arch/aarch64/hftest/interrupts_gicv3.c @@ -164,10 +164,10 @@ void sync_current_exception(uintreg_t esr, uintreg_t elr) { switch (esr >> 26) { case 0x25: /* EC = 100101, Data abort. */ - dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", elr, esr, + dlog("Data abort: pc=%#x, esr=%#x, ec=%#x", elr, esr, esr >> 26); if (!(esr & (1u << 10))) { /* Check FnV bit. */ - dlog(", far=0x%x", read_msr(far_el1)); + dlog(", far=%#x", read_msr(far_el1)); } else { dlog(", far=invalid"); } @@ -176,8 +176,8 @@ void sync_current_exception(uintreg_t esr, uintreg_t elr) break; default: - dlog("Unknown current sync exception pc=0x%x, esr=0x%x, " - "ec=0x%x\n", + dlog("Unknown current sync exception pc=%#x, esr=%#x, " + "ec=%#x\n", elr, esr, esr >> 26); } diff --git a/src/arch/aarch64/hftest/mm.c b/src/arch/aarch64/hftest/mm.c new file mode 100644 index 000000000..86f27d822 --- /dev/null +++ b/src/arch/aarch64/hftest/mm.c @@ -0,0 +1,101 @@ +/* + * Copyright 2019 The Hafnium Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hf/mm.h" + +#include "hf/arch/barriers.h" +#include "hf/arch/vm/mm.h" + +#include "hf/dlog.h" + +#include "../msr.h" + +#define STAGE1_DEVICEINDX UINT64_C(0) +#define STAGE1_NORMALINDX UINT64_C(1) + +static uintreg_t mm_mair_el1; +static uintreg_t mm_tcr_el1; +static uintreg_t mm_sctlr_el1; + +/** + * Initialize MMU for a test running in EL1. + */ +bool arch_vm_mm_init(void) +{ + static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48}; + uint64_t features = read_msr(id_aa64mmfr0_el1); + int pa_bits = pa_bits_table[features & 0xf]; + + /* Check that 4KB granules are supported. */ + if ((features >> 28) & 0xf) { + dlog("4KB granules are not supported\n"); + return false; + } + + /* Check the physical address range. */ + if (!pa_bits) { + dlog("Unsupported value of id_aa64mmfr0_el1.PARange: %x\n", + features & 0xf); + return false; + } + + /* + * 0 -> Device-nGnRnE memory + * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient, + * Write-Alloc, Read-Alloc. + */ + mm_mair_el1 = (0 << (8 * STAGE1_DEVICEINDX)) | + (0xff << (8 * STAGE1_NORMALINDX)); + + mm_tcr_el1 = (1 << 20) | /* TBI, top byte ignored. */ + ((features & 0xf) << 16) | /* PS. */ + (0 << 14) | /* TG0, granule size, 4KB. */ + (3 << 12) | /* SH0, inner shareable. */ + (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */ + (1 << 8) | /* IRGN0, normal mem, WB RA WA Cacheable. 
*/ + (25 << 0) | /* T0SZ, input address is 2^39 bytes. */ + 0; + + mm_sctlr_el1 = (1 << 0) | /* M, enable stage 1 EL2 MMU. */ + (1 << 1) | /* A, enable alignment check faults. */ + (1 << 2) | /* C, data cache enable. */ + (1 << 3) | /* SA, enable stack alignment check. */ + (3 << 4) | /* RES1 bits. */ + (1 << 11) | /* RES1 bit. */ + (1 << 12) | /* I, instruction cache enable. */ + (1 << 16) | /* RES1 bit. */ + (1 << 18) | /* RES1 bit. */ + (0 << 19) | /* WXN bit, writable execute never. */ + (3 << 22) | /* RES1 bits. */ + (3 << 28) | /* RES1 bits. */ + 0; + + return true; +} + +void arch_vm_mm_enable(paddr_t table) +{ + /* Configure translation management registers. */ + write_msr(ttbr0_el1, pa_addr(table)); + write_msr(mair_el1, mm_mair_el1); + write_msr(tcr_el1, mm_tcr_el1); + + /* Configure sctlr_el1 to enable MMU and cache. */ + dsb(sy); + isb(); + write_msr(sctlr_el1, mm_sctlr_el1); + isb(); +} diff --git a/src/arch/aarch64/hftest/power_mgmt.c b/src/arch/aarch64/hftest/power_mgmt.c index 7abe4d9b2..ab2332e07 100644 --- a/src/arch/aarch64/hftest/power_mgmt.c +++ b/src/arch/aarch64/hftest/power_mgmt.c @@ -16,7 +16,6 @@ #include "hf/arch/vm/power_mgmt.h" -#include "hf/spinlock.h" #include "hf/static_assert.h" #include "vmapi/hf/call.h" @@ -25,73 +24,33 @@ #include "smc.h" /** - * Holds temporary state used to set up the environment on which CPUs will - * start executing. + * Starts the CPU with the given ID. It will set the stack pointer according to + * the provided `state` and jump to the entry point with the given argument + * specified in it. * - * vm_cpu_entry_raw requires that the first field of cpu_start_state be the - * initial stack pointer. + * Note: The caller of this function must guarantee that the contents of `state` + * do not change until the new CPU has branched to the given entry point, and + * that it was written-back to memory (that it is not waiting in a data cache) + * because the new CPU is started with caching disabled. 
*/ -struct cpu_start_state { - uintptr_t initial_sp; - void (*entry)(uintptr_t arg); - uintreg_t arg; - struct spinlock lock; -}; - -/** - * Releases the given cpu_start_state struct by releasing its lock, then calls - * the entry point specified by the caller of cpu_start. - */ -void vm_cpu_entry(struct cpu_start_state *s) -{ - struct cpu_start_state local = *(volatile struct cpu_start_state *)s; - - sl_unlock(&s->lock); - - local.entry(local.arg); - - /* Turn off CPU if the entry point ever returns. */ - cpu_stop(); -} - -/** - * Starts the CPU with the given ID. It will start at the provided entry point - * with the provided argument. - */ -bool cpu_start(uintptr_t id, void *stack, size_t stack_size, - void (*entry)(uintptr_t arg), uintptr_t arg) +bool arch_cpu_start(uintptr_t id, struct arch_cpu_start_state *state) { - void vm_cpu_entry_raw(uintptr_t arg); - struct cpu_start_state s; - - /* Initialise the temporary state we'll hold on the stack. */ - sl_init(&s.lock); - sl_lock(&s.lock); - s.initial_sp = (uintptr_t)stack + stack_size; - s.entry = entry; - s.arg = arg; + void vm_cpu_entry(uintptr_t arg); + smc_res_t smc_res; /* Try to start the CPU. */ - if (smc32(PSCI_CPU_ON, id, (size_t)&vm_cpu_entry_raw, (size_t)&s) != - PSCI_RETURN_SUCCESS) { - return false; - } - - /* - * Wait for the starting cpu to release the spin lock, which indicates - * that it won't touch the state we hold on the stack anymore. - */ - sl_lock(&s.lock); + smc_res = smc64(PSCI_CPU_ON, id, (uintptr_t)&vm_cpu_entry, + (uintptr_t)state, 0, 0, 0, SMCCC_CALLER_HYPERVISOR); - return true; + return smc_res.res0 == PSCI_RETURN_SUCCESS; } /** * Stops the current CPU. */ -noreturn void cpu_stop(void) +noreturn void arch_cpu_stop(void) { - smc32(PSCI_CPU_OFF, 0, 0, 0); + smc32(PSCI_CPU_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR); for (;;) { /* This should never be reached. 
*/ } @@ -107,16 +66,19 @@ static_assert(POWER_STATUS_ON_PENDING == PSCI_RETURN_ON_PENDING, /** * Returns the power status of the given CPU. */ -enum power_status cpu_status(cpu_id_t cpu_id) +enum power_status arch_cpu_status(cpu_id_t cpu_id) { uint32_t lowest_affinity_level = 0; + smc_res_t smc_res; /* * This works because the power_status enum values happen to be the same * as the PSCI_RETURN_* values. The static_asserts above validate that * this is the case. */ - return smc32(PSCI_AFFINITY_INFO, cpu_id, lowest_affinity_level, 0); + smc_res = smc32(PSCI_AFFINITY_INFO, cpu_id, lowest_affinity_level, 0, 0, + 0, 0, SMCCC_CALLER_HYPERVISOR); + return smc_res.res0; } /** @@ -124,7 +86,7 @@ enum power_status cpu_status(cpu_id_t cpu_id) */ noreturn void arch_power_off(void) { - smc32(PSCI_SYSTEM_OFF, 0, 0, 0); + smc32(PSCI_SYSTEM_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR); for (;;) { /* This should never be reached. */ } diff --git a/src/arch/aarch64/hypervisor/BUILD.gn b/src/arch/aarch64/hypervisor/BUILD.gn index 85b2e3773..f5001856d 100644 --- a/src/arch/aarch64/hypervisor/BUILD.gn +++ b/src/arch/aarch64/hypervisor/BUILD.gn @@ -23,6 +23,7 @@ source_set("hypervisor") { ] sources += [ + "debug_el1.c", "handler.c", "psci_handler.c", ] diff --git a/src/arch/aarch64/hypervisor/debug_el1.c b/src/arch/aarch64/hypervisor/debug_el1.c new file mode 100644 index 000000000..3e236caf4 --- /dev/null +++ b/src/arch/aarch64/hypervisor/debug_el1.c @@ -0,0 +1,326 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "debug_el1.h"
+
+#include "hf/check.h"
+#include "hf/dlog.h"
+#include "hf/panic.h"
+#include "hf/types.h"
+
+#include "msr.h"
+
+/**
+ * Controls traps for Trace Filter.
+ */
+#define MDCR_EL2_TTRF (0x1u << 19)
+
+/**
+ * Controls the owning translation regime and access to Profiling Buffer control
+ * registers from EL1. Depends on whether SPE is implemented.
+ */
+#define MDCR_EL2_E2PB (0x3u << 12)
+
+/**
+ * Controls traps for Debug ROM.
+ */
+#define MDCR_EL2_TDRA (0x1u << 11)
+
+/**
+ * Controls traps for OS-Related Register Access.
+ */
+#define MDCR_EL2_TDOSA (0x1u << 10)
+
+/**
+ * Controls traps for remaining Debug Registers not trapped by TDRA and TDOSA.
+ */
+#define MDCR_EL2_TDA (0x1u << 9)
+
+/**
+ * Controls traps for all debug exceptions (e.g., breakpoints).
+ */
+#define MDCR_EL2_TDE (0x1u << 8)
+
+/**
+ * Defines the number of event counters that are accessible from various
+ * exception levels, if permitted. Dependent on whether PMUv3 is implemented.
+ */
+#define MDCR_EL2_HPMN (0x1fu << 0)
+
+/**
+ * Controls traps for debug events, i.e., breakpoints, watchpoints, and vector
+ * catch exceptions.
+ */
+#define MDSCR_EL1_MDE (0x1u << 15)
+
+/**
+ * System registers are identified by op0, op2, op1, crn, crm. The ISS encoding
+ * includes also rt and direction. Exclude them, @see D13.2.37 (D13-2977).
+ */
+#define ISS_SYSREG_MASK \
+	(((1u << 22) - 1u) & /* Select the ISS bits */ \
+	~(0x1fu << 5) & /* exclude rt */ \
+	~1u /* exclude direction */)
+
+#define GET_ISS_SYSREG(esr) (ISS_SYSREG_MASK & (esr))
+
+/**
+ * Op0 from the ISS encoding in the ESR.
+ */
+#define ISS_OP0_MASK 0x300000
+#define ISS_OP0_SHIFT 20
+#define GET_ISS_OP0(esr) ((ISS_OP0_MASK & (esr)) >> ISS_OP0_SHIFT)
+
+/**
+ * Op1 from the ISS encoding in the ESR.
+ */ +#define ISS_OP1_MASK 0x1c000 +#define ISS_OP1_SHIFT 14 +#define GET_ISS_OP1(esr) ((ISS_OP1_MASK & (esr)) >> ISS_OP1_SHIFT) + +/** + * Direction (i.e., read (1) or write (0), is the first bit in the ISS/ESR. + */ +#define ISS_DIRECTION_MASK 1u + +/** + * Gets the direction of the system register access, read (1) or write (0). + */ +#define GET_ISS_DIRECTION(esr) (ISS_DIRECTION_MASK & (esr)) + +/** + * True if the ISS encoded in the esr indicates a read of the system register. + */ +#define ISS_IS_READ(esr) (ISS_DIRECTION_MASK & (esr)) + +/** + * Rt, which identifies the general purpose register used for the operation. + */ +#define ISS_RT_MASK 0x3e0 +#define ISS_RT_SHIFT 5 +#define GET_ISS_RT(esr) ((ISS_RT_MASK & (esr)) >> ISS_RT_SHIFT) + +/** + * PMCR_EL0.N: Indicates the number of event counters implemented. + */ +#define PMCR_EL0_N_MASK 0xf800 +#define PMCR_EL0_N_SHIFT 11 +#define GET_PMCR_EL0_N(pmcr) ((PMCR_EL0_N_MASK & (pmcr)) >> PMCR_EL0_N_SHIFT) + +/** + * Definitions of read-only debug registers' ISS signatures. + */ +#define EL1_DEBUG_REGISTERS_READ \ + X(MDRAR_EL1, 0x200400) \ + X(DBGAUTHSTATUS_EL1, 0x2c1c1c) \ + X(OSLSR_EL1, 0x280402) + +/** + * Definitions of readable and writeable debug registers' ISS signatures. 
+ */ +#define EL1_DEBUG_REGISTERS_READ_WRITE \ + X(DBGCLAIMCLR_EL1, 0x2c1c12) \ + X(DBGCLAIMSET_EL1, 0x2c1c10) \ + X(DBGPRCR_EL1, 0x280408) \ + X(MDCCINT_EL1, 0x200004) \ + X(MDSCR_EL1, 0x240004) \ + X(OSDLR_EL1, 0x280406) \ + X(OSDTRRX_EL1, 0x240000) \ + X(OSDTRTX_EL1, 0x240006) \ + X(OSECCR_EL1, 0x24000c) \ + X(DBGBCR0_EL1, 0x2a0000) \ + X(DBGBCR1_EL1, 0x2a0002) \ + X(DBGBCR2_EL1, 0x2a0004) \ + X(DBGBCR3_EL1, 0x2a0006) \ + X(DBGBCR4_EL1, 0x2a0008) \ + X(DBGBCR5_EL1, 0x2a000a) \ + X(DBGBCR6_EL1, 0x2a000c) \ + X(DBGBCR7_EL1, 0x2a000e) \ + X(DBGBCR8_EL1, 0x2a0010) \ + X(DBGBCR9_EL1, 0x2a0012) \ + X(DBGBCR10_EL1, 0x2a0014) \ + X(DBGBCR11_EL1, 0x2a0016) \ + X(DBGBCR12_EL1, 0x2a0018) \ + X(DBGBCR13_EL1, 0x2a001a) \ + X(DBGBCR14_EL1, 0x2a001c) \ + X(DBGBCR15_EL1, 0x2a001e) \ + X(DBGBVR0_EL1, 0x280000) \ + X(DBGBVR1_EL1, 0x280002) \ + X(DBGBVR2_EL1, 0x280004) \ + X(DBGBVR3_EL1, 0x280006) \ + X(DBGBVR4_EL1, 0x280008) \ + X(DBGBVR5_EL1, 0x28000a) \ + X(DBGBVR6_EL1, 0x28000c) \ + X(DBGBVR7_EL1, 0x28000e) \ + X(DBGBVR8_EL1, 0x280010) \ + X(DBGBVR9_EL1, 0x280012) \ + X(DBGBVR10_EL1, 0x280014) \ + X(DBGBVR11_EL1, 0x280016) \ + X(DBGBVR12_EL1, 0x280018) \ + X(DBGBVR13_EL1, 0x28001a) \ + X(DBGBVR14_EL1, 0x28001c) \ + X(DBGBVR15_EL1, 0x28001e) \ + X(DBGWCR0_EL1, 0x2e0000) \ + X(DBGWCR1_EL1, 0x2e0002) \ + X(DBGWCR2_EL1, 0x2e0004) \ + X(DBGWCR3_EL1, 0x2e0006) \ + X(DBGWCR4_EL1, 0x2e0008) \ + X(DBGWCR5_EL1, 0x2e000a) \ + X(DBGWCR6_EL1, 0x2e000c) \ + X(DBGWCR7_EL1, 0x2e000e) \ + X(DBGWCR8_EL1, 0x2e0010) \ + X(DBGWCR9_EL1, 0x2e0012) \ + X(DBGWCR10_EL1, 0x2e0014) \ + X(DBGWCR11_EL1, 0x2e0016) \ + X(DBGWCR12_EL1, 0x2e0018) \ + X(DBGWCR13_EL1, 0x2e001a) \ + X(DBGWCR14_EL1, 0x2e001c) \ + X(DBGWCR15_EL1, 0x2e001e) \ + X(DBGWVR0_EL1, 0x2c0000) \ + X(DBGWVR1_EL1, 0x2c0002) \ + X(DBGWVR2_EL1, 0x2c0004) \ + X(DBGWVR3_EL1, 0x2c0006) \ + X(DBGWVR4_EL1, 0x2c0008) \ + X(DBGWVR5_EL1, 0x2c000a) \ + X(DBGWVR6_EL1, 0x2c000c) \ + X(DBGWVR7_EL1, 0x2c000e) \ + X(DBGWVR8_EL1, 0x2c0010) \ + X(DBGWVR9_EL1, 
0x2c0012) \ + X(DBGWVR10_EL1, 0x2c0014) \ + X(DBGWVR11_EL1, 0x2c0016) \ + X(DBGWVR12_EL1, 0x2c0018) \ + X(DBGWVR13_EL1, 0x2c001a) \ + X(DBGWVR14_EL1, 0x2c001c) \ + X(DBGWVR15_EL1, 0x2c001e) + +/** + * Definitions of all debug registers' ISS signatures. + */ +#define EL1_DEBUG_REGISTERS \ + EL1_DEBUG_REGISTERS_READ \ + EL1_DEBUG_REGISTERS_READ_WRITE + +/** + * Returns the value for MDCR_EL2 for the particular VM. + * For now, the primary VM has one value and all secondary VMs share a value. + */ +uintreg_t get_mdcr_el2_value(spci_vm_id_t vm_id) +{ + uintreg_t mdcr_el2_value = read_msr(MDCR_EL2); + uintreg_t pmcr_el0 = read_msr(PMCR_EL0); + + /* + * Preserve E2PB for now, which depends on the SPE implementation. + * TODO: Investigate how to detect whether SPE is implemented, and which + * stage's translation regime is applicable, i.e., EL2 or EL1. + */ + mdcr_el2_value &= MDCR_EL2_E2PB; + + /* + * Set the number of event counters accessible from all exception levels + * (MDCR_EL2.HPMN) to be the number of implemented event counters + * (PMCR_EL0.N). + * TODO(b/132394973): examine the implications of this setting. + */ + mdcr_el2_value |= GET_PMCR_EL0_N(pmcr_el0) & MDCR_EL2_HPMN; + + /* + * Trap all VM accesses to debug registers to have fine grained control + * over system register accesses. + * Do not trap the Primary VM's debug events, e.g., watchpoint or + * breakpoint events (!MDCR_EL2_TDE). + */ + mdcr_el2_value |= + MDCR_EL2_TTRF | MDCR_EL2_TDRA | MDCR_EL2_TDOSA | MDCR_EL2_TDA; + + if (vm_id != HF_PRIMARY_VM_ID) { + /* + * Debug event exceptions should be disabled in secondary VMs + * but trap them for additional security. + */ + mdcr_el2_value |= MDCR_EL2_TDE; + } + + return mdcr_el2_value; +} + +/** + * Returns true if the ESR register shows an access to an EL1 debug register. + */ +bool is_debug_el1_register_access(uintreg_t esr_el2) +{ + /* + * Architecture Reference Manual D12.2: op0 == 0b10 is for debug and + * trace system registers. 
op1 = 0x1 for trace, remaining are debug. + */ + return GET_ISS_OP0(esr_el2) == 0x2 && GET_ISS_OP1(esr_el2) != 0x1; +} + +/** + * Processes an access (msr, mrs) to an EL1 debug register. + * Returns true if the access was allowed and performed, false otherwise. + */ +bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id, + uintreg_t esr_el2) +{ + /* + * For now, debug registers are not supported by secondary VMs. + * Disallow accesses to them. + */ + if (vm_id != HF_PRIMARY_VM_ID) { + return false; + } + + uintreg_t sys_register = GET_ISS_SYSREG(esr_el2); + uintreg_t rt_register = GET_ISS_RT(esr_el2); + uintreg_t value; + + CHECK(rt_register < NUM_GP_REGS); + + if (ISS_IS_READ(esr_el2)) { + switch (sys_register) { +#define X(reg_name, reg_sig) \ + case reg_sig: \ + value = read_msr(reg_name); \ + break; + EL1_DEBUG_REGISTERS +#undef X + default: + value = vcpu_get_regs(vcpu)->r[rt_register]; + dlog("Unsupported system register read 0x%x\n", + sys_register); + break; + } + vcpu_get_regs(vcpu)->r[rt_register] = value; + + } else { + value = vcpu_get_regs(vcpu)->r[rt_register]; + switch (sys_register) { +#define X(reg_name, reg_sig) \ + case reg_sig: \ + write_msr(reg_name, value); \ + break; + EL1_DEBUG_REGISTERS_READ_WRITE +#undef X + default: + dlog("Unsupported system register write 0x%x\n", + sys_register); + break; + } + } + + return true; +} diff --git a/src/arch/aarch64/hypervisor/debug_el1.h b/src/arch/aarch64/hypervisor/debug_el1.h new file mode 100644 index 000000000..28becb569 --- /dev/null +++ b/src/arch/aarch64/hypervisor/debug_el1.h @@ -0,0 +1,30 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "hf/arch/types.h" + +#include "hf/cpu.h" + +#include "vmapi/hf/spci.h" + +uintreg_t get_mdcr_el2_value(spci_vm_id_t vm_id); + +bool is_debug_el1_register_access(uintreg_t esr_el2); + +bool debug_el1_process_access(struct vcpu *vcpu, spci_vm_id_t vm_id, + uintreg_t esr_el2); diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S index 900396a9c..0ce6089bf 100644 --- a/src/arch/aarch64/hypervisor/exceptions.S +++ b/src/arch/aarch64/hypervisor/exceptions.S @@ -94,6 +94,17 @@ sub x18, x18, #0x16 cbnz x18, slow_sync_lower + /* + * Make room for hvc_handler_return on stack, and point x8 (the indirect + * result location register in the AAPCS64 standard) to it. + * hvc_handler_return is returned this way according to paragraph + * 5.4.2.B.3 and section 5.5 because it is larger than 16 bytes. + */ + stp xzr, xzr, [sp, #-16]! + stp xzr, xzr, [sp, #-16]! + stp xzr, xzr, [sp, #-16]! + mov x8, sp + /* * Save x29 and x30, which are not saved by the callee, then jump to * HVC handler. @@ -101,19 +112,26 @@ stp x29, x30, [sp, #-16]! bl hvc_handler ldp x29, x30, [sp], #16 - cbnz x1, sync_lower_switch - /* Zero out all volatile registers (except x0) and return. */ + /* Get the hvc_handler_return back off the stack. */ + ldp x0, x1, [sp], #16 + ldp x2, x3, [sp], #16 + ldr x4, [sp], #16 + + cbnz x4, sync_lower_switch + + /* + * Zero out volatile registers (except x0-x3, which contain results) and + * return. + */ stp xzr, xzr, [sp, #-16]! 
- ldp x1, x2, [sp] - ldp x3, x4, [sp] - ldp x5, x6, [sp] - ldp x7, x8, [sp] - ldp x9, x10, [sp] - ldp x11, x12, [sp] - ldp x13, x14, [sp] - ldp x15, x16, [sp], #16 - mov x17, xzr + ldp x4, x5, [sp] + ldp x6, x7, [sp] + ldp x8, x9, [sp] + ldp x10, x11, [sp] + ldp x12, x13, [sp] + ldp x14, x15, [sp] + ldp x16, x17, [sp], #16 /* Restore x18, which was saved on the stack. */ ldr x18, [sp], #16 @@ -196,6 +214,14 @@ slow_sync_lower: /* The caller must have saved x18, so we don't save it here. */ save_volatile_to_vcpu + /* Extract the exception class (EC) from exception syndrome register. */ + mrs x18, esr_el2 + lsr x18, x18, #26 + + /* Take the system register path for EC 0x18. */ + sub x18, x18, #0x18 + cbz x18, system_register_access + /* Read syndrome register and call C handler. */ mrs x0, esr_el2 bl sync_lower_exception @@ -205,13 +231,39 @@ slow_sync_lower: mrs x0, tpidr_el2 b vcpu_restore_volatile_and_run +/** + * Handle accesses to system registers (EC=0x18) and return to original caller. + */ +system_register_access: + /* + * Non-volatile registers are (conservatively) saved because the handler + * can clobber non-volatile registers that are used by the msr/mrs, + * which results in the wrong value being read or written. + */ + /* Get the current vcpu. */ + mrs x18, tpidr_el2 + stp x19, x20, [x18, #VCPU_REGS + 8 * 19] + stp x21, x22, [x18, #VCPU_REGS + 8 * 21] + stp x23, x24, [x18, #VCPU_REGS + 8 * 23] + stp x25, x26, [x18, #VCPU_REGS + 8 * 25] + stp x27, x28, [x18, #VCPU_REGS + 8 * 27] + + /* Read syndrome register and call C handler. */ + mrs x0, esr_el2 + bl handle_system_register_access + cbnz x0, vcpu_switch + + /* vcpu is not changing. */ + mrs x0, tpidr_el2 + b vcpu_restore_nonvolatile_and_run + sync_lower_switch: /* We'll have to switch, so save volatile state before doing so. */ mrs x18, tpidr_el2 - /* Store zeroes in volatile register storage, except x0. 
*/ - stp x0, xzr, [x18, #VCPU_REGS + 8 * 0] - stp xzr, xzr, [x18, #VCPU_REGS + 8 * 2] + /* Store zeroes in volatile register storage, except x0-x3. */ + stp x0, x1, [x18, #VCPU_REGS + 8 * 0] + stp x2, x3, [x18, #VCPU_REGS + 8 * 2] stp xzr, xzr, [x18, #VCPU_REGS + 8 * 4] stp xzr, xzr, [x18, #VCPU_REGS + 8 * 6] stp xzr, xzr, [x18, #VCPU_REGS + 8 * 8] @@ -231,7 +283,7 @@ sync_lower_switch: stp x2, x3, [x18, #VCPU_REGS + 8 * 31] /* Save lazy state, then switch to new vcpu. */ - mov x0, x1 + mov x0, x4 /* Intentional fallthrough. */ /** @@ -253,64 +305,71 @@ vcpu_switch: stp x27, x28, [x1, #VCPU_REGS + 8 * 27] /* Save lazy state. */ + /* Use x28 as the base */ + add x28, x1, #VCPU_LAZY + mrs x24, vmpidr_el2 mrs x25, csselr_el1 - stp x24, x25, [x1, #VCPU_LAZY + 16 * 0] + stp x24, x25, [x28], #16 mrs x2, sctlr_el1 mrs x3, actlr_el1 - stp x2, x3, [x1, #VCPU_LAZY + 16 * 1] + stp x2, x3, [x28], #16 mrs x4, cpacr_el1 mrs x5, ttbr0_el1 - stp x4, x5, [x1, #VCPU_LAZY + 16 * 2] + stp x4, x5, [x28], #16 mrs x6, ttbr1_el1 mrs x7, tcr_el1 - stp x6, x7, [x1, #VCPU_LAZY + 16 * 3] + stp x6, x7, [x28], #16 mrs x8, esr_el1 mrs x9, afsr0_el1 - stp x8, x9, [x1, #VCPU_LAZY + 16 * 4] + stp x8, x9, [x28], #16 mrs x10, afsr1_el1 mrs x11, far_el1 - stp x10, x11, [x1, #VCPU_LAZY + 16 * 5] + stp x10, x11, [x28], #16 mrs x12, mair_el1 mrs x13, vbar_el1 - stp x12, x13, [x1, #VCPU_LAZY + 16 * 6] + stp x12, x13, [x28], #16 mrs x14, contextidr_el1 mrs x15, tpidr_el0 - stp x14, x15, [x1, #VCPU_LAZY + 16 * 7] + stp x14, x15, [x28], #16 mrs x16, tpidrro_el0 mrs x17, tpidr_el1 - stp x16, x17, [x1, #VCPU_LAZY + 16 * 8] + stp x16, x17, [x28], #16 mrs x18, amair_el1 mrs x19, cntkctl_el1 - stp x18, x19, [x1, #VCPU_LAZY + 16 * 9] + stp x18, x19, [x28], #16 mrs x20, sp_el0 mrs x21, sp_el1 - stp x20, x21, [x1, #VCPU_LAZY + 16 * 10] + stp x20, x21, [x28], #16 mrs x22, elr_el1 mrs x23, spsr_el1 - stp x22, x23, [x1, #VCPU_LAZY + 16 * 11] + stp x22, x23, [x28], #16 mrs x24, par_el1 mrs x25, hcr_el2 - stp x24, x25, 
[x1, #VCPU_LAZY + 16 * 12] + stp x24, x25, [x28], #16 mrs x26, cptr_el2 mrs x27, cnthctl_el2 - stp x26, x27, [x1, #VCPU_LAZY + 16 * 13] + stp x26, x27, [x28], #16 + + mrs x4, vttbr_el2 + mrs x5, mdcr_el2 + stp x4, x5, [x28], #16 - mrs x28, vttbr_el2 - str x28, [x1, #VCPU_LAZY + 16 * 14] + mrs x6, mdscr_el1 + str x6, [x28], #16 /* Save GIC registers. */ #if GIC_VERSION == 3 || GIC_VERSION == 4 @@ -318,35 +377,32 @@ vcpu_switch: add x2, x1, #VCPU_GIC mrs x3, ich_hcr_el2 - str x3, [x2, #16 * 0] + mrs x4, icc_sre_el2 + stp x3, x4, [x2, #16 * 0] #endif - /* - * Save floating point registers. - * - * Offset is too large, so start from a new base. - */ - add x2, x1, #VCPU_FREGS - stp q0, q1, [x2, #32 * 0] - stp q2, q3, [x2, #32 * 1] - stp q4, q5, [x2, #32 * 2] - stp q6, q7, [x2, #32 * 3] - stp q8, q9, [x2, #32 * 4] - stp q10, q11, [x2, #32 * 5] - stp q12, q13, [x2, #32 * 6] - stp q14, q15, [x2, #32 * 7] - stp q16, q17, [x2, #32 * 8] - stp q18, q19, [x2, #32 * 9] - stp q20, q21, [x2, #32 * 10] - stp q22, q23, [x2, #32 * 11] - stp q24, q25, [x2, #32 * 12] - stp q26, q27, [x2, #32 * 13] - stp q28, q29, [x2, #32 * 14] - /* Offest becomes too large, so move the base. */ - stp q30, q31, [x2, #32 * 15]! + /* Save floating point registers. */ + /* Use x28 as the base. */ + add x28, x1, #VCPU_FREGS + stp q0, q1, [x28], #32 + stp q2, q3, [x28], #32 + stp q4, q5, [x28], #32 + stp q6, q7, [x28], #32 + stp q8, q9, [x28], #32 + stp q10, q11, [x28], #32 + stp q12, q13, [x28], #32 + stp q14, q15, [x28], #32 + stp q16, q17, [x28], #32 + stp q18, q19, [x28], #32 + stp q20, q21, [x28], #32 + stp q22, q23, [x28], #32 + stp q24, q25, [x28], #32 + stp q26, q27, [x28], #32 + stp q28, q29, [x28], #32 + stp q30, q31, [x28], #32 mrs x3, fpsr mrs x4, fpcr - stp x3, x4, [x2, #32 * 1] + stp x3, x4, [x28], #32 /* Save new vcpu pointer in non-volatile register. */ mov x19, x0 @@ -408,72 +464,80 @@ vcpu_restore_all_and_run: vcpu_restore_lazy_and_run: /* Restore lazy registers. 
*/ - ldp x24, x25, [x0, #VCPU_LAZY + 16 * 0] + /* Use x28 as the base. */ + add x28, x0, #VCPU_LAZY + + ldp x24, x25, [x28], #16 msr vmpidr_el2, x24 msr csselr_el1, x25 - ldp x2, x3, [x0, #VCPU_LAZY + 16 * 1] + ldp x2, x3, [x28], #16 msr sctlr_el1, x2 msr actlr_el1, x3 - ldp x4, x5, [x0, #VCPU_LAZY + 16 * 2] + ldp x4, x5, [x28], #16 msr cpacr_el1, x4 msr ttbr0_el1, x5 - ldp x6, x7, [x0, #VCPU_LAZY + 16 * 3] + ldp x6, x7, [x28], #16 msr ttbr1_el1, x6 msr tcr_el1, x7 - ldp x8, x9, [x0, #VCPU_LAZY + 16 * 4] + ldp x8, x9, [x28], #16 msr esr_el1, x8 msr afsr0_el1, x9 - ldp x10, x11, [x0, #VCPU_LAZY + 16 * 5] + ldp x10, x11, [x28], #16 msr afsr1_el1, x10 msr far_el1, x11 - ldp x12, x13, [x0, #VCPU_LAZY + 16 * 6] + ldp x12, x13, [x28], #16 msr mair_el1, x12 msr vbar_el1, x13 - ldp x14, x15, [x0, #VCPU_LAZY + 16 * 7] + ldp x14, x15, [x28], #16 msr contextidr_el1, x14 msr tpidr_el0, x15 - ldp x16, x17, [x0, #VCPU_LAZY + 16 * 8] + ldp x16, x17, [x28], #16 msr tpidrro_el0, x16 msr tpidr_el1, x17 - ldp x18, x19, [x0, #VCPU_LAZY + 16 * 9] + ldp x18, x19, [x28], #16 msr amair_el1, x18 msr cntkctl_el1, x19 - ldp x20, x21, [x0, #VCPU_LAZY + 16 * 10] + ldp x20, x21, [x28], #16 msr sp_el0, x20 msr sp_el1, x21 - ldp x22, x23, [x0, #VCPU_LAZY + 16 * 11] + ldp x22, x23, [x28], #16 msr elr_el1, x22 msr spsr_el1, x23 - ldp x24, x25, [x0, #VCPU_LAZY + 16 * 12] + ldp x24, x25, [x28], #16 msr par_el1, x24 msr hcr_el2, x25 - ldp x26, x27, [x0, #VCPU_LAZY + 16 * 13] + ldp x26, x27, [x28], #16 msr cptr_el2, x26 msr cnthctl_el2, x27 - ldr x28, [x0, #VCPU_LAZY + 16 * 14] - msr vttbr_el2, x28 + ldp x4, x5, [x28], #16 + msr vttbr_el2, x4 + msr mdcr_el2, x5 + + ldr x6, [x28], #16 + msr mdscr_el1, x6 /* Restore GIC registers. */ #if GIC_VERSION == 3 || GIC_VERSION == 4 /* Offset is too large, so start from a new base. 
*/ add x2, x0, #VCPU_GIC - ldr x3, [x2, #16 * 0] + ldp x3, x4, [x2, #16 * 0] msr ich_hcr_el2, x3 + msr icc_sre_el2, x4 #endif /* @@ -486,6 +550,9 @@ vcpu_restore_lazy_and_run: bl maybe_invalidate_tlb mov x0, x19 + /* Intentional fallthrough. */ + +vcpu_restore_nonvolatile_and_run: /* Restore non-volatile registers. */ ldp x19, x20, [x0, #VCPU_REGS + 8 * 19] ldp x21, x22, [x0, #VCPU_REGS + 8 * 21] @@ -522,7 +589,7 @@ vcpu_restore_volatile_and_run: .balign 0x40 /** - * Restores volatile registers from stack and returns. + * Restore volatile registers from stack and return to original caller. */ restore_from_stack_and_return: restore_volatile_from_stack el2 diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c index 5a4177cc8..8bb2dc41f 100644 --- a/src/arch/aarch64/hypervisor/handler.c +++ b/src/arch/aarch64/hypervisor/handler.c @@ -18,8 +18,10 @@ #include "hf/arch/barriers.h" #include "hf/arch/init.h" +#include "hf/arch/mm.h" #include "hf/api.h" +#include "hf/check.h" #include "hf/cpu.h" #include "hf/dlog.h" #include "hf/panic.h" @@ -28,6 +30,7 @@ #include "vmapi/hf/call.h" +#include "debug_el1.h" #include "msr.h" #include "psci.h" #include "psci_handler.h" @@ -35,12 +38,25 @@ #define HCR_EL2_VI (1u << 7) +/** + * Gets the Exception Class from the ESR. + */ +#define GET_EC(esr) ((esr) >> 26) + +/** + * Gets the value to increment for the next PC. + * The ESR encodes whether the instruction is 2 bytes or 4 bytes long. + */ +#define GET_NEXT_PC_INC(esr) (((esr) & (1u << 25)) ? 4 : 2) + struct hvc_handler_return { - uintreg_t user_ret; + smc_res_t user_ret; struct vcpu *new; }; -/* Gets a reference to the currently executing vCPU. */ +/** + * Returns a reference to the currently executing vCPU. 
+ */ static struct vcpu *current(void) { return (struct vcpu *)read_msr(tpidr_el2); @@ -101,15 +117,6 @@ void begin_restoring_state(struct vcpu *vcpu) } } -/** - * Ensures all explicit memory access and management instructions for - * non-shareable normal memory have completed before continuing. - */ -static void dsb_nsh(void) -{ - __asm__ volatile("dsb nsh"); -} - /** * Invalidate all stage 1 TLB entries on the current (physical) CPU for the * current VMID. @@ -135,7 +142,7 @@ static void invalidate_vm_tlb(void) * TLB invalidation has taken effect. Non-sharable is enough because the * TLB is local to the CPU. */ - dsb_nsh(); + dsb(nsh); } /** @@ -193,15 +200,15 @@ noreturn void serr_current_exception(uintreg_t elr, uintreg_t spsr) noreturn void sync_current_exception(uintreg_t elr, uintreg_t spsr) { uintreg_t esr = read_msr(esr_el2); + uintreg_t ec = GET_EC(esr); (void)spsr; - switch (esr >> 26) { + switch (ec) { case 0x25: /* EC = 100101, Data abort. */ - dlog("Data abort: pc=0x%x, esr=0x%x, ec=0x%x", elr, esr, - esr >> 26); + dlog("Data abort: pc=%#x, esr=%#x, ec=%#x", elr, esr, ec); if (!(esr & (1u << 10))) { /* Check FnV bit. 
*/ - dlog(", far=0x%x", read_msr(far_el2)); + dlog(", far=%#x", read_msr(far_el2)); } else { dlog(", far=invalid"); } @@ -210,9 +217,9 @@ noreturn void sync_current_exception(uintreg_t elr, uintreg_t spsr) break; default: - dlog("Unknown current sync exception pc=0x%x, esr=0x%x, " - "ec=0x%x\n", - elr, esr, esr >> 26); + dlog("Unknown current sync exception pc=%#x, esr=%#x, " + "ec=%#x\n", + elr, esr, ec); break; } @@ -247,23 +254,111 @@ static void set_virtual_interrupt_current(bool enable) write_msr(hcr_el2, hcr_el2); } -static bool smc_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, - uintreg_t arg1, uintreg_t arg2, uintreg_t *ret, - struct vcpu **next) +static bool smc_check_client_privileges(const struct vcpu *vcpu) { - if (psci_handler(vcpu, func, arg0, arg1, arg2, ret, next)) { + (void)vcpu; /*UNUSED*/ + + /* + * TODO(b/132421503): Check for privileges based on manifest. + * Currently returns false, which maintains existing behavior. + */ + + return false; +} + +/** + * Applies SMC access control according to manifest. + * Forwards the call if access is granted. + * Returns true if call is forwarded. + */ +static bool smc_forwarder(const struct vcpu *vcpu_, smc_res_t *ret) +{ + struct vcpu *vcpu = (struct vcpu *)vcpu_; + uint32_t func = vcpu_get_regs(vcpu)->r[0]; + /* TODO(b/132421503): obtain vmid according to new scheme. 
*/ + uint32_t client_id = vm_get_id(vcpu_get_vm(vcpu)); + + if (smc_check_client_privileges(vcpu)) { + *ret = smc_forward(func, vcpu_get_regs(vcpu)->r[1], vcpu_get_regs(vcpu)->r[2], + vcpu_get_regs(vcpu)->r[3], vcpu_get_regs(vcpu)->r[4], vcpu_get_regs(vcpu)->r[5], + vcpu_get_regs(vcpu)->r[6], client_id); return true; } + return false; +} + +static bool spci_handler(uintreg_t func, uintreg_t arg1, uintreg_t arg2, + uintreg_t arg3, uintreg_t *ret, struct vcpu **next) +{ + (void)arg2; + (void)arg3; + switch (func & ~SMCCC_CONVENTION_MASK) { - case HF_DEBUG_LOG: - *ret = api_debug_log(arg0, vcpu); + case SPCI_VERSION_32: + *ret = api_spci_version(); + return true; + case SPCI_YIELD_32: + *ret = api_spci_yield(current(), next); + return true; + case SPCI_MSG_SEND_32: + *ret = api_spci_msg_send(arg1, current(), next); + return true; + case SPCI_MSG_RECV_32: + *ret = api_spci_msg_recv(arg1, current(), next); return true; } return false; } +/** + * Set or clear VI bit according to pending interrupts. + */ +static void update_vi(struct vcpu *next) +{ + if (next == NULL) { + /* + * Not switching vCPUs, set the bit for the current vCPU + * directly in the register. + */ + struct vcpu *vcpu = current(); + + set_virtual_interrupt_current(vcpu_is_interrupted(vcpu)); + } else { + /* + * About to switch vCPUs, set the bit for the vCPU to which we + * are switching in the saved copy of the register. + */ + set_virtual_interrupt( + vcpu_get_regs(next), + vcpu_is_interrupted(next)); + } +} + +/** + * Processes SMC instruction calls. + */ +static bool smc_handler(struct vcpu *vcpu, smc_res_t *ret, struct vcpu **next) +{ + uint32_t func = vcpu_get_regs(vcpu)->r[0]; + + if (psci_handler(vcpu, func, vcpu_get_regs(vcpu)->r[1], vcpu_get_regs(vcpu)->r[2], + vcpu_get_regs(vcpu)->r[3], &ret->res0, next)) { + /* SMC PSCI calls are processed by the PSCI handler. 
*/ + return true; + } + + switch (func & ~SMCCC_CONVENTION_MASK) { + case HF_DEBUG_LOG: + api_debug_log(vcpu_get_regs(vcpu)->r[1], vcpu); + return true; + } + + /* Remaining SMC calls need to be forwarded. */ + return smc_forwarder(vcpu, ret); +} + struct hvc_handler_return hvc_handler(uintreg_t arg0, uintreg_t arg1, uintreg_t arg2, uintreg_t arg3) { @@ -271,105 +366,79 @@ struct hvc_handler_return hvc_handler(uintreg_t arg0, uintreg_t arg1, ret.new = NULL; - if (psci_handler(current(), arg0, arg1, arg2, arg3, &ret.user_ret, + if (psci_handler(current(), arg0, arg1, arg2, arg3, &ret.user_ret.res0, &ret.new)) { return ret; } - switch ((uint32_t)arg0) { - case SPCI_VERSION_32: - ret.user_ret = api_spci_version(); - break; + if (spci_handler(arg0, arg1, arg2, arg3, &ret.user_ret.res0, + &ret.new)) { + update_vi(ret.new); + return ret; + } + switch ((uint32_t)arg0) { case HF_VM_GET_ID: - ret.user_ret = api_vm_get_id(current()); + ret.user_ret.res0 = api_vm_get_id(current()); break; case HF_VM_GET_COUNT: - ret.user_ret = api_vm_get_count(); + ret.user_ret.res0 = api_vm_get_count(); break; case HF_VCPU_GET_COUNT: - ret.user_ret = api_vcpu_get_count(arg1, current()); + ret.user_ret.res0 = api_vcpu_get_count(arg1, current()); break; case HF_VCPU_RUN: - ret.user_ret = api_vcpu_run(arg1, arg2, current(), &ret.new); - break; - - case SPCI_YIELD_32: - ret.user_ret = api_spci_yield(current(), &ret.new); + ret.user_ret.res0 = api_vcpu_run(arg1, arg2, current(), &ret.new); break; case HF_VM_CONFIGURE: - ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2), - current(), &ret.new); - break; - - case SPCI_MSG_SEND_32: - ret.user_ret = api_spci_msg_send(arg1, current(), &ret.new); - break; - - case SPCI_MSG_RECV_32: - ret.user_ret = api_spci_msg_recv(arg1, current(), &ret.new); + ret.user_ret.res0 = api_vm_configure( + ipa_init(arg1), ipa_init(arg2), current(), &ret.new); break; case HF_MAILBOX_CLEAR: - ret.user_ret = api_mailbox_clear(current(), &ret.new); + 
ret.user_ret.res0 = api_mailbox_clear(current(), &ret.new); break; case HF_MAILBOX_WRITABLE_GET: - ret.user_ret = api_mailbox_writable_get(current()); + ret.user_ret.res0 = api_mailbox_writable_get(current()); break; case HF_MAILBOX_WAITER_GET: - ret.user_ret = api_mailbox_waiter_get(arg1, current()); + ret.user_ret.res0 = api_mailbox_waiter_get(arg1, current()); break; case HF_INTERRUPT_ENABLE: - ret.user_ret = api_interrupt_enable(arg1, arg2, current()); + ret.user_ret.res0 = api_interrupt_enable(arg1, arg2, current()); break; case HF_INTERRUPT_GET: - ret.user_ret = api_interrupt_get(current()); + ret.user_ret.res0 = api_interrupt_get(current()); break; case HF_INTERRUPT_INJECT: - ret.user_ret = api_interrupt_inject(arg1, arg2, arg3, current(), - &ret.new); + ret.user_ret.res0 = api_interrupt_inject(arg1, arg2, arg3, + current(), &ret.new); break; case HF_SHARE_MEMORY: - ret.user_ret = + ret.user_ret.res0 = api_share_memory(arg1 >> 32, ipa_init(arg2), arg3, arg1 & 0xffffffff, current()); break; case HF_DEBUG_LOG: - ret.user_ret = api_debug_log(arg1, current()); + ret.user_ret.res0 = api_debug_log(arg1, current()); break; default: - ret.user_ret = -1; + ret.user_ret.res0 = -1; } - /* Set or clear VI bit. */ - if (ret.new == NULL) { - /* - * Not switching vCPUs, set the bit for the current vCPU - * directly in the register. - */ - set_virtual_interrupt_current( - vcpu_get_interrupts(current())->enabled_and_pending_count > 0); - } else { - /* - * About to switch vCPUs, set the bit for the vCPU to which we - * are switching in the saved copy of the register. 
- */ - set_virtual_interrupt( - vcpu_get_regs(ret.new), - vcpu_get_interrupts(ret.new)->enabled_and_pending_count > 0); - } + update_vi(ret.new); return ret; } @@ -435,11 +504,12 @@ struct vcpu *sync_lower_exception(uintreg_t esr) { struct vcpu *vcpu = current(); struct vcpu_fault_info info; struct vcpu *new_vcpu; + uintreg_t ec = GET_EC(esr); - switch (esr >> 26) { + switch (ec) { case 0x01: /* EC = 000001, WFI or WFE. */ /* Skip the instruction. */ - vcpu_get_regs(vcpu)->pc += (esr & (1u << 25)) ? 4 : 2; + vcpu_get_regs(vcpu)->pc += GET_NEXT_PC_INC(esr); /* Check TI bit of ISS, 0 = WFI, 1 = WFE. */ if (esr & 1) { /* WFE */ @@ -470,29 +540,72 @@ case 0x17: /* EC = 010111, SMC instruction. */ { uintreg_t smc_pc = vcpu_get_regs(vcpu)->pc; - uintreg_t ret; + /* Zero-initialise: res1-res3 are copied to VM registers below even when a handler only sets res0, so unset fields must not expose hypervisor stack contents. */ + smc_res_t ret = {0}; struct vcpu *next = NULL; - if (!smc_handler(vcpu, vcpu_get_regs(vcpu)->r[0], vcpu_get_regs(vcpu)->r[1], - vcpu_get_regs(vcpu)->r[2], vcpu_get_regs(vcpu)->r[3], &ret, - &next)) { - dlog("Unsupported SMC call: 0x%x\n", vcpu_get_regs(vcpu)->r[0]); - ret = PSCI_ERROR_NOT_SUPPORTED; + if (!smc_handler(vcpu, &ret, &next)) { + /* TODO(b/132421503): handle SMC forward rejection */ + dlog("Unsupported SMC call: %#x\n", vcpu_get_regs(vcpu)->r[0]); + ret.res0 = PSCI_ERROR_NOT_SUPPORTED; } /* Skip the SMC instruction. */ - vcpu_get_regs(vcpu)->pc = smc_pc + (esr & (1u << 25) ? 4 : 2); - vcpu_get_regs(vcpu)->r[0] = ret; + vcpu_get_regs(vcpu)->pc = smc_pc + GET_NEXT_PC_INC(esr); + vcpu_get_regs(vcpu)->r[0] = ret.res0; + vcpu_get_regs(vcpu)->r[1] = ret.res1; + vcpu_get_regs(vcpu)->r[2] = ret.res2; + vcpu_get_regs(vcpu)->r[3] = ret.res3; return next; } + /* + * EC = 011000, MSR, MRS or System instruction execution that is not + * reported using EC 000000, 000001 or 000111. + */ + case 0x18: + /* + * NOTE: This should never be reached because it goes through a + * separate path handled by handle_system_register_access().
+ */ + panic("Handled by handle_system_register_access()."); + default: - dlog("Unknown lower sync exception pc=0x%x, esr=0x%x, " - "ec=0x%x\n", - vcpu_get_regs(vcpu)->pc, esr, esr >> 26); + dlog("Unknown lower sync exception pc=%#x, esr=%#x, " + "ec=%#x\n", + vcpu_get_regs(vcpu)->pc, esr, ec); break; } /* The exception wasn't handled so abort the VM. */ return api_abort(vcpu); } + +/** + * Handles EC = 011000, msr, mrs instruction traps. + * Returns non-null ONLY if the access failed and the vcpu is changing. + */ +struct vcpu *handle_system_register_access(uintreg_t esr) +{ + struct vcpu *vcpu = current(); + spci_vm_id_t vm_id = vm_get_id(vcpu_get_vm(vcpu)); + uintreg_t ec = GET_EC(esr); + + CHECK(ec == 0x18); + + /* + * Handle accesses to other registers that trap with the same EC. + * Abort when encountering unhandled register accesses. + */ + if (!is_debug_el1_register_access(esr)) { + return api_abort(vcpu); + } + + /* Abort if unable to fulfill the debug register access. */ + if (!debug_el1_process_access(vcpu, vm_id, esr)) { + return api_abort(vcpu); + } + + /* Instruction was fulfilled above. Skip it and run the next one. */ + vcpu_get_regs(vcpu)->pc += GET_NEXT_PC_INC(esr); + return NULL; +} diff --git a/src/arch/aarch64/hypervisor/hypervisor_entry.S b/src/arch/aarch64/hypervisor/hypervisor_entry.S index 55b5631c4..73c83cbcd 100644 --- a/src/arch/aarch64/hypervisor/hypervisor_entry.S +++ b/src/arch/aarch64/hypervisor/hypervisor_entry.S @@ -20,7 +20,7 @@ .global image_entry image_entry: /* Interpret the registers passed from the loader. */ - bl plat_entry + bl plat_boot_flow_hook /* Get pointer to first cpu. 
*/ adrp x0, boot_cpu diff --git a/src/arch/aarch64/hypervisor/offsets.h b/src/arch/aarch64/hypervisor/offsets.h index 51724e0cc..9f43fb832 100644 --- a/src/arch/aarch64/hypervisor/offsets.h +++ b/src/arch/aarch64/hypervisor/offsets.h @@ -21,7 +21,7 @@ #define CPU_STACK_BOTTOM 8 #define VCPU_REGS 32 #define VCPU_LAZY (VCPU_REGS + 264) -#define VCPU_FREGS (VCPU_LAZY + 232) +#define VCPU_FREGS (VCPU_LAZY + 248) #if GIC_VERSION == 3 || GIC_VERSION == 4 #define VCPU_GIC (VCPU_FREGS + 528) diff --git a/src/arch/aarch64/hypervisor/psci_handler.c b/src/arch/aarch64/hypervisor/psci_handler.c index cbc4333bb..0be2d60b6 100644 --- a/src/arch/aarch64/hypervisor/psci_handler.c +++ b/src/arch/aarch64/hypervisor/psci_handler.c @@ -37,7 +37,10 @@ void cpu_entry(struct cpu *c); /* Performs arch specific boot time initialisation. */ void arch_one_time_init(void) { - el3_psci_version = smc32(PSCI_VERSION, 0, 0, 0); + smc_res_t smc_res = + smc32(PSCI_VERSION, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR); + + el3_psci_version = smc_res.res0; /* Check there's nothing unexpected about PSCI. */ switch (el3_psci_version) { @@ -45,12 +48,12 @@ void arch_one_time_init(void) case PSCI_VERSION_1_0: case PSCI_VERSION_1_1: /* Supported EL3 PSCI version. */ - dlog("Found PSCI version: 0x%x\n", el3_psci_version); + dlog("Found PSCI version: %#x\n", el3_psci_version); break; default: /* Unsupported EL3 PSCI version. Log a warning but continue. 
*/ - dlog("Warning: unknown PSCI version: 0x%x\n", el3_psci_version); + dlog("Warning: unknown PSCI version: %#x\n", el3_psci_version); el3_psci_version = 0; break; } @@ -69,6 +72,7 @@ bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, uintreg_t arg1, uintreg_t arg2, uintreg_t *ret) { struct cpu *c; + smc_res_t smc_res; /* * If there's a problem with the EL3 PSCI, block standard secure service @@ -100,7 +104,9 @@ bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, *ret = 0; } else { /* PSCI 1.x only defines two feature bits. */ - *ret = smc32(func, arg0, 0, 0) & 0x3; + smc_res = smc32(func, arg0, 0, 0, 0, 0, 0, + SMCCC_CALLER_HYPERVISOR); + *ret = smc_res.res0 & 0x3; } break; @@ -123,12 +129,14 @@ bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, break; case PSCI_SYSTEM_OFF: - smc32(PSCI_SYSTEM_OFF, 0, 0, 0); + smc32(PSCI_SYSTEM_OFF, 0, 0, 0, 0, 0, 0, + SMCCC_CALLER_HYPERVISOR); panic("System off failed"); break; case PSCI_SYSTEM_RESET: - smc32(PSCI_SYSTEM_RESET, 0, 0, 0); + smc32(PSCI_SYSTEM_RESET, 0, 0, 0, 0, 0, 0, + SMCCC_CALLER_HYPERVISOR); panic("System reset failed"); break; @@ -161,14 +169,16 @@ bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, * vcpu registers will be ignored. */ arch_regs_set_pc_arg(vcpu_get_regs(vcpu), ipa_init(arg1), arg2); - *ret = smc64(PSCI_CPU_SUSPEND, arg0, (uintreg_t)&cpu_entry, - (uintreg_t)vcpu_get_cpu(vcpu)); + smc_res = smc64(PSCI_CPU_SUSPEND, arg0, (uintreg_t)&cpu_entry, + (uintreg_t)vcpu_get_cpu(vcpu), 0, 0, 0, + SMCCC_CALLER_HYPERVISOR); + *ret = smc_res.res0; break; } case PSCI_CPU_OFF: cpu_off(vcpu_get_cpu(vcpu)); - smc32(PSCI_CPU_OFF, 0, 0, 0); + smc32(PSCI_CPU_OFF, 0, 0, 0, 0, 0, 0, SMCCC_CALLER_HYPERVISOR); panic("CPU off failed"); break; @@ -191,8 +201,10 @@ bool psci_primary_vm_handler(struct vcpu *vcpu, uint32_t func, uintreg_t arg0, * itself off). 
*/ do { - *ret = smc64(PSCI_CPU_ON, arg0, (uintreg_t)&cpu_entry, - (uintreg_t)c); + smc_res = smc64(PSCI_CPU_ON, arg0, + (uintreg_t)&cpu_entry, (uintreg_t)c, 0, + 0, 0, SMCCC_CALLER_HYPERVISOR); + *ret = smc_res.res0; } while (*ret == PSCI_ERROR_ALREADY_ON); if (*ret != PSCI_RETURN_SUCCESS) { diff --git a/src/arch/aarch64/inc/hf/arch/barriers.h b/src/arch/aarch64/inc/hf/arch/barriers.h new file mode 100644 index 000000000..85d4e7da0 --- /dev/null +++ b/src/arch/aarch64/inc/hf/arch/barriers.h @@ -0,0 +1,70 @@ +/* + * Copyright 2018 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +/** AArch64-specific API */ + +/** + * Ensures explicit memory accesses before this point are completed before any + * later memory accesses are performed. The instruction argument specifies: + * - the shareability domain over which the instruction must operate, + * - the accesses for which the instruction operates. + */ +#define dmb(arg) \ + do { \ + __asm__ volatile("dmb " #arg); \ + } while (0) + +/** + * Ensures explicit memory access and management instructions have completed + * before continuing. The instruction argument specifies: + * - the shareability domain over which the instruction must operate, + * - the accesses for which the instruction operates. + */ +#define dsb(arg) \ + do { \ + __asm__ volatile("dsb " #arg); \ + } while (0) + +/** + * Flushes the instruction pipeline so that instructions are fetched from + * memory. 
+ */ +#define isb() \ + do { \ + __asm__ volatile("isb"); \ + } while (0) + +/** Platform-agnostic API */ + +/** + * Ensures all explicit memory accesses before this point are completed before + * any later memory accesses are performed. + */ +#define memory_ordering_barrier() dmb(sy) + +/** + * Ensures all explicit memory access and management instructions have completed + * before continuing. + */ +#define data_sync_barrier() dsb(sy) + +/** + * Flushes the instruction pipeline so that instructions are fetched from + * memory. + */ +#define insn_sync_barrier() isb() diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h index 6e3addce0..34beb159e 100644 --- a/src/arch/aarch64/inc/hf/arch/types.h +++ b/src/arch/aarch64/inc/hf/arch/types.h @@ -24,7 +24,9 @@ #define PAGE_BITS 12 #define PAGE_LEVEL_BITS 9 +#define STACK_ALIGN 16 #define FLOAT_REG_BYTES 16 +#define NUM_GP_REGS 31 /** The type of a page table entry (PTE). */ typedef uint64_t pte_t; @@ -68,11 +70,15 @@ struct arch_vm { /** Type to represent the register state of a vCPU. */ struct arch_regs { /* General purpose registers. */ - uintreg_t r[31]; + uintreg_t r[NUM_GP_REGS]; uintreg_t pc; uintreg_t spsr; - /* System registers. */ + /* + * System registers. + * NOTE: Ordering is important. If adding to or reordering registers + * below, make sure to update src/arch/aarch64/hypervisor/exceptions.S. + */ struct { uintreg_t vmpidr_el2; uintreg_t csselr_el1; @@ -103,6 +109,8 @@ struct arch_regs { uintreg_t cptr_el2; uintreg_t cnthctl_el2; uintreg_t vttbr_el2; + uintreg_t mdcr_el2; + uintreg_t mdscr_el1; } lazy; /* Floating point registers. 
*/ @@ -113,6 +121,7 @@ struct arch_regs { #if GIC_VERSION == 3 || GIC_VERSION == 4 struct { uintreg_t ich_hcr_el2; + uintreg_t icc_sre_el2; } gic; #endif diff --git a/src/arch/aarch64/hftest/hf_call.S b/src/arch/aarch64/inc/hf/arch/vm/mm.h similarity index 80% rename from src/arch/aarch64/hftest/hf_call.S rename to src/arch/aarch64/inc/hf/arch/vm/mm.h index 86dd7a68e..d212fa5d4 100644 --- a/src/arch/aarch64/hftest/hf_call.S +++ b/src/arch/aarch64/inc/hf/arch/vm/mm.h @@ -1,5 +1,5 @@ /* - * Copyright 2018 The Hafnium Authors. + * Copyright 2019 The Hafnium Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,8 +14,9 @@ * limitations under the License. */ -.section .text.hf_call, "ax" -.global hf_call -hf_call: - hvc #0 - ret +#pragma once + +#include "hf/mm.h" + +bool arch_vm_mm_init(void); +void arch_vm_mm_enable(paddr_t table); diff --git a/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h b/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h index be8b9b430..41ebe49af 100644 --- a/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h +++ b/src/arch/aarch64/inc/hf/arch/vm/power_mgmt.h @@ -29,10 +29,21 @@ enum power_status { POWER_STATUS_ON_PENDING, }; -noreturn void arch_power_off(void); +/** + * Holds temporary state used to set up the environment on which CPUs will + * start executing. + * + * vm_cpu_entry() depends on the layout of this struct. 
+ */ +struct arch_cpu_start_state { + uintptr_t initial_sp; + void (*entry)(uintreg_t arg); + uintreg_t arg; +}; + +bool arch_cpu_start(uintptr_t id, struct arch_cpu_start_state *s); -bool cpu_start(uintptr_t id, void *stack, size_t stack_size, - void (*entry)(uintptr_t arg), uintptr_t arg); +noreturn void arch_cpu_stop(void); +enum power_status arch_cpu_status(cpu_id_t cpu_id); -noreturn void cpu_stop(void); -enum power_status cpu_status(cpu_id_t cpu_id); +noreturn void arch_power_off(void); diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c index c2a1840b4..595f9c0c0 100644 --- a/src/arch/aarch64/mm.c +++ b/src/arch/aarch64/mm.c @@ -16,6 +16,7 @@ #include "hf/mm.h" +#include "hf/arch/barriers.h" #include "hf/arch/cpu.h" #include "hf/dlog.h" @@ -87,8 +88,26 @@ #define STAGE2_ACCESS_READ UINT64_C(1) #define STAGE2_ACCESS_WRITE UINT64_C(2) +#define CACHE_WORD_SIZE 4 + +/** + * Threshold number of pages in TLB to invalidate after which we invalidate all + * TLB entries on a given level. + * Constant is the number of pointers per page table entry, also used by Linux. + */ +#define MAX_TLBI_OPS MM_PTE_PER_PAGE + /* clang-format on */ +#define tlbi(op) \ + do { \ + __asm__ volatile("tlbi " #op); \ + } while (0) +#define tlbi_reg(op, reg) \ + do { \ + __asm__ __volatile__("tlbi " #op ", %0" : : "r"(reg)); \ + } while (0) + /** Mask for the address bits of the pte. */ #define PTE_ADDR_MASK \ (((UINT64_C(1) << 48) - 1) & ~((UINT64_C(1) << PAGE_BITS) - 1)) @@ -99,6 +118,11 @@ static uint8_t mm_s2_max_level; static uint8_t mm_s2_root_table_count; +static uintreg_t mm_vtcr_el2; +static uintreg_t mm_mair_el2; +static uintreg_t mm_tcr_el2; +static uintreg_t mm_sctlr_el2; + /** * Returns the encoding of a page table entry that isn't present. 
*/ @@ -240,16 +264,39 @@ void arch_mm_invalidate_stage1_range(vaddr_t va_begin, vaddr_t va_end) uintvaddr_t end = va_addr(va_end); uintvaddr_t it; - begin >>= 12; - end >>= 12; - - __asm__ volatile("dsb ishst"); + /* Sync with page table updates. */ + dsb(ishst); - for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) { - __asm__("tlbi vae2is, %0" : : "r"(it)); + /* + * Revisions prior to ARMv8.4 do not support invalidating a range of + * addresses, which means we have to loop over individual pages. If + * there are too many, it is quicker to invalidate all TLB entries. + */ + if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) { + if (VM_TOOLCHAIN == 1) { + tlbi(vmalle1is); + } else { + tlbi(alle2is); + } + } else { + begin >>= 12; + end >>= 12; + /* Invalidate stage-1 TLB, one page from the range at a time. */ + for (it = begin; it < end; + it += (UINT64_C(1) << (PAGE_BITS - 12))) { + if (VM_TOOLCHAIN == 1) { + tlbi_reg(vae1is, it); + } else { + tlbi_reg(vae2is, it); + } + } } - __asm__ volatile("dsb ish"); + /* Sync data accesses with TLB invalidation completion. */ + dsb(ish); + + /* Sync instruction fetches with TLB invalidation completion. */ + isb(); } /** @@ -264,38 +311,77 @@ void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end) /* TODO: This only applies to the current VMID. */ - begin >>= 12; - end >>= 12; + /* Sync with page table updates. */ + dsb(ishst); - __asm__ volatile("dsb ishst"); - - for (it = begin; it < end; it += (UINT64_C(1) << (PAGE_BITS - 12))) { - __asm__("tlbi ipas2e1, %0" : : "r"(it)); + /* + * Revisions prior to ARMv8.4 do not support invalidating a range of + * addresses, which means we have to loop over individual pages. If + * there are too many, it is quicker to invalidate all TLB entries. + */ + if ((end - begin) > (MAX_TLBI_OPS * PAGE_SIZE)) { + /* + * Invalidate all stage-1 and stage-2 entries of the TLB for + * the current VMID. 
+ */ + tlbi(vmalls12e1is); + } else { + begin >>= 12; + end >>= 12; + + /* + * Invalidate stage-2 TLB, one page from the range at a time. + * Note that this has no effect if the CPU has a TLB with + * combined stage-1/stage-2 translation. + */ + for (it = begin; it < end; + it += (UINT64_C(1) << (PAGE_BITS - 12))) { + tlbi_reg(ipas2e1is, it); + } + + /* + * Ensure completion of stage-2 invalidation in case a page + * table walk on another CPU refilled the TLB with a complete + * stage-1 + stage-2 walk based on the old stage-2 mapping. + */ + dsb(ish); + + /* + * Invalidate all stage-1 TLB entries. If the CPU has a combined + * TLB for stage-1 and stage-2, this will invalidate stage-2 as + * well. + */ + tlbi(vmalle1is); } - __asm__ volatile( - "dsb ish\n" - "tlbi vmalle1is\n" - "dsb ish\n"); + /* Sync data accesses with TLB invalidation completion. */ + dsb(ish); + + /* Sync instruction fetches with TLB invalidation completion. */ + isb(); } /** - * Ensures that the range of data in the cache is written back so that it is - * visible to all cores in the system. + * Returns the smallest cache line size of all the caches for this core. */ -void arch_mm_write_back_dcache(void *base, size_t size) +static uint16_t arch_mm_dcache_line_size(void) { - /* Clean each data cache line the corresponds to data in the range. */ - uint16_t line_size = 1 << ((read_msr(CTR_EL0) >> 16) & 0xf); + return CACHE_WORD_SIZE * + (UINT16_C(1) << ((read_msr(CTR_EL0) >> 16) & 0xf)); +} + +void arch_mm_flush_dcache(void *base, size_t size) +{ + /* Clean and invalidate each data cache line in the range. 
*/ + uint16_t line_size = arch_mm_dcache_line_size(); uintptr_t line_begin = (uintptr_t)base & ~(line_size - 1); uintptr_t end = (uintptr_t)base + size; while (line_begin < end) { - __asm__ volatile("dc cvac, %0" : : "r"(line_begin)); + __asm__ volatile("dc civac, %0" : : "r"(line_begin)); line_begin += line_size; } - - __asm__ volatile("dsb sy"); + dsb(sy); } uint64_t arch_mm_mode_to_stage1_attrs(int mode) @@ -444,11 +530,10 @@ uint8_t arch_mm_stage2_root_table_count(void) return mm_s2_root_table_count; } -bool arch_mm_init(paddr_t table, bool first) +bool arch_mm_init(void) { static const int pa_bits_table[16] = {32, 36, 40, 42, 44, 48}; uint64_t features = read_msr(id_aa64mmfr0_el1); - uint64_t v; int pa_bits = pa_bits_table[features & 0xf]; int extend_bits; int sl0; @@ -466,9 +551,7 @@ bool arch_mm_init(paddr_t table, bool first) return false; } - if (first) { - dlog("Supported bits in physical address: %d\n", pa_bits); - } + dlog("Supported bits in physical address: %d\n", pa_bits); /* * Determine sl0, starting level of the page table, based on the number @@ -502,67 +585,71 @@ bool arch_mm_init(paddr_t table, bool first) } mm_s2_root_table_count = 1 << extend_bits; - if (first) { - dlog("Stage 2 has %d page table levels with %d pages at the " - "root.\n", - mm_s2_max_level + 1, mm_s2_root_table_count); - } + dlog("Stage 2 has %d page table levels with %d pages at the root.\n", + mm_s2_max_level + 1, mm_s2_root_table_count); - v = (1u << 31) | /* RES1. */ - ((features & 0xf) << 16) | /* PS, matching features. */ - (0 << 14) | /* TG0: 4 KB granule. */ - (3 << 12) | /* SH0: inner shareable. */ - (1 << 10) | /* ORGN0: normal, cacheable ... */ - (1 << 8) | /* IRGN0: normal, cacheable ... */ - (sl0 << 6) | /* SL0. */ - ((64 - pa_bits) << 0); /* T0SZ: dependent on PS. */ - write_msr(vtcr_el2, v); + mm_vtcr_el2 = (1u << 31) | /* RES1. */ + ((features & 0xf) << 16) | /* PS, matching features. */ + (0 << 14) | /* TG0: 4 KB granule. 
*/ + (3 << 12) | /* SH0: inner shareable. */ + (1 << 10) | /* ORGN0: normal, cacheable ... */ + (1 << 8) | /* IRGN0: normal, cacheable ... */ + (sl0 << 6) | /* SL0. */ + ((64 - pa_bits) << 0) | /* T0SZ: dependent on PS. */ + 0; /* * 0 -> Device-nGnRnE memory * 0xff -> Normal memory, Inner/Outer Write-Back Non-transient, * Write-Alloc, Read-Alloc. */ - write_msr(mair_el2, (0 << (8 * STAGE1_DEVICEINDX)) | - (0xff << (8 * STAGE1_NORMALINDX))); - - write_msr(ttbr0_el2, pa_addr(table)); + mm_mair_el2 = (0 << (8 * STAGE1_DEVICEINDX)) | + (0xff << (8 * STAGE1_NORMALINDX)); /* * Configure tcr_el2. */ - v = (1 << 20) | /* TBI, top byte ignored. */ - ((features & 0xf) << 16) | /* PS. */ - (0 << 14) | /* TG0, granule size, 4KB. */ - (3 << 12) | /* SH0, inner shareable. */ - (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */ - (1 << 8) | /* IRGN0, normal mem, WB RA WA Cacheable. */ - (25 << 0) | /* T0SZ, input address is 2^39 bytes. */ - 0; - write_msr(tcr_el2, v); - - v = (1 << 0) | /* M, enable stage 1 EL2 MMU. */ - (1 << 1) | /* A, enable alignment check faults. */ - (1 << 2) | /* C, data cache enable. */ - (1 << 3) | /* SA, enable stack alignment check. */ - (3 << 4) | /* RES1 bits. */ - (1 << 11) | /* RES1 bit. */ - (1 << 12) | /* I, instruction cache enable. */ - (1 << 16) | /* RES1 bit. */ - (1 << 18) | /* RES1 bit. */ - (1 << 19) | /* WXN bit, writable execute never. */ - (3 << 22) | /* RES1 bits. */ - (3 << 28) | /* RES1 bits. */ - 0; - - __asm__ volatile("dsb sy"); - __asm__ volatile("isb"); - write_msr(sctlr_el2, v); - __asm__ volatile("isb"); + mm_tcr_el2 = (1 << 20) | /* TBI, top byte ignored. */ + ((features & 0xf) << 16) | /* PS. */ + (0 << 14) | /* TG0, granule size, 4KB. */ + (3 << 12) | /* SH0, inner shareable. */ + (1 << 10) | /* ORGN0, normal mem, WB RA WA Cacheable. */ + (1 << 8) | /* IRGN0, normal mem, WB RA WA Cacheable. */ + (25 << 0) | /* T0SZ, input address is 2^39 bytes. */ + 0; + + mm_sctlr_el2 = (1 << 0) | /* M, enable stage 1 EL2 MMU. 
*/ + (1 << 1) | /* A, enable alignment check faults. */ + (1 << 2) | /* C, data cache enable. */ + (1 << 3) | /* SA, enable stack alignment check. */ + (3 << 4) | /* RES1 bits. */ + (1 << 11) | /* RES1 bit. */ + (1 << 12) | /* I, instruction cache enable. */ + (1 << 16) | /* RES1 bit. */ + (1 << 18) | /* RES1 bit. */ + (1 << 19) | /* WXN bit, writable execute never. */ + (3 << 22) | /* RES1 bits. */ + (3 << 28) | /* RES1 bits. */ + 0; return true; } +void arch_mm_enable(paddr_t table) +{ + /* Configure translation management registers. */ + write_msr(ttbr0_el2, pa_addr(table)); + write_msr(vtcr_el2, mm_vtcr_el2); + write_msr(mair_el2, mm_mair_el2); + write_msr(tcr_el2, mm_tcr_el2); + + /* Configure sctlr_el2 to enable MMU and cache. */ + dsb(sy); + isb(); + write_msr(sctlr_el2, mm_sctlr_el2); + isb(); +} + /** * Given the attrs from a table at some level and the attrs from all the blocks * in that table, returns equivalent attrs to use for a block which will replace diff --git a/src/arch/aarch64/pl011/pl011.c b/src/arch/aarch64/pl011/pl011.c index 9afc3b9f6..f98f36e03 100644 --- a/src/arch/aarch64/pl011/pl011.c +++ b/src/arch/aarch64/pl011/pl011.c @@ -57,12 +57,10 @@ void plat_console_putchar(char c) /* do nothing */ } - dmb(); - - /* Write the character out. */ + /* Write the character out, force memory access ordering. */ + memory_ordering_barrier(); io_write32(UARTDR, c); - - dmb(); + memory_ordering_barrier(); /* Wait until the UART is no longer busy. 
*/ while (io_read32_mb(UARTFR) & UARTFR_BUSY) { diff --git a/src/arch/aarch64/smc.c b/src/arch/aarch64/smc.c index 4a9d98047..d1c4bb823 100644 --- a/src/arch/aarch64/smc.c +++ b/src/arch/aarch64/smc.c @@ -18,10 +18,55 @@ #include -uint64_t smc64_internal(uint64_t func, uint64_t arg0, uint64_t arg1, - uint64_t arg2); +static smc_res_t smc_internal(uint32_t func, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, + uint64_t arg5, uint32_t caller_id) +{ + register uint64_t r0 __asm__("x0") = func; + register uint64_t r1 __asm__("x1") = arg0; + register uint64_t r2 __asm__("x2") = arg1; + register uint64_t r3 __asm__("x3") = arg2; + register uint64_t r4 __asm__("x4") = arg3; + register uint64_t r5 __asm__("x5") = arg4; + register uint64_t r6 __asm__("x6") = arg5; + register uint64_t r7 __asm__("x7") = caller_id; + + /* + * We currently implement SMCCC 1.0, which specifies that the callee can + * use x4–x17 as scratch registers. If we move to SMCCC 1.1 then this + * will change. + */ + __asm__ volatile( + "smc #0" + : /* Output registers, also used as inputs ('+' constraint). */ + "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5), + "+r"(r6), "+r"(r7) + : + : /* Clobber registers. 
*/ + "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", + "x17"); + + return (smc_res_t){.res0 = r0, .res1 = r1, .res2 = r2, .res3 = r3}; +} + +smc_res_t smc32(uint32_t func, uint32_t arg0, uint32_t arg1, uint32_t arg2, + uint32_t arg3, uint32_t arg4, uint32_t arg5, uint32_t caller_id) +{ + return smc_internal(func | SMCCC_32_BIT, arg0, arg1, arg2, arg3, arg4, + arg5, caller_id); +} + +smc_res_t smc64(uint32_t func, uint64_t arg0, uint64_t arg1, uint64_t arg2, + uint64_t arg3, uint64_t arg4, uint64_t arg5, uint32_t caller_id) +{ + return smc_internal(func | SMCCC_64_BIT, arg0, arg1, arg2, arg3, arg4, + arg5, caller_id); +} -uint64_t smc64(uint32_t func, uint64_t arg0, uint64_t arg1, uint64_t arg2) +smc_res_t smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, + uint64_t arg5, uint32_t caller_id) { - return smc64_internal(func | SMCCC_64_BIT, arg0, arg1, arg2); + return smc_internal(func, arg0, arg1, arg2, arg3, arg4, arg5, + caller_id); } diff --git a/src/arch/aarch64/smc.h b/src/arch/aarch64/smc.h index 1cc5ba306..93847fab3 100644 --- a/src/arch/aarch64/smc.h +++ b/src/arch/aarch64/smc.h @@ -36,6 +36,8 @@ #define SMCCC_STANDARD_SECURE_SERVICE_CALL 0x04000000 #define SMCCC_STANDARD_HYPERVISOR_SERVICE_CALL 0x05000000 #define SMCCC_VENDOR_HYPERVISOR_SERVICE_CALL 0x06000000 + +#define SMCCC_CALLER_HYPERVISOR 0x0 /* * TODO: Trusted application call: 0x30000000 - 0x31000000 * TODO: Trusted OS call: 0x32000000 - 0x3f000000 @@ -45,5 +47,21 @@ /* clang-format on */ -uint32_t smc32(uint32_t func, uint32_t arg0, uint32_t arg1, uint32_t arg2); -uint64_t smc64(uint32_t func, uint64_t arg0, uint64_t arg1, uint64_t arg2); +typedef struct smc_res { + uint64_t res0; + uint64_t res1; + uint64_t res2; + uint64_t res3; +} smc_res_t; + +smc_res_t smc32(uint32_t func, uint32_t arg0, uint32_t arg1, uint32_t arg2, + uint32_t arg3, uint32_t arg4, uint32_t arg5, + uint32_t caller_id); + +smc_res_t smc64(uint32_t func, uint64_t 
arg0, uint64_t arg1, uint64_t arg2, + uint64_t arg3, uint64_t arg4, uint64_t arg5, + uint32_t caller_id); + +smc_res_t smc_forward(uint32_t func, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, + uint64_t arg5, uint32_t caller_id); diff --git a/src/arch/fake/BUILD.gn b/src/arch/fake/BUILD.gn index 6eee2b8f0..a0c721935 100644 --- a/src/arch/fake/BUILD.gn +++ b/src/arch/fake/BUILD.gn @@ -19,6 +19,12 @@ source_set("fake") { ] } +# Empty implementation of platform boot flow. +# Fake arch targets should not depend on the boot flow functions. Will fail to +# compile if they do. +source_set("boot_flow") { +} + # Fake implementation of putchar logs to the console. source_set("console") { sources = [ diff --git a/src/arch/fake/inc/hf/arch/types.h b/src/arch/fake/inc/hf/arch/types.h index bcf33f319..8a0eccbec 100644 --- a/src/arch/fake/inc/hf/arch/types.h +++ b/src/arch/fake/inc/hf/arch/types.h @@ -21,6 +21,7 @@ #define PAGE_BITS 12 #define PAGE_LEVEL_BITS 9 +#define STACK_ALIGN 64 /** The type of a page table entry (PTE). */ typedef uint64_t pte_t; diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c index e409f9768..c7885185f 100644 --- a/src/arch/fake/mm.c +++ b/src/arch/fake/mm.c @@ -121,7 +121,7 @@ void arch_mm_invalidate_stage2_range(ipaddr_t va_begin, ipaddr_t va_end) /* There's no modelling of the stage-2 TLB. */ } -void arch_mm_write_back_dcache(void *base, size_t size) +void arch_mm_flush_dcache(void *base, size_t size) { /* There's no modelling of the cache. */ } @@ -165,10 +165,14 @@ int arch_mm_stage2_attrs_to_mode(uint64_t attrs) return attrs >> PTE_ATTR_MODE_SHIFT; } -bool arch_mm_init(paddr_t table, bool first) +bool arch_mm_init(void) { /* No initialization required. */ - (void)table; - (void)first; return true; } + +void arch_mm_enable(paddr_t table) +{ + /* There's no modelling of the MMU. 
*/ + (void)table; +} diff --git a/src/boot_flow/BUILD.gn b/src/boot_flow/BUILD.gn new file mode 100644 index 000000000..e7be18620 --- /dev/null +++ b/src/boot_flow/BUILD.gn @@ -0,0 +1,40 @@ +# Copyright 2019 The Hafnium Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import("//build/toolchain/platform.gni") + +source_set("common") { + sources = [ + ] +} + +source_set("android") { + sources = [ + "android.c", + ] + deps = [ + ":common", + "//src/arch/${plat_arch}/boot_flow:android", + ] +} + +source_set("linux") { + sources = [ + "linux.c", + ] + deps = [ + ":common", + "//src/arch/${plat_arch}/boot_flow:linux", + ] +} diff --git a/src/boot_flow/android.c b/src/boot_flow/android.c new file mode 100644 index 000000000..4917ef49a --- /dev/null +++ b/src/boot_flow/android.c @@ -0,0 +1,48 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hf/layout.h" +#include "hf/plat/boot_flow.h" + +/** + * FDT was compiled into Hafnium. Return physical address of the `.plat.fdt` + * section of Hafnium image. + */ +paddr_t plat_get_fdt_addr(void) +{ + return layout_fdt_begin(); +} + +/** + * Android boot flow does not use kernel arguments. Pass zero. + */ +uintreg_t plat_get_kernel_arg(void) +{ + return 0; +} + +/** + * Initrd was compiled into Hafnium. Return range of the '.plat.initrd' section. + */ +bool plat_get_initrd_range(const struct fdt_node *fdt_root, paddr_t *begin, + paddr_t *end) +{ + (void)fdt_root; + + *begin = layout_initrd_begin(); + *end = layout_initrd_end(); + return true; +} diff --git a/src/boot_flow/linux.c b/src/boot_flow/linux.c new file mode 100644 index 000000000..52fcb8abd --- /dev/null +++ b/src/boot_flow/linux.c @@ -0,0 +1,48 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hf/fdt_handler.h" +#include "hf/plat/boot_flow.h" + +/* Set by arch-specific boot-time hook. */ +uintreg_t plat_fdt_addr; + +/** + * Returns the physical address of board FDT. This was passed to Hafnium in the + * first kernel arg by the boot loader. + */ +paddr_t plat_get_fdt_addr(void) +{ + return pa_init((uintpaddr_t)plat_fdt_addr); +} + +/** + * When handing over to the primary, give it the same FDT address that was given + * to Hafnium. The FDT may have been modified during Hafnium init. 
+ */ +uintreg_t plat_get_kernel_arg(void) +{ + return plat_fdt_addr; +} + +/** + * Load initrd range from the board FDT. + */ +bool plat_get_initrd_range(const struct fdt_node *fdt_root, paddr_t *begin, + paddr_t *end) +{ + return fdt_find_initrd(fdt_root, begin, end); +} diff --git a/src/cpu.c b/src/cpu.c index b832f9798..b1c243bae 100644 --- a/src/cpu.c +++ b/src/cpu.c @@ -31,8 +31,20 @@ #define STACK_SIZE PAGE_SIZE -/* The stack to be used by the CPUs. */ -alignas(2 * sizeof(uintreg_t)) char callstacks[MAX_CPUS][STACK_SIZE]; +/** + * The stacks to be used by the CPUs. + * + * Align to page boundaries to ensure that cache lines are not shared between a + * CPU's stack and data that can be accessed from other CPUs. If this did + * happen, there may be coherency problems when the stack is being used before + * caching is enabled. + */ +alignas(PAGE_SIZE) char callstacks[MAX_CPUS][STACK_SIZE]; + +/* NOLINTNEXTLINE(misc-redundant-expression) */ +static_assert((STACK_SIZE % PAGE_SIZE) == 0, "Keep each stack page aligned."); +static_assert((PAGE_SIZE % STACK_ALIGN) == 0, + "Page alignment is too weak for the stack."); /** * A temporal variable for one-time booting sequence. The booting CPU will diff --git a/src/dlog.c b/src/dlog.c index 4cc414bdd..36d1e8e27 100644 --- a/src/dlog.c +++ b/src/dlog.c @@ -21,7 +21,6 @@ #include "hf/spinlock.h" #include "hf/std.h" -#include "hf/vm.h" /* Keep macro alignment */ /* clang-format off */ diff --git a/src/plat.c b/src/plat.c deleted file mode 100644 index d6c5a4a8a..000000000 --- a/src/plat.c +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2018 The Hafnium Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "hf/boot_params.h" -#include "hf/dlog.h" -#include "hf/fdt_handler.h" -#include "hf/layout.h" - -/** - * Default implementation assumes the FDT has been linked into the image. - * - * This can be overridden e.g. to provide a fixed address or an address passed - * by the loader. - */ -#pragma weak plat_get_fdt_addr -paddr_t plat_get_fdt_addr(void) -{ - return layout_fdt_begin(); -} - -/** - * Default implementation assumes the initrd has been linked into the image. - * - * This can be overridden e.g. to provide a fixed address or an address passed - * by the loader. - */ -#pragma weak plat_get_initrd_range -void plat_get_initrd_range(struct mm_stage1_locked stage1_locked, - paddr_t *begin, paddr_t *end, struct mpool *ppool) -{ - (void)stage1_locked; - (void)ppool; - - *begin = layout_initrd_begin(); - *end = layout_initrd_end(); -} - -/** - * Default implementation assumes the FDT address is passed to the kernel. - * - * TODO: make this part of the VM configuration as secondary VMs will also need - * to take arguments. - */ -#pragma weak plat_get_kernel_arg -uintreg_t plat_get_kernel_arg(void) -{ - return (uintreg_t)pa_addr(plat_get_fdt_addr()); -} - -/** - * Default implementation extracts the boot parameters from the FDT but the - * initrd is provided separately. 
- */ -#pragma weak plat_get_boot_params -bool plat_get_boot_params(struct mm_stage1_locked stage1_locked, - struct boot_params *p, struct mpool *ppool) -{ - struct fdt_header *fdt; - struct fdt_node n; - bool ret = false; - - plat_get_initrd_range(stage1_locked, &p->initrd_begin, &p->initrd_end, - ppool); - p->kernel_arg = plat_get_kernel_arg(); - - /* Get the memory map from the FDT. */ - fdt = fdt_map(stage1_locked, plat_get_fdt_addr(), &n, ppool); - if (!fdt) { - return false; - } - - if (!fdt_find_child(&n, "")) { - dlog("Unable to find FDT root node.\n"); - goto out_unmap_fdt; - } - - fdt_find_cpus(&n, p->cpu_ids, &p->cpu_count); - - p->mem_ranges_count = 0; - fdt_find_memory_ranges(&n, p); - - ret = true; - -out_unmap_fdt: - if (!fdt_unmap(stage1_locked, fdt, ppool)) { - dlog("Unable to unmap fdt."); - return false; - } - - return ret; -} - -/** - * Default implementation updates the FDT which is the argument passed to the - * primary VM's kernel. - * - * TODO: in future, each VM will declare whether it expects an argument passed - * and that will be static data e.g. it will provide its own FDT so there will - * be no FDT modification. This is done because each VM has a very different - * view of the system and we don't want to force VMs to require loader code when - * another loader can load the data for it. 
- */ -#pragma weak plat_update_boot_params -bool plat_update_boot_params(struct mm_stage1_locked stage1_locked, - struct boot_params_update *p, struct mpool *ppool) -{ - return fdt_patch(stage1_locked, plat_get_fdt_addr(), p, ppool); -} diff --git a/test/arch/BUILD.gn b/test/arch/BUILD.gn index d99a667c8..c770deeb8 100644 --- a/test/arch/BUILD.gn +++ b/test/arch/BUILD.gn @@ -27,6 +27,7 @@ hypervisor("arch_test") { testonly = true sources = [ + "dlog_test.c", "mm_test.c", ] diff --git a/test/arch/dlog_test.c b/test/arch/dlog_test.c new file mode 100644 index 000000000..49403e1c7 --- /dev/null +++ b/test/arch/dlog_test.c @@ -0,0 +1,35 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hf/dlog.h" + +#include "hftest.h" + +/** + * Test that logs are written to the buffer, and the rest is empty. + */ +TEST(dlog, log_buffer) +{ + const char test_string[] = "Test string\n"; + + dlog(test_string); + ASSERT_EQ(strcmp(test_string, dlog_buffer), 0); + /* The \0 at the end shouldn't be counted. 
*/ + ASSERT_EQ(dlog_buffer_offset, sizeof(test_string) - 1); + for (int i = sizeof(test_string) - 1; i < DLOG_BUFFER_SIZE; ++i) { + EXPECT_EQ(dlog_buffer[i], '\0'); + } +} diff --git a/test/hftest/BUILD.gn b/test/hftest/BUILD.gn index 3224044de..ab352091d 100644 --- a/test/hftest/BUILD.gn +++ b/test/hftest/BUILD.gn @@ -18,7 +18,7 @@ config("hftest_config") { include_dirs = [ "inc" ] } -# Testing framework for a primary vm. +# Testing framework for a primary VM. source_set("hftest_primary_vm") { testonly = true @@ -30,7 +30,7 @@ source_set("hftest_primary_vm") { ] } -# Testing framework for a secondary vm. It's currently just a slave VM and +# Testing framework for a secondary VM. It's currently just a slave VM and # can't affect the tests directly. source_set("hftest_secondary_vm") { testonly = true @@ -38,11 +38,12 @@ source_set("hftest_secondary_vm") { public_configs = [ ":hftest_config" ] sources = [ - "hftest_service.c", + "service.c", ] - libs = ["//hfo2/target/aarch64-hfo2-test/release/libhfo2.a"] deps = [ + ":mm", + ":power_mgmt", "//src:dlog", "//src:memiter", "//src:panic", @@ -92,6 +93,8 @@ source_set("hftest_standalone") { deps = [ ":common", + ":mm", + ":power_mgmt", "//src:dlog", "//src:fdt", "//src:memiter", @@ -108,7 +111,7 @@ source_set("common") { testonly = true public_configs = [ ":hftest_config" ] sources = [ - "hftest_common.c", + "common.c", ] libs = ["//hfo2/target/aarch64-hfo2-test/release/libhfo2.a"] deps = [ @@ -118,3 +121,37 @@ source_set("common") { "//src:std", ] } + +source_set("mm") { + testonly = true + + public_configs = [ ":hftest_config" ] + + sources = [ + "mm.c", + ] + + libs = ["//hfo2/target/aarch64-hfo2-test/release/libhfo2.a"] + + deps = [ + "//src:layout", + "//src:mm", + "//src/arch/${plat_arch}:arch", + "//src/arch/${plat_arch}/hftest:mm", + ] +} + +source_set("power_mgmt") { + testonly = true + + public_configs = [ ":hftest_config" ] + + sources = [ + "power_mgmt.c", + ] + + deps = [ + ":mm", + 
"//src/arch/${plat_arch}/hftest:power_mgmt", + ] +} diff --git a/test/hftest/hftest_common.c b/test/hftest/common.c similarity index 99% rename from test/hftest/hftest_common.c rename to test/hftest/common.c index 8c8d334da..81fc51694 100644 --- a/test/hftest/hftest_common.c +++ b/test/hftest/common.c @@ -14,8 +14,6 @@ * limitations under the License. */ -#include "hftest_common.h" - #include "hf/arch/vm/power_mgmt.h" #include "hf/boot_params.h" @@ -24,6 +22,7 @@ #include "hf/std.h" #include "hftest.h" +#include "hftest_common.h" HFTEST_ENABLE(); diff --git a/test/hftest/hftest.py b/test/hftest/hftest.py index 24dcf20df..8eadad9bf 100755 --- a/test/hftest/hftest.py +++ b/test/hftest/hftest.py @@ -14,9 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Run tests. - -Runs tests on QEMU. +"""Script which drives invocation of tests and parsing their output to produce +a results report. """ from __future__ import print_function @@ -24,6 +23,7 @@ import xml.etree.ElementTree as ET import argparse +import collections import datetime import json import os @@ -35,143 +35,469 @@ HFTEST_LOG_FAILURE_PREFIX = "Failure:" HFTEST_LOG_FINISHED = "FINISHED" +HF_ROOT = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +DTC_SCRIPT = os.path.join(HF_ROOT, "build", "image", "dtc.py") +FVP_BINARY = os.path.join( + os.path.dirname(HF_ROOT), "fvp", "Base_RevC_AEMv8A_pkg", "models", + "Linux64_GCC-4.9", "FVP_Base_RevC-2xAEMv8A") +FVP_PREBUILT_DTS = os.path.join( + HF_ROOT, "prebuilts", "linux-aarch64", "arm-trusted-firmware", + "fvp-base-gicv3-psci-1t.dts") -def log_timeout_returncode(f, returncode): - if returncode == 0: - return - elif returncode == 124: - f.write("\r\n{}{} timed out\r\n".format(HFTEST_LOG_PREFIX, - HFTEST_LOG_FAILURE_PREFIX)) - else: - f.write("\r\n{}{} process return code {}\r\n".format( - HFTEST_LOG_PREFIX, HFTEST_LOG_FAILURE_PREFIX, returncode)) - - -def qemu(image, initrd, 
args, log): - qemu_args = [ - "timeout", "--foreground", "10s", - "./prebuilts/linux-x64/qemu/qemu-system-aarch64", "-M", "virt,gic_version=3", - "-cpu", "cortex-a57", "-smp", "4", "-m", "64M", "-machine", "virtualization=true", - "-nographic", "-nodefaults", "-serial", "stdio", "-kernel", image, - ] - if initrd: - qemu_args += ["-initrd", initrd] - if args: - qemu_args += ["-append", args] - # Save the log to a file. - with open(log, "w") as f: - f.write("$ {}\r\n".format(" ".join(qemu_args))) - f.flush() - try: - subprocess.check_call(qemu_args, stdout=f, stderr=f) - except subprocess.CalledProcessError as e: - log_timeout_returncode(f, e.returncode) - # Return that log for processing. - with open(log, "r") as f: +def read_file(path): + with open(path, "r") as f: return f.read() +def write_file(path, to_write, append=False): + with open(path, "a" if append else "w") as f: + f.write(to_write) -def fvp(image, initrd, args, log): - uart0_log = log + ".uart0" - uart1_log = log + ".uart1" - fdt = log + ".dtb" - dtc_args = [ - "dtc", "-I", "dts", "-O", "dtb", - "-o", fdt, - ] - fvp_args = [ - "timeout", "--foreground", "40s", - "../fvp/Base_RevC_AEMv8A_pkg/models/Linux64_GCC-4.9/FVP_Base_RevC-2xAEMv8A", - "-C", "pctl.startup=0.0.0.0", - "-C", "bp.secure_memory=0", - "-C", "cluster0.NUM_CORES=4", - "-C", "cluster1.NUM_CORES=4", - "-C", "cache_state_modelled=0", - "-C", "bp.vis.disable_visualisation=true", - "-C", "bp.vis.rate_limit-enable=false", - "-C", "bp.terminal_0.start_telnet=false", - "-C", "bp.terminal_1.start_telnet=false", - "-C", "bp.terminal_2.start_telnet=false", - "-C", "bp.terminal_3.start_telnet=false", - "-C", "bp.pl011_uart0.untimed_fifos=1", - "-C", "bp.pl011_uart0.unbuffered_output=1", - "-C", "bp.pl011_uart0.out_file=" + uart0_log, - "-C", "bp.pl011_uart1.out_file=" + uart1_log, - "-C", "cluster0.cpu0.RVBAR=0x04020000", - "-C", "cluster0.cpu1.RVBAR=0x04020000", - "-C", "cluster0.cpu2.RVBAR=0x04020000", - "-C", "cluster0.cpu3.RVBAR=0x04020000", - 
"-C", "cluster1.cpu0.RVBAR=0x04020000", - "-C", "cluster1.cpu1.RVBAR=0x04020000", - "-C", "cluster1.cpu2.RVBAR=0x04020000", - "-C", "cluster1.cpu3.RVBAR=0x04020000", - "--data", "cluster0.cpu0=prebuilts/linux-aarch64/arm-trusted-firmware/bl31.bin@0x04020000", - "--data", "cluster0.cpu0=" + fdt + "@0x82000000", - "--data", "cluster0.cpu0=" + image + "@0x80000000", - "-C", "bp.ve_sysregs.mmbSiteDefault=0", - "-C", "bp.ve_sysregs.exit_on_shutdown=1", - ] - initrd_start = 0x84000000 - initrd_end = 0x85000000 # Default value - if initrd: - fvp_args += ["--data", "cluster0.cpu0={}@{}".format(initrd, hex(initrd_start))] - initrd_end = initrd_start + os.path.getsize(initrd) - - - with open(log, "w") as f: - f.write("$ {}\r\n".format(" ".join(dtc_args))) - f.flush() - dtc = subprocess.Popen( - dtc_args, stdout=f, stderr=f, stdin=subprocess.PIPE) - with open( - "prebuilts/linux-aarch64/arm-trusted-firmware/fvp-base-gicv3-psci-1t.dts", - "r") as base_dts: - dtc.stdin.write(base_dts.read()) - dtc.stdin.write("/ {\n") - dtc.stdin.write(" chosen {\n") - dtc.stdin.write(" bootargs = \"" + args + "\";\n") - dtc.stdin.write(" stdout-path = \"serial0:115200n8\";\n") - dtc.stdin.write(" linux,initrd-start = <{}>;\n".format(initrd_start)) - dtc.stdin.write(" linux,initrd-end = <{}>;\n".format(initrd_end)) - dtc.stdin.write(" };\n") - dtc.stdin.write("};\n") - dtc.stdin.close() - dtc.wait() - - f.write("$ {}\r\n".format(" ".join(fvp_args))) - f.flush() - returncode = subprocess.call(fvp_args, stdout=f, stderr=f) - with open(uart0_log, "r") as g: - f.write(g.read()) - log_timeout_returncode(f, returncode) - - with open(log, "r") as f: - return f.read() +def append_file(path, to_write): + write_file(path, to_write, append=True) +def join_if_not_None(*args): + return " ".join(filter(lambda x: x, args)) -def emulator(use_fvp, image, initrd, args, log): - if use_fvp: - return fvp(image, initrd, args, log) - else: - return qemu(image, initrd, args, log) +class ArtifactsManager: + """Class 
which manages folder with test artifacts.""" + + def __init__(self, log_dir): + self.created_files = [] + self.log_dir = log_dir + + # Create directory. + try: + os.makedirs(self.log_dir) + except OSError: + if not os.path.isdir(self.log_dir): + raise + print("Logs saved under", log_dir) + + # Create files expected by the Sponge test result parser. + self.sponge_log_path = self.create_file("sponge_log", ".log") + self.sponge_xml_path = self.create_file("sponge_log", ".xml") + + def gen_file_path(self, basename, extension): + """Generate path to a file in the log directory.""" + return os.path.join(self.log_dir, basename + extension) + + def create_file(self, basename, extension): + """Create and touch a new file in the log folder. Ensure that no other + file of the same name was created by this instance of ArtifactsManager. + """ + # Determine the path of the file. + path = self.gen_file_path(basename, extension) + + # Check that the path is unique. + assert(path not in self.created_files) + self.created_files += [ path ] + + # Touch file. + with open(path, "w") as f: + pass + + return path + + def get_file(self, basename, extension): + """Return path to a file in the log folder. Assert that it was created + by this instance of ArtifactsManager.""" + path = self.gen_file_path(basename, extension) + assert(path in self.created_files) + return path + + +# Tuple holding the arguments common to all driver constructors. +# This is to avoid having to pass arguments from subclasses to superclasses. +DriverArgs = collections.namedtuple("DriverArgs", [ + "artifacts", + "kernel", + "initrd", + "manifest", + "vm_args", + ]) + + +# State shared between the common Driver class and its subclasses during +# a single invocation of the target platform. +DriverRunState = collections.namedtuple("DriverRunState", [ + "log_path", + "ret_code", + ]) + + +class DriverRunException(Exception): + """Exception thrown if subprocess invoked by a driver returned non-zero + status code. 
Used to fast-exit from a driver command sequence.""" + pass + + +class Driver: + """Parent class of drivers for all testable platforms.""" + + def __init__(self, args): + self.args = args + + def get_run_log(self, run_name): + """Return path to the main log of a given test run.""" + return self.args.artifacts.get_file(run_name, ".log") + + def start_run(self, run_name): + """Hook called by Driver subclasses before they invoke the target + platform.""" + return DriverRunState( + self.args.artifacts.create_file(run_name, ".log"), 0) + + def exec_logged(self, run_state, exec_args): + """Run a subprocess on behalf of a Driver subclass and append its + stdout and stderr to the main log.""" + assert(run_state.ret_code == 0) + with open(run_state.log_path, "a") as f: + f.write("$ {}\r\n".format(" ".join(exec_args))) + f.flush() + ret_code = subprocess.call(exec_args, stdout=f, stderr=f) + if ret_code != 0: + run_state = DriverRunState(run_state.log_path, ret_code) + raise DriverRunException() + + def finish_run(self, run_state): + """Hook called by Driver subclasses after they finished running the + target platform. `ret_code` argument is the return code of the main + command run by the driver. A corresponding log message is printed.""" + # Decode return code and add a message to the log. + with open(run_state.log_path, "a") as f: + if run_state.ret_code == 124: + f.write("\r\n{}{} timed out\r\n".format( + HFTEST_LOG_PREFIX, HFTEST_LOG_FAILURE_PREFIX)) + elif run_state.ret_code != 0: + f.write("\r\n{}{} process return code {}\r\n".format( + HFTEST_LOG_PREFIX, HFTEST_LOG_FAILURE_PREFIX, + run_state.ret_code)) + + # Append log of this run to full test log. 
+ log_content = read_file(run_state.log_path) + append_file( + self.args.artifacts.sponge_log_path, + log_content + "\r\n\r\n") + return log_content + + def overlay_dtb(self, run_state, base_dtb, overlay_dtb, out_dtb): + """Overlay `overlay_dtb` over `base_dtb` into `out_dtb`.""" + dtc_args = [ + DTC_SCRIPT, "overlay", + out_dtb, base_dtb, overlay_dtb, + ] + self.exec_logged(run_state, dtc_args) + + +class QemuDriver(Driver): + """Driver which runs tests in QEMU.""" + + def __init__(self, args): + Driver.__init__(self, args) + + def gen_exec_args(self, test_args, dtb_path=None, dumpdtb_path=None): + """Generate command line arguments for QEMU.""" + exec_args = [ + "timeout", "--foreground", "10s", + "./prebuilts/linux-x64/qemu/qemu-system-aarch64", + "-M", "virt,gic_version=3", + "-cpu", "cortex-a57", "-smp", "4", "-m", "64M", + "-machine", "virtualization=true", + "-nographic", "-nodefaults", "-serial", "stdio", + "-kernel", self.args.kernel, + ] + + if dtb_path: + exec_args += ["-dtb", dtb_path] + + if dumpdtb_path: + exec_args += ["-machine", "dumpdtb=" + dumpdtb_path] + + if self.args.initrd: + exec_args += ["-initrd", self.args.initrd] + + vm_args = join_if_not_None(self.args.vm_args, test_args) + if vm_args: + exec_args += ["-append", vm_args] + + return exec_args + + def dump_dtb(self, run_state, test_args, path): + dumpdtb_args = self.gen_exec_args(test_args, dumpdtb_path=path) + self.exec_logged(run_state, dumpdtb_args) + + def run(self, run_name, test_args): + """Run test given by `test_args` in QEMU.""" + run_state = self.start_run(run_name) + + try: + dtb_path = None + + # If manifest DTBO specified, dump DTB from QEMU and overlay them. 
+ if self.args.manifest: + base_dtb_path = self.args.artifacts.create_file( + run_name, ".base.dtb") + dtb_path = self.args.artifacts.create_file(run_name, ".dtb") + self.dump_dtb(run_state, test_args, base_dtb_path) + self.overlay_dtb( + run_state, base_dtb_path, self.args.manifest, dtb_path) + + # Execute test in QEMU.. + exec_args = self.gen_exec_args(test_args, dtb_path=dtb_path) + self.exec_logged(run_state, exec_args) + except DriverRunException: + pass + + return self.finish_run(run_state) + + +class FvpDriver(Driver): + """Driver which runs tests in ARM FVP emulator.""" + def __init__(self, args): + Driver.__init__(self, args) -def ensure_dir(path): - try: - os.makedirs(path) - except OSError: - if not os.path.isdir(path): - raise + def gen_dts(self, dts_path, test_args, initrd_start, initrd_end): + """Create a DeviceTree source which will be compiled into a DTB and + passed to FVP for a test run.""" + vm_args = join_if_not_None(self.args.vm_args, test_args) + write_file(dts_path, read_file(FVP_PREBUILT_DTS)) + append_file(dts_path, """ + / {{ + chosen {{ + bootargs = "{}"; + stdout-path = "serial0:115200n8"; + linux,initrd-start = <{}>; + linux,initrd-end = <{}>; + }}; + }}; + """.format(vm_args, initrd_start, initrd_end)) + def gen_fvp_args( + self, initrd_start, uart0_log_path, uart1_log_path, dtb_path): + """Generate command line arguments for FVP.""" + fvp_args = [ + "timeout", "--foreground", "40s", + FVP_BINARY, + "-C", "pctl.startup=0.0.0.0", + "-C", "bp.secure_memory=0", + "-C", "cluster0.NUM_CORES=4", + "-C", "cluster1.NUM_CORES=4", + "-C", "cache_state_modelled=0", + "-C", "bp.vis.disable_visualisation=true", + "-C", "bp.vis.rate_limit-enable=false", + "-C", "bp.terminal_0.start_telnet=false", + "-C", "bp.terminal_1.start_telnet=false", + "-C", "bp.terminal_2.start_telnet=false", + "-C", "bp.terminal_3.start_telnet=false", + "-C", "bp.pl011_uart0.untimed_fifos=1", + "-C", "bp.pl011_uart0.unbuffered_output=1", + "-C", "bp.pl011_uart0.out_file=" + 
uart0_log_path, + "-C", "bp.pl011_uart1.out_file=" + uart1_log_path, + "-C", "cluster0.cpu0.RVBAR=0x04020000", + "-C", "cluster0.cpu1.RVBAR=0x04020000", + "-C", "cluster0.cpu2.RVBAR=0x04020000", + "-C", "cluster0.cpu3.RVBAR=0x04020000", + "-C", "cluster1.cpu0.RVBAR=0x04020000", + "-C", "cluster1.cpu1.RVBAR=0x04020000", + "-C", "cluster1.cpu2.RVBAR=0x04020000", + "-C", "cluster1.cpu3.RVBAR=0x04020000", + "--data", "cluster0.cpu0=prebuilts/linux-aarch64/arm-trusted-firmware/bl31.bin@0x04020000", + "--data", "cluster0.cpu0=" + dtb_path + "@0x82000000", + "--data", "cluster0.cpu0=" + self.args.kernel + "@0x80000000", + "-C", "bp.ve_sysregs.mmbSiteDefault=0", + "-C", "bp.ve_sysregs.exit_on_shutdown=1", + ] -def hftest_lines(raw): - lines = [] - for line in raw.splitlines(): - if line.startswith("VM "): - line = line[len("VM 0: "):] - if line.startswith(HFTEST_LOG_PREFIX): - lines.append(line[len(HFTEST_LOG_PREFIX):]) - return lines + if self.args.initrd: + fvp_args += [ + "--data", + "cluster0.cpu0={}@{}".format( + self.args.initrd, hex(initrd_start)) + ] + + return fvp_args + + def run(self, run_name, test_args): + run_state = self.start_run(run_name) + + base_dts_path = self.args.artifacts.create_file(run_name, ".base.dts") + base_dtb_path = self.args.artifacts.create_file(run_name, ".base.dtb") + dtb_path = self.args.artifacts.create_file(run_name, ".dtb") + uart0_log_path = self.args.artifacts.create_file(run_name, ".uart0.log") + uart1_log_path = self.args.artifacts.create_file(run_name, ".uart1.log") + + initrd_start = 0x84000000 + if self.args.initrd: + initrd_end = initrd_start + os.path.getsize(self.args.initrd) + else: + initrd_end = 0x85000000 # Default value + + try: + # Create a DT to pass to FVP. + self.gen_dts(base_dts_path, test_args, initrd_start, initrd_end) + + # Compile DTS to DTB. + dtc_args = [ + DTC_SCRIPT, "compile", "-i", base_dts_path, "-o", base_dtb_path, + ] + self.exec_logged(run_state, dtc_args) + + # If manifest DTBO specified, overlay it. 
+ if self.args.manifest: + self.overlay_dtb( + run_state, base_dtb_path, self.args.manifest, dtb_path) + else: + dtb_path = base_dtb_path + + # Run FVP. + fvp_args = self.gen_fvp_args( + initrd_start, uart0_log_path, uart1_log_path, dtb_path) + self.exec_logged(run_state, fvp_args) + except DriverRunException: + pass + + # Append UART0 output to main log. + append_file(run_state.log_path, read_file(uart0_log_path)) + return self.finish_run(run_state) + + +# Tuple used to return information about the results of running a set of tests. +TestRunnerResult = collections.namedtuple("TestRunnerResult", [ + "tests_run", + "tests_failed", + ]) + + +class TestRunner: + """Class which communicates with a test platform to obtain a list of + available tests and driving their execution.""" + + def __init__(self, artifacts, driver, image_name, suite_regex, test_regex): + self.artifacts = artifacts + self.driver = driver + self.image_name = image_name + + self.suite_re = re.compile(suite_regex or ".*") + self.test_re = re.compile(test_regex or ".*") + + def extract_hftest_lines(self, raw): + """Extract hftest-specific lines from a raw output from an invocation + of the test platform.""" + lines = [] + for line in raw.splitlines(): + if line.startswith("VM "): + line = line[len("VM 0: "):] + if line.startswith(HFTEST_LOG_PREFIX): + lines.append(line[len(HFTEST_LOG_PREFIX):]) + return lines + + def get_test_json(self): + """Invoke the test platform and request a JSON of available test and + test suites.""" + out = self.driver.run("json", "json") + hf_out = "\n".join(self.extract_hftest_lines(out)) + try: + return json.loads(hf_out) + except ValueError as e: + print(out) + raise e + + def collect_results(self, fn, it, xml_node): + """Run `fn` on every entry in `it` and collect their TestRunnerResults. 
+ Insert "tests" and "failures" nodes to `xml_node`.""" + tests_run = 0 + tests_failed = 0 + for i in it: + sub_result = fn(i) + assert(sub_result.tests_run >= sub_result.tests_failed) + tests_run += sub_result.tests_run + tests_failed += sub_result.tests_failed + + xml_node.set("tests", str(tests_run)) + xml_node.set("failures", str(tests_failed)) + return TestRunnerResult(tests_run, tests_failed) + + def is_passed_test(self, test_out): + """Parse the output of a test and return True if it passed.""" + return \ + len(test_out) > 0 and \ + test_out[-1] == HFTEST_LOG_FINISHED and \ + not any(l.startswith(HFTEST_LOG_FAILURE_PREFIX) for l in test_out) + + def run_test(self, suite, test, suite_xml): + """Invoke the test platform and request to run a given `test` in given + `suite`. Create a new XML node with results under `suite_xml`. + Test only invoked if it matches the regex given to constructor.""" + if not self.test_re.match(test): + return TestRunnerResult(tests_run=0, tests_failed=0) + + print(" RUN", test) + log_name = suite["name"] + "." + test + + test_xml = ET.SubElement(suite_xml, "testcase") + test_xml.set("name", test) + test_xml.set("classname", suite['name']) + test_xml.set("status", "run") + + out = self.extract_hftest_lines(self.driver.run( + log_name, "run {} {}".format(suite["name"], test))) + + if self.is_passed_test(out): + print(" PASS") + return TestRunnerResult(tests_run=1, tests_failed=0) + else: + print("[x] FAIL --", self.driver.get_run_log(log_name)) + failure_xml = ET.SubElement(test_xml, "failure") + # TODO: set a meaningful message and put log in CDATA + failure_xml.set("message", "Test failed") + return TestRunnerResult(tests_run=1, tests_failed=1) + + def run_suite(self, suite, xml): + """Invoke the test platform and request to run all matching tests in + `suite`. Create new XML nodes with results under `xml`. 
+ Suite skipped if it does not match the regex given to constructor.""" + if not self.suite_re.match(suite["name"]): + return TestRunnerResult(tests_run=0, tests_failed=0) + + print(" SUITE", suite["name"]) + suite_xml = ET.SubElement(xml, "testsuite") + suite_xml.set("name", suite["name"]) + + return self.collect_results( + lambda test: self.run_test(suite, test, suite_xml), + suite["tests"], + suite_xml) + + def run_tests(self): + """Run all suites and tests matching regexes given to constructor. + Write results to sponge log XML. Return the number of run and failed + tests.""" + + test_spec = self.get_test_json() + timestamp = datetime.datetime.now().replace(microsecond=0).isoformat() + + xml = ET.Element("testsuites") + xml.set("name", self.image_name) + xml.set("timestamp", timestamp) + + result = self.collect_results( + lambda suite: self.run_suite(suite, xml), + test_spec["suites"], + xml) + + # Write XML to file. + with open(self.artifacts.sponge_xml_path, "wb") as f: + ET.ElementTree(xml).write(f, encoding='utf-8', xml_declaration=True) + + if result.tests_failed > 0: + print("[x] FAIL:", result.tests_failed, "of", result.tests_run, + "tests failed") + elif result.tests_run > 0: + print(" PASS: all", result.tests_run, "tests passed") + + return result def Main(): @@ -186,96 +512,44 @@ def Main(): parser.add_argument("--vm_args") parser.add_argument("--fvp", type=bool) args = parser.parse_args() + # Resolve some paths. 
image = os.path.join(args.out, args.image + ".bin") initrd = None - suite = args.image + manifest = None + image_name = args.image if args.initrd: - initrd = os.path.join(args.out_initrd, "obj", args.initrd, "initrd.img") - suite += "_" + args.initrd + initrd_dir = os.path.join(args.out_initrd, "obj", args.initrd) + initrd = os.path.join(initrd_dir, "initrd.img") + manifest = os.path.join(initrd_dir, "manifest.dtbo") + image_name += "_" + args.initrd vm_args = args.vm_args or "" - log = os.path.join(args.log, suite) - ensure_dir(log) - print("Logs saved under", log) - log_file = os.path.join(log, "sponge_log.log") - with open(log_file, "w") as full_log: - # Query the tests in the image. - out = emulator(args.fvp, image, initrd, vm_args + " json", - os.path.join(log, "json.log")) - full_log.write(out) - full_log.write("\r\n\r\n") - hftest_json = "\n".join(hftest_lines(out)) - tests = json.loads(hftest_json) - # Run the selected tests. - tests_run = 0 - failures = 0 - suite_re = re.compile(args.suite or ".*") - test_re = re.compile(args.test or ".*") - suites_xml = ET.Element("testsuites") - suites_xml.set("name", suite) - suites_xml.set( - "timestamp", - datetime.datetime.now().replace(microsecond=0).isoformat()) - for suite in tests["suites"]: - if not suite_re.match(suite["name"]): - continue - tests_run_from_suite = 0 - failures_from_suite = 0 - suite_xml = ET.SubElement(suites_xml, "testsuite") - suite_xml.set("name", suite["name"]) - for test in suite["tests"]: - if not test_re.match(test): - continue - test_xml = ET.SubElement(suite_xml, "testcase") - test_xml.set("name", test) - test_xml.set("classname", suite['name']) - test_xml.set("status", "run") - tests_run_from_suite += 1 - if tests_run_from_suite == 1: - print(" SUITE", suite["name"]) - print(" RUN", test) - test_log = os.path.join(log, - suite["name"] + "." 
+ test + ".log") - out = emulator( - args.fvp, image, initrd, - vm_args + " run {} {}".format(suite["name"], test), - test_log) - full_log.write(out) - full_log.write("\r\n\r\n") - hftest_out = hftest_lines(out) - if len( - hftest_out - ) > 0 and hftest_out[-1] == HFTEST_LOG_FINISHED and not any( - l.startswith(HFTEST_LOG_FAILURE_PREFIX) - for l in hftest_out): - print(" PASS") - else: - failures_from_suite += 1 - failure_xml = ET.SubElement(test_xml, "failure") - # TODO: set a meaningful message and put log in CDATA - failure_xml.set("message", "Test failed") - print("[x] FAIL --", test_log) - tests_run += tests_run_from_suite - failures += failures_from_suite - suite_xml.set("tests", str(tests_run_from_suite)) - suite_xml.set("failures", str(failures_from_suite)) - suites_xml.set("tests", str(tests_run)) - suites_xml.set("failures", str(failures)) - with open(os.path.join(log, "sponge_log.xml"), "wb") as f: - ET.ElementTree(suites_xml).write( - f, encoding='utf-8', xml_declaration=True) - # If none were run, this is probably a mistake. - if tests_run == 0: + + # Create class which will manage all test artifacts. + artifacts = ArtifactsManager(os.path.join(args.log, image_name)) + + # Create a driver for the platform we want to test on. + driver_args = DriverArgs(artifacts, image, initrd, manifest, vm_args) + if args.fvp: + driver = FvpDriver(driver_args) + else: + driver = QemuDriver(driver_args) + + # Create class which will drive test execution. + runner = TestRunner(artifacts, driver, image_name, args.suite, args.test) + + # Run tests. + runner_result = runner.run_tests() + + # Print error message if no tests were run as this is probably unexpected. + # Return suitable error code. + if runner_result.tests_run == 0: print("Error: no tests match") return 10 - # Exit with 0 on success and 1 if any test failed. 
- if failures: - print("[x] FAIL:", failures, "of", tests_run, "tests failed") + elif runner_result.tests_failed > 0: return 1 else: - print(" PASS: all", tests_run, "tests passed") - return 0 - + return 0 if __name__ == "__main__": sys.exit(Main()) diff --git a/test/hftest/inc/hftest.h b/test/hftest/inc/hftest.h index bcffe5c9e..f8d388a5a 100644 --- a/test/hftest/inc/hftest.h +++ b/test/hftest/inc/hftest.h @@ -90,6 +90,22 @@ */ #define HFTEST_LOG_INDENT " " +/** Initializes stage-1 MMU for tests running in a VM. */ +bool hftest_mm_init(void); + +/** Adds stage-1 identity mapping for pages covering bytes [base, base+size). */ +void hftest_mm_identity_map(const void *base, size_t size, int mode); + +void hftest_mm_vcpu_init(void); + +/** + * Starts the CPU with the given ID. It will start at the provided entry point + * with the provided argument. It is a wrapper around the generic cpu_start() + * and takes care of MMU initialization. + */ +bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size, + void (*entry)(uintptr_t arg), uintptr_t arg); + uintptr_t hftest_get_cpu_id(size_t index); /* Above this point is the public API. Now include the implementation. */ diff --git a/test/hftest/mm.c b/test/hftest/mm.c new file mode 100644 index 000000000..65bda76b1 --- /dev/null +++ b/test/hftest/mm.c @@ -0,0 +1,77 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hf/arch/vm/mm.h" + +#include "hftest.h" + +/* Number of pages reserved for page tables. Increase if necessary. */ +#define PTABLE_PAGES 3 + +alignas(alignof(struct mm_page_table)) static char ptable_buf + [sizeof(struct mm_page_table) * PTABLE_PAGES]; + +static struct mpool ppool; +static struct mm_ptable ptable; + +static struct mm_stage1_locked get_stage1_locked(void) +{ + return (struct mm_stage1_locked){.ptable = &ptable}; +} + +bool hftest_mm_init(void) +{ + struct mm_stage1_locked stage1_locked; + + mpool_init(&ppool, sizeof(struct mm_page_table)); + if (!mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf))) { + HFTEST_FAIL(true, "Failed to add buffer to page-table pool."); + } + + if (!mm_ptable_init(&ptable, MM_FLAG_STAGE1, &ppool)) { + HFTEST_FAIL(true, "Unable to allocate memory for page table."); + } + + stage1_locked = get_stage1_locked(); + mm_identity_map_nolock(stage1_locked, pa_init(0), + pa_init(mm_ptable_addr_space_end(MM_FLAG_STAGE1)), + MM_MODE_R | MM_MODE_W | MM_MODE_X, &ppool); + + if (!arch_vm_mm_init()) { + return false; + } + + arch_vm_mm_enable(ptable.root); + + return true; +} + +void hftest_mm_identity_map(const void *base, size_t size, int mode) +{ + struct mm_stage1_locked stage1_locked = get_stage1_locked(); + paddr_t start = pa_from_va(va_from_ptr(base)); + paddr_t end = pa_add(start, size); + + if (mm_identity_map_nolock(stage1_locked, start, end, mode, &ppool) != base) { + FAIL("Could not add new page table mapping. Try increasing " + "size of the page table buffer."); + } +} + +void hftest_mm_vcpu_init(void) +{ + arch_vm_mm_enable(ptable.root); +} diff --git a/test/hftest/power_mgmt.c b/test/hftest/power_mgmt.c new file mode 100644 index 000000000..2daba31e1 --- /dev/null +++ b/test/hftest/power_mgmt.c @@ -0,0 +1,109 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hf/arch/vm/power_mgmt.h"
+
+#include "hf/arch/mm.h"
+
+#include "hf/spinlock.h"
+
+#include "hftest.h"
+
+struct cpu_start_state {
+	void (*entry)(uintptr_t arg);
+	uintreg_t arg;
+	struct spinlock lock;
+};
+
+static noreturn void cpu_entry(uintptr_t arg)
+{
+	struct cpu_start_state *s = (struct cpu_start_state *)arg;
+	struct cpu_start_state s_copy;
+
+	/*
+	 * Initialize memory and enable caching. Must be the first thing we do.
+	 */
+	hftest_mm_vcpu_init();
+
+	/* Make a copy of the cpu_start_state struct. */
+	s_copy = *s;
+
+	/* Inform cpu_start() that the state struct memory can now be freed. */
+	sl_unlock(&s->lock);
+
+	/* Call the given entry function with the given argument. */
+	s_copy.entry(s_copy.arg);
+
+	/* If the entry function returns, turn off the CPU. */
+	arch_cpu_stop();
+}
+
+bool hftest_cpu_start(uintptr_t id, void *stack, size_t stack_size,
+		      void (*entry)(uintptr_t arg), uintptr_t arg)
+{
+	struct cpu_start_state s;
+	struct arch_cpu_start_state s_arch;
+
+	/*
+	 * Config for arch_cpu_start() which will start a new CPU and
+	 * immediately jump to cpu_entry(). This function must guarantee that
+	 * the state struct is not freed until cpu_entry() is called.
+	 */
+	s_arch.initial_sp = (uintptr_t)stack + stack_size;
+	s_arch.entry = cpu_entry;
+	s_arch.arg = (uintptr_t)&s;
+
+	/*
+	 * Flush the `cpu_start_state` struct because the new CPU will be
+	 * started without caching enabled and will need the data early on.
+	 * Write back is all that is really needed so flushing will definitely
+	 * get the job done.
+ */ + arch_mm_flush_dcache(&s_arch, sizeof(s_arch)); + + if ((s_arch.initial_sp % STACK_ALIGN) != 0) { + HFTEST_FAIL(true, + "Stack pointer of new vCPU not properly aligned."); + } + + /* + * Config for cpu_entry(). Its job is to initialize memory and call the + * provided entry point with the provided argument. + */ + s.entry = entry; + s.arg = arg; + sl_init(&s.lock); + + /* + * Lock the cpu_start_state struct which will be unlocked once + * cpu_entry() does not need its content anymore. This simultaneously + * protects the arch_cpu_start_state struct which must not be freed + * before cpu_entry() is called. + */ + sl_lock(&s.lock); + + /* Try to start the given CPU. */ + if (!arch_cpu_start(id, &s_arch)) { + return false; + } + + /* + * Wait until cpu_entry() unlocks the cpu_start_state lock before + * freeing stack memory. + */ + sl_lock(&s.lock); + return true; +} diff --git a/test/hftest/hftest_service.c b/test/hftest/service.c similarity index 92% rename from test/hftest/hftest_service.c rename to test/hftest/service.c index 5b9fafb46..72efa7d9b 100644 --- a/test/hftest/hftest_service.c +++ b/test/hftest/service.c @@ -18,6 +18,7 @@ #include #include "hf/memiter.h" +#include "hf/mm.h" #include "hf/spci.h" #include "hf/std.h" @@ -82,6 +83,17 @@ noreturn void kmain(size_t memory_size) hftest_test_fn service; struct hftest_context *ctx; + /* + * Initialize the stage-1 MMU and identity-map the entire address space. + */ + if (!hftest_mm_init()) { + HFTEST_LOG_FAILURE(); + HFTEST_LOG(HFTEST_LOG_INDENT "Memory initialization failed"); + for (;;) { + /* Hang if memory init failed. */ + } + } + struct spci_message *recv_msg = (struct spci_message *)recv; /* Prepare the context. 
*/ diff --git a/test/hftest/standalone_main.c b/test/hftest/standalone_main.c index c1bfa1abd..63edf1eb5 100644 --- a/test/hftest/standalone_main.c +++ b/test/hftest/standalone_main.c @@ -19,6 +19,7 @@ #include "hf/fdt.h" #include "hf/memiter.h" +#include "hf/mm.h" #include "hftest.h" #include "hftest_common.h" @@ -36,6 +37,14 @@ void kmain(const struct fdt_header *fdt) struct memiter bootargs_iter; struct memiter command; + /* + * Initialize the stage-1 MMU and identity-map the entire address space. + */ + if ((VM_TOOLCHAIN == 1) && !hftest_mm_init()) { + HFTEST_LOG("Memory initialization failed."); + return; + } + hftest_use_list(hftest_begin, hftest_end - hftest_begin); if (!fdt_root_node(&n, fdt)) { diff --git a/test/linux/BUILD.gn b/test/linux/BUILD.gn index 57ea45115..7ef011a3f 100644 --- a/test/linux/BUILD.gn +++ b/test/linux/BUILD.gn @@ -13,8 +13,14 @@ # limitations under the License. import("//build/image/image.gni") +import("//build/toolchain/platform.gni") executable("test_binary") { + include_dirs = [ + "//driver/linux/inc/uapi", + "//third_party/linux/include/uapi", + ] + testonly = true sources = [ "linux.c", @@ -22,9 +28,18 @@ executable("test_binary") { deps = [ "//test/hftest:hftest_linux", ] + libs = ["//hfo2/target/aarch64-hfo2/release/libhfo2.a"] output_name = "test_binary" } +vm_kernel("socket_vm0") { + testonly = true + + deps = [ + ":hftest_secondary_vm_socket", + ] +} + linux_initrd("linux_test_initrd") { testonly = true @@ -43,8 +58,13 @@ linux_initrd("linux_test_initrd") { initrd("linux_test") { testonly = true - primary_vm_prebuilt = "//prebuilts/linux-aarch64/linux/vmlinuz" + manifest = "manifest.dts" + primary_vm = "//third_party:linux__prebuilt" primary_initrd = ":linux_test_initrd" + secondary_vms = [ [ + "socket0", + ":socket_vm0", + ] ] } group("linux") { @@ -54,3 +74,25 @@ group("linux") { ":linux_test", ] } + +# Testing framework for a secondary VM with socket. 
+source_set("hftest_secondary_vm_socket") { + testonly = true + + configs += [ "//test/hftest:hftest_config" ] + + sources = [ + "hftest_socket.c", + ] + + libs = ["//hfo2/target/aarch64-hfo2/release/libhfo2.a"] + + deps = [ + "//src:dlog", + "//src:panic", + "//src:std", + "//src/arch/${plat_arch}:entry", + "//src/arch/${plat_arch}/hftest:entry", + "//src/arch/${plat_arch}/hftest:power_mgmt", + ] +} diff --git a/test/linux/hftest_socket.c b/test/linux/hftest_socket.c new file mode 100644 index 000000000..0b69ea7f7 --- /dev/null +++ b/test/linux/hftest_socket.c @@ -0,0 +1,106 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include "hf/memiter.h" +#include "hf/spci.h" +#include "hf/std.h" + +#include "vmapi/hf/call.h" +#include "vmapi/hf/transport.h" + +#include "hftest.h" + +alignas(4096) uint8_t kstack[4096]; + +static alignas(HF_MAILBOX_SIZE) uint8_t send[HF_MAILBOX_SIZE]; +static alignas(HF_MAILBOX_SIZE) uint8_t recv[HF_MAILBOX_SIZE]; + +static hf_ipaddr_t send_addr = (hf_ipaddr_t)send; +static hf_ipaddr_t recv_addr = (hf_ipaddr_t)recv; + +static struct hftest_context global_context; + +struct hftest_context *hftest_get_context(void) +{ + return &global_context; +} + +noreturn void abort(void) +{ + HFTEST_LOG("Service contained failures."); + /* Cause a fault, as a secondary can't power down the machine. 
*/ + *((volatile uint8_t *)1) = 1; + + /* This should never be reached, but to make the compiler happy... */ + for (;;) { + } +} + +static void swap(uint64_t *a, uint64_t *b) +{ + uint64_t t = *a; + *a = *b; + *b = t; +} + +noreturn void kmain(size_t memory_size) +{ + struct hftest_context *ctx; + + /* Prepare the context. */ + + /* Set up the mailbox. */ + hf_vm_configure(send_addr, recv_addr); + + hf_mailbox_clear(); + + /* Clean the context. */ + ctx = hftest_get_context(); + memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx)); + ctx->abort = abort; + ctx->send = (struct spci_message *)send; + ctx->recv = (struct spci_message *)recv; + ctx->memory_size = memory_size; + + for (;;) { + struct spci_message *send_buf = (struct spci_message *)send; + struct spci_message *recv_buf = (struct spci_message *)recv; + + /* Receive the packet. */ + spci_msg_recv(SPCI_MSG_RECV_BLOCK); + EXPECT_LE(recv_buf->length, SPCI_MSG_PAYLOAD_MAX); + + /* Echo the message back to the sender. */ + memcpy_s(send_buf->payload, SPCI_MSG_PAYLOAD_MAX, + recv_buf->payload, recv_buf->length); + + /* Swap the socket's source and destination ports */ + struct hf_msg_hdr *hdr = (struct hf_msg_hdr *)send_buf->payload; + swap(&(hdr->src_port), &(hdr->dst_port)); + + /* Swap the destination and source ids. */ + spci_vm_id_t dst_id = recv_buf->source_vm_id; + spci_vm_id_t src_id = recv_buf->target_vm_id; + + spci_message_init(send_buf, recv_buf->length, dst_id, src_id); + + hf_mailbox_clear(); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + } +} diff --git a/test/linux/linux.c b/test/linux/linux.c index 330079954..f0798e97b 100644 --- a/test/linux/linux.c +++ b/test/linux/linux.c @@ -14,13 +14,23 @@ * limitations under the License. 
*/ +#include #include +#include +#include +#include +#include #include #include "hf/dlog.h" +#include "hf/socket.h" #include "hftest.h" +#include #include +#include + +#define MAX_BUF_SIZE 256 static int finit_module(int fd, const char *param_values, int flags) { @@ -48,8 +58,75 @@ static void rmmod_hafnium(void) EXPECT_EQ(delete_module("hafnium", 0), 0); } +/** + * Loads and unloads the Hafnium kernel module. + */ TEST(linux, load_hafnium) { insmod_hafnium(); rmmod_hafnium(); } + +/** + * Uses the kernel module to send a socket message from the primary VM to a + * secondary VM and echoes it back to the primary. + */ +TEST(linux, socket_echo_hafnium) +{ + spci_vm_id_t vm_id = HF_VM_ID_OFFSET + 1; + int port = 10; + int socket_id; + struct hf_sockaddr addr; + const char send_buf[] = "The quick brown fox jumps over the lazy dogs."; + size_t send_len = strlen(send_buf); + char resp_buf[MAX_BUF_SIZE]; + ssize_t recv_len; + + ASSERT_LT(send_len, MAX_BUF_SIZE); + + insmod_hafnium(); + + /* Create Hafnium socket. */ + socket_id = socket(PF_HF, SOCK_DGRAM, 0); + if (socket_id == -1) { + FAIL("Socket creation failed: %s", strerror(errno)); + return; + } + HFTEST_LOG("Socket created successfully."); + + /* Connect to requested VM & port. */ + addr.family = PF_HF; + addr.vm_id = vm_id; + addr.port = port; + if (connect(socket_id, (struct sockaddr *)&addr, sizeof(addr)) == -1) { + FAIL("Socket connection failed: %s", strerror(errno)); + return; + } + HFTEST_LOG("Socket to secondary VM %d connected on port %d.", vm_id, + port); + + /* + * Send a message to the secondary VM. + * Enable the confirm flag to try again in case port is busy. + */ + if (send(socket_id, send_buf, send_len, MSG_CONFIRM) < 0) { + FAIL("Socket send() failed: %s", strerror(errno)); + return; + } + HFTEST_LOG("Packet with length %d sent.", send_len); + + /* Receive a response, which should be an echo of the sent packet. 
*/ + recv_len = recv(socket_id, resp_buf, sizeof(resp_buf) - 1, 0); + + if (recv_len == -1) { + FAIL("Socket recv() failed: %s", strerror(errno)); + return; + } + HFTEST_LOG("Packet with length %d received.", recv_len); + + EXPECT_EQ(recv_len, send_len); + EXPECT_EQ(memcmp(send_buf, resp_buf, send_len), 0); + + EXPECT_EQ(close(socket_id), 0); + rmmod_hafnium(); +} diff --git a/test/linux/manifest.dts b/test/linux/manifest.dts new file mode 100644 index 000000000..1372663f4 --- /dev/null +++ b/test/linux/manifest.dts @@ -0,0 +1,34 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + compatible = "hafnium,hafnium"; + vm1 { + debug_name = "primary"; + }; + + vm2 { + debug_name = "socket0"; + vcpu_count = <1>; + mem_size = <0x100000>; + kernel_filename = "socket0"; + }; + }; +}; diff --git a/test/vmapi/gicv3/BUILD.gn b/test/vmapi/gicv3/BUILD.gn index 162a84af1..196f16937 100644 --- a/test/vmapi/gicv3/BUILD.gn +++ b/test/vmapi/gicv3/BUILD.gn @@ -41,10 +41,9 @@ vm_kernel("gicv3_test_vm") { initrd("gicv3_test") { testonly = true + manifest = "manifest.dts" primary_vm = ":gicv3_test_vm" secondary_vms = [ [ - "1048576", - "1", "services0", "services:gicv3_service_vm0", ] ] diff --git a/test/vmapi/gicv3/busy_secondary.c b/test/vmapi/gicv3/busy_secondary.c index 4631312fa..2f0766a91 100644 --- a/test/vmapi/gicv3/busy_secondary.c +++ b/test/vmapi/gicv3/busy_secondary.c @@ -81,8 +81,7 @@ TEST(busy_secondary, virtual_timer) dlog("Telling secondary to loop.\n"); memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message)); - spci_message_init(send_buffer, 0, SERVICE_VM0, - recv_buffer->target_vm_id); + spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID); EXPECT_EQ(spci_msg_send(0), 0); run_res = hf_vcpu_run(SERVICE_VM0, 0); EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED); @@ -139,8 +138,7 @@ TEST(busy_secondary, physical_timer) dlog("Telling secondary to loop.\n"); memcpy_s(send_buffer->payload, SPCI_MSG_PAYLOAD_MAX, message, sizeof(message)); - spci_message_init(send_buffer, 0, SERVICE_VM0, - recv_buffer->target_vm_id); + spci_message_init(send_buffer, 0, SERVICE_VM0, HF_PRIMARY_VM_ID); EXPECT_EQ(spci_msg_send(0), 0); run_res = hf_vcpu_run(SERVICE_VM0, 0); EXPECT_EQ(run_res.code, HF_VCPU_RUN_PREEMPTED); diff --git a/test/vmapi/gicv3/gicv3.c b/test/vmapi/gicv3/gicv3.c index 571f978cf..e6e743508 100644 --- a/test/vmapi/gicv3/gicv3.c +++ b/test/vmapi/gicv3/gicv3.c @@ -50,6 +50,11 @@ static void irq(void) void system_setup() { + const int mode = MM_MODE_R | MM_MODE_W | 
MM_MODE_D; + hftest_mm_identity_map((void *)GICD_BASE, PAGE_SIZE, mode); + hftest_mm_identity_map((void *)GICR_BASE, PAGE_SIZE, mode); + hftest_mm_identity_map((void *)SGI_BASE, PAGE_SIZE, mode); + exception_setup(irq); interrupt_gic_setup(); } @@ -104,3 +109,19 @@ TEST(system, icc_ctlr_write_trapped_secondary) run_res = hf_vcpu_run(SERVICE_VM0, 0); EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); } + +/* + * Check that an attempt by a secondary VM to write ICC_SRE_EL1 is trapped or + * ignored. + */ +TEST(system, icc_sre_write_trapped_secondary) +{ + struct hf_vcpu_run_return run_res; + + EXPECT_EQ(hf_vm_configure(send_page_addr, recv_page_addr), 0); + SERVICE_SELECT(SERVICE_VM0, "write_systemreg_sre", send_buffer); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_TRUE(run_res.code == HF_VCPU_RUN_ABORTED || + run_res.code == HF_VCPU_RUN_YIELD); +} diff --git a/test/vmapi/gicv3/inc/gicv3.h b/test/vmapi/gicv3/inc/gicv3.h index 264dc0c39..d51398f76 100644 --- a/test/vmapi/gicv3/inc/gicv3.h +++ b/test/vmapi/gicv3/inc/gicv3.h @@ -25,7 +25,7 @@ #define NANOS_PER_UNIT 1000000000 -#define SERVICE_VM0 1 +#define SERVICE_VM0 (HF_VM_ID_OFFSET + 1) extern alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE]; extern alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE]; diff --git a/test/vmapi/gicv3/manifest.dts b/test/vmapi/gicv3/manifest.dts new file mode 100644 index 000000000..4e3a76983 --- /dev/null +++ b/test/vmapi/gicv3/manifest.dts @@ -0,0 +1,34 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + compatible = "hafnium,hafnium"; + vm1 { + debug_name = "primary"; + }; + + vm2 { + debug_name = "services0"; + vcpu_count = <1>; + mem_size = <0x100000>; + kernel_filename = "services0"; + }; + }; +}; diff --git a/test/vmapi/gicv3/services/systemreg.c b/test/vmapi/gicv3/services/systemreg.c index ebf85de43..7535167c7 100644 --- a/test/vmapi/gicv3/services/systemreg.c +++ b/test/vmapi/gicv3/services/systemreg.c @@ -34,7 +34,7 @@ TEST_SERVICE(read_systemreg_ctlr) { /* Reading ICC_CTLR_EL1 should trap and abort the VM. */ - dlog("ICC_CTLR_EL1=0x%x\n", read_msr(ICC_CTLR_EL1)); + dlog("ICC_CTLR_EL1=%#x\n", read_msr(ICC_CTLR_EL1)); FAIL("Reading ICC_CTLR_EL1 didn't trap."); } @@ -44,3 +44,17 @@ TEST_SERVICE(write_systemreg_ctlr) write_msr(ICC_CTLR_EL1, 0); FAIL("Writing ICC_CTLR_EL1 didn't trap."); } + +TEST_SERVICE(write_systemreg_sre) +{ + ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7); + /* + * Writing ICC_SRE_EL1 should either trap and abort the VM or be + * ignored. + */ + write_msr(ICC_SRE_EL1, 0x0); + ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7); + write_msr(ICC_SRE_EL1, 0xffffffff); + ASSERT_EQ(read_msr(ICC_SRE_EL1), 0x7); + spci_yield(); +} diff --git a/test/vmapi/primary_only/BUILD.gn b/test/vmapi/primary_only/BUILD.gn index cad907775..ecb197a32 100644 --- a/test/vmapi/primary_only/BUILD.gn +++ b/test/vmapi/primary_only/BUILD.gn @@ -30,6 +30,6 @@ vm_kernel("primary_only_test_vm") { initrd("primary_only_test") { testonly = true - + manifest = "manifest.dts" primary_vm = ":primary_only_test_vm" } diff --git a/test/vmapi/primary_only/faults.c b/test/vmapi/primary_only/faults.c index 1adeb5bf9..bf56b15da 100644 --- a/test/vmapi/primary_only/faults.c +++ b/test/vmapi/primary_only/faults.c @@ -61,9 +61,10 @@ TEST(faults, spurious_due_to_configure) /* Start secondary cpu while holding lock. 
*/ sl_lock(&s.lock); - EXPECT_EQ(cpu_start(hftest_get_cpu_id(1), other_stack, - sizeof(other_stack), rx_reader, (uintptr_t)&s), - true); + EXPECT_EQ( + hftest_cpu_start(hftest_get_cpu_id(1), other_stack, + sizeof(other_stack), rx_reader, (uintptr_t)&s), + true); /* Wait for CPU to release the lock. */ sl_lock(&s.lock); diff --git a/src/arch/aarch64/barriers.c b/test/vmapi/primary_only/manifest.dts similarity index 72% rename from src/arch/aarch64/barriers.c rename to test/vmapi/primary_only/manifest.dts index 8642b058b..3cf090d14 100644 --- a/src/arch/aarch64/barriers.c +++ b/test/vmapi/primary_only/manifest.dts @@ -1,5 +1,5 @@ /* - * Copyright 2018 The Hafnium Authors. + * Copyright 2019 The Hafnium Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,19 +14,14 @@ * limitations under the License. */ -#include "hf/arch/barriers.h" +/dts-v1/; +/plugin/; -void dmb(void) -{ - __asm__ volatile("dmb sy"); -} - -void dsb(void) -{ - __asm__ volatile("dsb sy"); -} - -void isb(void) -{ - __asm__ volatile("isb"); -} +&{/} { + hypervisor { + compatible = "hafnium,hafnium"; + vm1 { + debug_name = "primary"; + }; + }; +}; diff --git a/test/vmapi/primary_only/primary_only.c b/test/vmapi/primary_only/primary_only.c index c57561d82..4bede95a1 100644 --- a/test/vmapi/primary_only/primary_only.c +++ b/test/vmapi/primary_only/primary_only.c @@ -45,7 +45,7 @@ TEST(hf_vm_get_count, no_secondary_vms) */ TEST(hf_vcpu_get_count, primary_has_at_least_one) { - EXPECT_GE(hf_vcpu_get_count(0), 0); + EXPECT_GE(hf_vcpu_get_count(HF_PRIMARY_VM_ID), 0); } /** @@ -54,7 +54,7 @@ TEST(hf_vcpu_get_count, primary_has_at_least_one) */ TEST(hf_vcpu_get_count, no_secondary_vms) { - EXPECT_EQ(hf_vcpu_get_count(1), 0); + EXPECT_EQ(hf_vcpu_get_count(HF_VM_ID_OFFSET + 1), 0); } /** @@ -117,10 +117,10 @@ TEST(cpus, start) /* Start secondary while holding lock. 
*/ sl_lock(&lock); - EXPECT_EQ( - cpu_start(hftest_get_cpu_id(1), other_stack, - sizeof(other_stack), vm_cpu_entry, (uintptr_t)&lock), - true); + EXPECT_EQ(hftest_cpu_start(hftest_get_cpu_id(1), other_stack, + sizeof(other_stack), vm_cpu_entry, + (uintptr_t)&lock), + true); /* Wait for CPU to release the lock. */ sl_lock(&lock); @@ -137,3 +137,23 @@ TEST(spci, spci_version) EXPECT_EQ(spci_version(), current_version); } + +/** + * Test that floating-point operations work in the primary VM. + */ +TEST(fp, fp) +{ + /* + * Get some numbers that the compiler can't tell are constants, so it + * can't optimise them away. + */ + double a = hf_vm_get_count(); + double b = hf_vcpu_get_count(HF_PRIMARY_VM_ID); + double result = a * b; + dlog("VM count: %d\n", hf_vm_get_count()); + dlog("vCPU count: %d\n", hf_vcpu_get_count(HF_PRIMARY_VM_ID)); + dlog("result: %d\n", (int)result); + EXPECT_TRUE(a == 1.0); + EXPECT_TRUE(b == 8.0); + EXPECT_TRUE(result == 8.0); +} diff --git a/test/vmapi/primary_with_secondaries/BUILD.gn b/test/vmapi/primary_with_secondaries/BUILD.gn index d24570f1d..3c009898b 100644 --- a/test/vmapi/primary_with_secondaries/BUILD.gn +++ b/test/vmapi/primary_with_secondaries/BUILD.gn @@ -26,6 +26,7 @@ vm_kernel("primary_with_secondaries_test_vm") { sources = [ "abort.c", "boot.c", + "debug_el1.c", "floating_point.c", "interrupts.c", "mailbox.c", @@ -47,23 +48,19 @@ vm_kernel("primary_with_secondaries_test_vm") { initrd("primary_with_secondaries_test") { testonly = true + manifest = "manifest.dts" + primary_vm = ":primary_with_secondaries_test_vm" secondary_vms = [ [ - "1048576", - "1", "services0", "services:service_vm0", ], [ - "1048576", - "1", "services1", "services:service_vm1", ], [ - "1048576", - "2", "services2", "services:service_vm2", ], diff --git a/test/vmapi/primary_with_secondaries/debug_el1.c b/test/vmapi/primary_with_secondaries/debug_el1.c new file mode 100644 index 000000000..991caa222 --- /dev/null +++ 
b/test/vmapi/primary_with_secondaries/debug_el1.c @@ -0,0 +1,148 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "debug_el1.h" + +#include "primary_with_secondary.h" +#include "util.h" + +/** + * QEMU does not properly handle the trapping of certain system register + * accesses. This was fixed in a custom local build that we could use. If not + * using that build, limit testing to the subset QEMU handles correctly. + */ +#define CUSTOM_QEMU_BUILD() 0 + +/* + * TODO(b/132422368): Devise a way to test exhaustively read/write behavior to + * all debug registers that does not involve a separate service per register. + * This needs proper trap support as a starting point. 
+ */ + +TEST(debug_el1, secondary_mdccint_el1) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + + SERVICE_SELECT(SERVICE_VM0, "debug_el1_secondary_mdccint_el1", mb.send); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +TEST(debug_el1, secondary_dbgbcr0_el1) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + + SERVICE_SELECT(SERVICE_VM0, "debug_el1_secondary_dbgbcr0_el1", mb.send); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +TEST(debug_el1, secondary_dbgbvr0_el1) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + + SERVICE_SELECT(SERVICE_VM0, "debug_el1_secondary_dbgbvr0_el1", mb.send); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +TEST(debug_el1, secondary_dbgwcr0_el1) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + + SERVICE_SELECT(SERVICE_VM0, "debug_el1_secondary_dbgwcr0_el1", mb.send); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +TEST(debug_el1, secondary_dbgwvr0_el1) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + + SERVICE_SELECT(SERVICE_VM0, "debug_el1_secondary_dbgwvr0_el1", mb.send); + + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +/** + * Attempts to access many debug registers for read, without validating their + * value. 
+ */ +TEST(debug_el1, primary_basic) +{ + EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID); + + if (CUSTOM_QEMU_BUILD()) { + TRY_READ(DBGAUTHSTATUS_EL1); + TRY_READ(DBGCLAIMCLR_EL1); + TRY_READ(DBGCLAIMSET_EL1); + TRY_READ(DBGPRCR_EL1); + TRY_READ(OSDTRRX_EL1); + TRY_READ(OSDTRTX_EL1); + TRY_READ(OSECCR_EL1); + + TRY_READ(DBGBCR2_EL1); + TRY_READ(DBGBCR3_EL1); + TRY_READ(DBGBCR4_EL1); + TRY_READ(DBGBCR5_EL1); + TRY_READ(DBGBVR2_EL1); + TRY_READ(DBGBVR3_EL1); + TRY_READ(DBGBVR4_EL1); + TRY_READ(DBGBVR5_EL1); + TRY_READ(DBGWCR2_EL1); + TRY_READ(DBGWCR3_EL1); + TRY_READ(DBGWVR2_EL1); + TRY_READ(DBGWVR3_EL1); + } + + /* The following is the subset currently supported by QEMU. */ + TRY_READ(MDCCINT_EL1); + TRY_READ(MDRAR_EL1); + TRY_READ(MDSCR_EL1); + TRY_READ(OSDLR_EL1); + TRY_READ(OSLSR_EL1); + + TRY_READ(DBGBCR0_EL1); + TRY_READ(DBGBCR1_EL1); + TRY_READ(DBGBVR0_EL1); + TRY_READ(DBGBVR1_EL1); + TRY_READ(DBGWCR0_EL1); + TRY_READ(DBGWCR1_EL1); + TRY_READ(DBGWVR0_EL1); + TRY_READ(DBGWVR1_EL1); +} + +/** + * Tests a few debug registers for read and write, and checks that the expected + * value is written/read. + */ +TEST(debug_el1, primary_read_write) +{ + EXPECT_EQ(hf_vm_get_id(), HF_PRIMARY_VM_ID); + + TRY_WRITE_READ(DBGBCR0_EL1, 0x2); + TRY_WRITE_READ(DBGBVR0_EL1, 0xf0); +} diff --git a/test/vmapi/primary_with_secondaries/debug_el1.h b/test/vmapi/primary_with_secondaries/debug_el1.h new file mode 100644 index 000000000..8f0b8e443 --- /dev/null +++ b/test/vmapi/primary_with_secondaries/debug_el1.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "vmapi/hf/call.h" + +#include "../msr.h" +#include "hftest.h" + +#define TRY_READ(REG) dlog(#REG "=%#x\n", read_msr(REG)) + +#define TRY_WRITE_READ(REG, VALUE) \ + do { \ + uintreg_t x; \ + write_msr(REG, VALUE); \ + x = read_msr(REG); \ + EXPECT_EQ(x, VALUE); \ + } while (0) diff --git a/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h b/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h index a8ff882a6..9e2065078 100644 --- a/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h +++ b/test/vmapi/primary_with_secondaries/inc/primary_with_secondary.h @@ -16,9 +16,9 @@ #pragma once -#define SERVICE_VM0 1 -#define SERVICE_VM1 2 -#define SERVICE_VM2 3 +#define SERVICE_VM0 (HF_VM_ID_OFFSET + 1) +#define SERVICE_VM1 (HF_VM_ID_OFFSET + 2) +#define SERVICE_VM2 (HF_VM_ID_OFFSET + 3) #define SELF_INTERRUPT_ID 5 #define EXTERNAL_INTERRUPT_ID_A 7 diff --git a/test/vmapi/primary_with_secondaries/manifest.dts b/test/vmapi/primary_with_secondaries/manifest.dts new file mode 100644 index 000000000..6460a6095 --- /dev/null +++ b/test/vmapi/primary_with_secondaries/manifest.dts @@ -0,0 +1,48 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/dts-v1/; +/plugin/; + +&{/} { + hypervisor { + compatible = "hafnium,hafnium"; + vm1 { + debug_name = "primary"; + }; + + vm2 { + debug_name = "services0"; + vcpu_count = <1>; + mem_size = <0x100000>; + kernel_filename = "services0"; + }; + + vm3 { + debug_name = "services1"; + vcpu_count = <1>; + mem_size = <0x100000>; + kernel_filename = "services1"; + }; + + vm4 { + debug_name = "services2"; + vcpu_count = <2>; + mem_size = <0x100000>; + kernel_filename = "services2"; + }; + }; +}; diff --git a/test/vmapi/primary_with_secondaries/memory_sharing.c b/test/vmapi/primary_with_secondaries/memory_sharing.c index 4e80d027a..b094a7a21 100644 --- a/test/vmapi/primary_with_secondaries/memory_sharing.c +++ b/test/vmapi/primary_with_secondaries/memory_sharing.c @@ -49,24 +49,99 @@ void check_cannot_share_memory(void *ptr, size_t size) } /** - * Tries sharing memory in available modes with different VMs and asserts that + * Helper function to test lend memory in the different configurations. 
+ */ + +static void spci_check_cannot_lend_memory( + struct mailbox_buffers mb, + struct spci_memory_region_constituent constituents[]) + +{ + enum spci_lend_access lend_access[] = {SPCI_LEND_RO_NX, SPCI_LEND_RO_X, + SPCI_LEND_RW_NX, SPCI_LEND_RW_X}; + enum spci_lend_type lend_type[] = { + SPCI_LEND_NORMAL_MEM, SPCI_LEND_DEV_NGNRNE, SPCI_LEND_DEV_NGNRE, + SPCI_LEND_DEV_NGRE, SPCI_LEND_DEV_GRE}; + enum spci_lend_cacheability lend_cacheability[] = { + SPCI_LEND_CACHE_NON_CACHEABLE, SPCI_LEND_CACHE_WRITE_THROUGH, + SPCI_LEND_CACHE_WRITE_BACK}; + enum spci_lend_shareability lend_shareability[] = { + SPCI_LEND_SHARE_NON_SHAREABLE, SPCI_LEND_RESERVED, + SPCI_LEND_OUTER_SHAREABLE, SPCI_LEND_INNER_SHAREABLE}; + uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1}; + + int i = 0; + int j = 0; + int k = 0; + int l = 0; + int m = 0; + + for (i = 0; i < ARRAY_SIZE(vms); ++i) { + for (j = 0; j < ARRAY_SIZE(lend_access); ++j) { + for (k = 0; k < ARRAY_SIZE(lend_type); ++k) { + for (l = 0; l < ARRAY_SIZE(lend_cacheability); + ++l) { + for (m = 0; + m < ARRAY_SIZE(lend_shareability); + ++m) { + spci_memory_lend( + mb.send, vms[i], + HF_PRIMARY_VM_ID, + constituents, 1, 0, + lend_access[j], + lend_type[k], + lend_cacheability[l], + lend_shareability[m]); + EXPECT_EQ( + spci_msg_send(0), + SPCI_INVALID_PARAMETERS); + } + } + } + } + } +} + +/** + * Tries donating memory in available modes with different VMs and asserts that + * it will fail to all except the supplied VM ID as this would succeed if it + * is the only borrower. + */ +static void spci_check_cannot_donate_memory( + struct mailbox_buffers mb, + struct spci_memory_region_constituent constituents[], int num_elements, + int32_t avoid_vm) +{ + uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1}; + + int i; + for (i = 0; i < ARRAY_SIZE(vms); ++i) { + /* Optionally skip one VM as the donate would succeed. 
*/ + if (vms[i] == avoid_vm) { + continue; + } + spci_memory_donate(mb.send, vms[i], HF_PRIMARY_VM_ID, + constituents, num_elements, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + } +} + +/** + * Tries relinquishing memory with different VMs and asserts that * it will fail. */ -static void spci_check_cannot_share_memory( +static void spci_check_cannot_relinquish_memory( struct mailbox_buffers mb, struct spci_memory_region_constituent constituents[], int num_elements) { uint32_t vms[] = {HF_PRIMARY_VM_ID, SERVICE_VM0, SERVICE_VM1}; - void (*modes[])(struct spci_message *, spci_vm_id_t, spci_vm_id_t, - struct spci_memory_region_constituent *, uint32_t, - uint32_t) = {spci_memory_donate}; + int i; int j; - - for (j = 0; j < ARRAY_SIZE(modes); ++j) { - for (i = 0; i < ARRAY_SIZE(vms); ++i) { - modes[j](mb.send, vms[i], HF_PRIMARY_VM_ID, - constituents, num_elements, 0); + for (i = 0; i < ARRAY_SIZE(vms); ++i) { + for (j = 0; j < ARRAY_SIZE(vms); ++j) { + spci_memory_relinquish(mb.send, vms[i], vms[j], + constituents, num_elements, 0); EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); } } @@ -225,6 +300,44 @@ TEST(memory_sharing, spci_give_and_get_back) EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); } +/** + * SPCI: Check that memory can be lent and is accessible by both parties. + */ +TEST(memory_sharing, spci_lend_relinquish) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish", mb.send); + + /* Initialise the memory before giving it. 
*/ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + run_res = hf_vcpu_run(SERVICE_VM0, 0); + + /* Let the memory be returned. */ + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + /* Ensure that the secondary VM accessed the region. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ASSERT_EQ(ptr[i], 'c'); + } + + /* Observe the service faulting when accessing the memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + /** * Memory given away can be given back. */ @@ -599,8 +712,10 @@ TEST(memory_sharing, spci_donate_twice) run_res = hf_vcpu_run(SERVICE_VM0, 0); EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); - /* Fail to share memory again with either VM0 or VM1. */ - spci_check_cannot_share_memory(mb, constituents, 1); + /* Fail to share memory again with any VM. */ + spci_check_cannot_donate_memory(mb, constituents, 1, -1); + /* Fail to relinquish memory from any VM. */ + spci_check_cannot_relinquish_memory(mb, constituents, 1); /* Let the memory be sent from VM0 to PRIMARY (returned). */ run_res = hf_vcpu_run(SERVICE_VM0, 0); @@ -634,6 +749,27 @@ TEST(memory_sharing, spci_donate_to_self) EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); } +/** + * SPCI: Check cannot lend to self. + */ +TEST(memory_sharing, spci_lend_to_self) +{ + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + /* Initialise the memory before giving it. 
*/ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, HF_PRIMARY_VM_ID, + constituents, 1, 0, SPCI_LEND_RW_X, + SPCI_LEND_NORMAL_MEM, SPCI_LEND_CACHE_WRITE_BACK, + SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); +} + /** * SPCI: Check cannot donate from alternative VM. */ @@ -680,7 +816,7 @@ TEST(memory_sharing, spci_donate_invalid_source) } /** - * SPCI: Check that unaligned addresses can not be donated. + * SPCI: Check that unaligned addresses can not be shared. */ TEST(memory_sharing, spci_give_and_get_back_unaligned) { @@ -695,5 +831,347 @@ TEST(memory_sharing, spci_give_and_get_back_unaligned) spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, 1, 0); EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + spci_memory_lend( + mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, 1, + 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + } +} + +/** + * SPCI: Check cannot lend from alternative VM. + */ +TEST(memory_sharing, spci_lend_invalid_source) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_lend_invalid_source", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + /* Check cannot swap VM IDs. */ + spci_memory_lend(mb.send, HF_PRIMARY_VM_ID, SERVICE_VM0, constituents, + 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + + /* Lend memory to VM0. 
*/ + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Receive and return memory from VM0. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + /* Try to lend memory from primary in VM0. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); +} + +/** + * SPCI: Memory can be lent with executable permissions. + * Check RO and RW permissions. + */ +TEST(memory_sharing, spci_lend_relinquish_X_RW) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Ensure we still have access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ASSERT_EQ(ptr[i], 'b'); + ptr[i]++; + } + + /* Let service write to and return memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + /* Re-initialise the memory before giving it. 
*/ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Ensure we still have access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ASSERT_EQ(ptr[i], 'b'); + ptr[i]++; + } + + /* Observe the service faulting when writing to the memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +/** + * SPCI: Memory can be lent without executable permissions. + * Check RO and RW permissions. + */ +TEST(memory_sharing, spci_lend_relinquish_NX_RW) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Ensure we still have access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ASSERT_EQ(ptr[i], 'b'); + } + + /* Let service write to and return memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + /* Re-initialise the memory before giving it. 
*/ + memset_s(ptr, sizeof(page), 'b', PAGE_SIZE); + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Ensure we still have access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ASSERT_EQ(ptr[i], 'b'); + ptr[i]++; + } + + /* Observe the service faulting when writing to the memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +/** + * SPCI: Exercise execution permissions for lending memory. + */ +TEST(memory_sharing, spci_lend_relinquish_RW_X) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page), 0, PAGE_SIZE); + + uint64_t *ptr2 = (uint64_t *)page; + /* Set memory to contain the RET instruction to attempt to execute. */ + *ptr2 = 0xD65F03C0; + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Attempt to execute from memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RW_NX, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Try and fail to execute from the memory region. 
*/ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +/** + * SPCI: Exercise execution permissions for lending memory without write access. + */ +TEST(memory_sharing, spci_lend_relinquish_RO_X) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_X", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page), 0, PAGE_SIZE); + + uint64_t *ptr2 = (uint64_t *)page; + /* Set memory to contain the RET instruction to attempt to execute. */ + *ptr2 = 0xD65F03C0; + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 1}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Attempt to execute from memory. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_MESSAGE); + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_NX, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Try and fail to execute from the memory region. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_ABORTED); +} + +/** + * SPCI: Memory can be lent, but then no part can be donated. + */ +TEST(memory_sharing, spci_lend_donate) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_relinquish_RW", mb.send); + SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_relinquish_RW", mb.send); + + /* Initialise the memory before giving it. 
*/ + memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2); + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 2}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. */ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Ensure we can't donate any sub section of memory to another VM. */ + constituents[0].page_count = 1; + for (int i = 1; i < PAGE_SIZE * 2; i++) { + constituents[0].address = (uint64_t)page + PAGE_SIZE; + spci_memory_donate(mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, + constituents, 1, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + } + + /* Ensure we can donate to the only borrower. */ + spci_memory_donate(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); +} + +/** + * SPCI: Memory can be lent, but then no part can be lent again. + */ +TEST(memory_sharing, spci_lend_twice) +{ + struct hf_vcpu_run_return run_res; + struct mailbox_buffers mb = set_up_mailbox(); + uint8_t *ptr = page; + + SERVICE_SELECT(SERVICE_VM0, "spci_memory_lend_twice", mb.send); + SERVICE_SELECT(SERVICE_VM1, "spci_memory_lend_twice", mb.send); + + /* Initialise the memory before giving it. */ + memset_s(ptr, sizeof(page) * 2, 'b', PAGE_SIZE * 2); + + struct spci_memory_region_constituent constituents[] = { + {.address = (uint64_t)page, .page_count = 2}, + }; + + spci_memory_lend(mb.send, SERVICE_VM0, HF_PRIMARY_VM_ID, constituents, + 1, 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Let the memory be accessed. 
*/ + run_res = hf_vcpu_run(SERVICE_VM0, 0); + EXPECT_EQ(run_res.code, HF_VCPU_RUN_YIELD); + + /* Attempt to lend the same area of memory. */ + spci_check_cannot_lend_memory(mb, constituents); + /* Fail to donate to VM apart from VM0. */ + spci_check_cannot_donate_memory(mb, constituents, 1, SERVICE_VM0); + /* Fail to relinquish from any VM. */ + spci_check_cannot_relinquish_memory(mb, constituents, 1); + + /* Attempt to lend again with different permissions. */ + constituents[0].page_count = 1; + for (int i = 1; i < PAGE_SIZE * 2; i++) { + constituents[0].address = (uint64_t)page + PAGE_SIZE; + spci_memory_lend( + mb.send, SERVICE_VM1, HF_PRIMARY_VM_ID, constituents, 1, + 0, SPCI_LEND_RO_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); } } diff --git a/test/vmapi/primary_with_secondaries/no_services.c b/test/vmapi/primary_with_secondaries/no_services.c index 7b171e8e2..975ea6d94 100644 --- a/test/vmapi/primary_with_secondaries/no_services.c +++ b/test/vmapi/primary_with_secondaries/no_services.c @@ -55,7 +55,7 @@ TEST(hf_vm_get_count, three_secondary_vms) */ TEST(hf_vcpu_get_count, secondary_has_one_vcpu) { - EXPECT_EQ(hf_vcpu_get_count(1), 1); + EXPECT_EQ(hf_vcpu_get_count(SERVICE_VM0), 1); } /** diff --git a/test/vmapi/primary_with_secondaries/run_race.c b/test/vmapi/primary_with_secondaries/run_race.c index 49c95e34a..63ba81d1f 100644 --- a/test/vmapi/primary_with_secondaries/run_race.c +++ b/test/vmapi/primary_with_secondaries/run_race.c @@ -88,8 +88,8 @@ TEST(vcpu_state, concurrent_save_restore) SERVICE_SELECT(SERVICE_VM0, "check_state", mb.send); /* Start second vCPU. */ - ASSERT_TRUE(cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack), - vm_cpu_entry, (uintptr_t)&mb)); + ASSERT_TRUE(hftest_cpu_start(hftest_get_cpu_id(1), stack, sizeof(stack), + vm_cpu_entry, (uintptr_t)&mb)); /* Run on a loop until the secondary VM is done. 
*/ EXPECT_TRUE(run_loop(&mb)); diff --git a/test/vmapi/primary_with_secondaries/services/BUILD.gn b/test/vmapi/primary_with_secondaries/services/BUILD.gn index 7dc196410..29da30830 100644 --- a/test/vmapi/primary_with_secondaries/services/BUILD.gn +++ b/test/vmapi/primary_with_secondaries/services/BUILD.gn @@ -28,6 +28,16 @@ source_set("check_state") { ] } +# Service to try to access EL1 debug registers. +source_set("debug_el1") { + testonly = true + public_configs = [ "//test/hftest:hftest_config" ] + + sources = [ + "debug_el1.c", + ] +} + # Service to listen for messages and echo them back to the sender. source_set("echo") { testonly = true @@ -189,6 +199,7 @@ vm_kernel("service_vm0") { ":abort", ":boot", ":check_state", + ":debug_el1", ":echo", ":echo_with_notification", ":floating_point", diff --git a/test/vmapi/primary_with_secondaries/services/abort.c b/test/vmapi/primary_with_secondaries/services/abort.c index e37d7e9b2..3d11bd254 100644 --- a/test/vmapi/primary_with_secondaries/services/abort.c +++ b/test/vmapi/primary_with_secondaries/services/abort.c @@ -49,12 +49,18 @@ TEST_SERVICE(instruction_abort) TEST_SERVICE(straddling_instruction_abort) { + /* + * Get a function pointer which, when branched to, will attempt to + * execute a 4-byte instruction straddling two pages. + */ int (*f)(void) = (int (*)(void))(&pages[PAGE_SIZE - 2]); - /* Give some memory to the primary VM so that it's unmapped. */ + /* Give second page to the primary VM so that it's unmapped. */ ASSERT_EQ(hf_share_memory(HF_PRIMARY_VM_ID, (hf_ipaddr_t)(&pages[PAGE_SIZE]), PAGE_SIZE, HF_MEMORY_GIVE), 0); + + /* Branch to instruction whose 2 bytes are now in an unmapped page. 
*/ f(); } diff --git a/test/vmapi/primary_with_secondaries/services/debug_el1.c b/test/vmapi/primary_with_secondaries/services/debug_el1.c new file mode 100644 index 000000000..58ab389e6 --- /dev/null +++ b/test/vmapi/primary_with_secondaries/services/debug_el1.c @@ -0,0 +1,54 @@ +/* + * Copyright 2019 The Hafnium Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../debug_el1.h" + +#include "hf/dlog.h" + +TEST_SERVICE(debug_el1_secondary_mdccint_el1) +{ + EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID); + TRY_READ(MDCCINT_EL1); + FAIL("Reading debug EL1 register in secondary VM didn't trap."); +} + +TEST_SERVICE(debug_el1_secondary_dbgbcr0_el1) +{ + EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID); + TRY_READ(DBGBCR0_EL1); + FAIL("Reading debug EL1 register in secondary VM didn't trap."); +} + +TEST_SERVICE(debug_el1_secondary_dbgbvr0_el1) +{ + EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID); + TRY_READ(DBGBVR0_EL1); + FAIL("Reading debug EL1 register in secondary VM didn't trap."); +} + +TEST_SERVICE(debug_el1_secondary_dbgwcr0_el1) +{ + EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID); + TRY_READ(DBGWCR0_EL1); + FAIL("Reading debug EL1 register in secondary VM didn't trap."); +} + +TEST_SERVICE(debug_el1_secondary_dbgwvr0_el1) +{ + EXPECT_GT(hf_vm_get_id(), HF_PRIMARY_VM_ID); + TRY_READ(DBGWVR0_EL1); + FAIL("Reading debug EL1 register in secondary VM didn't trap."); +} diff --git a/test/vmapi/primary_with_secondaries/services/memory.c 
b/test/vmapi/primary_with_secondaries/services/memory.c index d749186e6..cfd05f622 100644 --- a/test/vmapi/primary_with_secondaries/services/memory.c +++ b/test/vmapi/primary_with_secondaries/services/memory.c @@ -56,6 +56,44 @@ TEST_SERVICE(memory_increment) } } +TEST_SERVICE(memory_lend_relinquish_spci) +{ + /* Loop, giving memory back to the sender. */ + for (;;) { + spci_msg_recv(SPCI_MSG_RECV_BLOCK); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor( + recv_buf) + ->payload); + + ptr = (uint8_t *)memory_region->constituents[0].address; + /* Relevant information read, mailbox can be cleared. */ + hf_mailbox_clear(); + + /* Check that one has access to the shared region. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ptr[i]++; + } + + hf_mailbox_clear(); + /* Give the memory back and notify the sender. */ + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + spci_msg_send(0); + + /* + * Try and access the memory which will cause a fault unless the + * memory has been shared back again. + */ + ptr[0] = 123; + } +} + TEST_SERVICE(memory_return) { /* Loop, giving memory back to the sender. */ @@ -109,7 +147,7 @@ TEST_SERVICE(give_memory_and_fault) hf_vm_get_id()); EXPECT_EQ(spci_msg_send(0), 0); - /* Try using the memory that isn't valid unless it's been returned. */ + /* Try using the memory that isn't valid unless it's been returned. */ page[16] = 123; } @@ -133,7 +171,7 @@ TEST_SERVICE(lend_memory_and_fault) hf_vm_get_id()); EXPECT_EQ(spci_msg_send(0), 0); - /* Try using the memory that isn't valid unless it's been returned. */ + /* Try using the memory that isn't valid unless it's been returned. 
*/ page[633] = 180; } @@ -242,21 +280,21 @@ TEST_SERVICE(spci_donate_twice) struct spci_message *send_buf = SERVICE_SEND_BUFFER(); struct spci_memory_region *memory_region = spci_get_donated_memory_region(recv_buf); + struct spci_memory_region_constituent constituent = + memory_region->constituents[0]; hf_mailbox_clear(); /* Yield to allow attempt to re donate from primary. */ spci_yield(); /* Give the memory back and notify the sender. */ - spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, - memory_region->constituents, memory_region->count, - 0); + spci_memory_donate(send_buf, HF_PRIMARY_VM_ID, SERVICE_VM0, + &constituent, memory_region->count, 0); EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); /* Attempt to donate the memory to another VM. */ spci_memory_donate(send_buf, SERVICE_VM1, recv_buf->target_vm_id, - memory_region->constituents, memory_region->count, - 0); + &constituent, memory_region->count, 0); EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); spci_yield(); @@ -313,3 +351,268 @@ TEST_SERVICE(spci_donate_invalid_source) EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); spci_yield(); } + +TEST_SERVICE(spci_memory_lend_relinquish) +{ + /* Loop, giving memory back to the sender. */ + for (;;) { + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor( + recv_buf) + ->payload); + + ptr = (uint8_t *)memory_region->constituents[0].address; + /* Relevant information read, mailbox can be cleared. */ + hf_mailbox_clear(); + + /* Check that one has access to the shared region. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ptr[i]++; + } + + hf_mailbox_clear(); + /* Give the memory back and notify the sender. 
*/ + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + spci_msg_send(0); + + /* + * Try and access the memory which will cause a fault unless the + * memory has been shared back again. + */ + ptr[0] = 123; + } +} + +/** + * SPCI: Ensure that we can't relinquish donated memory. + */ +TEST_SERVICE(spci_memory_donate_relinquish) +{ + for (;;) { + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + spci_get_donated_memory_region(recv_buf); + hf_mailbox_clear(); + + ptr = (uint8_t *)memory_region->constituents[0].address; + + /* Check that one has access to the shared region. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ptr[i]++; + } + /* Give the memory back and notify the sender. */ + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + + /* Ensure we still have access to the memory. */ + ptr[0] = 123; + + spci_yield(); + } +} + +/** + * SPCI: Receive memory and attempt to donate from primary VM. + */ +TEST_SERVICE(spci_lend_invalid_source) +{ + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf) + ->payload); + hf_mailbox_clear(); + + /* Attempt to relinquish from primary VM. */ + spci_memory_relinquish(send_buf, recv_buf->target_vm_id, + HF_PRIMARY_VM_ID, memory_region->constituents, + memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + + /* Give the memory back and notify the sender. 
*/ + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + + /* Ensure we cannot lend from the primary to another secondary. */ + spci_memory_lend(send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID, + memory_region->constituents, memory_region->count, 0, + SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + spci_yield(); +} + +/** + * SPCI: Attempt to execute an instruction from the lent memory. + */ +TEST_SERVICE(spci_memory_lend_relinquish_X) +{ + for (;;) { + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint64_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor( + recv_buf) + ->payload); + hf_mailbox_clear(); + + ptr = (uint64_t *)memory_region->constituents[0].address; + /* + * Verify that the instruction in memory is the encoded RET + * instruction. + */ + EXPECT_EQ(*ptr, 0xD65F03C0); + /* Try to execute instruction from the shared memory region. */ + __asm__ volatile("blr %0" ::"r"(ptr)); + + /* Release the memory again. */ + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + } +} + +/** + * SPCI: Attempt to read and write to a shared page. 
+ */ +TEST_SERVICE(spci_memory_lend_relinquish_RW) +{ + for (;;) { + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor( + recv_buf) + ->payload); + hf_mailbox_clear(); + + ptr = (uint8_t *)memory_region->constituents[0].address; + + /* Check that we have read access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + EXPECT_EQ(ptr[i], 'b'); + } + + /* Return control to primary, to verify shared access. */ + spci_yield(); + + /* Attempt to modify the memory. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ptr[i]++; + } + + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); + } +} + +/** + * SPCI: Attempt to modify below the lower bound for the lent memory. + */ +TEST_SERVICE(spci_lend_check_lower_bound) +{ + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf) + ->payload); + hf_mailbox_clear(); + + ptr = (uint8_t *)memory_region->constituents[0].address; + + /* Check that one cannot access before donated region. */ + ptr[-1]++; +} + +/** + * SPCI: Attempt to modify above the upper bound for the lent memory. 
+ */ +TEST_SERVICE(spci_lend_check_upper_bound) +{ + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf) + ->payload); + hf_mailbox_clear(); + + ptr = (uint8_t *)memory_region->constituents[0].address; + + /* Check that one cannot access after donated region. */ + ptr[PAGE_SIZE]++; +} + +TEST_SERVICE(spci_memory_lend_twice) +{ + EXPECT_EQ(spci_msg_recv(SPCI_MSG_RECV_BLOCK), SPCI_SUCCESS); + uint8_t *ptr; + + struct spci_message *recv_buf = SERVICE_RECV_BUFFER(); + struct spci_message *send_buf = SERVICE_SEND_BUFFER(); + struct spci_memory_region *memory_region = + (struct spci_memory_region *)(spci_get_lend_descriptor(recv_buf) + ->payload); + hf_mailbox_clear(); + + ptr = (uint8_t *)memory_region->constituents[0].address; + + /* Check that we have read access. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + EXPECT_EQ(ptr[i], 'b'); + } + + /* Return control to primary. */ + spci_yield(); + + /* Attempt to modify the memory. */ + for (int i = 0; i < PAGE_SIZE; ++i) { + ptr[i]++; + } + + for (int i = 1; i < PAGE_SIZE * 2; i++) { + memory_region->constituents[0].address = (uint64_t)ptr + i; + + /* Fail to lend the memory back to the primary. 
*/ + spci_memory_lend( + send_buf, SERVICE_VM1, HF_PRIMARY_VM_ID, + memory_region->constituents, memory_region->count, 0, + SPCI_LEND_RW_X, SPCI_LEND_NORMAL_MEM, + SPCI_LEND_CACHE_WRITE_BACK, SPCI_LEND_OUTER_SHAREABLE); + EXPECT_EQ(spci_msg_send(0), SPCI_INVALID_PARAMETERS); + } + + spci_memory_relinquish( + send_buf, HF_PRIMARY_VM_ID, recv_buf->target_vm_id, + memory_region->constituents, memory_region->count, 0); + EXPECT_EQ(spci_msg_send(0), SPCI_SUCCESS); +} diff --git a/test/vmapi/primary_with_secondaries/services/smp.c b/test/vmapi/primary_with_secondaries/services/smp.c index 863919d18..5ad31d0eb 100644 --- a/test/vmapi/primary_with_secondaries/services/smp.c +++ b/test/vmapi/primary_with_secondaries/services/smp.c @@ -57,8 +57,8 @@ static void vm_cpu_entry(uintptr_t arg) ASSERT_EQ(arg, ARG_VALUE); /* Check that vCPU statuses are as expected. */ - ASSERT_EQ(cpu_status(0), POWER_STATUS_ON); - ASSERT_EQ(cpu_status(1), POWER_STATUS_ON); + ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON); + ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_ON); dlog("Secondary second vCPU started.\n"); send_message("vCPU 1", sizeof("vCPU 1")); @@ -68,18 +68,18 @@ static void vm_cpu_entry(uintptr_t arg) TEST_SERVICE(smp) { /* Check that vCPU statuses are as expected. */ - ASSERT_EQ(cpu_status(0), POWER_STATUS_ON); - ASSERT_EQ(cpu_status(1), POWER_STATUS_OFF); + ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON); + ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_OFF); /* Start second vCPU. */ dlog("Secondary starting second vCPU.\n"); - ASSERT_TRUE( - cpu_start(1, stack, sizeof(stack), vm_cpu_entry, ARG_VALUE)); + ASSERT_TRUE(hftest_cpu_start(1, stack, sizeof(stack), vm_cpu_entry, + ARG_VALUE)); dlog("Secondary started second vCPU.\n"); /* Check that vCPU statuses are as expected. 
*/ - ASSERT_EQ(cpu_status(0), POWER_STATUS_ON); - ASSERT_EQ(cpu_status(1), POWER_STATUS_ON); + ASSERT_EQ(arch_cpu_status(0), POWER_STATUS_ON); + ASSERT_EQ(arch_cpu_status(1), POWER_STATUS_ON); send_message("vCPU 0", sizeof("vCPU 0")); } diff --git a/test/vmapi/primary_with_secondaries/spci.c b/test/vmapi/primary_with_secondaries/spci.c index be442b887..ac54da7f0 100644 --- a/test/vmapi/primary_with_secondaries/spci.c +++ b/test/vmapi/primary_with_secondaries/spci.c @@ -50,7 +50,7 @@ TEST(spci, msg_send) } /** - * Send a message to a secondary VM spoofing the source vm id. + * Send a message to a secondary VM spoofing the source VM id. */ TEST(spci, msg_send_spoof) { diff --git a/third_party/BUILD.gn b/third_party/BUILD.gn index 9751f2571..6dfa3b96d 100644 --- a/third_party/BUILD.gn +++ b/third_party/BUILD.gn @@ -58,4 +58,5 @@ source_set("gtest_main") { linux_kernel("linux") { kernel_dir = "linux" + prebuilt = "//prebuilts/linux-aarch64/linux/vmlinuz" }