diff --git a/.github/workflows/unix.yml b/.github/workflows/unix.yml
index 0acd4d19a6..b7b2630f17 100644
--- a/.github/workflows/unix.yml
+++ b/.github/workflows/unix.yml
@@ -43,7 +43,7 @@ jobs:
         python3 -m pip install -U numpy
         sudo .github/workflows/dependencies/install_spack
     - name: Build
-      env: {CC: mpicc, CXX: mpic++, OMPI_CC: clang-10, OMPI_CXX: clang++-10, CXXFLAGS: -Werror -Wno-deprecated-declarations}
+      env: {CC: mpicc, CXX: mpic++, OMPI_CC: clang-10, OMPI_CXX: clang++-10, CXXFLAGS: -Werror -Wno-deprecated-declarations, OPENPMD_HDF5_CHUNKS: none}
      run: |
        eval $(spack env activate --sh .github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/)
        spack install
@@ -51,8 +51,15 @@ jobs:
        mkdir build && cd build
        ../share/openPMD/download_samples.sh && chmod u-w samples/git-sample/*.h5
        export LDFLAGS="${LDFLAGS} -fsanitize=address,undefined -shared-libsan"
-       CXXFLAGS="${CXXFLAGS} -fsanitize=address,undefined -shared-libsan"
-       CXXFLAGS="${CXXFLAGS}" cmake -S .. -B . -DopenPMD_USE_MPI=ON -DopenPMD_USE_PYTHON=ON -DopenPMD_USE_HDF5=ON -DopenPMD_USE_ADIOS2=ON -DopenPMD_USE_ADIOS1=ON -DopenPMD_USE_INVASIVE_TESTS=ON -DCMAKE_VERBOSE_MAKEFILE=ON
+       export CXXFLAGS="${CXXFLAGS} -fsanitize=address,undefined -shared-libsan"
+       cmake -S .. -B . \
+         -DopenPMD_USE_MPI=ON \
+         -DopenPMD_USE_PYTHON=ON \
+         -DopenPMD_USE_HDF5=ON \
+         -DopenPMD_USE_ADIOS2=ON \
+         -DopenPMD_USE_ADIOS1=ON \
+         -DopenPMD_USE_INVASIVE_TESTS=ON \
+         -DCMAKE_VERBOSE_MAKEFILE=ON
        cmake --build . --parallel 2
        export ASAN_OPTIONS=detect_stack_use_after_return=1:detect_leaks=1:check_initialization_order=true:strict_init_order=true:detect_stack_use_after_scope=1:fast_unwind_on_malloc=0
        export LSAN_OPTIONS=suppressions="$SOURCEPATH/.github/ci/sanitizer/clang/Leak.supp"
diff --git a/docs/source/backends/hdf5.rst b/docs/source/backends/hdf5.rst
index 6c6ee972c9..08f73c742f 100644
--- a/docs/source/backends/hdf5.rst
+++ b/docs/source/backends/hdf5.rst
@@ -26,6 +26,7 @@
 environment variable                  default   description
 ===================================== ========= ====================================================================================
 ``OPENPMD_HDF5_INDEPENDENT``          ``ON``    Sets the MPI-parallel transfer mode to collective (``OFF``) or independent (``ON``).
 ``OPENPMD_HDF5_ALIGNMENT``            ``1``     Tuning parameter for parallel I/O, choose an alignment which is a multiple of the disk block size.
+``OPENPMD_HDF5_CHUNKS``               ``auto``  Defaults for ``H5Pset_chunk``: ``"auto"`` (heuristic) or ``"none"`` (no chunking).
 ``H5_COLL_API_SANITY_CHECK``          unset     Set to ``1`` to perform an ``MPI_Barrier`` inside each meta-data operation.
 ===================================== ========= ====================================================================================
@@ -40,6 +41,9 @@
 According to the `HDF5 documentation `_, it is advised to set this to the Lustre stripe size.
 In addition, ORNL Summit GPFS users are recommended to set the alignment value to 16777216(16MB).
 
+``OPENPMD_HDF5_CHUNKS``: this sets defaults for data chunking via `H5Pset_chunk `__.
+Chunking generally improves performance and only needs to be disabled in corner-cases, e.g. when heavily relying on independent, parallel I/O that non-collectively declares data records.
+
 ``H5_COLL_API_SANITY_CHECK``: this is a HDF5 control option for debugging parallel I/O logic (API calls).
 Debugging a parallel program with that option enabled can help to spot bugs such as collective MPI-calls that are not called by all participating MPI ranks.
 Do not use in production, this will slow parallel I/O operations down.
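
For context, the same switch is also reachable per Series at runtime through the JSON options string documented in docs/source/details/backendconfig.rst below; the environment variable only sets the default. A minimal, illustrative C++ sketch (the file name is a placeholder):

    #include <openPMD/openPMD.hpp>

    #include <string>

    int main()
    {
        // disable HDF5 chunking for this Series only; the environment variable
        // OPENPMD_HDF5_CHUNKS merely provides the default
        std::string const options = R"(
        {
          "hdf5": {
            "dataset": {
              "chunks": "none"
            }
          }
        })";

        // "data.h5" is a placeholder file name
        openPMD::Series series( "data.h5", openPMD::Access::CREATE, options );
        // ... declare records and store data as usual ...
        series.flush();
        return 0;
    }
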
diff --git a/docs/source/details/backendconfig.rst b/docs/source/details/backendconfig.rst
index 411b38cd12..7b357b1a38 100644
--- a/docs/source/details/backendconfig.rst
+++ b/docs/source/details/backendconfig.rst
@@ -74,6 +74,23 @@ Explanation of the single keys:
 Any setting specified under ``adios2.dataset`` is applicable globally as well as on a per-dataset level.
 Any setting under ``adios2.engine`` is applicable globally only.
 
+HDF5
+^^^^
+
+A full configuration of the HDF5 backend:
+
+.. literalinclude:: hdf5.json
+   :language: json
+
+All keys found under ``hdf5.dataset`` are applicable globally (future: as well as per dataset).
+Explanation of the single keys:
+
+* ``hdf5.dataset.chunks``: This key contains options for data chunking via `H5Pset_chunk `__.
+  The default is ``"auto"`` for a heuristic.
+  ``"none"`` can be used to disable chunking.
+  Chunking generally improves performance and only needs to be disabled in corner-cases, e.g. when heavily relying on independent, parallel I/O that non-collectively declares data records.
+
+
 Other backends
 ^^^^^^^^^^^^^^
 
diff --git a/docs/source/details/hdf5.json b/docs/source/details/hdf5.json
new file mode 100644
index 0000000000..99eb609123
--- /dev/null
+++ b/docs/source/details/hdf5.json
@@ -0,0 +1,7 @@
+{
+  "hdf5": {
+    "dataset": {
+      "chunks": "auto"
+    }
+  }
+}
diff --git a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp
index 2e70ae5a12..4785cf4f66 100644
--- a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp
+++ b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp
@@ -1,4 +1,4 @@
-/* Copyright 2017-2021 Fabian Koller
+/* Copyright 2017-2021 Fabian Koller, Felix Schmitt, Axel Huebl
  *
  * This file is part of openPMD-api.
  *
@@ -34,7 +34,6 @@
 namespace openPMD
 {
-#if openPMD_HAVE_HDF5
     struct GetH5DataType
     {
         std::unordered_map< std::string, hid_t > m_userTypes;
@@ -54,5 +53,20 @@ namespace openPMD
 
     std::string
     concrete_h5_file_position(Writable* w);
-#endif
+
+    /** Computes the chunk dimensions for a dataset.
+     *
+     * Chunk dimensions are selected to create chunk sizes between
+     * 64 KiB and 4 MiB. Smaller chunk sizes are inefficient due to overhead;
+     * larger chunks do not map well to file system blocks and striding.
+     *
+     * Chunk dimensions are less than or equal to dataset dimensions and do
+     * not need to be a factor of the respective dataset dimension.
+     *
+     * @param[in] dims dimensions of dataset to get chunk dims for
+     * @param[in] typeSize size of each element in bytes
+     * @return vector of resulting chunk dimensions
+     */
+    std::vector< hsize_t >
+    getOptimalChunkDims( std::vector< hsize_t > const dims,
+                         size_t const typeSize );
 } // namespace openPMD
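
For orientation, a sketch of how this helper is meant to be called, essentially what createDataset does further down in this patch. The 1024x1024 extent and double element type are made up for illustration; for that shape the heuristic should settle at its 2 MiB target, i.e. a 512x512 chunk of 8-byte elements. The snippet assumes the internal header above is reachable on the include path and that HDF5 is available:

    #include "openPMD/IO/HDF5/HDF5Auxiliary.hpp"

    #include <hdf5.h>

    #include <stdexcept>
    #include <vector>

    // illustrative only: chunk a 1024 x 1024 dataset of doubles
    void applyChunking( hid_t datasetCreationProperty )
    {
        std::vector< hsize_t > const dims{ 1024u, 1024u };

        // heuristic introduced by this patch
        std::vector< hsize_t > chunkDims =
            openPMD::getOptimalChunkDims( dims, sizeof( double ) );

        herr_t status = H5Pset_chunk(
            datasetCreationProperty,
            static_cast< int >( chunkDims.size() ),
            chunkDims.data() );
        if( status != 0 )
            throw std::runtime_error( "H5Pset_chunk failed" );
    }
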
diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
index 4b0d98828c..3dfc1f6e1b 100644
--- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
+++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp
@@ -22,6 +22,8 @@
 #include "openPMD/IO/AbstractIOHandler.hpp"
 
+#include <nlohmann/json.hpp>
+
 #include
 #include
 #include
@@ -34,7 +36,7 @@ class HDF5IOHandlerImpl;
 class HDF5IOHandler : public AbstractIOHandler
 {
 public:
-    HDF5IOHandler(std::string path, Access);
+    HDF5IOHandler(std::string path, Access, nlohmann::json config);
 
     ~HDF5IOHandler() override;
 
     std::string backendName() const override { return "HDF5"; }
diff --git a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp
index 0f5e4a3982..5ec2c4af6e 100644
--- a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp
+++ b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp
@@ -24,6 +24,7 @@
 #if openPMD_HAVE_HDF5
 # include "openPMD/IO/AbstractIOHandlerImpl.hpp"
+# include "openPMD/auxiliary/JSON.hpp"
 # include "openPMD/auxiliary/Option.hpp"
 
 # include
@@ -38,7 +39,7 @@ namespace openPMD
     class HDF5IOHandlerImpl : public AbstractIOHandlerImpl
     {
     public:
-        HDF5IOHandlerImpl(AbstractIOHandler*);
+        HDF5IOHandlerImpl(AbstractIOHandler*, nlohmann::json config);
         ~HDF5IOHandlerImpl() override;
 
         void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override;
@@ -77,6 +78,8 @@
         hid_t m_H5T_CLONG_DOUBLE;
 
     private:
+        auxiliary::TracingJSON m_config;
+        std::string m_chunks = "auto";
         struct File
         {
             std::string name;
diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp
index acf6854c9e..70cb681f0d 100644
--- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp
+++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp
@@ -23,6 +23,8 @@
 #include "openPMD/config.hpp"
 #include "openPMD/IO/AbstractIOHandler.hpp"
 
+#include <nlohmann/json.hpp>
+
 #include
 #include
 #include
@@ -36,9 +38,10 @@ namespace openPMD
     {
     public:
 #if openPMD_HAVE_MPI
-        ParallelHDF5IOHandler(std::string path, Access, MPI_Comm);
+        ParallelHDF5IOHandler(
+            std::string path, Access, MPI_Comm, nlohmann::json config);
 #else
-        ParallelHDF5IOHandler(std::string path, Access);
+        ParallelHDF5IOHandler(std::string path, Access, nlohmann::json config);
 #endif
 
         ~ParallelHDF5IOHandler() override;
diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp
index bfc10dc664..d0e18dc85c 100644
--- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp
+++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp
@@ -27,6 +27,7 @@
 # include
 # if openPMD_HAVE_HDF5
 #   include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp"
+#   include <nlohmann/json.hpp>
 # endif
 #endif
 
@@ -37,7 +38,8 @@ namespace openPMD
     class ParallelHDF5IOHandlerImpl : public HDF5IOHandlerImpl
     {
     public:
-        ParallelHDF5IOHandlerImpl(AbstractIOHandler*, MPI_Comm);
+        ParallelHDF5IOHandlerImpl(
+            AbstractIOHandler*, MPI_Comm, nlohmann::json config);
         ~ParallelHDF5IOHandlerImpl() override;
 
         MPI_Comm m_mpiComm;
diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp
index ba8b887cef..25e2bfcd01 100644
--- a/src/IO/AbstractIOHandlerHelper.cpp
+++ b/src/IO/AbstractIOHandlerHelper.cpp
@@ -45,7 +45,8 @@ namespace openPMD
     switch( format )
     {
         case Format::HDF5:
-            return std::make_shared< ParallelHDF5IOHandler >( path, access, comm );
+            return std::make_shared< ParallelHDF5IOHandler >(
+                path, access, comm, std::move( options ) );
         case Format::ADIOS1:
 #   if openPMD_HAVE_ADIOS1
             return std::make_shared< ParallelADIOS1IOHandler >( path, access, comm );
@@ -80,7 +81,8 @@ namespace openPMD
     switch( format )
     {
         case Format::HDF5:
-            return std::make_shared< HDF5IOHandler >( path, access );
+            return std::make_shared< HDF5IOHandler >(
+                path, access, std::move( options ) );
         case Format::ADIOS1:
 #if openPMD_HAVE_ADIOS1
             return std::make_shared< ADIOS1IOHandler >( path, access );
diff --git a/src/IO/HDF5/HDF5Auxiliary.cpp b/src/IO/HDF5/HDF5Auxiliary.cpp
index 23af642f5f..41d86394f4 100644
--- a/src/IO/HDF5/HDF5Auxiliary.cpp
+++ b/src/IO/HDF5/HDF5Auxiliary.cpp
@@ -1,4 +1,4 @@
-/* Copyright 2017-2021 Fabian Koller, Axel Huebl
+/* Copyright 2017-2021 Fabian Koller, Felix Schmitt, Axel Huebl
  *
  * This file is part of openPMD-api.
  *
@@ -30,10 +30,12 @@
 # include
 # include
+# include
 # include
 # include
 # include
 # include
+# include
 
 # if openPMD_USE_VERIFY
 #   define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); }
@@ -306,4 +308,92 @@ openPMD::concrete_h5_file_position(Writable* w)
     return auxiliary::replace_all(pos, "//", "/");
 }
 
+
+std::vector< hsize_t >
+openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims,
+                              size_t const typeSize )
+{
+    auto const ndims = dims.size();
+    std::vector< hsize_t > chunk_dims( dims.size() );
+
+    // chunk sizes in KiByte
+    constexpr std::array< size_t, 7u > CHUNK_SIZES_KiB
+        {{4096u, 2048u, 1024u, 512u, 256u, 128u, 64u}};
+
+    size_t total_data_size = typeSize;
+    size_t max_chunk_size = typeSize;
+    size_t target_chunk_size = 0u;
+
+    // compute the order of dimensions (descending)
+    // large dataset dimensions should have larger chunk sizes
+    std::multimap< hsize_t, uint32_t, std::greater< hsize_t > > dims_order;
+    for (uint32_t i = 0; i < ndims; ++i)
+        dims_order.insert(std::make_pair(dims[i], i));
+
+    for (uint32_t i = 0; i < ndims; ++i)
+    {
+        // initial number of chunks per dimension
+        chunk_dims[i] = 1;
+
+        // try to make at least two chunks for each dimension
+        size_t half_dim = dims[i] / 2;
+
+        // compute sizes
+        max_chunk_size *= (half_dim > 0) ? half_dim : 1;
+        total_data_size *= dims[i];
+    }
+
+    // compute the target chunk size
+    for( auto const & chunk_size : CHUNK_SIZES_KiB )
+    {
+        target_chunk_size = chunk_size * 1024;
+        if (target_chunk_size <= max_chunk_size)
+            break;
+    }
+
+    size_t current_chunk_size = typeSize;
+    size_t last_chunk_diff = target_chunk_size;
+    std::multimap< hsize_t, uint32_t, std::greater< hsize_t > >::const_iterator
+        current_index = dims_order.begin();
+
+    while (current_chunk_size < target_chunk_size)
+    {
+        // test if increasing chunk size optimizes towards target chunk size
+        size_t chunk_diff = target_chunk_size - (current_chunk_size * 2u);
+        if (chunk_diff >= last_chunk_diff)
+            break;
+
+        // find next dimension to increase chunk size for
+        int can_increase_dim = 0;
+        for (uint32_t d = 0; d < ndims; ++d)
+        {
+            int current_dim = current_index->second;
+
+            // increasing chunk size possible
+            if (chunk_dims[current_dim] * 2 <= dims[current_dim])
+            {
+                chunk_dims[current_dim] *= 2;
+                current_chunk_size *= 2;
+                can_increase_dim = 1;
+            }
+
+            current_index++;
+            if (current_index == dims_order.end())
+                current_index = dims_order.begin();
+
+            if (can_increase_dim)
+                break;
+        }
+
+        // can not increase chunk size in any dimension
+        // we must use the current chunk sizes
+        if (!can_increase_dim)
+            break;
+
+        last_chunk_diff = chunk_diff;
+    }
+
+    return chunk_dims;
+}
+
 #endif
diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp
index fad45d7a52..44293dd5df 100644
--- a/src/IO/HDF5/HDF5IOHandler.cpp
+++ b/src/IO/HDF5/HDF5IOHandler.cpp
@@ -20,8 +20,10 @@
  */
 #include "openPMD/IO/HDF5/HDF5IOHandler.hpp"
 #include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp"
+#include "openPMD/auxiliary/Environment.hpp"
 
 #if openPMD_HAVE_HDF5
+# include "openPMD/Datatype.hpp"
 # include "openPMD/auxiliary/Filesystem.hpp"
 # include "openPMD/auxiliary/StringManip.hpp"
 # include "openPMD/backend/Attribute.hpp"
@@ -50,7 +52,8 @@ namespace openPMD
 #   define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 )
 # endif
 
-HDF5IOHandlerImpl::HDF5IOHandlerImpl(AbstractIOHandler* handler)
+HDF5IOHandlerImpl::HDF5IOHandlerImpl(
+    AbstractIOHandler* handler, nlohmann::json config)
     : AbstractIOHandlerImpl(handler),
       m_datasetTransferProperty{H5P_DEFAULT},
       m_fileAccessProperty{H5P_DEFAULT},
@@ -81,6 +84,39 @@ HDF5IOHandlerImpl::HDF5IOHandlerImpl(AbstractIOHandler* handler)
     H5Tinsert(m_H5T_CDOUBLE, "i", sizeof(double), H5T_NATIVE_DOUBLE);
     H5Tinsert(m_H5T_CLONG_DOUBLE, "r", 0, H5T_NATIVE_LDOUBLE);
     H5Tinsert(m_H5T_CLONG_DOUBLE, "i", sizeof(long double), H5T_NATIVE_LDOUBLE);
+
+    m_chunks = auxiliary::getEnvString( "OPENPMD_HDF5_CHUNKS", "auto" );
+    // JSON option can overwrite env option:
+    if( config.contains( "hdf5" ) )
+    {
+        m_config = std::move( config[ "hdf5" ] );
+
+        // check for global dataset configs
+        if( m_config.json().contains( "dataset" ) )
+        {
+            auto datasetConfig = m_config[ "dataset" ];
+            if( datasetConfig.json().contains( "chunks" ) )
+            {
+                m_chunks = datasetConfig[ "chunks" ].json().get< std::string >();
+            }
+        }
+        if( m_chunks != "auto" && m_chunks != "none" )
+        {
+            std::cerr << "Warning: HDF5 chunking option set to an invalid "
+                         "value '" << m_chunks << "'. Reset to 'auto'."
+                      << std::endl;
+            m_chunks = "auto";
+        }
+
+        // unused params
+        auto shadow = m_config.invertShadow();
+        if( shadow.size() > 0 )
+        {
+            std::cerr << "Warning: parts of the JSON configuration for "
+                         "HDF5 remain unused:\n"
+                      << shadow << std::endl;
+        }
+    }
 }
 
 HDF5IOHandlerImpl::~HDF5IOHandlerImpl()
@@ -238,6 +274,25 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
         if( auxiliary::ends_with(name, '/') )
             name = auxiliary::replace_last(name, "/", "");
 
+        auto config = nlohmann::json::parse( parameters.options );
+        if( config.contains( "hdf5" ) &&
+            config[ "hdf5" ].contains( "dataset" ) )
+        {
+            auxiliary::TracingJSON datasetConfig{
+                config[ "hdf5" ][ "dataset" ] };
+            /*
+             * @todo Read options from config here.
+             */
+            auto shadow = datasetConfig.invertShadow();
+            if( shadow.size() > 0 )
+            {
+                std::cerr << "Warning: parts of the JSON configuration for "
+                             "HDF5 dataset '"
+                          << name << "' remain unused:\n"
+                          << shadow << std::endl;
+            }
+        }
+
         /* Open H5Object to write into */
         auto res = getFile( writable );
         File file = res ? res.get() : getFile( writable->parent ).get();
@@ -256,21 +311,32 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
         Attribute a(0);
         a.dtype = d;
         std::vector< hsize_t > dims;
-        for( auto const& val : parameters.extent )
+        std::uint64_t num_elements = 1u;
+        for( auto const& val : parameters.extent )
+        {
             dims.push_back(static_cast< hsize_t >(val));
+            num_elements *= val;
+        }
 
         hid_t space = H5Screate_simple(static_cast< int >(dims.size()), dims.data(), dims.data());
         VERIFY(space >= 0, "[HDF5] Internal error: Failed to create dataspace during dataset creation");
 
-        std::vector< hsize_t > chunkDims;
-        for( auto const& val : parameters.chunkSize )
-            chunkDims.push_back(static_cast< hsize_t >(val));
-
         /* enable chunking on the created dataspace */
         hid_t datasetCreationProperty = H5Pcreate(H5P_DATASET_CREATE);
-        herr_t status;
-        //status = H5Pset_chunk(datasetCreationProperty, chunkDims.size(), chunkDims.data());
-        //VERIFY(status == 0, "[HDF5] Internal error: Failed to set chunk size during dataset creation");
+
+        if( num_elements != 0u && m_chunks != "none" )
+        {
+            //! @todo add per dataset chunk control from JSON config
+
+            // get chunking dimensions
+            std::vector< hsize_t > chunk_dims = getOptimalChunkDims(dims, toBytes(d));
+
+            //! @todo allow overwrite with user-provided chunk size
+            //for( auto const& val : parameters.chunkSize )
+            //    chunk_dims.push_back(static_cast< hsize_t >(val));
+
+            herr_t status = H5Pset_chunk(datasetCreationProperty, chunk_dims.size(), chunk_dims.data());
+            VERIFY(status == 0, "[HDF5] Internal error: Failed to set chunk size during dataset creation");
+        }
 
         std::string const& compression = parameters.compression;
         if( !compression.empty() )
@@ -318,6 +384,7 @@ HDF5IOHandlerImpl::createDataset(Writable* writable,
                                    H5P_DEFAULT);
         VERIFY(group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during dataset creation");
 
+        herr_t status;
         status = H5Dclose(group_id);
         VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset creation");
         status = H5Tclose(datatype);
@@ -1832,9 +1899,9 @@ HDF5IOHandlerImpl::getFile( Writable * writable )
 #endif
 
 #if openPMD_HAVE_HDF5
-HDF5IOHandler::HDF5IOHandler(std::string path, Access at)
+HDF5IOHandler::HDF5IOHandler(std::string path, Access at, nlohmann::json config)
     : AbstractIOHandler(std::move(path), at),
-      m_impl{new HDF5IOHandlerImpl(this)}
+      m_impl{new HDF5IOHandlerImpl(this, std::move(config))}
 { }
 
 HDF5IOHandler::~HDF5IOHandler() = default;
@@ -1845,7 +1912,7 @@ HDF5IOHandler::flush()
     return m_impl->flush();
 }
 #else
-HDF5IOHandler::HDF5IOHandler(std::string path, Access at)
+HDF5IOHandler::HDF5IOHandler(std::string path, Access at, nlohmann::json /* config */)
     : AbstractIOHandler(std::move(path), at)
 {
     throw std::runtime_error("openPMD-api built without HDF5 support");
diff --git a/src/IO/HDF5/ParallelHDF5IOHandler.cpp b/src/IO/HDF5/ParallelHDF5IOHandler.cpp
index e2664c46d3..84ad9dc90d 100644
--- a/src/IO/HDF5/ParallelHDF5IOHandler.cpp
+++ b/src/IO/HDF5/ParallelHDF5IOHandler.cpp
@@ -38,11 +38,10 @@ namespace openPMD
 #   define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 )
 # endif
 
-ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path,
-                                             Access at,
-                                             MPI_Comm comm)
+ParallelHDF5IOHandler::ParallelHDF5IOHandler(
+    std::string path, Access at, MPI_Comm comm, nlohmann::json config )
     : AbstractIOHandler(std::move(path), at, comm),
-      m_impl{new ParallelHDF5IOHandlerImpl(this, comm)}
+      m_impl{new ParallelHDF5IOHandlerImpl(this, comm, std::move(config))}
 { }
 
 ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default;
@@ -53,9 +52,9 @@ ParallelHDF5IOHandler::flush()
     return m_impl->flush();
 }
 
-ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl(AbstractIOHandler* handler,
-                                                     MPI_Comm comm)
-    : HDF5IOHandlerImpl{handler},
+ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl(
+    AbstractIOHandler* handler, MPI_Comm comm, nlohmann::json config )
+    : HDF5IOHandlerImpl{handler, std::move(config)},
       m_mpiComm{comm},
       m_mpiInfo{MPI_INFO_NULL} /* MPI 3.0+: MPI_INFO_ENV */
 {
@@ -103,14 +102,16 @@ ParallelHDF5IOHandlerImpl::~ParallelHDF5IOHandlerImpl()
 # if openPMD_HAVE_MPI
 ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path,
                                              Access at,
-                                             MPI_Comm comm)
+                                             MPI_Comm comm,
+                                             nlohmann::json /* config */)
     : AbstractIOHandler(std::move(path), at, comm)
 {
     throw std::runtime_error("openPMD-api built without HDF5 support");
 }
 # else
 ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path,
-                                             Access at)
+                                             Access at,
+                                             nlohmann::json /* config */)
     : AbstractIOHandler(std::move(path), at)
 {
     throw std::runtime_error("openPMD-api built without parallel support and without HDF5 support");
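
To check what these settings did to a written file, the chunk layout can be queried back through the plain HDF5 C API. A minimal sketch; the file and dataset paths are placeholders and need to be adapted to an actual openPMD output:

    #include <hdf5.h>

    #include <iostream>
    #include <vector>

    // illustrative only: report the chunk layout of one dataset in a written file
    int main()
    {
        // placeholder paths; adjust to a file and record produced by openPMD
        hid_t file = H5Fopen( "data.h5", H5F_ACC_RDONLY, H5P_DEFAULT );
        hid_t dset = H5Dopen( file, "/data/10/fields/E/x", H5P_DEFAULT );

        hid_t dcpl = H5Dget_create_plist( dset );
        if( H5Pget_layout( dcpl ) == H5D_CHUNKED )
        {
            std::vector< hsize_t > chunk( 32 );
            int ndims = H5Pget_chunk(
                dcpl, static_cast< int >( chunk.size() ), chunk.data() );
            std::cout << "chunked dataset with " << ndims << " dimensions\n";
        }
        else
        {
            std::cout << "dataset is not chunked (e.g. chunks = \"none\")\n";
        }

        H5Pclose( dcpl );
        H5Dclose( dset );
        H5Fclose( file );
        return 0;
    }
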
diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp
index 14d1a9272b..63e24be292 100644
--- a/test/ParallelIOTest.cpp
+++ b/test/ParallelIOTest.cpp
@@ -764,6 +764,19 @@ hipace_like_write( std::string file_ending )
     // the iterations we want to write
     std::vector< int > iterations = { 10, 30, 50, 70 };
 
+    // Parallel HDF5 + chunking does not work with independent IO pattern
+    bool const isHDF5 = file_ending == "h5";
+    std::string options = "{}";
+    if( isHDF5 )
+        options = R"(
+        {
+          "hdf5": {
+            "dataset": {
+              "chunks": "none"
+            }
+          }
+        })";
+
     // MPI communicator meta-data and file name
     int i_mpi_rank{ -1 }, i_mpi_size{ -1 };
     MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank );
@@ -785,7 +798,7 @@ hipace_like_write( std::string file_ending )
         [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. ); });
 
     // open a parallel series
-    Series series( name, Access::CREATE, MPI_COMM_WORLD );
+    Series series( name, Access::CREATE, MPI_COMM_WORLD, options );
     series.setIterationEncoding( IterationEncoding::groupBased );
     series.flush();
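
As a side note, the options string used in this test can also be assembled with nlohmann::json instead of a raw string literal; misspelled keys would otherwise only surface at runtime through the new "remain unused" warning. A small sketch (the helper name is made up):

    #include <nlohmann/json.hpp>

    #include <string>

    // illustrative only: build the same options string programmatically
    std::string makeHDF5Options( bool disableChunking )
    {
        nlohmann::json options;
        options[ "hdf5" ][ "dataset" ][ "chunks" ] =
            disableChunking ? "none" : "auto";
        return options.dump();
    }
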