From 9044238a50a8dadb9d58ab77e98cd5351022cdcc Mon Sep 17 00:00:00 2001
From: Lee Maguire
Date: Fri, 12 Jul 2024 13:41:14 +0100
Subject: [PATCH] Fix compilation issue with std::tuple (#227)

---
 CHANGELOG.md                                  |  1 +
 include/cpprealm/internal/bridge/utils.hpp    | 10 ++++++++
 include/cpprealm/macros.hpp                   | 23 +++++++++++--------
 src/cpprealm/schedulers/default_scheduler.cpp |  2 +-
 4 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b0a02cd..706a684e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@ NEXT-RELEASE Release notes (YYYY-MM-DD)
 ### Fixed
 * Using the `==` operator in a type safe query for a nullable string property would return the incorrect result when aligned storage was disabled.
+* Fix compilation issue when building with Bionic due to use of std::tuple (since 2.1.0).

 ### Enhancements
 * Add ability to use `managed>` in type safe queries when comparing a value for a key. e.g.
diff --git a/include/cpprealm/internal/bridge/utils.hpp b/include/cpprealm/internal/bridge/utils.hpp
index 0b9eacf4..975c2727 100644
--- a/include/cpprealm/internal/bridge/utils.hpp
+++ b/include/cpprealm/internal/bridge/utils.hpp
@@ -60,6 +60,16 @@ namespace realm::internal {
         static_assert(sizeof...(Ts) == sizeof...(Us), "Tuples must have the same size");
         return zip_tuples_impl(tuple1, tuple2, std::index_sequence_for<Ts...>());
     }
+
+    template <typename T, std::size_t N, std::size_t... Is>
+    auto constexpr array_to_tuple_impl(const std::array<T, N>& arr, std::index_sequence<Is...>) {
+        return std::make_tuple(arr[Is]...);
+    }
+
+    template <typename T, std::size_t N>
+    auto constexpr array_to_tuple(const std::array<T, N>& arr) {
+        return array_to_tuple_impl(arr, std::make_index_sequence<N>{});
+    }
 }

 #endif //CPPREALM_BRIDGE_UTILS_HPP
diff --git a/include/cpprealm/macros.hpp b/include/cpprealm/macros.hpp
index dfb8b603..70a7c1e9 100644
--- a/include/cpprealm/macros.hpp
+++ b/include/cpprealm/macros.hpp
@@ -400,10 +400,13 @@ rbool managed>::operator op(const std::optional& rhs)
     static constexpr auto managed_pointers() { \
         return std::tuple{FOR_EACH(DECLARE_MANAGED_PROPERTY, cls, __VA_ARGS__)}; \
     } \
-    template <typename PtrType> static constexpr auto unmanaged_to_managed_pointer(PtrType ptr) { \
-        FOR_EACH(DECLARE_COND_UNMANAGED_TO_MANAGED, cls, __VA_ARGS__); \
-    } \
-    static constexpr auto managed_pointers_names_todo() { return std::tuple{FOR_EACH(DECLARE_MANAGED_PROPERTY_NAME, cls, __VA_ARGS__)}; } \
+    template <typename PtrType> static constexpr auto unmanaged_to_managed_pointer(PtrType ptr) { \
+        FOR_EACH(DECLARE_COND_UNMANAGED_TO_MANAGED, cls, __VA_ARGS__); \
+    } \
+    static constexpr auto managed_pointers_names() { \
+        constexpr auto managed_pointers_names = std::array<std::string_view, std::tuple_size<decltype(managed_pointers())>::value>{FOR_EACH(DECLARE_MANAGED_PROPERTY_NAME, cls, __VA_ARGS__)}; \
+        return internal::array_to_tuple(managed_pointers_names); \
+    } \
 \
     static constexpr bool is_object = true; \
     explicit managed(const internal::bridge::obj& obj, \
@@ -411,7 +414,7 @@ rbool managed>::operator op(const std::optional& rhs)
         : m_obj(std::move(obj)) \
         , m_realm(std::move(realm)) \
     { \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         std::apply([&](auto& ...pair) { \
             ((*this.*pair.first).assign(&m_obj, &m_realm, m_obj.get_table().get_column_key(pair.second)), ...); \
         }, zipped); \
@@ -420,7 +423,7 @@ rbool managed>::operator op(const std::optional& rhs)
         m_obj = other.m_obj; \
         m_realm = other.m_realm; \
         m_rbool_query = other.m_rbool_query; \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         if (m_rbool_query) { \
             auto schema = m_realm.schema().find(other.schema.name); \
             auto group = m_realm.read_group(); \
@@ -438,7 +441,7 @@ rbool managed>::operator op(const std::optional& rhs)
         m_obj = other.m_obj; \
         m_realm = other.m_realm; \
         m_rbool_query = other.m_rbool_query; \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         if (m_rbool_query) { \
             auto schema = m_realm.schema().find(other.schema.name); \
             auto group = m_realm.read_group(); \
@@ -457,7 +460,7 @@ rbool managed>::operator op(const std::optional& rhs)
         m_obj = std::move(other.m_obj); \
         m_realm = std::move(other.m_realm); \
         m_rbool_query = std::move(other.m_rbool_query); \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         if (m_rbool_query) { \
             auto schema = m_realm.schema().find(other.schema.name); \
             auto group = m_realm.read_group(); \
@@ -475,7 +478,7 @@ rbool managed>::operator op(const std::optional& rhs)
         m_obj = std::move(other.m_obj); \
         m_realm = std::move(other.m_realm); \
         m_rbool_query = std::move(other.m_rbool_query); \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         if (m_rbool_query) { \
             auto schema = m_realm.schema().find(other.schema.name); \
             auto group = m_realm.read_group(); \
@@ -497,7 +500,7 @@ rbool managed>::operator op(const std::optional& rhs)
         auto schema = m.m_realm.schema().find(m.schema.name); \
         auto group = m.m_realm.read_group(); \
         auto table_ref = group.get_table(schema.table_key()); \
-        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names_todo()); \
+        auto zipped = internal::zip_tuples(managed_pointers(), managed_pointers_names()); \
         std::apply([&m, &table_ref](auto& ...pair) { \
             ((m.*pair.first).prepare_for_query(&m.m_realm, table_ref, pair.second, m.m_rbool_query), ...); \
         }, zipped); \
diff --git a/src/cpprealm/schedulers/default_scheduler.cpp b/src/cpprealm/schedulers/default_scheduler.cpp
index bff1c035..ed1780f3 100644
--- a/src/cpprealm/schedulers/default_scheduler.cpp
+++ b/src/cpprealm/schedulers/default_scheduler.cpp
@@ -20,7 +20,7 @@ namespace realm::default_scheduler {
 #elif defined(REALM_HAVE_UV) && REALM_HAVE_UV
         return make_uv();
 #else
-        return std::make_shared(util::Scheduler::make_generic());
+        return std::make_shared(util::Scheduler::make_generic());
 #endif
     }
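
For reference, the addition to include/cpprealm/internal/bridge/utils.hpp uses the standard index-sequence idiom for expanding a homogeneous std::array into a std::tuple, which is what lets the macro build the property-name tuple from an array rather than constructing a large std::tuple directly. Below is a minimal, self-contained sketch of that idiom; it is not part of the patch, and the property names ("_id", "name", "age") and variable names are illustrative only, not taken from the library.

#include <array>
#include <cstddef>
#include <string_view>
#include <tuple>
#include <utility>

// Expand the index pack so each array element becomes one tuple element.
template <typename T, std::size_t N, std::size_t... Is>
constexpr auto array_to_tuple_impl(const std::array<T, N>& arr, std::index_sequence<Is...>) {
    return std::make_tuple(arr[Is]...);
}

template <typename T, std::size_t N>
constexpr auto array_to_tuple(const std::array<T, N>& arr) {
    return array_to_tuple_impl(arr, std::make_index_sequence<N>{});
}

int main() {
    // Collect the names into a homogeneous array first (cheap to build from a
    // macro expansion), then convert to a tuple so it can be zipped with a
    // heterogeneous tuple, e.g. one holding pointers to members.
    constexpr std::array<std::string_view, 3> names{"_id", "name", "age"};
    constexpr auto as_tuple = array_to_tuple(names);
    static_assert(std::tuple_size_v<decltype(as_tuple)> == 3);
    static_assert(std::get<1>(as_tuple) == "name");
    return 0;
}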