mysql/mysql-Add-sw64-architecture.patch
wzx 1d0aa69575 Add sw64 architecture
Signed-off-by: wzx <wuzx1226@qq.com>
(cherry picked from commit c3188a78de9adb7355102c035f0a43b7c80bbf5e)
2022-11-17 14:14:11 +08:00

From ae8a1ba8f8bec3586eee2a599ddaed833233c6f8 Mon Sep 17 00:00:00 2001
From: wxy <xywang7443@stu.jiangna.edu.cn>
Date: Tue, 1 Nov 2022 19:56:44 +0800
Subject: [PATCH] Add sw64 architecture
Signed-off-by: wxy <xywang7443@stu.jiangna.edu.cn>
---
.../atomic/detail/caps_arch_gcc_sw_64.hpp | 35 +
.../atomic/detail/core_arch_ops_gcc_sw_64.hpp | 1030 +++++++++++++++++
.../detail/fence_arch_ops_gcc_sw_64.hpp | 53 +
.../boost/atomic/detail/platform.hpp | 4 +
.../detail/sw_64_rounding_control.hpp | 113 ++
.../boost/numeric/interval/hw_rounding.hpp | 2 +
.../boost_1_77_0/boost/predef/architecture.h | 1 +
.../boost/predef/architecture/sw_64.h | 57 +
boost/boost_1_77_0/boost/wave/wave_config.hpp | 2 +-
.../icu/icu-release-69-1/source/acinclude.m4 | 6 +
.../icu/icu-release-69-1/source/config.guess | 8 +
extra/icu/icu-release-69-1/source/config.sub | 2 +
extra/icu/icu-release-69-1/source/configure | 8 +-
.../icu/icu-release-69-1/source/configure.ac | 2 +-
.../source/i18n/double-conversion-utils.h | 2 +-
extra/rapidjson/include/rapidjson/rapidjson.h | 2 +-
16 files changed, 1323 insertions(+), 5 deletions(-)
create mode 100644 boost/boost_1_77_0/boost/atomic/detail/caps_arch_gcc_sw_64.hpp
create mode 100644 boost/boost_1_77_0/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp
create mode 100644 boost/boost_1_77_0/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp
create mode 100644 boost/boost_1_77_0/boost/numeric/interval/detail/sw_64_rounding_control.hpp
create mode 100644 boost/boost_1_77_0/boost/predef/architecture/sw_64.h
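
Background for the Boost.Atomic hunks below: where Alpha used ldl_l/stl_c,
SW64 uses a lock-flag protocol -- lldw/lldl performs the locked load, wr_f
arms the lock flag (a constant 1, or a cmpeq result for CAS), lstw/lstl
stores only while the flag is still set, and rd_f reads back whether the
store landed. A portable C++ model of what each fetch_add loop in the new
headers computes -- a sketch only, with the GCC builtin
__atomic_compare_exchange_n standing in for the lstw/rd_f pair:

    #include <cstdint>

    // Model of the lldw/wr_f/lstw/rd_f retry loop: keep trying the
    // conditional store of old + v, then return the pre-update value.
    static inline std::uint32_t fetch_add_model(std::uint32_t* p, std::uint32_t v)
    {
        std::uint32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);  // lldw
        while (!__atomic_compare_exchange_n(p, &old, old + v,      // lstw + rd_f
                                            /* weak = */ true,
                                            __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
        {
            // store failed ("beq %1, 2f" -> "br 1b"); old was reloaded, retry
        }
        return old;
    }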
diff --git a/boost/boost_1_77_0/boost/atomic/detail/caps_arch_gcc_sw_64.hpp b/boost/boost_1_77_0/boost/atomic/detail/caps_arch_gcc_sw_64.hpp
new file mode 100644
index 00000000..42af68f5
--- /dev/null
+++ b/boost/boost_1_77_0/boost/atomic/detail/caps_arch_gcc_sw_64.hpp
@@ -0,0 +1,35 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/caps_arch_gcc_sw_64.hpp
+ *
+ * This header defines feature capabilities macros
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_
+
+#include <boost/atomic/detail/config.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+#define BOOST_ATOMIC_INT8_LOCK_FREE 2
+#define BOOST_ATOMIC_INT16_LOCK_FREE 2
+#define BOOST_ATOMIC_INT32_LOCK_FREE 2
+#define BOOST_ATOMIC_INT64_LOCK_FREE 2
+#define BOOST_ATOMIC_POINTER_LOCK_FREE 2
+
+#define BOOST_ATOMIC_THREAD_FENCE 2
+#define BOOST_ATOMIC_SIGNAL_FENCE 2
+
+#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_
+
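
The capability values above follow the C++11 ATOMIC_xxx_LOCK_FREE
convention, where 2 means always lock-free. A compile-time check one could
build once this backend is selected -- a sketch, relying on Boost.Atomic's
public is_always_lock_free member:

    #include <cstdint>
    #include <boost/atomic.hpp>

    // Expected to hold on sw_64 given the capability macros above.
    static_assert(boost::atomic<std::int32_t>::is_always_lock_free,
                  "32-bit atomics should be lock-free on sw_64");
    static_assert(boost::atomic<std::int64_t>::is_always_lock_free,
                  "64-bit atomics should be lock-free on sw_64");
    static_assert(boost::atomic<void*>::is_always_lock_free,
                  "pointer atomics should be lock-free on sw_64");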
diff --git a/boost/boost_1_77_0/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp b/boost/boost_1_77_0/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp
new file mode 100644
index 00000000..91e24ac2
--- /dev/null
+++ b/boost/boost_1_77_0/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp
@@ -0,0 +1,1031 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2009 Helge Bahmann
+ * Copyright (c) 2013 Tim Blechmann
+ * Copyright (c) 2014 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/core_arch_ops_gcc_sw_64.hpp
+ *
+ * This header contains implementation of the \c core_arch_operations template.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
+
+#include <cstddef>
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/storage_traits.hpp>
+#include <boost/atomic/detail/core_arch_operations_fwd.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+/*
+ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html
+ (HP OpenVMS systems documentation) and the Alpha Architecture Reference Manual (SW64 is an Alpha derivative).
+ */
+
+/*
+ NB: The most natural thing would be to write the increment/decrement
+ operators along the following lines:
+
+ __asm__ __volatile__
+ (
+ "1: ldl_l %0,%1 \n"
+ "addl %0,1,%0 \n"
+ "stl_c %0,%1 \n"
+ "beq %0,1b\n"
+ : "=&b" (tmp)
+ : "m" (value)
+ : "cc"
+ );
+
+ However, according to the comments on the HP website and matching
+ comments in the Linux kernel sources, this defies branch prediction,
+ as the CPU assumes that backward branches are always taken; so we
+ instead copy the trick from the Linux kernel and introduce a forward
+ branch and back again.
+
+ I have, however, had a hard time measuring the difference between
+ the two versions in microbenchmarks -- I am leaving it in nevertheless
+ as it apparently does not hurt either.
+*/
+
+struct core_arch_operations_gcc_sw_64_base
+{
+ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
+ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
+
+ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
+ __asm__ __volatile__ ("memb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
+ {
+ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
+ __asm__ __volatile__ ("memb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order == memory_order_seq_cst)
+ __asm__ __volatile__ ("memb" ::: "memory");
+ }
+};
+
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 4u, Signed, Interprocess > :
+ public core_arch_operations_gcc_sw_64_base
+{
+ typedef typename storage_traits< 4u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ storage_type tmp1, tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %2,%4\n\t"
+ "ldi %3,1\n\t"
+ "mov %5, %1\n\t"
+ "lldw %0, 0(%2)\n\t"
+ "wr_f %3\n\t"
+ "lstw %1, 0(%2)\n\t"
+ "rd_f %1\n\t"
+ "beq %1, 2f\n\t"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success = 0; // stays 0 if the comparison fails (the asm then skips the final mov)
+ storage_type current;
+ storage_type tmp1,tmp2;
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %4,%6\n\t"
+ "lldw %2, 0(%4)\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %5\n\t" // success = current == expected
+ "wr_f %5\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "lstw %1, 0(%4)\n\t" // storage = desired; desired = store succeeded
+ "rd_f %1\n\t" // storage = desired; desired = store succeeded
+ "beq %5, 2f\n\t" // if (success == 0) goto end
+ "mov %1, %3\n\t" // success = desired
+ "2:\n\t"
+ : "+r" (expected), // %0
+ "+r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success), // %3
+ "=&r" (tmp1), // %4
+ "=&r" (tmp2) // %5
+ : "m" (storage) // %6
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success = 0; // 0 unless both the compare and the store succeed
+ storage_type current, tmp;
+ storage_type tmp1,tmp2;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %4,%6\n\t"
+ "mov %7, %1\n\t" // tmp = desired
+ "lldw %2, 0(%4)\n\t" // current = *(&storage)
+ "cmpeq %2, %0, %5\n\t" // success = current == expected
+ "wr_f %5\n\t" // success = current == expected
+ "mov %2, %0\n\t" // expected = current
+ "lstw %1, 0(%4)\n\t" // storage = tmp; tmp = store succeeded
+ "rd_f %1\n\t" // storage = tmp; tmp = store succeeded
+ "beq %5, 2f\n\t" // if (success == 0) goto end
+ "beq %1, 3f\n\t" // if (tmp == 0) goto retry
+ "mov %1, %3\n\t" // success = tmp
+ "2:\n\t"
+
+ ".subsection 2\n\t"
+ "3: br 1b\n\t"
+ ".previous\n\t"
+
+ : "+r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success), // %3
+ "=&r" (tmp1), // %4
+ "=&r" (tmp2) // %5
+ : "m" (storage), // %6
+ "r" (desired) // %7
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %2,%4\n\t"
+ "ldi %3,1\n\t"
+ "lldw %0, 0(%2)\n\t"
+ "wr_f %3\n\t"
+ "addw %0, %5, %1\n\t"
+ "lstw %1, 0(%2)\n\t"
+ "rd_f %1\n\t"
+ "beq %1, 2f\n\t"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %2,%4\n\t"
+ "ldi %3,1\n\t"
+ "lldw %0, 0(%2)\n\t"
+ "wr_f %3\n\t"
+ "subw %0, %5, %1\n\t"
+ "lstw %1, 0(%2)\n\t"
+ "rd_f %1\n\t"
+ "beq %1, 2f\n\t"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n\t"
+ "ldi %2,%4\n\t"
+ "ldi %3,1\n\t"
+ "lldw %0, 0(%2)\n\t"
+ "wr_f %3\n\t"
+ "and %0, %5, %1\n\t"
+ "lstw %1, 0(%2)\n\t"
+ "rd_f %1\n\t"
+ "beq %1, 2f\n\t"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %? \n"
+ "bis %0, %5, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "xor %0, %5, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, 0, order);
+ }
+};
+
+
+template< bool Interprocess >
+struct core_arch_operations< 1u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
+{
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "addw %0, %5, %1\n"
+ "zapnot %1, #1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "subw %0, %5, %1\n"
+ "zapnot %1, #1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+};
+
+template< bool Interprocess >
+struct core_arch_operations< 1u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
+{
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "addw %0, %5, %1\n"
+ "sextb %1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "subw %0, %5, %1\n"
+ "sextb %1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+};
+
+
+template< bool Interprocess >
+struct core_arch_operations< 2u, false, Interprocess > :
+ public core_arch_operations< 4u, false, Interprocess >
+{
+ typedef core_arch_operations< 4u, false, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "addw %0, %5, %1\n"
+ "zapnot %1, #3, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "subw %0, %5, %1\n"
+ "zapnot %1, #3, %1\n"
+ "lstw %1, %2\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+};
+
+template< bool Interprocess >
+struct core_arch_operations< 2u, true, Interprocess > :
+ public core_arch_operations< 4u, true, Interprocess >
+{
+ typedef core_arch_operations< 4u, true, Interprocess > base_type;
+ typedef typename base_type::storage_type storage_type;
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "addw %0, %5, %1\n"
+ "sexth %1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ base_type::fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldw %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "subw %0, %5, %1\n"
+ "sexth %1, %1\n"
+ "lstw %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ base_type::fence_after(order);
+ return original;
+ }
+};
+
+
+template< bool Signed, bool Interprocess >
+struct core_arch_operations< 8u, Signed, Interprocess > :
+ public core_arch_operations_gcc_sw_64_base
+{
+ typedef typename storage_traits< 8u >::type storage_type;
+
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u;
+ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u;
+ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed;
+ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess;
+
+ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ fence_before(order);
+ storage = v;
+ fence_after_store(order);
+ }
+
+ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type v = storage;
+ fence_after(order);
+ return v;
+ }
+
+ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, tmp;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "mov %5, %1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (tmp), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_weak(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ fence_before(success_order);
+ int success = 0; // stays 0 if the comparison fails (the asm then skips the final mov)
+ storage_type current;
+ storage_type tmp1,tmp2;
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %4,%6\n"
+ "lldl %2, 0(%4)\n" // current = *(&storage)
+ "cmpeq %2, %0, %5\n" // success = current == expected
+ "wr_f %5 \n"
+ "mov %2, %0\n" // expected = current
+ "lstl %1, 0(%4)\n" // storage = desired; desired = store succeeded
+ "rd_f %1 \n"
+ "beq %5, 2f\n" // if (success == 0) goto end
+ "mov %1, %3\n" // success = desired
+ "2:\n\t"
+ : "+r" (expected), // %0
+ "+r" (desired), // %1
+ "=&r" (current), // %2
+ "=&r" (success), // %3
+ "=&r" (tmp1), // %4
+ "=&r" (tmp2) // %5
+ : "m" (storage) // %6
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE bool compare_exchange_strong(
+ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT
+ {
+ int success = 0; // 0 unless both the compare and the store succeed
+ storage_type current, tmp;
+ storage_type tmp1,tmp2;
+ fence_before(success_order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %4,%6\n"
+ "mov %7, %1\n" // tmp = desired
+ "lldl %2, 0(%4)\n" // current = *(&storage)
+ "cmpeq %2, %0, %5\n" // success = current == expected
+ "wr_f %5 \n"
+ "mov %2, %0\n" // expected = current
+ "lstl %1, 0(%4)\n" // storage = tmp; tmp = store succeeded
+ "rd_f %1 \n"
+ "beq %5, 2f\n" // if (success == 0) goto end
+ "beq %1, 3f\n" // if (tmp == 0) goto retry
+ "mov %1, %3\n" // success = tmp
+ "2:\n\t"
+
+ ".subsection 2\n\t"
+ "3: br 1b\n\t"
+ ".previous\n\t"
+
+ : "+r" (expected), // %0
+ "=&r" (tmp), // %1
+ "=&r" (current), // %2
+ "=&r" (success), // %3
+ "=&r" (tmp1), // %4
+ "=&r" (tmp2) // %5
+ : "m" (storage), // %6
+ "r" (desired) // %7
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ if (success)
+ fence_after(success_order);
+ else
+ fence_after(failure_order);
+ return !!success;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1, tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "addl %0, %5, %1\n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "subl %0, %5, %1\n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "and %0, %5, %1\n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "bis %0, %5, %1\n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT
+ {
+ storage_type original, modified;
+ storage_type tmp1,tmp2;
+ fence_before(order);
+ __asm__ __volatile__
+ (
+ "1:\n"
+ "ldi %2,%4\n"
+ "ldi %3,1\n"
+ "lldl %0, 0(%2)\n"
+ "wr_f %3 \n"
+ "xor %0, %5, %1\n"
+ "lstl %1, 0(%2)\n"
+ "rd_f %1 \n"
+ "beq %1, 2f\n"
+
+ ".subsection 2\n\t"
+ "2: br 1b\n\t"
+ ".previous\n\t"
+
+ : "=&r" (original), // %0
+ "=&r" (modified), // %1
+ "=&r" (tmp1), // %2
+ "=&r" (tmp2) // %3
+ : "m" (storage), // %4
+ "r" (v) // %5
+ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC
+ );
+ fence_after(order);
+ return original;
+ }
+
+ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ return !!exchange(storage, (storage_type)1, order);
+ }
+
+ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT
+ {
+ store(storage, (storage_type)0, order);
+ }
+};
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
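
A minimal usage sketch, assuming Boost.Atomic dispatches to the gcc_sw_64
backend above (the platform.hpp hunk below wires that up): each call lowers
to one of the lldw/lstw or lldl/lstl loops, bracketed by memb fences as the
memory_order argument requires.

    #include <boost/atomic.hpp>

    int demo()
    {
        boost::atomic<int> counter(0);
        counter.fetch_add(5, boost::memory_order_acq_rel);   // 4-byte addw loop
        counter.fetch_or(2, boost::memory_order_relaxed);    // bis loop, no memb
        int expected = 7;
        // maps to the lldw/cmpeq/wr_f/lstw/rd_f sequence above
        counter.compare_exchange_strong(expected, 42,
                                        boost::memory_order_acq_rel,
                                        boost::memory_order_acquire);
        return counter.load(boost::memory_order_acquire);    // load, then memb
    }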
diff --git a/boost/boost_1_77_0/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp b/boost/boost_1_77_0/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp
new file mode 100644
index 00000000..95d3b209
--- /dev/null
+++ b/boost/boost_1_77_0/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp
@@ -0,0 +1,53 @@
+/*
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or copy at
+ * http://www.boost.org/LICENSE_1_0.txt)
+ *
+ * Copyright (c) 2020 Andrey Semashev
+ */
+/*!
+ * \file atomic/detail/fence_arch_ops_gcc_sw_64.hpp
+ *
+ * This header contains implementation of the \c fence_arch_operations struct.
+ */
+
+#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
+#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
+
+#include <boost/memory_order.hpp>
+#include <boost/atomic/detail/config.hpp>
+#include <boost/atomic/detail/header.hpp>
+
+#ifdef BOOST_HAS_PRAGMA_ONCE
+#pragma once
+#endif
+
+namespace boost {
+namespace atomics {
+namespace detail {
+
+//! Fence operations for Sw_64
+struct fence_arch_operations_gcc_sw_64
+{
+ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("memb" ::: "memory");
+ }
+
+ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT
+ {
+ if (order != memory_order_relaxed)
+ __asm__ __volatile__ ("" ::: "memory");
+ }
+};
+
+typedef fence_arch_operations_gcc_sw_64 fence_arch_operations;
+
+} // namespace detail
+} // namespace atomics
+} // namespace boost
+
+#include <boost/atomic/detail/footer.hpp>
+
+#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_
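
Usage sketch for the fences, assuming this header becomes the selected
fence backend: atomic_thread_fence emits memb for anything stronger than
relaxed, while atomic_signal_fence only constrains the compiler.

    #include <boost/atomic.hpp>

    void publish(int& slot, boost::atomic<bool>& ready)
    {
        slot = 42;                                               // plain store
        boost::atomic_thread_fence(boost::memory_order_release); // memb
        ready.store(true, boost::memory_order_relaxed);
    }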
diff --git a/boost/boost_1_77_0/boost/atomic/detail/platform.hpp b/boost/boost_1_77_0/boost/atomic/detail/platform.hpp
index 36607283..03207737 100644
--- a/boost/boost_1_77_0/boost/atomic/detail/platform.hpp
+++ b/boost/boost_1_77_0/boost/atomic/detail/platform.hpp
@@ -82,6 +82,10 @@
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_alpha
+#elif defined(__GNUC__) && defined(__sw_64__)
+
+#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_sw_64
+
#elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND msvc_x86
diff --git a/boost/boost_1_77_0/boost/numeric/interval/detail/sw_64_rounding_control.hpp b/boost/boost_1_77_0/boost/numeric/interval/detail/sw_64_rounding_control.hpp
new file mode 100644
index 00000000..01b41024
--- /dev/null
+++ b/boost/boost_1_77_0/boost/numeric/interval/detail/sw_64_rounding_control.hpp
@@ -0,0 +1,113 @@
+/* Boost interval/detail/sw_64_rounding_control.hpp file
+ *
+ * Copyright 2005 Felix Höfling, Guillaume Melquiond
+ *
+ * Distributed under the Boost Software License, Version 1.0.
+ * (See accompanying file LICENSE_1_0.txt or
+ * copy at http://www.boost.org/LICENSE_1_0.txt)
+ */
+
+#ifndef BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP
+#define BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP
+
+#if !defined(sw_64) && !defined(__sw_64__)
+#error This header only works on Sw_64 CPUs.
+#endif
+
+#if defined(__GNUC__) || defined(__digital__) || defined(__DECCXX)
+
+#include <float.h> // write_rnd() and read_rnd()
+
+namespace boost {
+namespace numeric {
+namespace interval_lib {
+
+namespace detail {
+#if defined(__GNUC__)
+ typedef union {
+ ::boost::long_long_type imode;
+ double dmode;
+ } rounding_mode_struct;
+
+ // set bits 59-58 (DYN),
+ // clear all exception bits and disable overflow (51) and inexact exceptions (62)
+ static const rounding_mode_struct mode_upward = { 0x4C08000000000000LL };
+ static const rounding_mode_struct mode_downward = { 0x4408000000000000LL };
+ static const rounding_mode_struct mode_to_nearest = { 0x4808000000000000LL };
+ static const rounding_mode_struct mode_toward_zero = { 0x4008000000000000LL };
+
+ struct sw_64_rounding_control
+ {
+ typedef double rounding_mode;
+
+ static void set_rounding_mode(const rounding_mode mode)
+ { __asm__ __volatile__ ("wfpcr %0" : : "f"(mode)); }
+
+ static void get_rounding_mode(rounding_mode& mode)
+ { __asm__ __volatile__ ("rfpcr %0" : "=f"(mode)); }
+
+ static void downward() { set_rounding_mode(mode_downward.dmode); }
+ static void upward() { set_rounding_mode(mode_upward.dmode); }
+ static void to_nearest() { set_rounding_mode(mode_to_nearest.dmode); }
+ static void toward_zero() { set_rounding_mode(mode_toward_zero.dmode); }
+ };
+#elif defined(__digital__) || defined(__DECCXX)
+
+#if defined(__DECCXX) && !(defined(__FLT_ROUNDS) && __FLT_ROUNDS == -1)
+#error Dynamic rounding mode not enabled. See cxx man page for details.
+#endif
+
+ struct sw_64_rounding_control
+ {
+ typedef unsigned int rounding_mode;
+
+ static void set_rounding_mode(const rounding_mode& mode) { write_rnd(mode); }
+ static void get_rounding_mode(rounding_mode& mode) { mode = read_rnd(); }
+
+ static void downward() { set_rounding_mode(FP_RND_RM); }
+ static void upward() { set_rounding_mode(FP_RND_RP); }
+ static void to_nearest() { set_rounding_mode(FP_RND_RN); }
+ static void toward_zero() { set_rounding_mode(FP_RND_RZ); }
+ };
+#endif
+} // namespace detail
+
+extern "C" {
+ float rintf(float);
+ double rint(double);
+ long double rintl(long double);
+}
+
+template<>
+struct rounding_control<float>:
+ detail::sw_64_rounding_control
+{
+ static float force_rounding(const float r)
+ { volatile float _r = r; return _r; }
+ static float to_int(const float& x) { return rintf(x); }
+};
+
+template<>
+struct rounding_control<double>:
+ detail::sw_64_rounding_control
+{
+ static const double & force_rounding(const double& r) { return r; }
+ static double to_int(const double& r) { return rint(r); }
+};
+
+template<>
+struct rounding_control<long double>:
+ detail::sw_64_rounding_control
+{
+ static const long double & force_rounding(const long double& r) { return r; }
+ static long double to_int(const long double& r) { return rintl(r); }
+};
+
+} // namespace interval_lib
+} // namespace numeric
+} // namespace boost
+
+#undef BOOST_NUMERIC_INTERVAL_NO_HARDWARE
+#endif
+
+#endif /* BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP */
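
A sketch of the rounding-control interface above, assuming compilation on
sw_64 so that hw_rounding.hpp (next hunk) pulls in this header: save the
FPCR, force one upward-rounded operation, restore.

    #include <boost/numeric/interval/hw_rounding.hpp>

    double upper_bound_third()
    {
        typedef boost::numeric::interval_lib::rounding_control<double> rc;
        rc::rounding_mode saved;
        rc::get_rounding_mode(saved);            // rfpcr
        rc::upward();                            // wfpcr with mode_upward
        double a = 1.0, b = 3.0;                 // variables defeat constant folding
        double u = rc::force_rounding(a / b);    // rounded toward +infinity
        rc::set_rounding_mode(saved);            // restore the caller's mode
        return u;
    }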
diff --git a/boost/boost_1_77_0/boost/numeric/interval/hw_rounding.hpp b/boost/boost_1_77_0/boost/numeric/interval/hw_rounding.hpp
index 46d452ee..3e7bb554 100644
--- a/boost/boost_1_77_0/boost/numeric/interval/hw_rounding.hpp
+++ b/boost/boost_1_77_0/boost/numeric/interval/hw_rounding.hpp
@@ -30,6 +30,8 @@
# include <boost/numeric/interval/detail/sparc_rounding_control.hpp>
#elif defined(alpha) || defined(__alpha__)
# include <boost/numeric/interval/detail/alpha_rounding_control.hpp>
+#elif defined(sw_64) || defined(__sw_64__)
+# include <boost/numeric/interval/detail/sw_64_rounding_control.hpp>
#elif defined(ia64) || defined(__ia64) || defined(__ia64__)
# include <boost/numeric/interval/detail/ia64_rounding_control.hpp>
#endif
diff --git a/boost/boost_1_77_0/boost/predef/architecture.h b/boost/boost_1_77_0/boost/predef/architecture.h
index f43f9464..471c263e 100644
--- a/boost/boost_1_77_0/boost/predef/architecture.h
+++ b/boost/boost_1_77_0/boost/predef/architecture.h
@@ -11,6 +11,7 @@ http://www.boost.org/LICENSE_1_0.txt)
#endif
#include <boost/predef/architecture/alpha.h>
+#include <boost/predef/architecture/sw_64.h>
#include <boost/predef/architecture/arm.h>
#include <boost/predef/architecture/blackfin.h>
#include <boost/predef/architecture/convex.h>
diff --git a/boost/boost_1_77_0/boost/predef/architecture/sw_64.h b/boost/boost_1_77_0/boost/predef/architecture/sw_64.h
new file mode 100644
index 00000000..87e90daa
--- /dev/null
+++ b/boost/boost_1_77_0/boost/predef/architecture/sw_64.h
@@ -0,0 +1,57 @@
+/*
+Copyright Rene Rivera 2008-2015
+Distributed under the Boost Software License, Version 1.0.
+(See accompanying file LICENSE_1_0.txt or copy at
+http://www.boost.org/LICENSE_1_0.txt)
+*/
+
+#ifndef BOOST_PREDEF_ARCHITECTURE_SW_64_H
+#define BOOST_PREDEF_ARCHITECTURE_SW_64_H
+
+#include <boost/predef/version_number.h>
+#include <boost/predef/make.h>
+
+/* tag::reference[]
+= `BOOST_ARCH_SW_64`
+
+http://en.wikipedia.org/wiki/DEC_Sw_64[Sunway Sw_64] architecture.
+
+[options="header"]
+|===
+| {predef_symbol} | {predef_version}
+| `+__sw_64__+` | {predef_detection}
+| `+__sw_64+` | {predef_detection}
+| `+_M_SW_64+` | {predef_detection}
+
+| `+__sw_64_sw6b__+` | 6.0.0
+|===
+*/ // end::reference[]
+
+#define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER_NOT_AVAILABLE
+
+#if defined(__sw_64__) || defined(__sw_64) || \
+ defined(_M_SW_64)
+# undef BOOST_ARCH_SW_64
+# if !defined(BOOST_ARCH_SW_64) && defined(__sw_64_sw6b__)
+# define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER(6,0,0)
+# endif
+# if !defined(BOOST_ARCH_SW_64)
+# define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER_AVAILABLE
+# endif
+#endif
+
+#if BOOST_ARCH_SW_64
+# define BOOST_ARCH_SW_64_AVAILABLE
+#endif
+
+#if BOOST_ARCH_SW_64
+# undef BOOST_ARCH_WORD_BITS_64
+# define BOOST_ARCH_WORD_BITS_64 BOOST_VERSION_NUMBER_AVAILABLE
+#endif
+
+#define BOOST_ARCH_SW_64_NAME "Sunway Sw_64"
+
+#endif
+
+#include <boost/predef/detail/test.h>
+BOOST_PREDEF_DECLARE_TEST(BOOST_ARCH_SW_64,BOOST_ARCH_SW_64_NAME)
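
And a dispatch sketch using the new predef macro, with the names defined
above:

    #include <boost/predef/architecture.h>

    const char* arch_name()
    {
    #if BOOST_ARCH_SW_64
        return BOOST_ARCH_SW_64_NAME;
    #else
        return "not sw_64";
    #endif
    }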
diff --git a/boost/boost_1_77_0/boost/wave/wave_config.hpp b/boost/boost_1_77_0/boost/wave/wave_config.hpp
index dce42d43..abd3d977 100644
--- a/boost/boost_1_77_0/boost/wave/wave_config.hpp
+++ b/boost/boost_1_77_0/boost/wave/wave_config.hpp
@@ -253,7 +253,7 @@
// CW up to 8.3 chokes as well *sigh*
// Tru64/CXX has linker problems when using flex_string
#if BOOST_WORKAROUND(__MWERKS__, < 0x3200) || \
- (defined(__DECCXX) && defined(__alpha)) || \
+ (defined(__DECCXX) && (defined(__alpha) || defined(__sw_64))) || \
defined(BOOST_WAVE_STRINGTYPE_USE_STDSTRING)
#define BOOST_WAVE_STRINGTYPE std::string
diff --git a/extra/icu/icu-release-69-1/source/acinclude.m4 b/extra/icu/icu-release-69-1/source/acinclude.m4
index 507f41f5..344471c0 100644
--- a/extra/icu/icu-release-69-1/source/acinclude.m4
+++ b/extra/icu/icu-release-69-1/source/acinclude.m4
@@ -21,6 +21,12 @@ case "${host}" in
else
icu_cv_host_frag=mh-solaris
fi ;;
+sw_64*-*-linux-gnu)
+ if test "$GCC" = yes; then
+ icu_cv_host_frag=mh-sw_64-linux-gcc
+ else
+ icu_cv_host_frag=mh-sw_64-linux-cc
+ fi ;;
alpha*-*-linux-gnu)
if test "$GCC" = yes; then
icu_cv_host_frag=mh-alpha-linux-gcc
diff --git a/extra/icu/icu-release-69-1/source/config.guess b/extra/icu/icu-release-69-1/source/config.guess
index 31e01efe..3eb10f96 100644
--- a/extra/icu/icu-release-69-1/source/config.guess
+++ b/extra/icu/icu-release-69-1/source/config.guess
@@ -894,6 +894,14 @@ EOF
UNAME_MACHINE=aarch64_be
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
+ sw_64:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ sw) UNAME_MACHINE=sw_64 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ echo ${UNAME_MACHINE}-sunway-linux-${LIBC}
+ exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;
diff --git a/extra/icu/icu-release-69-1/source/config.sub b/extra/icu/icu-release-69-1/source/config.sub
index fb579478..ad7fd2a7 100644
--- a/extra/icu/icu-release-69-1/source/config.sub
+++ b/extra/icu/icu-release-69-1/source/config.sub
@@ -245,6 +245,7 @@ case $basic_machine in
1750a | 580 \
| a29k \
| aarch64 | aarch64_be \
+ | sw_64 \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
@@ -369,6 +370,7 @@ case $basic_machine in
580-* \
| a29k-* \
| aarch64-* | aarch64_be-* \
+ | sw_64-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
diff --git a/extra/icu/icu-release-69-1/source/configure b/extra/icu/icu-release-69-1/source/configure
index a2ff7e48..a1902502 100755
--- a/extra/icu/icu-release-69-1/source/configure
+++ b/extra/icu/icu-release-69-1/source/configure
@@ -5249,6 +5249,12 @@ case "${host}" in
else
icu_cv_host_frag=mh-solaris
fi ;;
+sw_64*-*-linux-gnu)
+ if test "$GCC" = yes; then
+ icu_cv_host_frag=mh-sw_64-linux-gcc
+ else
+ icu_cv_host_frag=mh-sw_64-linux-cc
+ fi ;;
alpha*-*-linux-gnu)
if test "$GCC" = yes; then
icu_cv_host_frag=mh-alpha-linux-gcc
@@ -6511,7 +6517,7 @@ if test "$CC" = ccc; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: \"C compiler set to CCC ${CC}\" " >&5
$as_echo "\"C compiler set to CCC ${CC}\" " >&6; }
case "${host}" in
- alpha*-*-*) U_HAVE_INTTYPES_H=0;
+ alpha*-*-* | sw_64*-*-*) U_HAVE_INTTYPES_H=0;
CONFIG_CPPFLAGS="${CONFIG_CPPFLAGS} -DU_HAVE_INTTYPES_H=0"
esac
fi
diff --git a/extra/icu/icu-release-69-1/source/configure.ac b/extra/icu/icu-release-69-1/source/configure.ac
index 425fdc7b..73e26739 100644
--- a/extra/icu/icu-release-69-1/source/configure.ac
+++ b/extra/icu/icu-release-69-1/source/configure.ac
@@ -685,7 +685,7 @@ fi
if test "$CC" = ccc; then
AC_MSG_RESULT("C compiler set to CCC ${CC}" )
case "${host}" in
- alpha*-*-*) U_HAVE_INTTYPES_H=0;
+ alpha*-*-*|sw_64*-*-*) U_HAVE_INTTYPES_H=0;
CONFIG_CPPFLAGS="${CONFIG_CPPFLAGS} -DU_HAVE_INTTYPES_H=0"
esac
fi
diff --git a/extra/icu/icu-release-69-1/source/i18n/double-conversion-utils.h b/extra/icu/icu-release-69-1/source/i18n/double-conversion-utils.h
index c9374636..7dcdbf8e 100644
--- a/extra/icu/icu-release-69-1/source/i18n/double-conversion-utils.h
+++ b/extra/icu/icu-release-69-1/source/i18n/double-conversion-utils.h
@@ -122,7 +122,7 @@ int main(int argc, char** argv) {
defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
defined(_POWER) || defined(_ARCH_PPC) || defined(_ARCH_PPC64) || \
defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
- defined(__SH4__) || defined(__alpha__) || \
+ defined(__SH4__) || defined(__alpha__) || defined(__sw_64__) || \
defined(_MIPS_ARCH_MIPS32R2) || defined(__ARMEB__) ||\
defined(__AARCH64EL__) || defined(__aarch64__) || defined(__AARCH64EB__) || \
defined(__riscv) || defined(__e2k__) || \
diff --git a/extra/rapidjson/include/rapidjson/rapidjson.h b/extra/rapidjson/include/rapidjson/rapidjson.h
index 329ce92b..50ce8e68 100644
--- a/extra/rapidjson/include/rapidjson/rapidjson.h
+++ b/extra/rapidjson/include/rapidjson/rapidjson.h
@@ -239,7 +239,7 @@
// Detect with architecture macros
# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__)
# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN
-# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
+# elif defined(__i386__) || defined(__alpha__) || defined(__sw_64__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__)
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
# elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN
--
2.33.0