Diffstat (limited to 'include/rapidjson/internal')
-rw-r--r--  include/rapidjson/internal/biginteger.h  290
-rw-r--r--  include/rapidjson/internal/diyfp.h       258
-rw-r--r--  include/rapidjson/internal/dtoa.h        245
-rw-r--r--  include/rapidjson/internal/ieee754.h      78
-rw-r--r--  include/rapidjson/internal/itoa.h        304
-rw-r--r--  include/rapidjson/internal/meta.h        181
-rw-r--r--  include/rapidjson/internal/pow10.h        55
-rw-r--r--  include/rapidjson/internal/regex.h       701
-rw-r--r--  include/rapidjson/internal/stack.h       230
-rw-r--r--  include/rapidjson/internal/strfunc.h      55
-rw-r--r--  include/rapidjson/internal/strtod.h      269
-rw-r--r--  include/rapidjson/internal/swap.h         46
12 files changed, 2712 insertions, 0 deletions
diff --git a/include/rapidjson/internal/biginteger.h b/include/rapidjson/internal/biginteger.h
new file mode 100644
index 00000000..9d3e88c9
--- /dev/null
+++ b/include/rapidjson/internal/biginteger.h
@@ -0,0 +1,290 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_BIGINTEGER_H_
+#define RAPIDJSON_BIGINTEGER_H_
+
+#include "../rapidjson.h"
+
+#if defined(_MSC_VER) && defined(_M_AMD64)
+#include <intrin.h> // for _umul128
+#pragma intrinsic(_umul128)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+class BigInteger {
+public:
+ typedef uint64_t Type;
+
+ BigInteger(const BigInteger& rhs) : count_(rhs.count_) {
+ std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
+ }
+
+ explicit BigInteger(uint64_t u) : count_(1) {
+ digits_[0] = u;
+ }
+
+ BigInteger(const char* decimals, size_t length) : count_(1) {
+ RAPIDJSON_ASSERT(length > 0);
+ digits_[0] = 0;
+ size_t i = 0;
+ const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19
+ while (length >= kMaxDigitPerIteration) {
+ AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration);
+ length -= kMaxDigitPerIteration;
+ i += kMaxDigitPerIteration;
+ }
+
+ if (length > 0)
+ AppendDecimal64(decimals + i, decimals + i + length);
+ }
+
+ BigInteger& operator=(const BigInteger &rhs)
+ {
+ if (this != &rhs) {
+ count_ = rhs.count_;
+ std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type));
+ }
+ return *this;
+ }
+
+ BigInteger& operator=(uint64_t u) {
+ digits_[0] = u;
+ count_ = 1;
+ return *this;
+ }
+
+ BigInteger& operator+=(uint64_t u) {
+ Type backup = digits_[0];
+ digits_[0] += u;
+ for (size_t i = 0; i < count_ - 1; i++) {
+ if (digits_[i] >= backup)
+ return *this; // no carry
+ backup = digits_[i + 1];
+ digits_[i + 1] += 1;
+ }
+
+ // Last carry
+ if (digits_[count_ - 1] < backup)
+ PushBack(1);
+
+ return *this;
+ }
+
+ BigInteger& operator*=(uint64_t u) {
+ if (u == 0) return *this = 0;
+ if (u == 1) return *this;
+ if (*this == 1) return *this = u;
+
+ uint64_t k = 0;
+ for (size_t i = 0; i < count_; i++) {
+ uint64_t hi;
+ digits_[i] = MulAdd64(digits_[i], u, k, &hi);
+ k = hi;
+ }
+
+ if (k > 0)
+ PushBack(k);
+
+ return *this;
+ }
+
+ BigInteger& operator*=(uint32_t u) {
+ if (u == 0) return *this = 0;
+ if (u == 1) return *this;
+ if (*this == 1) return *this = u;
+
+ uint64_t k = 0;
+ for (size_t i = 0; i < count_; i++) {
+ const uint64_t c = digits_[i] >> 32;
+ const uint64_t d = digits_[i] & 0xFFFFFFFF;
+ const uint64_t uc = u * c;
+ const uint64_t ud = u * d;
+ const uint64_t p0 = ud + k;
+ const uint64_t p1 = uc + (p0 >> 32);
+ digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32);
+ k = p1 >> 32;
+ }
+
+ if (k > 0)
+ PushBack(k);
+
+ return *this;
+ }
+
+ BigInteger& operator<<=(size_t shift) {
+ if (IsZero() || shift == 0) return *this;
+
+ size_t offset = shift / kTypeBit;
+ size_t interShift = shift % kTypeBit;
+ RAPIDJSON_ASSERT(count_ + offset <= kCapacity);
+
+ if (interShift == 0) {
+ std::memmove(digits_ + offset, digits_, count_ * sizeof(Type));
+ count_ += offset;
+ }
+ else {
+ digits_[count_] = 0;
+ for (size_t i = count_; i > 0; i--)
+ digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift));
+ digits_[offset] = digits_[0] << interShift;
+ count_ += offset;
+ if (digits_[count_])
+ count_++;
+ }
+
+ std::memset(digits_, 0, offset * sizeof(Type));
+
+ return *this;
+ }
+
+ bool operator==(const BigInteger& rhs) const {
+ return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0;
+ }
+
+ bool operator==(const Type rhs) const {
+ return count_ == 1 && digits_[0] == rhs;
+ }
+
+ BigInteger& MultiplyPow5(unsigned exp) {
+ static const uint32_t kPow5[12] = {
+ 5,
+ 5 * 5,
+ 5 * 5 * 5,
+ 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5,
+ 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5
+ };
+ if (exp == 0) return *this;
+ for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27
+ for (; exp >= 13; exp -= 13) *this *= static_cast<uint32_t>(1220703125u); // 5^13
+ if (exp > 0) *this *= kPow5[exp - 1];
+ return *this;
+ }
+
+ // Compute absolute difference of this and rhs.
+ // Assume this != rhs
+ bool Difference(const BigInteger& rhs, BigInteger* out) const {
+ int cmp = Compare(rhs);
+ RAPIDJSON_ASSERT(cmp != 0);
+ const BigInteger *a, *b; // Makes a > b
+ bool ret;
+ if (cmp < 0) { a = &rhs; b = this; ret = true; }
+ else { a = this; b = &rhs; ret = false; }
+
+ Type borrow = 0;
+ for (size_t i = 0; i < a->count_; i++) {
+ Type d = a->digits_[i] - borrow;
+ if (i < b->count_)
+ d -= b->digits_[i];
+ borrow = (d > a->digits_[i]) ? 1 : 0;
+ out->digits_[i] = d;
+ if (d != 0)
+ out->count_ = i + 1;
+ }
+
+ return ret;
+ }
+
+ int Compare(const BigInteger& rhs) const {
+ if (count_ != rhs.count_)
+ return count_ < rhs.count_ ? -1 : 1;
+
+ for (size_t i = count_; i-- > 0;)
+ if (digits_[i] != rhs.digits_[i])
+ return digits_[i] < rhs.digits_[i] ? -1 : 1;
+
+ return 0;
+ }
+
+ size_t GetCount() const { return count_; }
+ Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; }
+ bool IsZero() const { return count_ == 1 && digits_[0] == 0; }
+
+private:
+ void AppendDecimal64(const char* begin, const char* end) {
+ uint64_t u = ParseUint64(begin, end);
+ if (IsZero())
+ *this = u;
+ else {
+ unsigned exp = static_cast<unsigned>(end - begin);
+ (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u
+ }
+ }
+
+ void PushBack(Type digit) {
+ RAPIDJSON_ASSERT(count_ < kCapacity);
+ digits_[count_++] = digit;
+ }
+
+ static uint64_t ParseUint64(const char* begin, const char* end) {
+ uint64_t r = 0;
+ for (const char* p = begin; p != end; ++p) {
+ RAPIDJSON_ASSERT(*p >= '0' && *p <= '9');
+ r = r * 10u + static_cast<unsigned>(*p - '0');
+ }
+ return r;
+ }
+
+ // Assume a * b + k < 2^128
+ static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) {
+#if defined(_MSC_VER) && defined(_M_AMD64)
+ uint64_t low = _umul128(a, b, outHigh) + k;
+ if (low < k)
+ (*outHigh)++;
+ return low;
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+ __extension__ typedef unsigned __int128 uint128;
+ uint128 p = static_cast<uint128>(a) * static_cast<uint128>(b);
+ p += k;
+ *outHigh = static_cast<uint64_t>(p >> 64);
+ return static_cast<uint64_t>(p);
+#else
+ const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32;
+ uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1;
+ x1 += (x0 >> 32); // can't give carry
+ x1 += x2;
+ if (x1 < x2)
+ x3 += (static_cast<uint64_t>(1) << 32);
+ uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF);
+ uint64_t hi = x3 + (x1 >> 32);
+
+ lo += k;
+ if (lo < k)
+ hi++;
+ *outHigh = hi;
+ return lo;
+#endif
+ }
+
+ static const size_t kBitCount = 3328; // 64bit * 52 = 3328 bits > 3322 bits needed for 10^1000
+ static const size_t kCapacity = kBitCount / sizeof(Type);
+ static const size_t kTypeBit = sizeof(Type) * 8;
+
+ Type digits_[kCapacity];
+ size_t count_;
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_BIGINTEGER_H_
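
The class above is the arbitrary-precision integer used by strtod.h for exact decimal comparisons. A minimal usage sketch (illustrative only, not part of the patch; it assumes the RapidJSON include root is on the compiler's include path):

#include "rapidjson/internal/biginteger.h"
#include <cassert>

int main() {
    using rapidjson::internal::BigInteger;

    // Decimal construction packs up to 19 digits per AppendDecimal64() step.
    const char decimals[] = "18446744073709551616";        // 2^64, one past uint64_t
    BigInteger a(decimals, sizeof(decimals) - 1);
    assert(a.GetCount() == 2);                             // needs two 64-bit limbs
    assert(a.GetDigit(0) == 0 && a.GetDigit(1) == 1);      // little-endian limbs

    // Shift, addition and comparison as used on the exact strtod path.
    BigInteger b(static_cast<uint64_t>(1));
    b <<= 64;                                              // b = 2^64
    assert(a.Compare(b) == 0);

    b += 5;                                                // b = 2^64 + 5
    BigInteger diff(static_cast<uint64_t>(0));
    const bool aIsSmaller = a.Difference(b, &diff);        // |a - b| = 5
    assert(aIsSmaller);
    assert(diff == static_cast<uint64_t>(5));
    return 0;
}
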
diff --git a/include/rapidjson/internal/diyfp.h b/include/rapidjson/internal/diyfp.h
new file mode 100644
index 00000000..c9fefdc6
--- /dev/null
+++ b/include/rapidjson/internal/diyfp.h
@@ -0,0 +1,258 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+// This is a C++ header-only implementation of the Grisu2 algorithm from the publication:
+// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
+// integers." ACM SIGPLAN Notices 45.6 (2010): 233-243.
+
+#ifndef RAPIDJSON_DIYFP_H_
+#define RAPIDJSON_DIYFP_H_
+
+#include "../rapidjson.h"
+
+#if defined(_MSC_VER) && defined(_M_AMD64)
+#include <intrin.h>
+#pragma intrinsic(_BitScanReverse64)
+#pragma intrinsic(_umul128)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+struct DiyFp {
+ DiyFp() : f(), e() {}
+
+ DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {}
+
+ explicit DiyFp(double d) {
+ union {
+ double d;
+ uint64_t u64;
+ } u = { d };
+
+ int biased_e = static_cast<int>((u.u64 & kDpExponentMask) >> kDpSignificandSize);
+ uint64_t significand = (u.u64 & kDpSignificandMask);
+ if (biased_e != 0) {
+ f = significand + kDpHiddenBit;
+ e = biased_e - kDpExponentBias;
+ }
+ else {
+ f = significand;
+ e = kDpMinExponent + 1;
+ }
+ }
+
+ DiyFp operator-(const DiyFp& rhs) const {
+ return DiyFp(f - rhs.f, e);
+ }
+
+ DiyFp operator*(const DiyFp& rhs) const {
+#if defined(_MSC_VER) && defined(_M_AMD64)
+ uint64_t h;
+ uint64_t l = _umul128(f, rhs.f, &h);
+ if (l & (uint64_t(1) << 63)) // rounding
+ h++;
+ return DiyFp(h, e + rhs.e + 64);
+#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__)
+ __extension__ typedef unsigned __int128 uint128;
+ uint128 p = static_cast<uint128>(f) * static_cast<uint128>(rhs.f);
+ uint64_t h = static_cast<uint64_t>(p >> 64);
+ uint64_t l = static_cast<uint64_t>(p);
+ if (l & (uint64_t(1) << 63)) // rounding
+ h++;
+ return DiyFp(h, e + rhs.e + 64);
+#else
+ const uint64_t M32 = 0xFFFFFFFF;
+ const uint64_t a = f >> 32;
+ const uint64_t b = f & M32;
+ const uint64_t c = rhs.f >> 32;
+ const uint64_t d = rhs.f & M32;
+ const uint64_t ac = a * c;
+ const uint64_t bc = b * c;
+ const uint64_t ad = a * d;
+ const uint64_t bd = b * d;
+ uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32);
+ tmp += 1U << 31; /// mult_round
+ return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64);
+#endif
+ }
+
+ DiyFp Normalize() const {
+#if defined(_MSC_VER) && defined(_M_AMD64)
+ unsigned long index;
+ _BitScanReverse64(&index, f);
+ return DiyFp(f << (63 - index), e - (63 - index));
+#elif defined(__GNUC__) && __GNUC__ >= 4
+ int s = __builtin_clzll(f);
+ return DiyFp(f << s, e - s);
+#else
+ DiyFp res = *this;
+ while (!(res.f & (static_cast<uint64_t>(1) << 63))) {
+ res.f <<= 1;
+ res.e--;
+ }
+ return res;
+#endif
+ }
+
+ DiyFp NormalizeBoundary() const {
+ DiyFp res = *this;
+ while (!(res.f & (kDpHiddenBit << 1))) {
+ res.f <<= 1;
+ res.e--;
+ }
+ res.f <<= (kDiySignificandSize - kDpSignificandSize - 2);
+ res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2);
+ return res;
+ }
+
+ void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const {
+ DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary();
+ DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1);
+ mi.f <<= mi.e - pl.e;
+ mi.e = pl.e;
+ *plus = pl;
+ *minus = mi;
+ }
+
+ double ToDouble() const {
+ union {
+ double d;
+ uint64_t u64;
+ }u;
+ const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 0 :
+ static_cast<uint64_t>(e + kDpExponentBias);
+ u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize);
+ return u.d;
+ }
+
+ static const int kDiySignificandSize = 64;
+ static const int kDpSignificandSize = 52;
+ static const int kDpExponentBias = 0x3FF + kDpSignificandSize;
+ static const int kDpMaxExponent = 0x7FF - kDpExponentBias;
+ static const int kDpMinExponent = -kDpExponentBias;
+ static const int kDpDenormalExponent = -kDpExponentBias + 1;
+ static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
+ static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
+ static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
+
+ uint64_t f;
+ int e;
+};
+
+inline DiyFp GetCachedPowerByIndex(size_t index) {
+ // 10^-348, 10^-340, ..., 10^340
+ static const uint64_t kCachedPowers_F[] = {
+ RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76),
+ RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea),
+ RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df),
+ RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f),
+ RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c),
+ RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5),
+ RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d),
+ RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637),
+ RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7),
+ RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5),
+ RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b),
+ RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996),
+ RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6),
+ RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8),
+ RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053),
+ RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd),
+ RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94),
+ RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b),
+ RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac),
+ RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3),
+ RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb),
+ RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c),
+ RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000),
+ RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984),
+ RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70),
+ RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245),
+ RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8),
+ RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a),
+ RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea),
+ RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85),
+ RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2),
+ RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3),
+ RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25),
+ RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece),
+ RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5),
+ RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a),
+ RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c),
+ RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a),
+ RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129),
+ RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429),
+ RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d),
+ RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841),
+ RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9),
+ RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b)
+ };
+ static const int16_t kCachedPowers_E[] = {
+ -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980,
+ -954, -927, -901, -874, -847, -821, -794, -768, -741, -715,
+ -688, -661, -635, -608, -582, -555, -529, -502, -475, -449,
+ -422, -396, -369, -343, -316, -289, -263, -236, -210, -183,
+ -157, -130, -103, -77, -50, -24, 3, 30, 56, 83,
+ 109, 136, 162, 189, 216, 242, 269, 295, 322, 348,
+ 375, 402, 428, 455, 481, 508, 534, 561, 588, 614,
+ 641, 667, 694, 720, 747, 774, 800, 827, 853, 880,
+ 907, 933, 960, 986, 1013, 1039, 1066
+ };
+ return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]);
+}
+
+inline DiyFp GetCachedPower(int e, int* K) {
+
+ //int k = static_cast<int>(ceil((-61 - e) * 0.30102999566398114)) + 374;
+ double dk = (-61 - e) * 0.30102999566398114 + 347; // dk is positive, so the ceiling can be taken by truncating and adjusting
+ int k = static_cast<int>(dk);
+ if (dk - k > 0.0)
+ k++;
+
+ unsigned index = static_cast<unsigned>((k >> 3) + 1);
+ *K = -(-348 + static_cast<int>(index << 3)); // decimal exponent can be computed directly; no lookup table needed
+
+ return GetCachedPowerByIndex(index);
+}
+
+inline DiyFp GetCachedPower10(int exp, int *outExp) {
+ unsigned index = (static_cast<unsigned>(exp) + 348u) / 8u;
+ *outExp = -348 + static_cast<int>(index) * 8;
+ return GetCachedPowerByIndex(index);
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_DIYFP_H_
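
DiyFp is the "do-it-yourself" floating-point type from the Loitsch paper: a 64-bit significand f and a binary exponent e, with multiplication keeping only the top 64 bits of the 128-bit product. A short sketch of how the pieces above fit together (illustrative only, not part of the patch):

#include "rapidjson/internal/diyfp.h"
#include <cassert>

int main() {
    using namespace rapidjson::internal;

    // 1.5 decomposes to (2^52 + 2^51) * 2^-52; the hidden bit becomes explicit in f.
    DiyFp v(1.5);
    assert(v.e == -52);
    assert(v.f == (DiyFp::kDpHiddenBit | (DiyFp::kDpHiddenBit >> 1)));
    assert(v.ToDouble() == 1.5);                 // inverse of the decomposing constructor

    // Normalize() shifts f until bit 63 is set and lowers e to match.
    DiyFp n = v.Normalize();
    assert((n.f >> 63) == 1 && n.e == -63);

    // operator* keeps the top 64 bits of the 128-bit product (with rounding)
    // and adds 64 to the exponent sum, as described in the Loitsch paper.
    int K;
    DiyFp c = GetCachedPower(n.e, &K);           // cached power of 10 chosen for Grisu2
    DiyFp scaled = n * c;
    assert(scaled.e == n.e + c.e + 64);
    (void)K;
    return 0;
}
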
diff --git a/include/rapidjson/internal/dtoa.h b/include/rapidjson/internal/dtoa.h
new file mode 100644
index 00000000..8d6350e6
--- /dev/null
+++ b/include/rapidjson/internal/dtoa.h
@@ -0,0 +1,245 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+// This is a C++ header-only implementation of the Grisu2 algorithm from the publication:
+// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with
+// integers." ACM SIGPLAN Notices 45.6 (2010): 233-243.
+
+#ifndef RAPIDJSON_DTOA_
+#define RAPIDJSON_DTOA_
+
+#include "itoa.h" // GetDigitsLut()
+#include "diyfp.h"
+#include "ieee754.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124
+#endif
+
+inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) {
+ while (rest < wp_w && delta - rest >= ten_kappa &&
+ (rest + ten_kappa < wp_w || /// closer
+ wp_w - rest > rest + ten_kappa - wp_w)) {
+ buffer[len - 1]--;
+ rest += ten_kappa;
+ }
+}
+
+inline unsigned CountDecimalDigit32(uint32_t n) {
+ // Simple pure C++ implementation was faster than __builtin_clz version in this situation.
+ if (n < 10) return 1;
+ if (n < 100) return 2;
+ if (n < 1000) return 3;
+ if (n < 10000) return 4;
+ if (n < 100000) return 5;
+ if (n < 1000000) return 6;
+ if (n < 10000000) return 7;
+ if (n < 100000000) return 8;
+ // Will not reach 10 digits in DigitGen()
+ //if (n < 1000000000) return 9;
+ //return 10;
+ return 9;
+}
+
+inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) {
+ static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 };
+ const DiyFp one(uint64_t(1) << -Mp.e, Mp.e);
+ const DiyFp wp_w = Mp - W;
+ uint32_t p1 = static_cast<uint32_t>(Mp.f >> -one.e);
+ uint64_t p2 = Mp.f & (one.f - 1);
+ int kappa = static_cast<int>(CountDecimalDigit32(p1)); // kappa in [0, 9]
+ *len = 0;
+
+ while (kappa > 0) {
+ uint32_t d = 0;
+ switch (kappa) {
+ case 9: d = p1 / 100000000; p1 %= 100000000; break;
+ case 8: d = p1 / 10000000; p1 %= 10000000; break;
+ case 7: d = p1 / 1000000; p1 %= 1000000; break;
+ case 6: d = p1 / 100000; p1 %= 100000; break;
+ case 5: d = p1 / 10000; p1 %= 10000; break;
+ case 4: d = p1 / 1000; p1 %= 1000; break;
+ case 3: d = p1 / 100; p1 %= 100; break;
+ case 2: d = p1 / 10; p1 %= 10; break;
+ case 1: d = p1; p1 = 0; break;
+ default:;
+ }
+ if (d || *len)
+ buffer[(*len)++] = static_cast<char>('0' + static_cast<char>(d));
+ kappa--;
+ uint64_t tmp = (static_cast<uint64_t>(p1) << -one.e) + p2;
+ if (tmp <= delta) {
+ *K += kappa;
+ GrisuRound(buffer, *len, delta, tmp, static_cast<uint64_t>(kPow10[kappa]) << -one.e, wp_w.f);
+ return;
+ }
+ }
+
+ // kappa = 0
+ for (;;) {
+ p2 *= 10;
+ delta *= 10;
+ char d = static_cast<char>(p2 >> -one.e);
+ if (d || *len)
+ buffer[(*len)++] = static_cast<char>('0' + d);
+ p2 &= one.f - 1;
+ kappa--;
+ if (p2 < delta) {
+ *K += kappa;
+ int index = -kappa;
+ GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? kPow10[index] : 0));
+ return;
+ }
+ }
+}
+
+inline void Grisu2(double value, char* buffer, int* length, int* K) {
+ const DiyFp v(value);
+ DiyFp w_m, w_p;
+ v.NormalizedBoundaries(&w_m, &w_p);
+
+ const DiyFp c_mk = GetCachedPower(w_p.e, K);
+ const DiyFp W = v.Normalize() * c_mk;
+ DiyFp Wp = w_p * c_mk;
+ DiyFp Wm = w_m * c_mk;
+ Wm.f++;
+ Wp.f--;
+ DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K);
+}
+
+inline char* WriteExponent(int K, char* buffer) {
+ if (K < 0) {
+ *buffer++ = '-';
+ K = -K;
+ }
+
+ if (K >= 100) {
+ *buffer++ = static_cast<char>('0' + static_cast<char>(K / 100));
+ K %= 100;
+ const char* d = GetDigitsLut() + K * 2;
+ *buffer++ = d[0];
+ *buffer++ = d[1];
+ }
+ else if (K >= 10) {
+ const char* d = GetDigitsLut() + K * 2;
+ *buffer++ = d[0];
+ *buffer++ = d[1];
+ }
+ else
+ *buffer++ = static_cast<char>('0' + static_cast<char>(K));
+
+ return buffer;
+}
+
+inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) {
+ const int kk = length + k; // 10^(kk-1) <= v < 10^kk
+
+ if (0 <= k && kk <= 21) {
+ // 1234e7 -> 12340000000
+ for (int i = length; i < kk; i++)
+ buffer[i] = '0';
+ buffer[kk] = '.';
+ buffer[kk + 1] = '0';
+ return &buffer[kk + 2];
+ }
+ else if (0 < kk && kk <= 21) {
+ // 1234e-2 -> 12.34
+ std::memmove(&buffer[kk + 1], &buffer[kk], static_cast<size_t>(length - kk));
+ buffer[kk] = '.';
+ if (0 > k + maxDecimalPlaces) {
+ // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1
+ // Remove extra trailing zeros (at least one) after truncation.
+ for (int i = kk + maxDecimalPlaces; i > kk + 1; i--)
+ if (buffer[i] != '0')
+ return &buffer[i + 1];
+ return &buffer[kk + 2]; // Reserve one zero
+ }
+ else
+ return &buffer[length + 1];
+ }
+ else if (-6 < kk && kk <= 0) {
+ // 1234e-6 -> 0.001234
+ const int offset = 2 - kk;
+ std::memmove(&buffer[offset], &buffer[0], static_cast<size_t>(length));
+ buffer[0] = '0';
+ buffer[1] = '.';
+ for (int i = 2; i < offset; i++)
+ buffer[i] = '0';
+ if (length - kk > maxDecimalPlaces) {
+ // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1
+ // Remove extra trailing zeros (at least one) after truncation.
+ for (int i = maxDecimalPlaces + 1; i > 2; i--)
+ if (buffer[i] != '0')
+ return &buffer[i + 1];
+ return &buffer[3]; // Reserve one zero
+ }
+ else
+ return &buffer[length + offset];
+ }
+ else if (kk < -maxDecimalPlaces) {
+ // Truncate to zero
+ buffer[0] = '0';
+ buffer[1] = '.';
+ buffer[2] = '0';
+ return &buffer[3];
+ }
+ else if (length == 1) {
+ // 1e30
+ buffer[1] = 'e';
+ return WriteExponent(kk - 1, &buffer[2]);
+ }
+ else {
+ // 1234e30 -> 1.234e33
+ std::memmove(&buffer[2], &buffer[1], static_cast<size_t>(length - 1));
+ buffer[1] = '.';
+ buffer[length + 1] = 'e';
+ return WriteExponent(kk - 1, &buffer[0 + length + 2]);
+ }
+}
+
+inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) {
+ RAPIDJSON_ASSERT(maxDecimalPlaces >= 1);
+ Double d(value);
+ if (d.IsZero()) {
+ if (d.Sign())
+ *buffer++ = '-'; // -0.0, Issue #289
+ buffer[0] = '0';
+ buffer[1] = '.';
+ buffer[2] = '0';
+ return &buffer[3];
+ }
+ else {
+ if (value < 0) {
+ *buffer++ = '-';
+ value = -value;
+ }
+ int length, K;
+ Grisu2(value, buffer, &length, &K);
+ return Prettify(buffer, length, K, maxDecimalPlaces);
+ }
+}
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_POP
+#endif
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_DTOA_
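
The dtoa() entry point above writes the shortest round-trip representation and returns a pointer just past the last character; it does not null-terminate. A hedged usage sketch (not part of the patch; expected outputs are shown as comments and assume a standard IEEE-754 double):

#include "rapidjson/internal/dtoa.h"
#include <cstdio>

int main() {
    using rapidjson::internal::dtoa;
    char buf[32];                            // ample for the shortest digits plus exponent

    *dtoa(0.1, buf) = '\0';                  // terminate at the returned end pointer
    std::printf("%s\n", buf);                // "0.1" - Grisu2 emits the shortest digits

    *dtoa(1e30, buf) = '\0';
    std::printf("%s\n", buf);                // "1e30" - Prettify() switches to exponent form

    *dtoa(3.14159, buf, 2) = '\0';           // maxDecimalPlaces truncates, then trims zeros
    std::printf("%s\n", buf);                // "3.14"
    return 0;
}
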
diff --git a/include/rapidjson/internal/ieee754.h b/include/rapidjson/internal/ieee754.h
new file mode 100644
index 00000000..82bb0b99
--- /dev/null
+++ b/include/rapidjson/internal/ieee754.h
@@ -0,0 +1,78 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_IEEE754_
+#define RAPIDJSON_IEEE754_
+
+#include "../rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+class Double {
+public:
+ Double() {}
+ Double(double d) : d_(d) {}
+ Double(uint64_t u) : u_(u) {}
+
+ double Value() const { return d_; }
+ uint64_t Uint64Value() const { return u_; }
+
+ double NextPositiveDouble() const {
+ RAPIDJSON_ASSERT(!Sign());
+ return Double(u_ + 1).Value();
+ }
+
+ bool Sign() const { return (u_ & kSignMask) != 0; }
+ uint64_t Significand() const { return u_ & kSignificandMask; }
+ int Exponent() const { return static_cast<int>(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); }
+
+ bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; }
+ bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; }
+ bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; }
+ bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; }
+ bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; }
+
+ uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); }
+ int IntegerExponent() const { return (IsNormal() ? Exponent() : kDenormalExponent) - kSignificandSize; }
+ uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; }
+
+ static unsigned EffectiveSignificandSize(int order) {
+ if (order >= -1021)
+ return 53;
+ else if (order <= -1074)
+ return 0;
+ else
+ return static_cast<unsigned>(order) + 1074;
+ }
+
+private:
+ static const int kSignificandSize = 52;
+ static const int kExponentBias = 0x3FF;
+ static const int kDenormalExponent = 1 - kExponentBias;
+ static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000);
+ static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000);
+ static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF);
+ static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000);
+
+ union {
+ double d_;
+ uint64_t u_;
+ };
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_IEEE754_
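
Double is a thin bit-level view of an IEEE-754 binary64 value, used by both dtoa.h and strtod.h. An illustrative sketch of the accessors (not part of the patch):

#include "rapidjson/internal/ieee754.h"
#include <cassert>
#include <limits>

int main() {
    using rapidjson::internal::Double;

    Double one(1.0);
    assert(!one.Sign() && one.Exponent() == 0 && one.Significand() == 0);
    assert(one.IntegerSignificand() == (1ULL << 52));   // hidden bit restored

    // NextPositiveDouble() steps to the adjacent representable value.
    assert(one.NextPositiveDouble() == 1.0 + std::numeric_limits<double>::epsilon());

    // Denormals are reported as not normal but also not zero.
    Double tiny(std::numeric_limits<double>::denorm_min());
    assert(!tiny.IsNormal() && !tiny.IsZero());

    assert(Double(std::numeric_limits<double>::infinity()).IsInf());
    assert(Double(std::numeric_limits<double>::quiet_NaN()).IsNan());
    return 0;
}
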
diff --git a/include/rapidjson/internal/itoa.h b/include/rapidjson/internal/itoa.h
new file mode 100644
index 00000000..01a4e7e7
--- /dev/null
+++ b/include/rapidjson/internal/itoa.h
@@ -0,0 +1,304 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ITOA_
+#define RAPIDJSON_ITOA_
+
+#include "../rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline const char* GetDigitsLut() {
+ static const char cDigitsLut[200] = {
+ '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9',
+ '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9',
+ '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9',
+ '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9',
+ '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9',
+ '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9',
+ '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9',
+ '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9',
+ '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9',
+ '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9'
+ };
+ return cDigitsLut;
+}
+
+inline char* u32toa(uint32_t value, char* buffer) {
+ const char* cDigitsLut = GetDigitsLut();
+
+ if (value < 10000) {
+ const uint32_t d1 = (value / 100) << 1;
+ const uint32_t d2 = (value % 100) << 1;
+
+ if (value >= 1000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 100)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 10)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ }
+ else if (value < 100000000) {
+ // value = bbbbcccc
+ const uint32_t b = value / 10000;
+ const uint32_t c = value % 10000;
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ if (value >= 10000000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 1000000)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 100000)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ else {
+ // value = aabbbbcccc in decimal
+
+ const uint32_t a = value / 100000000; // 1 to 42
+ value %= 100000000;
+
+ if (a >= 10) {
+ const unsigned i = a << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a));
+
+ const uint32_t b = value / 10000; // 0 to 9999
+ const uint32_t c = value % 10000; // 0 to 9999
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ *buffer++ = cDigitsLut[d1];
+ *buffer++ = cDigitsLut[d1 + 1];
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ return buffer;
+}
+
+inline char* i32toa(int32_t value, char* buffer) {
+ uint32_t u = static_cast<uint32_t>(value);
+ if (value < 0) {
+ *buffer++ = '-';
+ u = ~u + 1;
+ }
+
+ return u32toa(u, buffer);
+}
+
+inline char* u64toa(uint64_t value, char* buffer) {
+ const char* cDigitsLut = GetDigitsLut();
+ const uint64_t kTen8 = 100000000;
+ const uint64_t kTen9 = kTen8 * 10;
+ const uint64_t kTen10 = kTen8 * 100;
+ const uint64_t kTen11 = kTen8 * 1000;
+ const uint64_t kTen12 = kTen8 * 10000;
+ const uint64_t kTen13 = kTen8 * 100000;
+ const uint64_t kTen14 = kTen8 * 1000000;
+ const uint64_t kTen15 = kTen8 * 10000000;
+ const uint64_t kTen16 = kTen8 * kTen8;
+
+ if (value < kTen8) {
+ uint32_t v = static_cast<uint32_t>(value);
+ if (v < 10000) {
+ const uint32_t d1 = (v / 100) << 1;
+ const uint32_t d2 = (v % 100) << 1;
+
+ if (v >= 1000)
+ *buffer++ = cDigitsLut[d1];
+ if (v >= 100)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (v >= 10)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ }
+ else {
+ // value = bbbbcccc
+ const uint32_t b = v / 10000;
+ const uint32_t c = v % 10000;
+
+ const uint32_t d1 = (b / 100) << 1;
+ const uint32_t d2 = (b % 100) << 1;
+
+ const uint32_t d3 = (c / 100) << 1;
+ const uint32_t d4 = (c % 100) << 1;
+
+ if (value >= 10000000)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= 1000000)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= 100000)
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ }
+ }
+ else if (value < kTen16) {
+ const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
+ const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
+
+ const uint32_t b0 = v0 / 10000;
+ const uint32_t c0 = v0 % 10000;
+
+ const uint32_t d1 = (b0 / 100) << 1;
+ const uint32_t d2 = (b0 % 100) << 1;
+
+ const uint32_t d3 = (c0 / 100) << 1;
+ const uint32_t d4 = (c0 % 100) << 1;
+
+ const uint32_t b1 = v1 / 10000;
+ const uint32_t c1 = v1 % 10000;
+
+ const uint32_t d5 = (b1 / 100) << 1;
+ const uint32_t d6 = (b1 % 100) << 1;
+
+ const uint32_t d7 = (c1 / 100) << 1;
+ const uint32_t d8 = (c1 % 100) << 1;
+
+ if (value >= kTen15)
+ *buffer++ = cDigitsLut[d1];
+ if (value >= kTen14)
+ *buffer++ = cDigitsLut[d1 + 1];
+ if (value >= kTen13)
+ *buffer++ = cDigitsLut[d2];
+ if (value >= kTen12)
+ *buffer++ = cDigitsLut[d2 + 1];
+ if (value >= kTen11)
+ *buffer++ = cDigitsLut[d3];
+ if (value >= kTen10)
+ *buffer++ = cDigitsLut[d3 + 1];
+ if (value >= kTen9)
+ *buffer++ = cDigitsLut[d4];
+ if (value >= kTen8)
+ *buffer++ = cDigitsLut[d4 + 1];
+
+ *buffer++ = cDigitsLut[d5];
+ *buffer++ = cDigitsLut[d5 + 1];
+ *buffer++ = cDigitsLut[d6];
+ *buffer++ = cDigitsLut[d6 + 1];
+ *buffer++ = cDigitsLut[d7];
+ *buffer++ = cDigitsLut[d7 + 1];
+ *buffer++ = cDigitsLut[d8];
+ *buffer++ = cDigitsLut[d8 + 1];
+ }
+ else {
+ const uint32_t a = static_cast<uint32_t>(value / kTen16); // 1 to 1844
+ value %= kTen16;
+
+ if (a < 10)
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a));
+ else if (a < 100) {
+ const uint32_t i = a << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else if (a < 1000) {
+ *buffer++ = static_cast<char>('0' + static_cast<char>(a / 100));
+
+ const uint32_t i = (a % 100) << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ }
+ else {
+ const uint32_t i = (a / 100) << 1;
+ const uint32_t j = (a % 100) << 1;
+ *buffer++ = cDigitsLut[i];
+ *buffer++ = cDigitsLut[i + 1];
+ *buffer++ = cDigitsLut[j];
+ *buffer++ = cDigitsLut[j + 1];
+ }
+
+ const uint32_t v0 = static_cast<uint32_t>(value / kTen8);
+ const uint32_t v1 = static_cast<uint32_t>(value % kTen8);
+
+ const uint32_t b0 = v0 / 10000;
+ const uint32_t c0 = v0 % 10000;
+
+ const uint32_t d1 = (b0 / 100) << 1;
+ const uint32_t d2 = (b0 % 100) << 1;
+
+ const uint32_t d3 = (c0 / 100) << 1;
+ const uint32_t d4 = (c0 % 100) << 1;
+
+ const uint32_t b1 = v1 / 10000;
+ const uint32_t c1 = v1 % 10000;
+
+ const uint32_t d5 = (b1 / 100) << 1;
+ const uint32_t d6 = (b1 % 100) << 1;
+
+ const uint32_t d7 = (c1 / 100) << 1;
+ const uint32_t d8 = (c1 % 100) << 1;
+
+ *buffer++ = cDigitsLut[d1];
+ *buffer++ = cDigitsLut[d1 + 1];
+ *buffer++ = cDigitsLut[d2];
+ *buffer++ = cDigitsLut[d2 + 1];
+ *buffer++ = cDigitsLut[d3];
+ *buffer++ = cDigitsLut[d3 + 1];
+ *buffer++ = cDigitsLut[d4];
+ *buffer++ = cDigitsLut[d4 + 1];
+ *buffer++ = cDigitsLut[d5];
+ *buffer++ = cDigitsLut[d5 + 1];
+ *buffer++ = cDigitsLut[d6];
+ *buffer++ = cDigitsLut[d6 + 1];
+ *buffer++ = cDigitsLut[d7];
+ *buffer++ = cDigitsLut[d7 + 1];
+ *buffer++ = cDigitsLut[d8];
+ *buffer++ = cDigitsLut[d8 + 1];
+ }
+
+ return buffer;
+}
+
+inline char* i64toa(int64_t value, char* buffer) {
+ uint64_t u = static_cast<uint64_t>(value);
+ if (value < 0) {
+ *buffer++ = '-';
+ u = ~u + 1;
+ }
+
+ return u64toa(u, buffer);
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_ITOA_
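
Like dtoa(), the integer writers return the end pointer and leave termination to the caller. A small usage sketch (illustrative only, not part of the patch):

#include "rapidjson/internal/itoa.h"
#include <cstdio>

int main() {
    using namespace rapidjson::internal;

    char buf[21];                                                // 20 characters covers int64_t with sign
    char* end = i64toa(-9223372036854775807LL - 1, buf);         // INT64_MIN
    std::printf("%.*s\n", static_cast<int>(end - buf), buf);     // -9223372036854775808

    end = u32toa(4294967295u, buf);
    std::printf("%.*s\n", static_cast<int>(end - buf), buf);     // 4294967295
    return 0;
}
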
diff --git a/include/rapidjson/internal/meta.h b/include/rapidjson/internal/meta.h
new file mode 100644
index 00000000..5a9aaa42
--- /dev/null
+++ b/include/rapidjson/internal/meta.h
@@ -0,0 +1,181 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_META_H_
+#define RAPIDJSON_INTERNAL_META_H_
+
+#include "../rapidjson.h"
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+#if defined(_MSC_VER)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(6334)
+#endif
+
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+#include <type_traits>
+#endif
+
+//@cond RAPIDJSON_INTERNAL
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching
+template <typename T> struct Void { typedef void Type; };
+
+///////////////////////////////////////////////////////////////////////////////
+// BoolType, TrueType, FalseType
+//
+template <bool Cond> struct BoolType {
+ static const bool Value = Cond;
+ typedef BoolType Type;
+};
+typedef BoolType<true> TrueType;
+typedef BoolType<false> FalseType;
+
+
+///////////////////////////////////////////////////////////////////////////////
+// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr
+//
+
+template <bool C> struct SelectIfImpl { template <typename T1, typename T2> struct Apply { typedef T1 Type; }; };
+template <> struct SelectIfImpl<false> { template <typename T1, typename T2> struct Apply { typedef T2 Type; }; };
+template <bool C, typename T1, typename T2> struct SelectIfCond : SelectIfImpl<C>::template Apply<T1,T2> {};
+template <typename C, typename T1, typename T2> struct SelectIf : SelectIfCond<C::Value, T1, T2> {};
+
+template <bool Cond1, bool Cond2> struct AndExprCond : FalseType {};
+template <> struct AndExprCond<true, true> : TrueType {};
+template <bool Cond1, bool Cond2> struct OrExprCond : TrueType {};
+template <> struct OrExprCond<false, false> : FalseType {};
+
+template <typename C> struct BoolExpr : SelectIf<C,TrueType,FalseType>::Type {};
+template <typename C> struct NotExpr : SelectIf<C,FalseType,TrueType>::Type {};
+template <typename C1, typename C2> struct AndExpr : AndExprCond<C1::Value, C2::Value>::Type {};
+template <typename C1, typename C2> struct OrExpr : OrExprCond<C1::Value, C2::Value>::Type {};
+
+
+///////////////////////////////////////////////////////////////////////////////
+// AddConst, MaybeAddConst, RemoveConst
+template <typename T> struct AddConst { typedef const T Type; };
+template <bool Constify, typename T> struct MaybeAddConst : SelectIfCond<Constify, const T, T> {};
+template <typename T> struct RemoveConst { typedef T Type; };
+template <typename T> struct RemoveConst<const T> { typedef T Type; };
+
+
+///////////////////////////////////////////////////////////////////////////////
+// IsSame, IsConst, IsMoreConst, IsPointer
+//
+template <typename T, typename U> struct IsSame : FalseType {};
+template <typename T> struct IsSame<T, T> : TrueType {};
+
+template <typename T> struct IsConst : FalseType {};
+template <typename T> struct IsConst<const T> : TrueType {};
+
+template <typename CT, typename T>
+struct IsMoreConst
+ : AndExpr<IsSame<typename RemoveConst<CT>::Type, typename RemoveConst<T>::Type>,
+ BoolType<IsConst<CT>::Value >= IsConst<T>::Value> >::Type {};
+
+template <typename T> struct IsPointer : FalseType {};
+template <typename T> struct IsPointer<T*> : TrueType {};
+
+///////////////////////////////////////////////////////////////////////////////
+// IsBaseOf
+//
+#if RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+template <typename B, typename D> struct IsBaseOf
+ : BoolType< ::std::is_base_of<B,D>::value> {};
+
+#else // simplified version adapted from Boost
+
+template<typename B, typename D> struct IsBaseOfImpl {
+ RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0);
+ RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0);
+
+ typedef char (&Yes)[1];
+ typedef char (&No) [2];
+
+ template <typename T>
+ static Yes Check(const D*, T);
+ static No Check(const B*, int);
+
+ struct Host {
+ operator const B*() const;
+ operator const D*();
+ };
+
+ enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) };
+};
+
+template <typename B, typename D> struct IsBaseOf
+ : OrExpr<IsSame<B, D>, BoolExpr<IsBaseOfImpl<B, D> > >::Type {};
+
+#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS
+
+
+//////////////////////////////////////////////////////////////////////////
+// EnableIf / DisableIf
+//
+template <bool Condition, typename T = void> struct EnableIfCond { typedef T Type; };
+template <typename T> struct EnableIfCond<false, T> { /* empty */ };
+
+template <bool Condition, typename T = void> struct DisableIfCond { typedef T Type; };
+template <typename T> struct DisableIfCond<true, T> { /* empty */ };
+
+template <typename Condition, typename T = void>
+struct EnableIf : EnableIfCond<Condition::Value, T> {};
+
+template <typename Condition, typename T = void>
+struct DisableIf : DisableIfCond<Condition::Value, T> {};
+
+// SFINAE helpers
+struct SfinaeTag {};
+template <typename T> struct RemoveSfinaeTag;
+template <typename T> struct RemoveSfinaeTag<SfinaeTag&(*)(T)> { typedef T Type; };
+
+#define RAPIDJSON_REMOVEFPTR_(type) \
+ typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \
+ < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type
+
+#define RAPIDJSON_ENABLEIF(cond) \
+ typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_DISABLEIF(cond) \
+ typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond)>::Type * = NULL
+
+#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \
+ typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond), \
+ RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \
+ typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \
+ <RAPIDJSON_REMOVEFPTR_(cond), \
+ RAPIDJSON_REMOVEFPTR_(returntype)>::Type
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+//@endcond
+
+#if defined(__GNUC__) || defined(_MSC_VER)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_META_H_
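
The SFINAE macros above are how the public headers constrain overloads without requiring C++11 <type_traits>. A compile-time sketch (illustrative only, not part of the patch; Describe() is a hypothetical function used purely for demonstration):

#include "rapidjson/internal/meta.h"
#include <cstdio>

using namespace rapidjson::internal;

// Compile-time checks of the boolean metafunctions.
RAPIDJSON_STATIC_ASSERT((IsSame<RemoveConst<const int>::Type, int>::Value));
RAPIDJSON_STATIC_ASSERT((IsMoreConst<const int, int>::Value));
RAPIDJSON_STATIC_ASSERT(!(IsMoreConst<int, const int>::Value));

// RAPIDJSON_ENABLEIF/RAPIDJSON_DISABLEIF admit an overload only when the
// condition holds, the same way document.h constrains its templated overloads.
template <typename T>
const char* Describe(T, RAPIDJSON_ENABLEIF((IsPointer<T>))) { return "pointer"; }

template <typename T>
const char* Describe(T, RAPIDJSON_DISABLEIF((IsPointer<T>))) { return "non-pointer"; }

int main() {
    int x = 0;
    std::printf("%s %s\n", Describe(&x), Describe(x));   // pointer non-pointer
    return 0;
}
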
diff --git a/include/rapidjson/internal/pow10.h b/include/rapidjson/internal/pow10.h
new file mode 100644
index 00000000..02f475d7
--- /dev/null
+++ b/include/rapidjson/internal/pow10.h
@@ -0,0 +1,55 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_POW10_
+#define RAPIDJSON_POW10_
+
+#include "../rapidjson.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Computes integer powers of 10 in double (10.0^n).
+/*! This function uses a lookup table for fast and accurate results.
+ \param n non-negative exponent. Must be <= 308.
+ \return 10.0^n
+*/
+inline double Pow10(int n) {
+ static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes
+ 1e+0,
+ 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20,
+ 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40,
+ 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60,
+ 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80,
+ 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100,
+ 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120,
+ 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140,
+ 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160,
+ 1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180,
+ 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200,
+ 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220,
+ 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240,
+ 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260,
+ 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280,
+ 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300,
+ 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308
+ };
+ RAPIDJSON_ASSERT(n >= 0 && n <= 308);
+ return e[n];
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_POW10_
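
A tiny sketch of the contract documented above (illustrative only, not part of the patch):

#include "rapidjson/internal/pow10.h"
#include <cassert>

int main() {
    using rapidjson::internal::Pow10;
    assert(Pow10(0) == 1.0);
    assert(Pow10(22) == 1e22);     // largest power of 10 a double represents exactly
    assert(Pow10(308) == 1e308);   // upper bound of the table; n > 308 trips the assertion
    return 0;
}
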
diff --git a/include/rapidjson/internal/regex.h b/include/rapidjson/internal/regex.h
new file mode 100644
index 00000000..422a5240
--- /dev/null
+++ b/include/rapidjson/internal/regex.h
@@ -0,0 +1,701 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_REGEX_H_
+#define RAPIDJSON_INTERNAL_REGEX_H_
+
+#include "../allocators.h"
+#include "../stream.h"
+#include "stack.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+RAPIDJSON_DIAG_OFF(implicit-fallthrough)
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+#ifdef _MSC_VER
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated
+#endif
+
+#ifndef RAPIDJSON_REGEX_VERBOSE
+#define RAPIDJSON_REGEX_VERBOSE 0
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+///////////////////////////////////////////////////////////////////////////////
+// GenericRegex
+
+static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1
+static const SizeType kRegexInvalidRange = ~SizeType(0);
+
+//! Regular expression engine supporting a subset of the ECMAScript grammar.
+/*!
+ Supported regular expression syntax:
+ - \c ab Concatenation
+ - \c a|b Alternation
+ - \c a? Zero or one
+ - \c a* Zero or more
+ - \c a+ One or more
+ - \c a{3} Exactly 3 times
+ - \c a{3,} At least 3 times
+ - \c a{3,5} 3 to 5 times
+ - \c (ab) Grouping
+ - \c ^a At the beginning
+ - \c a$ At the end
+ - \c . Any character
+ - \c [abc] Character classes
+ - \c [a-c] Character class range
+ - \c [a-z0-9_] Character class combination
+ - \c [^abc] Negated character classes
+ - \c [^a-c] Negated character class range
+ - \c [\b] Backspace (U+0008)
+ - \c \\| \\\\ ... Escape characters
+ - \c \\f Form feed (U+000C)
+ - \c \\n Line feed (U+000A)
+ - \c \\r Carriage return (U+000D)
+ - \c \\t Tab (U+0009)
+ - \c \\v Vertical tab (U+000B)
+
+ \note This is a Thompson NFA engine, implemented with reference to
+ Cox, Russ. "Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).",
+ https://swtch.com/~rsc/regexp/regexp1.html
+*/
+template <typename Encoding, typename Allocator = CrtAllocator>
+class GenericRegex {
+public:
+ typedef typename Encoding::Ch Ch;
+
+ GenericRegex(const Ch* source, Allocator* allocator = 0) :
+ states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(),
+ stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_()
+ {
+ GenericStringStream<Encoding> ss(source);
+ DecodedStream<GenericStringStream<Encoding> > ds(ss);
+ Parse(ds);
+ }
+
+ ~GenericRegex() {
+ Allocator::Free(stateSet_);
+ }
+
+ bool IsValid() const {
+ return root_ != kRegexInvalidState;
+ }
+
+ template <typename InputStream>
+ bool Match(InputStream& is) const {
+ return SearchWithAnchoring(is, true, true);
+ }
+
+ bool Match(const Ch* s) const {
+ GenericStringStream<Encoding> is(s);
+ return Match(is);
+ }
+
+ template <typename InputStream>
+ bool Search(InputStream& is) const {
+ return SearchWithAnchoring(is, anchorBegin_, anchorEnd_);
+ }
+
+ bool Search(const Ch* s) const {
+ GenericStringStream<Encoding> is(s);
+ return Search(is);
+ }
+
+private:
+ enum Operator {
+ kZeroOrOne,
+ kZeroOrMore,
+ kOneOrMore,
+ kConcatenation,
+ kAlternation,
+ kLeftParenthesis
+ };
+
+ static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.'
+ static const unsigned kRangeCharacterClass = 0xFFFFFFFE;
+ static const unsigned kRangeNegationFlag = 0x80000000;
+
+ struct Range {
+ unsigned start;
+ unsigned end;
+ SizeType next;
+ };
+
+ struct State {
+ SizeType out; //!< Equals kRegexInvalidState for the matching state
+ SizeType out1; //!< Not kRegexInvalidState for a split state
+ SizeType rangeStart;
+ unsigned codepoint;
+ };
+
+ struct Frag {
+ Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {}
+ SizeType start;
+ SizeType out; //!< linked list of all output states
+ SizeType minIndex;
+ };
+
+ template <typename SourceStream>
+ class DecodedStream {
+ public:
+ DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); }
+ unsigned Peek() { return codepoint_; }
+ unsigned Take() {
+ unsigned c = codepoint_;
+ if (c) // No further decoding when '\0'
+ Decode();
+ return c;
+ }
+
+ private:
+ void Decode() {
+ if (!Encoding::Decode(ss_, &codepoint_))
+ codepoint_ = 0;
+ }
+
+ SourceStream& ss_;
+ unsigned codepoint_;
+ };
+
+ State& GetState(SizeType index) {
+ RAPIDJSON_ASSERT(index < stateCount_);
+ return states_.template Bottom<State>()[index];
+ }
+
+ const State& GetState(SizeType index) const {
+ RAPIDJSON_ASSERT(index < stateCount_);
+ return states_.template Bottom<State>()[index];
+ }
+
+ Range& GetRange(SizeType index) {
+ RAPIDJSON_ASSERT(index < rangeCount_);
+ return ranges_.template Bottom<Range>()[index];
+ }
+
+ const Range& GetRange(SizeType index) const {
+ RAPIDJSON_ASSERT(index < rangeCount_);
+ return ranges_.template Bottom<Range>()[index];
+ }
+
+ template <typename InputStream>
+ void Parse(DecodedStream<InputStream>& ds) {
+ Allocator allocator;
+ Stack<Allocator> operandStack(&allocator, 256); // Frag
+ Stack<Allocator> operatorStack(&allocator, 256); // Operator
+ Stack<Allocator> atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis)
+
+ *atomCountStack.template Push<unsigned>() = 0;
+
+ unsigned codepoint;
+ while (ds.Peek() != 0) {
+ switch (codepoint = ds.Take()) {
+ case '^':
+ anchorBegin_ = true;
+ break;
+
+ case '$':
+ anchorEnd_ = true;
+ break;
+
+ case '|':
+ while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() < kAlternation)
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+ *operatorStack.template Push<Operator>() = kAlternation;
+ *atomCountStack.template Top<unsigned>() = 0;
+ break;
+
+ case '(':
+ *operatorStack.template Push<Operator>() = kLeftParenthesis;
+ *atomCountStack.template Push<unsigned>() = 0;
+ break;
+
+ case ')':
+ while (!operatorStack.Empty() && *operatorStack.template Top<Operator>() != kLeftParenthesis)
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+ if (operatorStack.Empty())
+ return;
+ operatorStack.template Pop<Operator>(1);
+ atomCountStack.template Pop<unsigned>(1);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '?':
+ if (!Eval(operandStack, kZeroOrOne))
+ return;
+ break;
+
+ case '*':
+ if (!Eval(operandStack, kZeroOrMore))
+ return;
+ break;
+
+ case '+':
+ if (!Eval(operandStack, kOneOrMore))
+ return;
+ break;
+
+ case '{':
+ {
+ unsigned n, m;
+ if (!ParseUnsigned(ds, &n))
+ return;
+
+ if (ds.Peek() == ',') {
+ ds.Take();
+ if (ds.Peek() == '}')
+ m = kInfinityQuantifier;
+ else if (!ParseUnsigned(ds, &m) || m < n)
+ return;
+ }
+ else
+ m = n;
+
+ if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}')
+ return;
+ ds.Take();
+ }
+ break;
+
+ case '.':
+ PushOperand(operandStack, kAnyCharacterClass);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '[':
+ {
+ SizeType range;
+ if (!ParseRange(ds, &range))
+ return;
+ SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass);
+ GetState(s).rangeStart = range;
+ *operandStack.template Push<Frag>() = Frag(s, s, s);
+ }
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ break;
+
+ case '\\': // Escape character
+ if (!CharacterEscape(ds, &codepoint))
+ return; // Unsupported escape character
+ // fall through to default
+
+ default: // Pattern character
+ PushOperand(operandStack, codepoint);
+ ImplicitConcatenation(atomCountStack, operatorStack);
+ }
+ }
+
+ while (!operatorStack.Empty())
+ if (!Eval(operandStack, *operatorStack.template Pop<Operator>(1)))
+ return;
+
+ // Link the operand to matching state.
+ if (operandStack.GetSize() == sizeof(Frag)) {
+ Frag* e = operandStack.template Pop<Frag>(1);
+ Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0));
+ root_ = e->start;
+
+#if RAPIDJSON_REGEX_VERBOSE
+ printf("root: %d\n", root_);
+        for (SizeType i = 0; i < stateCount_; i++) {
+ State& s = GetState(i);
+ printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint);
+ }
+ printf("\n");
+#endif
+ }
+
+ // Preallocate buffer for SearchWithAnchoring()
+ RAPIDJSON_ASSERT(stateSet_ == 0);
+ if (stateCount_ > 0) {
+            stateSet_ = static_cast<uint32_t*>(states_.GetAllocator().Malloc(GetStateSetSize()));
+ state0_.template Reserve<SizeType>(stateCount_);
+ state1_.template Reserve<SizeType>(stateCount_);
+ }
+ }
+
+ SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) {
+ State* s = states_.template Push<State>();
+ s->out = out;
+ s->out1 = out1;
+ s->codepoint = codepoint;
+ s->rangeStart = kRegexInvalidRange;
+ return stateCount_++;
+ }
+
+ void PushOperand(Stack<Allocator>& operandStack, unsigned codepoint) {
+ SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint);
+ *operandStack.template Push<Frag>() = Frag(s, s, s);
+ }
+
+ void ImplicitConcatenation(Stack<Allocator>& atomCountStack, Stack<Allocator>& operatorStack) {
+ if (*atomCountStack.template Top<unsigned>())
+ *operatorStack.template Push<Operator>() = kConcatenation;
+ (*atomCountStack.template Top<unsigned>())++;
+ }
+
+ SizeType Append(SizeType l1, SizeType l2) {
+ SizeType old = l1;
+ while (GetState(l1).out != kRegexInvalidState)
+ l1 = GetState(l1).out;
+ GetState(l1).out = l2;
+ return old;
+ }
+
+ void Patch(SizeType l, SizeType s) {
+ for (SizeType next; l != kRegexInvalidState; l = next) {
+ next = GetState(l).out;
+ GetState(l).out = s;
+ }
+ }
+
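+    // Combines the fragment(s) on top of operandStack according to the given operator,
+    // following Thompson's construction: a split state is created for '?', '*', '+' and
+    // '|', and dangling outputs are patched or appended as needed.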
+ bool Eval(Stack<Allocator>& operandStack, Operator op) {
+ switch (op) {
+ case kConcatenation:
+ RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2);
+ {
+ Frag e2 = *operandStack.template Pop<Frag>(1);
+ Frag e1 = *operandStack.template Pop<Frag>(1);
+ Patch(e1.out, e2.start);
+ *operandStack.template Push<Frag>() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex));
+ }
+ return true;
+
+ case kAlternation:
+ if (operandStack.GetSize() >= sizeof(Frag) * 2) {
+ Frag e2 = *operandStack.template Pop<Frag>(1);
+ Frag e1 = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(e1.start, e2.start, 0);
+ *operandStack.template Push<Frag>() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex));
+ return true;
+ }
+ return false;
+
+ case kZeroOrOne:
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ *operandStack.template Push<Frag>() = Frag(s, Append(e.out, s), e.minIndex);
+ return true;
+ }
+ return false;
+
+ case kZeroOrMore:
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ Patch(e.out, s);
+ *operandStack.template Push<Frag>() = Frag(s, s, e.minIndex);
+ return true;
+ }
+ return false;
+
+ default:
+ RAPIDJSON_ASSERT(op == kOneOrMore);
+ if (operandStack.GetSize() >= sizeof(Frag)) {
+ Frag e = *operandStack.template Pop<Frag>(1);
+ SizeType s = NewState(kRegexInvalidState, e.start, 0);
+ Patch(e.out, s);
+ *operandStack.template Push<Frag>() = Frag(e.start, s, e.minIndex);
+ return true;
+ }
+ return false;
+ }
+ }
+
+ bool EvalQuantifier(Stack<Allocator>& operandStack, unsigned n, unsigned m) {
+ RAPIDJSON_ASSERT(n <= m);
+ RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag));
+
+ if (n == 0) {
+            if (m == 0) // a{0} is not supported
+ return false;
+ else if (m == kInfinityQuantifier)
+ Eval(operandStack, kZeroOrMore); // a{0,} -> a*
+ else {
+ Eval(operandStack, kZeroOrOne); // a{0,5} -> a?
+ for (unsigned i = 0; i < m - 1; i++)
+ CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a?
+ for (unsigned i = 0; i < m - 1; i++)
+ Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a?
+ }
+ return true;
+ }
+
+ for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a
+ CloneTopOperand(operandStack);
+
+ if (m == kInfinityQuantifier)
+ Eval(operandStack, kOneOrMore); // a{3,} -> a a a+
+ else if (m > n) {
+ CloneTopOperand(operandStack); // a{3,5} -> a a a a
+ Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a?
+ for (unsigned i = n; i < m - 1; i++)
+ CloneTopOperand(operandStack); // a{3,5} -> a a a a? a?
+ for (unsigned i = n; i < m; i++)
+ Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a?
+ }
+
+ for (unsigned i = 0; i < n - 1; i++)
+            Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3,5} -> aaaa?a?
+
+ return true;
+ }
+
+ static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; }
+
+ void CloneTopOperand(Stack<Allocator>& operandStack) {
+        const Frag src = *operandStack.template Top<Frag>(); // Copy by value, since the Top() pointer may be invalidated below
+        SizeType count = stateCount_ - src.minIndex; // Assumes the top operand contains states in [src.minIndex, stateCount_)
+ State* s = states_.template Push<State>(count);
+ memcpy(s, &GetState(src.minIndex), count * sizeof(State));
+ for (SizeType j = 0; j < count; j++) {
+ if (s[j].out != kRegexInvalidState)
+ s[j].out += count;
+ if (s[j].out1 != kRegexInvalidState)
+ s[j].out1 += count;
+ }
+ *operandStack.template Push<Frag>() = Frag(src.start + count, src.out + count, src.minIndex + count);
+ stateCount_ += count;
+ }
+
+ template <typename InputStream>
+ bool ParseUnsigned(DecodedStream<InputStream>& ds, unsigned* u) {
+ unsigned r = 0;
+ if (ds.Peek() < '0' || ds.Peek() > '9')
+ return false;
+ while (ds.Peek() >= '0' && ds.Peek() <= '9') {
+            if (r > 429496729 || (r == 429496729 && ds.Peek() > '5')) // r * 10 + digit would exceed 2^32 - 1 = 4294967295
+ return false; // overflow
+ r = r * 10 + (ds.Take() - '0');
+ }
+ *u = r;
+ return true;
+ }
+
+ template <typename InputStream>
+ bool ParseRange(DecodedStream<InputStream>& ds, SizeType* range) {
+ bool isBegin = true;
+ bool negate = false;
+ int step = 0;
+ SizeType start = kRegexInvalidRange;
+ SizeType current = kRegexInvalidRange;
+ unsigned codepoint;
+ while ((codepoint = ds.Take()) != 0) {
+ if (isBegin) {
+ isBegin = false;
+ if (codepoint == '^') {
+ negate = true;
+ continue;
+ }
+ }
+
+ switch (codepoint) {
+ case ']':
+ if (start == kRegexInvalidRange)
+ return false; // Error: nothing inside []
+ if (step == 2) { // Add trailing '-'
+ SizeType r = NewRange('-');
+ RAPIDJSON_ASSERT(current != kRegexInvalidRange);
+ GetRange(current).next = r;
+ }
+ if (negate)
+ GetRange(start).start |= kRangeNegationFlag;
+ *range = start;
+ return true;
+
+ case '\\':
+ if (ds.Peek() == 'b') {
+ ds.Take();
+ codepoint = 0x0008; // Escape backspace character
+ }
+ else if (!CharacterEscape(ds, &codepoint))
+ return false;
+ // fall through to default
+
+ default:
+ switch (step) {
+ case 1:
+ if (codepoint == '-') {
+ step++;
+ break;
+ }
+ // fall through to step 0 for other characters
+
+ case 0:
+ {
+ SizeType r = NewRange(codepoint);
+ if (current != kRegexInvalidRange)
+ GetRange(current).next = r;
+ if (start == kRegexInvalidRange)
+ start = r;
+ current = r;
+ }
+ step = 1;
+ break;
+
+ default:
+ RAPIDJSON_ASSERT(step == 2);
+ GetRange(current).end = codepoint;
+ step = 0;
+ }
+ }
+ }
+ return false;
+ }
+
+ SizeType NewRange(unsigned codepoint) {
+ Range* r = ranges_.template Push<Range>();
+ r->start = r->end = codepoint;
+ r->next = kRegexInvalidRange;
+ return rangeCount_++;
+ }
+
+ template <typename InputStream>
+ bool CharacterEscape(DecodedStream<InputStream>& ds, unsigned* escapedCodepoint) {
+ unsigned codepoint;
+ switch (codepoint = ds.Take()) {
+ case '^':
+ case '$':
+ case '|':
+ case '(':
+ case ')':
+ case '?':
+ case '*':
+ case '+':
+ case '.':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '\\':
+ *escapedCodepoint = codepoint; return true;
+ case 'f': *escapedCodepoint = 0x000C; return true;
+ case 'n': *escapedCodepoint = 0x000A; return true;
+ case 'r': *escapedCodepoint = 0x000D; return true;
+ case 't': *escapedCodepoint = 0x0009; return true;
+ case 'v': *escapedCodepoint = 0x000B; return true;
+ default:
+ return false; // Unsupported escape character
+ }
+ }
+
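+    // Thompson NFA simulation: "current" holds the set of states reachable after the
+    // input consumed so far, and each decoded codepoint advances it into "next".
+    // stateSet_ is a bitset that prevents adding the same state twice per step;
+    // reaching a state with out == kRegexInvalidState signals a match.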
+ template <typename InputStream>
+ bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const {
+ RAPIDJSON_ASSERT(IsValid());
+ DecodedStream<InputStream> ds(is);
+
+ state0_.Clear();
+ Stack<Allocator> *current = &state0_, *next = &state1_;
+ const size_t stateSetSize = GetStateSetSize();
+ std::memset(stateSet_, 0, stateSetSize);
+
+ bool matched = AddState(*current, root_);
+ unsigned codepoint;
+ while (!current->Empty() && (codepoint = ds.Take()) != 0) {
+ std::memset(stateSet_, 0, stateSetSize);
+ next->Clear();
+ matched = false;
+ for (const SizeType* s = current->template Bottom<SizeType>(); s != current->template End<SizeType>(); ++s) {
+ const State& sr = GetState(*s);
+ if (sr.codepoint == codepoint ||
+ sr.codepoint == kAnyCharacterClass ||
+ (sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint)))
+ {
+ matched = AddState(*next, sr.out) || matched;
+ if (!anchorEnd && matched)
+ return true;
+ }
+ if (!anchorBegin)
+ AddState(*next, root_);
+ }
+ internal::Swap(current, next);
+ }
+
+ return matched;
+ }
+
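+    // Size in bytes of the bitset used to mark visited states (one bit per state,
+    // rounded up to whole 32-bit words).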
+ size_t GetStateSetSize() const {
+ return (stateCount_ + 31) / 32 * 4;
+ }
+
+    // Returns whether any of the added states is a matching state
+ bool AddState(Stack<Allocator>& l, SizeType index) const {
+ RAPIDJSON_ASSERT(index != kRegexInvalidState);
+
+ const State& s = GetState(index);
+ if (s.out1 != kRegexInvalidState) { // Split
+ bool matched = AddState(l, s.out);
+ return AddState(l, s.out1) || matched;
+ }
+ else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) {
+ stateSet_[index >> 5] |= (1 << (index & 31));
+ *l.template PushUnsafe<SizeType>() = index;
+ }
+        return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not invalidated due to reallocation.
+ }
+
+ bool MatchRange(SizeType rangeIndex, unsigned codepoint) const {
+ bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0;
+ while (rangeIndex != kRegexInvalidRange) {
+ const Range& r = GetRange(rangeIndex);
+ if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end)
+ return yes;
+ rangeIndex = r.next;
+ }
+ return !yes;
+ }
+
+ Stack<Allocator> states_;
+ Stack<Allocator> ranges_;
+ SizeType root_;
+ SizeType stateCount_;
+ SizeType rangeCount_;
+
+ static const unsigned kInfinityQuantifier = ~0u;
+
+ // For SearchWithAnchoring()
+ uint32_t* stateSet_; // allocated by states_.GetAllocator()
+ mutable Stack<Allocator> state0_;
+ mutable Stack<Allocator> state1_;
+ bool anchorBegin_;
+ bool anchorEnd_;
+};
+
+typedef GenericRegex<UTF8<> > Regex;
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#ifdef _MSC_VER
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_REGEX_H_
diff --git a/include/rapidjson/internal/stack.h b/include/rapidjson/internal/stack.h
new file mode 100644
index 00000000..022c9aab
--- /dev/null
+++ b/include/rapidjson/internal/stack.h
@@ -0,0 +1,230 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_STACK_H_
+#define RAPIDJSON_INTERNAL_STACK_H_
+
+#include "../allocators.h"
+#include "swap.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+///////////////////////////////////////////////////////////////////////////////
+// Stack
+
+//! A type-unsafe stack for storing different types of data.
+/*! \tparam Allocator Allocator for allocating stack memory.
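+
+    A minimal usage sketch (illustrative only; CrtAllocator is the general-purpose
+    allocator from allocators.h):
+    \code
+    CrtAllocator allocator;
+    Stack<CrtAllocator> stack(&allocator, 256); // capacity in bytes, allocated lazily on first Push()
+    *stack.Push<int>() = 42;                    // type-unsafe: the caller chooses the element type
+    int v = *stack.Pop<int>(1);                 // pop with the same type; v == 42
+    \endcode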
+*/
+template <typename Allocator>
+class Stack {
+public:
+ // Optimization note: Do not allocate memory for stack_ in constructor.
+ // Do it lazily when first Push() -> Expand() -> Resize().
+ Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) {
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ Stack(Stack&& rhs)
+ : allocator_(rhs.allocator_),
+ ownAllocator_(rhs.ownAllocator_),
+ stack_(rhs.stack_),
+ stackTop_(rhs.stackTop_),
+ stackEnd_(rhs.stackEnd_),
+ initialCapacity_(rhs.initialCapacity_)
+ {
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.stack_ = 0;
+ rhs.stackTop_ = 0;
+ rhs.stackEnd_ = 0;
+ rhs.initialCapacity_ = 0;
+ }
+#endif
+
+ ~Stack() {
+ Destroy();
+ }
+
+#if RAPIDJSON_HAS_CXX11_RVALUE_REFS
+ Stack& operator=(Stack&& rhs) {
+ if (&rhs != this)
+ {
+ Destroy();
+
+ allocator_ = rhs.allocator_;
+ ownAllocator_ = rhs.ownAllocator_;
+ stack_ = rhs.stack_;
+ stackTop_ = rhs.stackTop_;
+ stackEnd_ = rhs.stackEnd_;
+ initialCapacity_ = rhs.initialCapacity_;
+
+ rhs.allocator_ = 0;
+ rhs.ownAllocator_ = 0;
+ rhs.stack_ = 0;
+ rhs.stackTop_ = 0;
+ rhs.stackEnd_ = 0;
+ rhs.initialCapacity_ = 0;
+ }
+ return *this;
+ }
+#endif
+
+ void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT {
+ internal::Swap(allocator_, rhs.allocator_);
+ internal::Swap(ownAllocator_, rhs.ownAllocator_);
+ internal::Swap(stack_, rhs.stack_);
+ internal::Swap(stackTop_, rhs.stackTop_);
+ internal::Swap(stackEnd_, rhs.stackEnd_);
+ internal::Swap(initialCapacity_, rhs.initialCapacity_);
+ }
+
+ void Clear() { stackTop_ = stack_; }
+
+ void ShrinkToFit() {
+ if (Empty()) {
+ // If the stack is empty, completely deallocate the memory.
+ Allocator::Free(stack_);
+ stack_ = 0;
+ stackTop_ = 0;
+ stackEnd_ = 0;
+ }
+ else
+ Resize(GetSize());
+ }
+
+    // Optimization note: keep this function small so that it can be force-inlined.
+    // Expansion runs very infrequently, so it is moved to a separate (probably non-inlined) function.
+ template<typename T>
+ RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) {
+ // Expand the stack if needed
+ if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_))
+ Expand<T>(count);
+ }
+
+ template<typename T>
+ RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) {
+ Reserve<T>(count);
+ return PushUnsafe<T>(count);
+ }
+
+ template<typename T>
+ RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) {
+ RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_);
+ T* ret = reinterpret_cast<T*>(stackTop_);
+ stackTop_ += sizeof(T) * count;
+ return ret;
+ }
+
+ template<typename T>
+ T* Pop(size_t count) {
+ RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T));
+ stackTop_ -= count * sizeof(T);
+ return reinterpret_cast<T*>(stackTop_);
+ }
+
+ template<typename T>
+ T* Top() {
+ RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
+ return reinterpret_cast<T*>(stackTop_ - sizeof(T));
+ }
+
+ template<typename T>
+ const T* Top() const {
+ RAPIDJSON_ASSERT(GetSize() >= sizeof(T));
+ return reinterpret_cast<T*>(stackTop_ - sizeof(T));
+ }
+
+ template<typename T>
+ T* End() { return reinterpret_cast<T*>(stackTop_); }
+
+ template<typename T>
+ const T* End() const { return reinterpret_cast<T*>(stackTop_); }
+
+ template<typename T>
+ T* Bottom() { return reinterpret_cast<T*>(stack_); }
+
+ template<typename T>
+ const T* Bottom() const { return reinterpret_cast<T*>(stack_); }
+
+ bool HasAllocator() const {
+ return allocator_ != 0;
+ }
+
+ Allocator& GetAllocator() {
+ RAPIDJSON_ASSERT(allocator_);
+ return *allocator_;
+ }
+
+ bool Empty() const { return stackTop_ == stack_; }
+ size_t GetSize() const { return static_cast<size_t>(stackTop_ - stack_); }
+ size_t GetCapacity() const { return static_cast<size_t>(stackEnd_ - stack_); }
+
+private:
+ template<typename T>
+ void Expand(size_t count) {
+ // Only expand the capacity if the current stack exists. Otherwise just create a stack with initial capacity.
+ size_t newCapacity;
+ if (stack_ == 0) {
+ if (!allocator_)
+ ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator());
+ newCapacity = initialCapacity_;
+ } else {
+ newCapacity = GetCapacity();
+ newCapacity += (newCapacity + 1) / 2;
+ }
+ size_t newSize = GetSize() + sizeof(T) * count;
+ if (newCapacity < newSize)
+ newCapacity = newSize;
+
+ Resize(newCapacity);
+ }
+
+ void Resize(size_t newCapacity) {
+ const size_t size = GetSize(); // Backup the current size
+ stack_ = static_cast<char*>(allocator_->Realloc(stack_, GetCapacity(), newCapacity));
+ stackTop_ = stack_ + size;
+ stackEnd_ = stack_ + newCapacity;
+ }
+
+ void Destroy() {
+ Allocator::Free(stack_);
+ RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack
+ }
+
+ // Prohibit copy constructor & assignment operator.
+ Stack(const Stack&);
+ Stack& operator=(const Stack&);
+
+ Allocator* allocator_;
+ Allocator* ownAllocator_;
+ char *stack_;
+ char *stackTop_;
+ char *stackEnd_;
+ size_t initialCapacity_;
+};
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_STACK_H_
diff --git a/include/rapidjson/internal/strfunc.h b/include/rapidjson/internal/strfunc.h
new file mode 100644
index 00000000..2edfae52
--- /dev/null
+++ b/include/rapidjson/internal/strfunc.h
@@ -0,0 +1,55 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_
+#define RAPIDJSON_INTERNAL_STRFUNC_H_
+
+#include "../stream.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom strlen() which works on different character types.
+/*! \tparam Ch Character type (e.g. char, wchar_t, short)
+ \param s Null-terminated input string.
+ \return Number of characters in the string.
+ \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints.
+*/
+template <typename Ch>
+inline SizeType StrLen(const Ch* s) {
+ const Ch* p = s;
+ while (*p) ++p;
+ return SizeType(p - s);
+}
+
+//! Returns the number of code points in an encoded string.
+template<typename Encoding>
+bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) {
+ GenericStringStream<Encoding> is(s);
+ const typename Encoding::Ch* end = s + length;
+ SizeType count = 0;
+ while (is.src_ < end) {
+ unsigned codepoint;
+ if (!Encoding::Decode(is, &codepoint))
+ return false;
+ count++;
+ }
+ *outCount = count;
+ return true;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_INTERNAL_STRFUNC_H_
diff --git a/include/rapidjson/internal/strtod.h b/include/rapidjson/internal/strtod.h
new file mode 100644
index 00000000..289c413b
--- /dev/null
+++ b/include/rapidjson/internal/strtod.h
@@ -0,0 +1,269 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_STRTOD_
+#define RAPIDJSON_STRTOD_
+
+#include "ieee754.h"
+#include "biginteger.h"
+#include "diyfp.h"
+#include "pow10.h"
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+inline double FastPath(double significand, int exp) {
+ if (exp < -308)
+ return 0.0;
+ else if (exp >= 0)
+ return significand * internal::Pow10(exp);
+ else
+ return significand / internal::Pow10(-exp);
+}
+
+inline double StrtodNormalPrecision(double d, int p) {
+ if (p < -308) {
+        // Split the scaling into two steps so that a subnormal result is not flushed to 0 by FastPath(d, p) when p < -308
+ d = FastPath(d, -308);
+ d = FastPath(d, p + 308);
+ }
+ else
+ d = FastPath(d, p);
+ return d;
+}
+
+template <typename T>
+inline T Min3(T a, T b, T c) {
+ T m = a;
+ if (m > b) m = b;
+ if (m > c) m = c;
+ return m;
+}
+
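+// Compares |d * 10^dExp - b| against half a ULP of b (h = 2^(bExp - 1)).
+// All three quantities are scaled by common powers of 2 and 5 so that they become
+// exact big integers before comparing; the result has the sign of
+// |d * 10^dExp - b| - h, like a three-way comparison.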
+inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) {
+ const Double db(b);
+ const uint64_t bInt = db.IntegerSignificand();
+ const int bExp = db.IntegerExponent();
+ const int hExp = bExp - 1;
+
+ int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0;
+
+ // Adjust for decimal exponent
+ if (dExp >= 0) {
+ dS_Exp2 += dExp;
+ dS_Exp5 += dExp;
+ }
+ else {
+ bS_Exp2 -= dExp;
+ bS_Exp5 -= dExp;
+ hS_Exp2 -= dExp;
+ hS_Exp5 -= dExp;
+ }
+
+ // Adjust for binary exponent
+ if (bExp >= 0)
+ bS_Exp2 += bExp;
+ else {
+ dS_Exp2 -= bExp;
+ hS_Exp2 -= bExp;
+ }
+
+ // Adjust for half ulp exponent
+ if (hExp >= 0)
+ hS_Exp2 += hExp;
+ else {
+ dS_Exp2 -= hExp;
+ bS_Exp2 -= hExp;
+ }
+
+ // Remove common power of two factor from all three scaled values
+ int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2);
+ dS_Exp2 -= common_Exp2;
+ bS_Exp2 -= common_Exp2;
+ hS_Exp2 -= common_Exp2;
+
+ BigInteger dS = d;
+ dS.MultiplyPow5(static_cast<unsigned>(dS_Exp5)) <<= static_cast<unsigned>(dS_Exp2);
+
+ BigInteger bS(bInt);
+ bS.MultiplyPow5(static_cast<unsigned>(bS_Exp5)) <<= static_cast<unsigned>(bS_Exp2);
+
+ BigInteger hS(1);
+ hS.MultiplyPow5(static_cast<unsigned>(hS_Exp5)) <<= static_cast<unsigned>(hS_Exp2);
+
+ BigInteger delta(0);
+ dS.Difference(bS, &delta);
+
+ return delta.Compare(hS);
+}
+
+inline bool StrtodFast(double d, int p, double* result) {
+ // Use fast path for string-to-double conversion if possible
+ // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/
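+    // e.g. "1e23": d = 1.0, p = 23 is rescaled to d = 10.0, p = 22, so the result
+    // is produced by a single multiplication with the exactly representable 10^22.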
+ if (p > 22 && p < 22 + 16) {
+ // Fast Path Cases In Disguise
+ d *= internal::Pow10(p - 22);
+ p = 22;
+ }
+
+ if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1
+ *result = FastPath(d, p);
+ return true;
+ }
+ else
+ return false;
+}
+
+// Compute an approximation and see if it is within 1/2 ULP
+inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) {
+ uint64_t significand = 0;
+ size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999
+ for (; i < length; i++) {
+ if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) ||
+ (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5'))
+ break;
+ significand = significand * 10u + static_cast<unsigned>(decimals[i] - '0');
+ }
+
+ if (i < length && decimals[i] >= '5') // Rounding
+ significand++;
+
+ size_t remaining = length - i;
+ const unsigned kUlpShift = 3;
+ const unsigned kUlp = 1 << kUlpShift;
+ int64_t error = (remaining == 0) ? 0 : kUlp / 2;
+
+ DiyFp v(significand, 0);
+ v = v.Normalize();
+ error <<= -v.e;
+
+ const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(i) + exp;
+
+ int actualExp;
+ DiyFp cachedPower = GetCachedPower10(dExp, &actualExp);
+ if (actualExp != dExp) {
+ static const DiyFp kPow10[] = {
+ DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1
+ DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2
+ DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3
+ DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4
+ DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5
+ DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6
+ DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7
+ };
+ int adjustment = dExp - actualExp - 1;
+ RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7);
+ v = v * kPow10[adjustment];
+        if (length + static_cast<unsigned>(adjustment) > 19u) // more digits than a 64-bit integer can hold exactly (19 decimal digits)
+ error += kUlp / 2;
+ }
+
+ v = v * cachedPower;
+
+ error += kUlp + (error == 0 ? 0 : 1);
+
+ const int oldExp = v.e;
+ v = v.Normalize();
+ error <<= oldExp - v.e;
+
+ const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e);
+ unsigned precisionSize = 64 - effectiveSignificandSize;
+ if (precisionSize + kUlpShift >= 64) {
+ unsigned scaleExp = (precisionSize + kUlpShift) - 63;
+ v.f >>= scaleExp;
+ v.e += scaleExp;
+ error = (error >> scaleExp) + 1 + static_cast<int>(kUlp);
+ precisionSize -= scaleExp;
+ }
+
+ DiyFp rounded(v.f >> precisionSize, v.e + static_cast<int>(precisionSize));
+ const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp;
+ const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp;
+ if (precisionBits >= halfWay + static_cast<unsigned>(error)) {
+ rounded.f++;
+ if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340)
+ rounded.f >>= 1;
+ rounded.e++;
+ }
+ }
+
+ *result = rounded.ToDouble();
+
+ return halfWay - static_cast<unsigned>(error) >= precisionBits || precisionBits >= halfWay + static_cast<unsigned>(error);
+}
+
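+// Exact fallback: uses a big-integer comparison to decide whether the DiyFp
+// approximation is already correctly rounded; otherwise it is bumped to the next
+// representable double, with ties broken towards an even significand.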
+inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) {
+ const BigInteger dInt(decimals, length);
+ const int dExp = static_cast<int>(decimalPosition) - static_cast<int>(length) + exp;
+ Double a(approx);
+ int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp);
+ if (cmp < 0)
+ return a.Value(); // within half ULP
+ else if (cmp == 0) {
+ // Round towards even
+ if (a.Significand() & 1)
+ return a.NextPositiveDouble();
+ else
+ return a.Value();
+ }
+    else // more than half ULP away; adjust to the next representable double
+ return a.NextPositiveDouble();
+}
+
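+// Full-precision conversion. The significand digits are passed as a decimal string;
+// for example (illustrative), "1.23e5" corresponds to decimals = "123", length = 3,
+// decimalPosition = 1, exp = 5, so that the value equals
+// decimals * 10^(decimalPosition - length + exp). d and p are the approximate
+// significand and decimal exponent used by the fast path (value ~= d * 10^p).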
+inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) {
+ RAPIDJSON_ASSERT(d >= 0.0);
+ RAPIDJSON_ASSERT(length >= 1);
+
+ double result;
+ if (StrtodFast(d, p, &result))
+ return result;
+
+ // Trim leading zeros
+ while (*decimals == '0' && length > 1) {
+ length--;
+ decimals++;
+ decimalPosition--;
+ }
+
+ // Trim trailing zeros
+ while (decimals[length - 1] == '0' && length > 1) {
+ length--;
+ decimalPosition--;
+ exp++;
+ }
+
+ // Trim right-most digits
+ const int kMaxDecimalDigit = 780;
+ if (static_cast<int>(length) > kMaxDecimalDigit) {
+ int delta = (static_cast<int>(length) - kMaxDecimalDigit);
+ exp += delta;
+ decimalPosition -= static_cast<unsigned>(delta);
+ length = kMaxDecimalDigit;
+ }
+
+ // If too small, underflow to zero
+ if (int(length) + exp < -324)
+ return 0.0;
+
+ if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result))
+ return result;
+
+ // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison
+ return StrtodBigInteger(result, decimals, length, decimalPosition, exp);
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#endif // RAPIDJSON_STRTOD_
diff --git a/include/rapidjson/internal/swap.h b/include/rapidjson/internal/swap.h
new file mode 100644
index 00000000..666e49f9
--- /dev/null
+++ b/include/rapidjson/internal/swap.h
@@ -0,0 +1,46 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_INTERNAL_SWAP_H_
+#define RAPIDJSON_INTERNAL_SWAP_H_
+
+#include "../rapidjson.h"
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(c++98-compat)
+#endif
+
+RAPIDJSON_NAMESPACE_BEGIN
+namespace internal {
+
+//! Custom swap() to avoid dependency on C++ <algorithm> header
+/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only.
+ \note This has the same semantics as std::swap().
+*/
+template <typename T>
+inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+} // namespace internal
+RAPIDJSON_NAMESPACE_END
+
+#if defined(__clang__)
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_INTERNAL_SWAP_H_