jsoncpp/src/lib_json/json_reader.cpp
Hans Johnson 3beadff472 PERF: readability container size empty
The emptiness of a container should be checked using the empty() method
instead of the size() method. It is not guaranteed that size() is a
constant-time function, and it is generally more efficient and also
shows clearer intent to use empty(). Furthermore some containers may
implement the empty() method but not implement the size() method. Using
empty() whenever possible makes it easier to switch to another container
in the future.

SRCDIR=/Users/johnsonhj/src/jsoncpp/ #My local SRC
BLDDIR=/Users/johnsonhj/src/jsoncpp/cmake-build-debug/ #My local BLD

cd /Users/johnsonhj/src/jsoncpp/cmake-build-debug/
run-clang-tidy.py -extra-arg=-D__clang__ -checks=-*,readability-container-size-empty  -header-filter=.* -fix
2019-01-15 18:30:49 -06:00
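
The kind of rewrite the check performs looks like the following sketch (illustrative only, using the errors_ member from this file; the snippet itself is not part of the committed source):

// Before: emptiness is tested through size(), which is not required to be O(1)
// for every container.
if (errors_.size() == 0)
  return true;
// After: empty() is constant time for standard containers and states the
// intent directly.
if (errors_.empty())
  return true;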


// Copyright 2007-2011 Baptiste Lepilleur and The JsonCpp Authors
// Copyright (C) 2016 InfoTeCS JSC. All rights reserved.
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#if !defined(JSON_IS_AMALGAMATION)
#include "json_tool.h"
#include <json/assertions.h>
#include <json/reader.h>
#include <json/value.h>
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <cassert>
#include <cstring>
#include <istream>
#include <limits>
#include <memory>
#include <set>
#include <sstream>
#include <utility>
#include <cstdio>
#if __cplusplus >= 201103L
#if !defined(sscanf)
#define sscanf std::sscanf
#endif
#endif //__cplusplus
#if defined(_MSC_VER)
#if !defined(_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES)
#define _CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES 1
#endif //_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES
#endif //_MSC_VER
#if defined(_MSC_VER)
// Disable warning about strdup being deprecated.
#pragma warning(disable : 4996)
#endif
// Define JSONCPP_DEPRECATED_STACK_LIMIT as an appropriate integer at compile
// time to change the stack limit
#if !defined(JSONCPP_DEPRECATED_STACK_LIMIT)
#define JSONCPP_DEPRECATED_STACK_LIMIT 1000
#endif
static size_t const stackLimit_g =
JSONCPP_DEPRECATED_STACK_LIMIT; // see readValue()
namespace Json {
#if __cplusplus >= 201103L || (defined(_CPPLIB_VER) && _CPPLIB_VER >= 520)
typedef std::unique_ptr<CharReader> CharReaderPtr;
#else
typedef std::auto_ptr<CharReader> CharReaderPtr;
#endif
// Implementation of class Features
// ////////////////////////////////
Features::Features()
: allowComments_(true), strictRoot_(false),
allowDroppedNullPlaceholders_(false), allowNumericKeys_(false) {}
Features Features::all() { return {}; }
Features Features::strictMode() {
Features features;
features.allowComments_ = false;
features.strictRoot_ = true;
features.allowDroppedNullPlaceholders_ = false;
features.allowNumericKeys_ = false;
return features;
}
// Implementation of class Reader
// ////////////////////////////////
bool Reader::containsNewLine(Reader::Location begin, Reader::Location end) {
for (; begin < end; ++begin)
if (*begin == '\n' || *begin == '\r')
return true;
return false;
}
// Class Reader
// //////////////////////////////////////////////////////////////////
Reader::Reader()
: errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
lastValue_(), commentsBefore_(), features_(Features::all()),
collectComments_() {}
Reader::Reader(const Features& features)
: errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
lastValue_(), commentsBefore_(), features_(features), collectComments_() {
}
bool Reader::parse(const std::string& document,
Value& root,
bool collectComments) {
document_.assign(document.begin(), document.end());
const char* begin = document_.c_str();
const char* end = begin + document_.length();
return parse(begin, end, root, collectComments);
}
bool Reader::parse(std::istream& is, Value& root, bool collectComments) {
// std::istream_iterator<char> begin(is);
// std::istream_iterator<char> end;
// Those would allow streamed input from a file, if parse() were a
// template function.
// Since JSONCPP_STRING is reference-counted, this at least does not
// create an extra copy.
JSONCPP_STRING doc;
std::getline(is, doc, (char)EOF);
return parse(doc.data(), doc.data() + doc.size(), root, collectComments);
}
bool Reader::parse(const char* beginDoc,
const char* endDoc,
Value& root,
bool collectComments) {
if (!features_.allowComments_) {
collectComments = false;
}
begin_ = beginDoc;
end_ = endDoc;
collectComments_ = collectComments;
current_ = begin_;
lastValueEnd_ = nullptr;
lastValue_ = nullptr;
commentsBefore_.clear();
errors_.clear();
while (!nodes_.empty())
nodes_.pop();
nodes_.push(&root);
bool successful = readValue();
Token token;
skipCommentTokens(token);
if (collectComments_ && !commentsBefore_.empty())
root.setComment(commentsBefore_, commentAfter);
if (features_.strictRoot_) {
if (!root.isArray() && !root.isObject()) {
// Set error location to start of doc; ideally it should be the first token
// found in the doc
token.type_ = tokenError;
token.start_ = beginDoc;
token.end_ = endDoc;
addError(
"A valid JSON document must be either an array or an object value.",
token);
return false;
}
}
return successful;
}
bool Reader::readValue() {
// readValue() may call itself only if it calls readObject() or readArray().
// These methods execute nodes_.push() just before and nodes_.pop() just
// after calling readValue(). parse() executes one nodes_.push(), so > instead
// of >=.
if (nodes_.size() > stackLimit_g)
throwRuntimeError("Exceeded stackLimit in readValue().");
Token token;
skipCommentTokens(token);
bool successful = true;
if (collectComments_ && !commentsBefore_.empty()) {
currentValue().setComment(commentsBefore_, commentBefore);
commentsBefore_.clear();
}
switch (token.type_) {
case tokenObjectBegin:
successful = readObject(token);
currentValue().setOffsetLimit(current_ - begin_);
break;
case tokenArrayBegin:
successful = readArray(token);
currentValue().setOffsetLimit(current_ - begin_);
break;
case tokenNumber:
successful = decodeNumber(token);
break;
case tokenString:
successful = decodeString(token);
break;
case tokenTrue: {
Value v(true);
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenFalse: {
Value v(false);
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenNull: {
Value v;
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenArraySeparator:
case tokenObjectEnd:
case tokenArrayEnd:
if (features_.allowDroppedNullPlaceholders_) {
// "Un-read" the current token and mark the current value as a null
// token.
current_--;
Value v;
currentValue().swapPayload(v);
currentValue().setOffsetStart(current_ - begin_ - 1);
currentValue().setOffsetLimit(current_ - begin_);
break;
} // Else, fall through...
default:
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return addError("Syntax error: value, object or array expected.", token);
}
if (collectComments_) {
lastValueEnd_ = current_;
lastValue_ = &currentValue();
}
return successful;
}
void Reader::skipCommentTokens(Token& token) {
if (features_.allowComments_) {
do {
readToken(token);
} while (token.type_ == tokenComment);
} else {
readToken(token);
}
}
bool Reader::readToken(Token& token) {
skipSpaces();
token.start_ = current_;
Char c = getNextChar();
bool ok = true;
switch (c) {
case '{':
token.type_ = tokenObjectBegin;
break;
case '}':
token.type_ = tokenObjectEnd;
break;
case '[':
token.type_ = tokenArrayBegin;
break;
case ']':
token.type_ = tokenArrayEnd;
break;
case '"':
token.type_ = tokenString;
ok = readString();
break;
case '/':
token.type_ = tokenComment;
ok = readComment();
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
case '-':
token.type_ = tokenNumber;
readNumber();
break;
case 't':
token.type_ = tokenTrue;
ok = match("rue", 3);
break;
case 'f':
token.type_ = tokenFalse;
ok = match("alse", 4);
break;
case 'n':
token.type_ = tokenNull;
ok = match("ull", 3);
break;
case ',':
token.type_ = tokenArraySeparator;
break;
case ':':
token.type_ = tokenMemberSeparator;
break;
case 0:
token.type_ = tokenEndOfStream;
break;
default:
ok = false;
break;
}
if (!ok)
token.type_ = tokenError;
token.end_ = current_;
return true;
}
void Reader::skipSpaces() {
while (current_ != end_) {
Char c = *current_;
if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
++current_;
else
break;
}
}
bool Reader::match(Location pattern, int patternLength) {
if (end_ - current_ < patternLength)
return false;
int index = patternLength;
while (index--)
if (current_[index] != pattern[index])
return false;
current_ += patternLength;
return true;
}
bool Reader::readComment() {
Location commentBegin = current_ - 1;
Char c = getNextChar();
bool successful = false;
if (c == '*')
successful = readCStyleComment();
else if (c == '/')
successful = readCppStyleComment();
if (!successful)
return false;
if (collectComments_) {
CommentPlacement placement = commentBefore;
if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
if (c != '*' || !containsNewLine(commentBegin, current_))
placement = commentAfterOnSameLine;
}
addComment(commentBegin, current_, placement);
}
return true;
}
JSONCPP_STRING Reader::normalizeEOL(Reader::Location begin,
Reader::Location end) {
JSONCPP_STRING normalized;
normalized.reserve(static_cast<size_t>(end - begin));
Reader::Location current = begin;
while (current != end) {
char c = *current++;
if (c == '\r') {
if (current != end && *current == '\n')
// convert dos EOL
++current;
// convert Mac EOL
normalized += '\n';
} else {
normalized += c;
}
}
return normalized;
}
void Reader::addComment(Location begin,
Location end,
CommentPlacement placement) {
assert(collectComments_);
const JSONCPP_STRING& normalized = normalizeEOL(begin, end);
if (placement == commentAfterOnSameLine) {
assert(lastValue_ != nullptr);
lastValue_->setComment(normalized, placement);
} else {
commentsBefore_ += normalized;
}
}
bool Reader::readCStyleComment() {
while ((current_ + 1) < end_) {
Char c = getNextChar();
if (c == '*' && *current_ == '/')
break;
}
return getNextChar() == '/';
}
bool Reader::readCppStyleComment() {
while (current_ != end_) {
Char c = getNextChar();
if (c == '\n')
break;
if (c == '\r') {
// Consume DOS EOL. It will be normalized in addComment.
if (current_ != end_ && *current_ == '\n')
getNextChar();
// Break on Mac OS 9 EOL.
break;
}
}
return true;
}
void Reader::readNumber() {
const char* p = current_;
char c = '0'; // stopgap for already consumed character
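// Each step below commits the scan position to current_ and then reads the
// next character, yielding '\0' once p reaches the end of the document.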
// integral part
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
// fractional part
if (c == '.') {
c = (current_ = p) < end_ ? *p++ : '\0';
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
}
// exponential part
if (c == 'e' || c == 'E') {
c = (current_ = p) < end_ ? *p++ : '\0';
if (c == '+' || c == '-')
c = (current_ = p) < end_ ? *p++ : '\0';
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
}
}
bool Reader::readString() {
Char c = '\0';
while (current_ != end_) {
c = getNextChar();
if (c == '\\')
getNextChar();
else if (c == '"')
break;
}
return c == '"';
}
bool Reader::readObject(Token& token) {
Token tokenName;
JSONCPP_STRING name;
Value init(objectValue);
currentValue().swapPayload(init);
currentValue().setOffsetStart(token.start_ - begin_);
while (readToken(tokenName)) {
bool initialTokenOk = true;
while (tokenName.type_ == tokenComment && initialTokenOk)
initialTokenOk = readToken(tokenName);
if (!initialTokenOk)
break;
if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
return true;
name.clear();
if (tokenName.type_ == tokenString) {
if (!decodeString(tokenName, name))
return recoverFromError(tokenObjectEnd);
} else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
Value numberName;
if (!decodeNumber(tokenName, numberName))
return recoverFromError(tokenObjectEnd);
name = JSONCPP_STRING(numberName.asCString());
} else {
break;
}
Token colon;
if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
return addErrorAndRecover("Missing ':' after object member name", colon,
tokenObjectEnd);
}
Value& value = currentValue()[name];
nodes_.push(&value);
bool ok = readValue();
nodes_.pop();
if (!ok) // error already set
return recoverFromError(tokenObjectEnd);
Token comma;
if (!readToken(comma) ||
(comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
comma.type_ != tokenComment)) {
return addErrorAndRecover("Missing ',' or '}' in object declaration",
comma, tokenObjectEnd);
}
bool finalizeTokenOk = true;
while (comma.type_ == tokenComment && finalizeTokenOk)
finalizeTokenOk = readToken(comma);
if (comma.type_ == tokenObjectEnd)
return true;
}
return addErrorAndRecover("Missing '}' or object member name", tokenName,
tokenObjectEnd);
}
bool Reader::readArray(Token& token) {
Value init(arrayValue);
currentValue().swapPayload(init);
currentValue().setOffsetStart(token.start_ - begin_);
skipSpaces();
if (current_ != end_ && *current_ == ']') // empty array
{
Token endArray;
readToken(endArray);
return true;
}
int index = 0;
for (;;) {
Value& value = currentValue()[index++];
nodes_.push(&value);
bool ok = readValue();
nodes_.pop();
if (!ok) // error already set
return recoverFromError(tokenArrayEnd);
Token currentToken;
// Accept Comment after last item in the array.
ok = readToken(currentToken);
while (currentToken.type_ == tokenComment && ok) {
ok = readToken(currentToken);
}
bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
currentToken.type_ != tokenArrayEnd);
if (!ok || badTokenType) {
return addErrorAndRecover("Missing ',' or ']' in array declaration",
currentToken, tokenArrayEnd);
}
if (currentToken.type_ == tokenArrayEnd)
break;
}
return true;
}
bool Reader::decodeNumber(Token& token) {
Value decoded;
if (!decodeNumber(token, decoded))
return false;
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool Reader::decodeNumber(Token& token, Value& decoded) {
// Attempts to parse the number as an integer. If the number is
// larger than the maximum supported value of an integer then
// we decode the number as a double.
Location current = token.start_;
bool isNegative = *current == '-';
if (isNegative)
++current;
// TODO: Help the compiler do the div and mod at compile time or get rid of
// them.
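// For a negative number the largest magnitude that still fits is
// |minLargestInt|, which equals maxLargestInt + 1.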
Value::LargestUInt maxIntegerValue =
isNegative ? Value::LargestUInt(Value::maxLargestInt) + 1
: Value::maxLargestUInt;
Value::LargestUInt threshold = maxIntegerValue / 10;
Value::LargestUInt value = 0;
while (current < token.end_) {
Char c = *current++;
if (c < '0' || c > '9')
return decodeDouble(token, decoded);
Value::UInt digit(static_cast<Value::UInt>(c - '0'));
if (value >= threshold) {
// We've hit or exceeded the max value divided by 10 (rounded down). If
// a) we've only just touched the limit, b) this is the last digit, and
// c) it's small enough to fit in that rounding delta, we're okay.
// Otherwise treat this number as a double to avoid overflow.
if (value > threshold || current != token.end_ ||
digit > maxIntegerValue % 10) {
return decodeDouble(token, decoded);
}
}
value = value * 10 + digit;
}
if (isNegative && value == maxIntegerValue)
decoded = Value::minLargestInt;
else if (isNegative)
decoded = -Value::LargestInt(value);
else if (value <= Value::LargestUInt(Value::maxInt))
decoded = Value::LargestInt(value);
else
decoded = value;
return true;
}
bool Reader::decodeDouble(Token& token) {
Value decoded;
if (!decodeDouble(token, decoded))
return false;
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool Reader::decodeDouble(Token& token, Value& decoded) {
double value = 0;
JSONCPP_STRING buffer(token.start_, token.end_);
JSONCPP_ISTRINGSTREAM is(buffer);
if (!(is >> value))
return addError("'" + JSONCPP_STRING(token.start_, token.end_) +
"' is not a number.",
token);
decoded = value;
return true;
}
bool Reader::decodeString(Token& token) {
JSONCPP_STRING decoded_string;
if (!decodeString(token, decoded_string))
return false;
Value decoded(decoded_string);
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool Reader::decodeString(Token& token, JSONCPP_STRING& decoded) {
decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
Location current = token.start_ + 1; // skip '"'
Location end = token.end_ - 1; // do not include '"'
while (current != end) {
Char c = *current++;
if (c == '"')
break;
else if (c == '\\') {
if (current == end)
return addError("Empty escape sequence in string", token, current);
Char escape = *current++;
switch (escape) {
case '"':
decoded += '"';
break;
case '/':
decoded += '/';
break;
case '\\':
decoded += '\\';
break;
case 'b':
decoded += '\b';
break;
case 'f':
decoded += '\f';
break;
case 'n':
decoded += '\n';
break;
case 'r':
decoded += '\r';
break;
case 't':
decoded += '\t';
break;
case 'u': {
unsigned int unicode;
if (!decodeUnicodeCodePoint(token, current, end, unicode))
return false;
decoded += codePointToUTF8(unicode);
} break;
default:
return addError("Bad escape sequence in string", token, current);
}
} else {
decoded += c;
}
}
return true;
}
bool Reader::decodeUnicodeCodePoint(Token& token,
Location& current,
Location end,
unsigned int& unicode) {
if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
return false;
if (unicode >= 0xD800 && unicode <= 0xDBFF) {
// surrogate pairs
if (end - current < 6)
return addError(
"additional six characters expected to parse unicode surrogate pair.",
token, current);
if (*(current++) == '\\' && *(current++) == 'u') {
unsigned int surrogatePair;
if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
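// Combine the high and low surrogates into one code point: take the low
// 10 bits of each and add the 0x10000 offset.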
unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
} else
return false;
} else
return addError("expecting another \\u token to begin the second half of "
"a unicode surrogate pair",
token, current);
}
return true;
}
bool Reader::decodeUnicodeEscapeSequence(Token& token,
Location& current,
Location end,
unsigned int& ret_unicode) {
if (end - current < 4)
return addError(
"Bad unicode escape sequence in string: four digits expected.", token,
current);
int unicode = 0;
for (int index = 0; index < 4; ++index) {
Char c = *current++;
unicode *= 16;
if (c >= '0' && c <= '9')
unicode += c - '0';
else if (c >= 'a' && c <= 'f')
unicode += c - 'a' + 10;
else if (c >= 'A' && c <= 'F')
unicode += c - 'A' + 10;
else
return addError(
"Bad unicode escape sequence in string: hexadecimal digit expected.",
token, current);
}
ret_unicode = static_cast<unsigned int>(unicode);
return true;
}
bool Reader::addError(const JSONCPP_STRING& message,
Token& token,
Location extra) {
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = extra;
errors_.push_back(info);
return false;
}
bool Reader::recoverFromError(TokenType skipUntilToken) {
size_t const errorCount = errors_.size();
Token skip;
for (;;) {
if (!readToken(skip))
errors_.resize(errorCount); // discard errors caused by recovery
if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
break;
}
errors_.resize(errorCount);
return false;
}
bool Reader::addErrorAndRecover(const JSONCPP_STRING& message,
Token& token,
TokenType skipUntilToken) {
addError(message, token);
return recoverFromError(skipUntilToken);
}
Value& Reader::currentValue() { return *(nodes_.top()); }
Reader::Char Reader::getNextChar() {
if (current_ == end_)
return 0;
return *current_++;
}
void Reader::getLocationLineAndColumn(Location location,
int& line,
int& column) const {
Location current = begin_;
Location lastLineStart = current;
line = 0;
while (current < location && current != end_) {
Char c = *current++;
if (c == '\r') {
if (*current == '\n')
++current;
lastLineStart = current;
++line;
} else if (c == '\n') {
lastLineStart = current;
++line;
}
}
// column & line start at 1
column = int(location - lastLineStart) + 1;
++line;
}
JSONCPP_STRING Reader::getLocationLineAndColumn(Location location) const {
int line, column;
getLocationLineAndColumn(location, line, column);
char buffer[18 + 16 + 16 + 1];
jsoncpp_snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
return buffer;
}
// Deprecated. Preserved for backward compatibility
JSONCPP_STRING Reader::getFormatedErrorMessages() const {
return getFormattedErrorMessages();
}
JSONCPP_STRING Reader::getFormattedErrorMessages() const {
JSONCPP_STRING formattedMessage;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end(); ++itError) {
const ErrorInfo& error = *itError;
formattedMessage +=
"* " + getLocationLineAndColumn(error.token_.start_) + "\n";
formattedMessage += " " + error.message_ + "\n";
if (error.extra_)
formattedMessage +=
"See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
}
return formattedMessage;
}
std::vector<Reader::StructuredError> Reader::getStructuredErrors() const {
std::vector<Reader::StructuredError> allErrors;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end(); ++itError) {
const ErrorInfo& error = *itError;
Reader::StructuredError structured;
structured.offset_start = error.token_.start_ - begin_;
structured.offset_limit = error.token_.end_ - begin_;
structured.message = error.message_;
allErrors.push_back(structured);
}
return allErrors;
}
bool Reader::pushError(const Value& value, const JSONCPP_STRING& message) {
ptrdiff_t const length = end_ - begin_;
if (value.getOffsetStart() > length || value.getOffsetLimit() > length)
return false;
Token token;
token.type_ = tokenError;
token.start_ = begin_ + value.getOffsetStart();
token.end_ = begin_ + value.getOffsetLimit();
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = nullptr;
errors_.push_back(info);
return true;
}
bool Reader::pushError(const Value& value,
const JSONCPP_STRING& message,
const Value& extra) {
ptrdiff_t const length = end_ - begin_;
if (value.getOffsetStart() > length || value.getOffsetLimit() > length ||
extra.getOffsetLimit() > length)
return false;
Token token;
token.type_ = tokenError;
token.start_ = begin_ + value.getOffsetStart();
token.end_ = begin_ + value.getOffsetLimit();
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = begin_ + extra.getOffsetStart();
errors_.push_back(info);
return true;
}
bool Reader::good() const { return errors_.empty(); }
// exact copy of Features
class OurFeatures {
public:
static OurFeatures all();
bool allowComments_;
bool strictRoot_;
bool allowDroppedNullPlaceholders_;
bool allowNumericKeys_;
bool allowSingleQuotes_;
bool failIfExtra_;
bool rejectDupKeys_;
bool allowSpecialFloats_;
int stackLimit_;
}; // OurFeatures
// exact copy of Implementation of class Features
// ////////////////////////////////
OurFeatures OurFeatures::all() { return {}; }
// Implementation of class Reader
// ////////////////////////////////
// exact copy of Reader, renamed to OurReader
class OurReader {
public:
typedef char Char;
typedef const Char* Location;
struct StructuredError {
ptrdiff_t offset_start;
ptrdiff_t offset_limit;
JSONCPP_STRING message;
};
OurReader(OurFeatures const& features);
bool parse(const char* beginDoc,
const char* endDoc,
Value& root,
bool collectComments = true);
JSONCPP_STRING getFormattedErrorMessages() const;
std::vector<StructuredError> getStructuredErrors() const;
bool pushError(const Value& value, const JSONCPP_STRING& message);
bool pushError(const Value& value,
const JSONCPP_STRING& message,
const Value& extra);
bool good() const;
private:
OurReader(OurReader const&); // no impl
void operator=(OurReader const&); // no impl
enum TokenType {
tokenEndOfStream = 0,
tokenObjectBegin,
tokenObjectEnd,
tokenArrayBegin,
tokenArrayEnd,
tokenString,
tokenNumber,
tokenTrue,
tokenFalse,
tokenNull,
tokenNaN,
tokenPosInf,
tokenNegInf,
tokenArraySeparator,
tokenMemberSeparator,
tokenComment,
tokenError
};
class Token {
public:
TokenType type_;
Location start_;
Location end_;
};
class ErrorInfo {
public:
Token token_;
JSONCPP_STRING message_;
Location extra_;
};
typedef std::deque<ErrorInfo> Errors;
bool readToken(Token& token);
void skipSpaces();
bool match(Location pattern, int patternLength);
bool readComment();
bool readCStyleComment();
bool readCppStyleComment();
bool readString();
bool readStringSingleQuote();
bool readNumber(bool checkInf);
bool readValue();
bool readObject(Token& token);
bool readArray(Token& token);
bool decodeNumber(Token& token);
bool decodeNumber(Token& token, Value& decoded);
bool decodeString(Token& token);
bool decodeString(Token& token, JSONCPP_STRING& decoded);
bool decodeDouble(Token& token);
bool decodeDouble(Token& token, Value& decoded);
bool decodeUnicodeCodePoint(Token& token,
Location& current,
Location end,
unsigned int& unicode);
bool decodeUnicodeEscapeSequence(Token& token,
Location& current,
Location end,
unsigned int& unicode);
bool
addError(const JSONCPP_STRING& message, Token& token, Location extra = nullptr);
bool recoverFromError(TokenType skipUntilToken);
bool addErrorAndRecover(const JSONCPP_STRING& message,
Token& token,
TokenType skipUntilToken);
void skipUntilSpace();
Value& currentValue();
Char getNextChar();
void
getLocationLineAndColumn(Location location, int& line, int& column) const;
JSONCPP_STRING getLocationLineAndColumn(Location location) const;
void addComment(Location begin, Location end, CommentPlacement placement);
void skipCommentTokens(Token& token);
static JSONCPP_STRING normalizeEOL(Location begin, Location end);
static bool containsNewLine(Location begin, Location end);
typedef std::stack<Value*> Nodes;
Nodes nodes_;
Errors errors_;
JSONCPP_STRING document_;
Location begin_;
Location end_;
Location current_;
Location lastValueEnd_;
Value* lastValue_;
JSONCPP_STRING commentsBefore_;
OurFeatures const features_;
bool collectComments_;
}; // OurReader
// complete copy of Reader impl, for OurReader
bool OurReader::containsNewLine(OurReader::Location begin,
OurReader::Location end) {
for (; begin < end; ++begin)
if (*begin == '\n' || *begin == '\r')
return true;
return false;
}
OurReader::OurReader(OurFeatures const& features)
: errors_(), document_(), begin_(), end_(), current_(), lastValueEnd_(),
lastValue_(), commentsBefore_(), features_(features), collectComments_() {
}
bool OurReader::parse(const char* beginDoc,
const char* endDoc,
Value& root,
bool collectComments) {
if (!features_.allowComments_) {
collectComments = false;
}
begin_ = beginDoc;
end_ = endDoc;
collectComments_ = collectComments;
current_ = begin_;
lastValueEnd_ = nullptr;
lastValue_ = nullptr;
commentsBefore_.clear();
errors_.clear();
while (!nodes_.empty())
nodes_.pop();
nodes_.push(&root);
bool successful = readValue();
Token token;
skipCommentTokens(token);
if (features_.failIfExtra_) {
if ((features_.strictRoot_ || token.type_ != tokenError) &&
token.type_ != tokenEndOfStream) {
addError("Extra non-whitespace after JSON value.", token);
return false;
}
}
if (collectComments_ && !commentsBefore_.empty())
root.setComment(commentsBefore_, commentAfter);
if (features_.strictRoot_) {
if (!root.isArray() && !root.isObject()) {
// Set error location to start of doc; ideally it should be the first token
// found in the doc
token.type_ = tokenError;
token.start_ = beginDoc;
token.end_ = endDoc;
addError(
"A valid JSON document must be either an array or an object value.",
token);
return false;
}
}
return successful;
}
bool OurReader::readValue() {
// To preserve the old behaviour we cast size_t to int.
if (static_cast<int>(nodes_.size()) > features_.stackLimit_)
throwRuntimeError("Exceeded stackLimit in readValue().");
Token token;
skipCommentTokens(token);
bool successful = true;
if (collectComments_ && !commentsBefore_.empty()) {
currentValue().setComment(commentsBefore_, commentBefore);
commentsBefore_.clear();
}
switch (token.type_) {
case tokenObjectBegin:
successful = readObject(token);
currentValue().setOffsetLimit(current_ - begin_);
break;
case tokenArrayBegin:
successful = readArray(token);
currentValue().setOffsetLimit(current_ - begin_);
break;
case tokenNumber:
successful = decodeNumber(token);
break;
case tokenString:
successful = decodeString(token);
break;
case tokenTrue: {
Value v(true);
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenFalse: {
Value v(false);
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenNull: {
Value v;
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenNaN: {
Value v(std::numeric_limits<double>::quiet_NaN());
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenPosInf: {
Value v(std::numeric_limits<double>::infinity());
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenNegInf: {
Value v(-std::numeric_limits<double>::infinity());
currentValue().swapPayload(v);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
} break;
case tokenArraySeparator:
case tokenObjectEnd:
case tokenArrayEnd:
if (features_.allowDroppedNullPlaceholders_) {
// "Un-read" the current token and mark the current value as a null
// token.
current_--;
Value v;
currentValue().swapPayload(v);
currentValue().setOffsetStart(current_ - begin_ - 1);
currentValue().setOffsetLimit(current_ - begin_);
break;
} // else, fall through ...
default:
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return addError("Syntax error: value, object or array expected.", token);
}
if (collectComments_) {
lastValueEnd_ = current_;
lastValue_ = &currentValue();
}
return successful;
}
void OurReader::skipCommentTokens(Token& token) {
if (features_.allowComments_) {
do {
readToken(token);
} while (token.type_ == tokenComment);
} else {
readToken(token);
}
}
bool OurReader::readToken(Token& token) {
skipSpaces();
token.start_ = current_;
Char c = getNextChar();
bool ok = true;
switch (c) {
case '{':
token.type_ = tokenObjectBegin;
break;
case '}':
token.type_ = tokenObjectEnd;
break;
case '[':
token.type_ = tokenArrayBegin;
break;
case ']':
token.type_ = tokenArrayEnd;
break;
case '"':
token.type_ = tokenString;
ok = readString();
break;
case '\'':
if (features_.allowSingleQuotes_) {
token.type_ = tokenString;
ok = readStringSingleQuote();
break;
} // else fall through
case '/':
token.type_ = tokenComment;
ok = readComment();
break;
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
token.type_ = tokenNumber;
readNumber(false);
break;
case '-':
if (readNumber(true)) {
token.type_ = tokenNumber;
} else {
token.type_ = tokenNegInf;
ok = features_.allowSpecialFloats_ && match("nfinity", 7);
}
break;
case 't':
token.type_ = tokenTrue;
ok = match("rue", 3);
break;
case 'f':
token.type_ = tokenFalse;
ok = match("alse", 4);
break;
case 'n':
token.type_ = tokenNull;
ok = match("ull", 3);
break;
case 'N':
if (features_.allowSpecialFloats_) {
token.type_ = tokenNaN;
ok = match("aN", 2);
} else {
ok = false;
}
break;
case 'I':
if (features_.allowSpecialFloats_) {
token.type_ = tokenPosInf;
ok = match("nfinity", 7);
} else {
ok = false;
}
break;
case ',':
token.type_ = tokenArraySeparator;
break;
case ':':
token.type_ = tokenMemberSeparator;
break;
case 0:
token.type_ = tokenEndOfStream;
break;
default:
ok = false;
break;
}
if (!ok)
token.type_ = tokenError;
token.end_ = current_;
return true;
}
void OurReader::skipSpaces() {
while (current_ != end_) {
Char c = *current_;
if (c == ' ' || c == '\t' || c == '\r' || c == '\n')
++current_;
else
break;
}
}
bool OurReader::match(Location pattern, int patternLength) {
if (end_ - current_ < patternLength)
return false;
int index = patternLength;
while (index--)
if (current_[index] != pattern[index])
return false;
current_ += patternLength;
return true;
}
bool OurReader::readComment() {
Location commentBegin = current_ - 1;
Char c = getNextChar();
bool successful = false;
if (c == '*')
successful = readCStyleComment();
else if (c == '/')
successful = readCppStyleComment();
if (!successful)
return false;
if (collectComments_) {
CommentPlacement placement = commentBefore;
if (lastValueEnd_ && !containsNewLine(lastValueEnd_, commentBegin)) {
if (c != '*' || !containsNewLine(commentBegin, current_))
placement = commentAfterOnSameLine;
}
addComment(commentBegin, current_, placement);
}
return true;
}
JSONCPP_STRING OurReader::normalizeEOL(OurReader::Location begin,
OurReader::Location end) {
JSONCPP_STRING normalized;
normalized.reserve(static_cast<size_t>(end - begin));
OurReader::Location current = begin;
while (current != end) {
char c = *current++;
if (c == '\r') {
if (current != end && *current == '\n')
// convert dos EOL
++current;
// convert Mac EOL
normalized += '\n';
} else {
normalized += c;
}
}
return normalized;
}
void OurReader::addComment(Location begin,
Location end,
CommentPlacement placement) {
assert(collectComments_);
const JSONCPP_STRING& normalized = normalizeEOL(begin, end);
if (placement == commentAfterOnSameLine) {
assert(lastValue_ != nullptr);
lastValue_->setComment(normalized, placement);
} else {
commentsBefore_ += normalized;
}
}
bool OurReader::readCStyleComment() {
while ((current_ + 1) < end_) {
Char c = getNextChar();
if (c == '*' && *current_ == '/')
break;
}
return getNextChar() == '/';
}
bool OurReader::readCppStyleComment() {
while (current_ != end_) {
Char c = getNextChar();
if (c == '\n')
break;
if (c == '\r') {
// Consume DOS EOL. It will be normalized in addComment.
if (current_ != end_ && *current_ == '\n')
getNextChar();
// Break on Mac OS 9 EOL.
break;
}
}
return true;
}
bool OurReader::readNumber(bool checkInf) {
const char* p = current_;
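// A leading 'I' here is not a digit; report failure so readToken() can try
// to match "-Infinity" instead.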
if (checkInf && p != end_ && *p == 'I') {
current_ = ++p;
return false;
}
char c = '0'; // stopgap for already consumed character
// integral part
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
// fractional part
if (c == '.') {
c = (current_ = p) < end_ ? *p++ : '\0';
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
}
// exponential part
if (c == 'e' || c == 'E') {
c = (current_ = p) < end_ ? *p++ : '\0';
if (c == '+' || c == '-')
c = (current_ = p) < end_ ? *p++ : '\0';
while (c >= '0' && c <= '9')
c = (current_ = p) < end_ ? *p++ : '\0';
}
return true;
}
bool OurReader::readString() {
Char c = 0;
while (current_ != end_) {
c = getNextChar();
if (c == '\\')
getNextChar();
else if (c == '"')
break;
}
return c == '"';
}
bool OurReader::readStringSingleQuote() {
Char c = 0;
while (current_ != end_) {
c = getNextChar();
if (c == '\\')
getNextChar();
else if (c == '\'')
break;
}
return c == '\'';
}
bool OurReader::readObject(Token& token) {
Token tokenName;
JSONCPP_STRING name;
Value init(objectValue);
currentValue().swapPayload(init);
currentValue().setOffsetStart(token.start_ - begin_);
while (readToken(tokenName)) {
bool initialTokenOk = true;
while (tokenName.type_ == tokenComment && initialTokenOk)
initialTokenOk = readToken(tokenName);
if (!initialTokenOk)
break;
if (tokenName.type_ == tokenObjectEnd && name.empty()) // empty object
return true;
name.clear();
if (tokenName.type_ == tokenString) {
if (!decodeString(tokenName, name))
return recoverFromError(tokenObjectEnd);
} else if (tokenName.type_ == tokenNumber && features_.allowNumericKeys_) {
Value numberName;
if (!decodeNumber(tokenName, numberName))
return recoverFromError(tokenObjectEnd);
name = numberName.asString();
} else {
break;
}
Token colon;
if (!readToken(colon) || colon.type_ != tokenMemberSeparator) {
return addErrorAndRecover("Missing ':' after object member name", colon,
tokenObjectEnd);
}
if (name.length() >= (1U << 30))
throwRuntimeError("keylength >= 2^30");
if (features_.rejectDupKeys_ && currentValue().isMember(name)) {
JSONCPP_STRING msg = "Duplicate key: '" + name + "'";
return addErrorAndRecover(msg, tokenName, tokenObjectEnd);
}
Value& value = currentValue()[name];
nodes_.push(&value);
bool ok = readValue();
nodes_.pop();
if (!ok) // error already set
return recoverFromError(tokenObjectEnd);
Token comma;
if (!readToken(comma) ||
(comma.type_ != tokenObjectEnd && comma.type_ != tokenArraySeparator &&
comma.type_ != tokenComment)) {
return addErrorAndRecover("Missing ',' or '}' in object declaration",
comma, tokenObjectEnd);
}
bool finalizeTokenOk = true;
while (comma.type_ == tokenComment && finalizeTokenOk)
finalizeTokenOk = readToken(comma);
if (comma.type_ == tokenObjectEnd)
return true;
}
return addErrorAndRecover("Missing '}' or object member name", tokenName,
tokenObjectEnd);
}
bool OurReader::readArray(Token& token) {
Value init(arrayValue);
currentValue().swapPayload(init);
currentValue().setOffsetStart(token.start_ - begin_);
skipSpaces();
if (current_ != end_ && *current_ == ']') // empty array
{
Token endArray;
readToken(endArray);
return true;
}
int index = 0;
for (;;) {
Value& value = currentValue()[index++];
nodes_.push(&value);
bool ok = readValue();
nodes_.pop();
if (!ok) // error already set
return recoverFromError(tokenArrayEnd);
Token currentToken;
// Accept Comment after last item in the array.
ok = readToken(currentToken);
while (currentToken.type_ == tokenComment && ok) {
ok = readToken(currentToken);
}
bool badTokenType = (currentToken.type_ != tokenArraySeparator &&
currentToken.type_ != tokenArrayEnd);
if (!ok || badTokenType) {
return addErrorAndRecover("Missing ',' or ']' in array declaration",
currentToken, tokenArrayEnd);
}
if (currentToken.type_ == tokenArrayEnd)
break;
}
return true;
}
bool OurReader::decodeNumber(Token& token) {
Value decoded;
if (!decodeNumber(token, decoded))
return false;
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool OurReader::decodeNumber(Token& token, Value& decoded) {
// Attempts to parse the number as an integer. If the number is
// larger than the maximum supported value of an integer then
// we decode the number as a double.
Location current = token.start_;
bool isNegative = *current == '-';
if (isNegative)
++current;
// TODO: Help the compiler do the div and mod at compile time or get rid of
// them.
Value::LargestUInt maxIntegerValue =
isNegative ? Value::LargestUInt(Value::minLargestInt)
: Value::maxLargestUInt;
Value::LargestUInt threshold = maxIntegerValue / 10;
Value::LargestUInt value = 0;
while (current < token.end_) {
Char c = *current++;
if (c < '0' || c > '9')
return decodeDouble(token, decoded);
Value::UInt digit(static_cast<Value::UInt>(c - '0'));
if (value >= threshold) {
// We've hit or exceeded the max value divided by 10 (rounded down). If
// a) we've only just touched the limit, b) this is the last digit, and
// c) it's small enough to fit in that rounding delta, we're okay.
// Otherwise treat this number as a double to avoid overflow.
if (value > threshold || current != token.end_ ||
digit > maxIntegerValue % 10) {
return decodeDouble(token, decoded);
}
}
value = value * 10 + digit;
}
if (isNegative)
decoded = -Value::LargestInt(value);
else if (value <= Value::LargestUInt(Value::maxInt))
decoded = Value::LargestInt(value);
else
decoded = value;
return true;
}
bool OurReader::decodeDouble(Token& token) {
Value decoded;
if (!decodeDouble(token, decoded))
return false;
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool OurReader::decodeDouble(Token& token, Value& decoded) {
double value = 0;
const int bufferSize = 32;
int count;
ptrdiff_t const length = token.end_ - token.start_;
// Sanity check to avoid buffer overflow exploits.
if (length < 0) {
return addError("Unable to parse token length", token);
}
size_t const ulength = static_cast<size_t>(length);
// Avoid using a string constant for the format control string given to
// sscanf, as this can cause hard to debug crashes on OS X. See here for more
// info:
//
// http://developer.apple.com/library/mac/#DOCUMENTATION/DeveloperTools/gcc-4.0.1/gcc/Incompatibilities.html
char format[] = "%lf";
if (length <= bufferSize) {
Char buffer[bufferSize + 1];
memcpy(buffer, token.start_, ulength);
buffer[length] = 0;
fixNumericLocaleInput(buffer, buffer + length);
count = sscanf(buffer, format, &value);
} else {
JSONCPP_STRING buffer(token.start_, token.end_);
count = sscanf(buffer.c_str(), format, &value);
}
if (count != 1)
return addError("'" + JSONCPP_STRING(token.start_, token.end_) +
"' is not a number.",
token);
decoded = value;
return true;
}
bool OurReader::decodeString(Token& token) {
JSONCPP_STRING decoded_string;
if (!decodeString(token, decoded_string))
return false;
Value decoded(decoded_string);
currentValue().swapPayload(decoded);
currentValue().setOffsetStart(token.start_ - begin_);
currentValue().setOffsetLimit(token.end_ - begin_);
return true;
}
bool OurReader::decodeString(Token& token, JSONCPP_STRING& decoded) {
decoded.reserve(static_cast<size_t>(token.end_ - token.start_ - 2));
Location current = token.start_ + 1; // skip '"'
Location end = token.end_ - 1; // do not include '"'
while (current != end) {
Char c = *current++;
if (c == '"')
break;
else if (c == '\\') {
if (current == end)
return addError("Empty escape sequence in string", token, current);
Char escape = *current++;
switch (escape) {
case '"':
decoded += '"';
break;
case '/':
decoded += '/';
break;
case '\\':
decoded += '\\';
break;
case 'b':
decoded += '\b';
break;
case 'f':
decoded += '\f';
break;
case 'n':
decoded += '\n';
break;
case 'r':
decoded += '\r';
break;
case 't':
decoded += '\t';
break;
case 'u': {
unsigned int unicode;
if (!decodeUnicodeCodePoint(token, current, end, unicode))
return false;
decoded += codePointToUTF8(unicode);
} break;
default:
return addError("Bad escape sequence in string", token, current);
}
} else {
decoded += c;
}
}
return true;
}
bool OurReader::decodeUnicodeCodePoint(Token& token,
Location& current,
Location end,
unsigned int& unicode) {
if (!decodeUnicodeEscapeSequence(token, current, end, unicode))
return false;
if (unicode >= 0xD800 && unicode <= 0xDBFF) {
// surrogate pairs
if (end - current < 6)
return addError(
"additional six characters expected to parse unicode surrogate pair.",
token, current);
if (*(current++) == '\\' && *(current++) == 'u') {
unsigned int surrogatePair;
if (decodeUnicodeEscapeSequence(token, current, end, surrogatePair)) {
unicode = 0x10000 + ((unicode & 0x3FF) << 10) + (surrogatePair & 0x3FF);
} else
return false;
} else
return addError("expecting another \\u token to begin the second half of "
"a unicode surrogate pair",
token, current);
}
return true;
}
bool OurReader::decodeUnicodeEscapeSequence(Token& token,
Location& current,
Location end,
unsigned int& ret_unicode) {
if (end - current < 4)
return addError(
"Bad unicode escape sequence in string: four digits expected.", token,
current);
int unicode = 0;
for (int index = 0; index < 4; ++index) {
Char c = *current++;
unicode *= 16;
if (c >= '0' && c <= '9')
unicode += c - '0';
else if (c >= 'a' && c <= 'f')
unicode += c - 'a' + 10;
else if (c >= 'A' && c <= 'F')
unicode += c - 'A' + 10;
else
return addError(
"Bad unicode escape sequence in string: hexadecimal digit expected.",
token, current);
}
ret_unicode = static_cast<unsigned int>(unicode);
return true;
}
bool OurReader::addError(const JSONCPP_STRING& message,
Token& token,
Location extra) {
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = extra;
errors_.push_back(info);
return false;
}
bool OurReader::recoverFromError(TokenType skipUntilToken) {
size_t errorCount = errors_.size();
Token skip;
for (;;) {
if (!readToken(skip))
errors_.resize(errorCount); // discard errors caused by recovery
if (skip.type_ == skipUntilToken || skip.type_ == tokenEndOfStream)
break;
}
errors_.resize(errorCount);
return false;
}
bool OurReader::addErrorAndRecover(const JSONCPP_STRING& message,
Token& token,
TokenType skipUntilToken) {
addError(message, token);
return recoverFromError(skipUntilToken);
}
Value& OurReader::currentValue() { return *(nodes_.top()); }
OurReader::Char OurReader::getNextChar() {
if (current_ == end_)
return 0;
return *current_++;
}
void OurReader::getLocationLineAndColumn(Location location,
int& line,
int& column) const {
Location current = begin_;
Location lastLineStart = current;
line = 0;
while (current < location && current != end_) {
Char c = *current++;
if (c == '\r') {
if (*current == '\n')
++current;
lastLineStart = current;
++line;
} else if (c == '\n') {
lastLineStart = current;
++line;
}
}
// column & line start at 1
column = int(location - lastLineStart) + 1;
++line;
}
JSONCPP_STRING OurReader::getLocationLineAndColumn(Location location) const {
int line, column;
getLocationLineAndColumn(location, line, column);
char buffer[18 + 16 + 16 + 1];
jsoncpp_snprintf(buffer, sizeof(buffer), "Line %d, Column %d", line, column);
return buffer;
}
JSONCPP_STRING OurReader::getFormattedErrorMessages() const {
JSONCPP_STRING formattedMessage;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end(); ++itError) {
const ErrorInfo& error = *itError;
formattedMessage +=
"* " + getLocationLineAndColumn(error.token_.start_) + "\n";
formattedMessage += " " + error.message_ + "\n";
if (error.extra_)
formattedMessage +=
"See " + getLocationLineAndColumn(error.extra_) + " for detail.\n";
}
return formattedMessage;
}
std::vector<OurReader::StructuredError> OurReader::getStructuredErrors() const {
std::vector<OurReader::StructuredError> allErrors;
for (Errors::const_iterator itError = errors_.begin();
itError != errors_.end(); ++itError) {
const ErrorInfo& error = *itError;
OurReader::StructuredError structured;
structured.offset_start = error.token_.start_ - begin_;
structured.offset_limit = error.token_.end_ - begin_;
structured.message = error.message_;
allErrors.push_back(structured);
}
return allErrors;
}
bool OurReader::pushError(const Value& value, const JSONCPP_STRING& message) {
ptrdiff_t length = end_ - begin_;
if (value.getOffsetStart() > length || value.getOffsetLimit() > length)
return false;
Token token;
token.type_ = tokenError;
token.start_ = begin_ + value.getOffsetStart();
token.end_ = begin_ + value.getOffsetLimit();
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = nullptr;
errors_.push_back(info);
return true;
}
bool OurReader::pushError(const Value& value,
const JSONCPP_STRING& message,
const Value& extra) {
ptrdiff_t length = end_ - begin_;
if (value.getOffsetStart() > length || value.getOffsetLimit() > length ||
extra.getOffsetLimit() > length)
return false;
Token token;
token.type_ = tokenError;
token.start_ = begin_ + value.getOffsetStart();
token.end_ = begin_ + value.getOffsetLimit();
ErrorInfo info;
info.token_ = token;
info.message_ = message;
info.extra_ = begin_ + extra.getOffsetStart();
errors_.push_back(info);
return true;
}
bool OurReader::good() const { return errors_.empty(); }
class OurCharReader : public CharReader {
bool const collectComments_;
OurReader reader_;
public:
OurCharReader(bool collectComments, OurFeatures const& features)
: collectComments_(collectComments), reader_(features) {}
bool parse(char const* beginDoc,
char const* endDoc,
Value* root,
JSONCPP_STRING* errs) override {
bool ok = reader_.parse(beginDoc, endDoc, *root, collectComments_);
if (errs) {
*errs = reader_.getFormattedErrorMessages();
}
return ok;
}
};
CharReaderBuilder::CharReaderBuilder() { setDefaults(&settings_); }
CharReaderBuilder::~CharReaderBuilder() {}
CharReader* CharReaderBuilder::newCharReader() const {
bool collectComments = settings_["collectComments"].asBool();
OurFeatures features = OurFeatures::all();
features.allowComments_ = settings_["allowComments"].asBool();
features.strictRoot_ = settings_["strictRoot"].asBool();
features.allowDroppedNullPlaceholders_ =
settings_["allowDroppedNullPlaceholders"].asBool();
features.allowNumericKeys_ = settings_["allowNumericKeys"].asBool();
features.allowSingleQuotes_ = settings_["allowSingleQuotes"].asBool();
features.stackLimit_ = settings_["stackLimit"].asInt();
features.failIfExtra_ = settings_["failIfExtra"].asBool();
features.rejectDupKeys_ = settings_["rejectDupKeys"].asBool();
features.allowSpecialFloats_ = settings_["allowSpecialFloats"].asBool();
return new OurCharReader(collectComments, features);
}
static void getValidReaderKeys(std::set<JSONCPP_STRING>* valid_keys) {
valid_keys->clear();
valid_keys->insert("collectComments");
valid_keys->insert("allowComments");
valid_keys->insert("strictRoot");
valid_keys->insert("allowDroppedNullPlaceholders");
valid_keys->insert("allowNumericKeys");
valid_keys->insert("allowSingleQuotes");
valid_keys->insert("stackLimit");
valid_keys->insert("failIfExtra");
valid_keys->insert("rejectDupKeys");
valid_keys->insert("allowSpecialFloats");
}
bool CharReaderBuilder::validate(Json::Value* invalid) const {
Json::Value my_invalid;
if (!invalid)
invalid = &my_invalid; // so we do not need to test for NULL
Json::Value& inv = *invalid;
std::set<JSONCPP_STRING> valid_keys;
getValidReaderKeys(&valid_keys);
Value::Members keys = settings_.getMemberNames();
size_t n = keys.size();
for (size_t i = 0; i < n; ++i) {
JSONCPP_STRING const& key = keys[i];
if (valid_keys.find(key) == valid_keys.end()) {
inv[key] = settings_[key];
}
}
return inv.empty();
}
Value& CharReaderBuilder::operator[](const JSONCPP_STRING& key) {
return settings_[key];
}
// static
void CharReaderBuilder::strictMode(Json::Value* settings) {
//! [CharReaderBuilderStrictMode]
(*settings)["allowComments"] = false;
(*settings)["strictRoot"] = true;
(*settings)["allowDroppedNullPlaceholders"] = false;
(*settings)["allowNumericKeys"] = false;
(*settings)["allowSingleQuotes"] = false;
(*settings)["stackLimit"] = 1000;
(*settings)["failIfExtra"] = true;
(*settings)["rejectDupKeys"] = true;
(*settings)["allowSpecialFloats"] = false;
//! [CharReaderBuilderStrictMode]
}
// static
void CharReaderBuilder::setDefaults(Json::Value* settings) {
//! [CharReaderBuilderDefaults]
(*settings)["collectComments"] = true;
(*settings)["allowComments"] = true;
(*settings)["strictRoot"] = false;
(*settings)["allowDroppedNullPlaceholders"] = false;
(*settings)["allowNumericKeys"] = false;
(*settings)["allowSingleQuotes"] = false;
(*settings)["stackLimit"] = 1000;
(*settings)["failIfExtra"] = false;
(*settings)["rejectDupKeys"] = false;
(*settings)["allowSpecialFloats"] = false;
//! [CharReaderBuilderDefaults]
}
//////////////////////////////////
// global functions
bool parseFromStream(CharReader::Factory const& fact,
JSONCPP_ISTREAM& sin,
Value* root,
JSONCPP_STRING* errs) {
JSONCPP_OSTRINGSTREAM ssin;
ssin << sin.rdbuf();
JSONCPP_STRING doc = ssin.str();
char const* begin = doc.data();
char const* end = begin + doc.size();
// Note that we do not actually need a null-terminator.
CharReaderPtr const reader(fact.newCharReader());
return reader->parse(begin, end, root, errs);
}
JSONCPP_ISTREAM& operator>>(JSONCPP_ISTREAM& sin, Value& root) {
CharReaderBuilder b;
JSONCPP_STRING errs;
bool ok = parseFromStream(b, sin, &root, &errs);
if (!ok) {
throwRuntimeError(errs);
}
return sin;
}
} // namespace Json