From 8457c14c448af1dcee154ce9532c85eb945dd9e8 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Tue, 18 Jun 2024 17:11:47 -0700
Subject: [PATCH 01/10] Add some support for underscore separators.
---
core/lexer.cpp | 43 ++++++++++++++++++++++++++++++++++++++++++-
core/lexer_test.cpp | 2 ++
2 files changed, 44 insertions(+), 1 deletion(-)
diff --git a/core/lexer.cpp b/core/lexer.cpp
index 31599f4d7..70c11ac13 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -229,7 +229,8 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
AFTER_DIGIT,
AFTER_E,
AFTER_EXP_SIGN,
- AFTER_EXP_DIGIT
+ AFTER_EXP_DIGIT,
+ AFTER_UNDERSCORE
} state;
std::string r;
@@ -262,6 +263,8 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case 'e':
case 'E': state = AFTER_E; break;
+ case '_': state = AFTER_UNDERSCORE; goto skip_char;
+
default: goto end;
}
break;
@@ -284,6 +287,8 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case '8':
case '9': state = AFTER_ONE_TO_NINE; break;
+ case '_': state = AFTER_UNDERSCORE; goto skip_char;
+
default: goto end;
}
break;
@@ -325,6 +330,8 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case '8':
case '9': state = AFTER_DIGIT; break;
+ //case '_': state = AFTER_UNDERSCORE; goto skip_char;
+
default: goto end;
}
break;
@@ -386,12 +393,46 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case '7':
case '8':
case '9': state = AFTER_EXP_DIGIT; break;
+
+ case '_': state = AFTER_UNDERSCORE; goto skip_char;
default: goto end;
}
break;
+
+ case AFTER_UNDERSCORE:
+ switch (*c) {
+ case '_': {
+ // Can't do repeated _s.
+ std::stringstream ss;
+ ss << "couldn't lex number, multiple consecutive _'s: " << *c;
+ throw StaticError(filename, begin, ss.str());
+ }
+
+ // The only valid transition from _ is to a digit.
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': state = AFTER_EXP_DIGIT; break;
+
+ default: {
+ std::stringstream ss;
+ ss << "couldn't lex number, junk after _: " << *c;
+ throw StaticError(filename, begin, ss.str());
+ }
+ }
+ break;
}
r += *c;
+
+ skip_char:
c++;
}
end:
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index 5b843e580..ee52bcce4 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -115,6 +115,8 @@ TEST(Lexer, TestNumbers)
"1e+!",
{},
"number 1e+!:1:1: couldn't lex number, junk after exponent sign: !");
+
+ testLex("number 123_456", "123_456", {Token(Token::Kind::NUMBER, "123456")}, "");
}
TEST(Lexer, TestDoubleStrings)
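The mechanism behind this first patch: the lexing loop appends each accepted character to the token text r, so the new AFTER_UNDERSCORE state discards a separator by jumping to the skip_char label, which advances the input pointer without the append. A minimal standalone sketch of that idea, with a hypothetical strip_digit_separators helper standing in for the real lex_number:

    #include <cassert>
    #include <string>

    // Hypothetical illustration only: remove '_' from a numeric spelling the same
    // way the skip_char label avoids appending it to the token text r.
    static std::string strip_digit_separators(const std::string &lexeme)
    {
        std::string r;
        for (char ch : lexeme) {
            if (ch == '_')
                continue;  // consume the separator, append nothing
            r += ch;
        }
        return r;
    }

    int main()
    {
        assert(strip_digit_separators("123_456") == "123456");
        assert(strip_digit_separators("9.109_383_56e-31") == "9.10938356e-31");
        return 0;
    }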
From 8d19efd3a27c485342feb3a42d9b8f051f12664f Mon Sep 17 00:00:00 2001
From: David Grant
Date: Wed, 19 Jun 2024 07:19:51 -0700
Subject: [PATCH 02/10] Tests and fixes.
---
core/lexer.cpp | 43 ++++++++++++++++++++++++++++++++++---------
core/lexer_test.cpp | 20 ++++++++++++++++++++
2 files changed, 54 insertions(+), 9 deletions(-)
diff --git a/core/lexer.cpp b/core/lexer.cpp
index 70c11ac13..a3f1bfd80 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -230,7 +230,8 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
AFTER_E,
AFTER_EXP_SIGN,
AFTER_EXP_DIGIT,
- AFTER_UNDERSCORE
+ AFTER_UNDERSCORE,
+ AFTER_EXP_UNDERSCORE
} state;
std::string r;
@@ -330,12 +331,39 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case '8':
case '9': state = AFTER_DIGIT; break;
- //case '_': state = AFTER_UNDERSCORE; goto skip_char;
+ case '_': state = AFTER_UNDERSCORE; goto skip_char;
default: goto end;
}
break;
+ case AFTER_UNDERSCORE:
+ switch (*c) {
+ case '_': {
+ throw StaticError(filename, begin, "couldn't lex number, multiple consecutive _'s");
+ }
+
+ // The only valid transition from _ is to a digit.
+
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': state = AFTER_ONE_TO_NINE; break;
+
+ default: {
+ std::stringstream ss;
+ ss << "couldn't lex number, junk after _: " << *c;
+ throw StaticError(filename, begin, ss.str());
+ }
+ }
+ break;
+
case AFTER_E:
switch (*c) {
case '+':
@@ -394,19 +422,16 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case '8':
case '9': state = AFTER_EXP_DIGIT; break;
- case '_': state = AFTER_UNDERSCORE; goto skip_char;
+ case '_': state = AFTER_EXP_UNDERSCORE; goto skip_char;
default: goto end;
}
break;
- case AFTER_UNDERSCORE:
+ case AFTER_EXP_UNDERSCORE:
switch (*c) {
case '_': {
- // Can't do repeated _s.
- std::stringstream ss;
- ss << "couldn't lex number, multiple consecutive _'s: " << *c;
- throw StaticError(filename, begin, ss.str());
+ throw StaticError(filename, begin, "couldn't lex number, multiple consecutive _'s");
}
// The only valid transition from _ is to a digit.
@@ -432,7 +457,7 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
}
r += *c;
- skip_char:
+skip_char:
c++;
}
end:
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index ee52bcce4..933f37164 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -115,8 +115,27 @@ TEST(Lexer, TestNumbers)
"1e+!",
{},
"number 1e+!:1:1: couldn't lex number, junk after exponent sign: !");
+}
+TEST(Lexer, TestNumbersWithSeparators)
+{
testLex("number 123_456", "123_456", {Token(Token::Kind::NUMBER, "123456")}, "");
+ testLex("number 1_750_000", "1_750_000", {Token(Token::Kind::NUMBER, "1750000")}, "");
+ testLex("number 1_2_3", "1_2_3", {Token(Token::Kind::NUMBER, "123")}, "");
+ testLex("number 3.141_592", "3.141_592", {Token(Token::Kind::NUMBER, "3.141592")}, "");
+
+ testLex("number 1_2.0", "1_2.0", {Token(Token::Kind::NUMBER, "12.0")}, "");
+ testLex("number 0e1_01", "0e1_01", {Token(Token::Kind::NUMBER, "0e101")}, "");
+ testLex("number 10_10e3", "10_10e3", {Token(Token::Kind::NUMBER, "1010e3")}, "");
+ testLex("number 2_3e1_2", "2_3e1_2", {Token(Token::Kind::NUMBER, "23e12")}, "");
+ testLex("number 1.1_2e100", "1.1_2e100", {Token(Token::Kind::NUMBER, "1.12e100")}, "");
+ testLex("number 1.1e-10_1", "1.1e-10_1", {Token(Token::Kind::NUMBER, "1.1e-101")}, "");
+
+ testLex("number 123456_!", "123456_!", {}, "number 123456_!:1:1: couldn't lex number, junk after _: !");
+ testLex("number 123__456",
+ "123__456",
+ {},
+ "number 123__456:1:1: couldn't lex number, multiple consecutive _'s");
}
TEST(Lexer, TestDoubleStrings)
@@ -330,6 +349,7 @@ TEST(Lexer, TestIdentifier)
"foo bar123",
{Token(Token::Kind::IDENTIFIER, "foo"), Token(Token::Kind::IDENTIFIER, "bar123")},
"");
+ testLex("identifier _123", "_123", {Token(Token::Kind::IDENTIFIER, "_123")}, "");
}
TEST(Lexer, TestComments)
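The fix in this patch is the split into AFTER_UNDERSCORE and AFTER_EXP_UNDERSCORE: the digit that follows a separator must return the machine to the part of the number it was already in, so 1_2.0 still accepts the decimal point while 2_3e1_2 stays in the exponent. A rough standalone validator sketching that rule, hypothetical code rather than the lexer itself:

    #include <cassert>
    #include <cctype>
    #include <cstddef>
    #include <string>

    // Hypothetical checker, illustration only: '_' is legal only between two digits,
    // and the digit after it continues whichever part (mantissa or exponent) the
    // number was already in.
    static bool valid_separated_number(const std::string &s)
    {
        enum Part { MANTISSA, EXPONENT } part = MANTISSA;
        bool prev_digit = false;
        for (std::size_t i = 0; i < s.size(); ++i) {
            char c = s[i];
            if (std::isdigit(static_cast<unsigned char>(c))) {
                prev_digit = true;
            } else if (c == '_') {
                bool next_digit =
                    i + 1 < s.size() && std::isdigit(static_cast<unsigned char>(s[i + 1]));
                if (!prev_digit || !next_digit)
                    return false;  // junk after '_', or '_' not between digits
                prev_digit = false;
            } else if (c == '.') {
                if (part != MANTISSA || !prev_digit)
                    return false;
                prev_digit = false;
            } else if (c == 'e' || c == 'E') {
                if (part != MANTISSA || !prev_digit)
                    return false;
                part = EXPONENT;
                prev_digit = false;
                if (i + 1 < s.size() && (s[i + 1] == '+' || s[i + 1] == '-'))
                    ++i;  // optional exponent sign
            } else {
                return false;
            }
        }
        return prev_digit;
    }

    int main()
    {
        assert(valid_separated_number("1_2.0"));
        assert(valid_separated_number("2_3e1_2"));
        assert(!valid_separated_number("123__456"));
        assert(!valid_separated_number("1_200_.0"));
        assert(!valid_separated_number("1_200e_2"));
        return 0;
    }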
From 1865d215b70767849a51ea0de437ac57b747b031 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Wed, 19 Jun 2024 13:32:45 -0700
Subject: [PATCH 03/10] More tests.
---
core/lexer.cpp | 2 +-
core/lexer_test.cpp | 29 ++++++++++++++++++++++++++---
2 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/core/lexer.cpp b/core/lexer.cpp
index a3f1bfd80..e5316881c 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -227,10 +227,10 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
AFTER_ONE_TO_NINE,
AFTER_DOT,
AFTER_DIGIT,
+ AFTER_UNDERSCORE,
AFTER_E,
AFTER_EXP_SIGN,
AFTER_EXP_DIGIT,
- AFTER_UNDERSCORE,
AFTER_EXP_UNDERSCORE
} state;
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index 933f37164..d73b5448e 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -123,19 +123,42 @@ TEST(Lexer, TestNumbersWithSeparators)
testLex("number 1_750_000", "1_750_000", {Token(Token::Kind::NUMBER, "1750000")}, "");
testLex("number 1_2_3", "1_2_3", {Token(Token::Kind::NUMBER, "123")}, "");
testLex("number 3.141_592", "3.141_592", {Token(Token::Kind::NUMBER, "3.141592")}, "");
-
- testLex("number 1_2.0", "1_2.0", {Token(Token::Kind::NUMBER, "12.0")}, "");
+ testLex("number 01_100", "01_100", {Token(Token::Kind::NUMBER, "0"), Token(Token::Kind::NUMBER, "1100")}, "");
+ testLex("number 1_200.0", "1_200.0", {Token(Token::Kind::NUMBER, "1200.0")}, "");
testLex("number 0e1_01", "0e1_01", {Token(Token::Kind::NUMBER, "0e101")}, "");
testLex("number 10_10e3", "10_10e3", {Token(Token::Kind::NUMBER, "1010e3")}, "");
testLex("number 2_3e1_2", "2_3e1_2", {Token(Token::Kind::NUMBER, "23e12")}, "");
testLex("number 1.1_2e100", "1.1_2e100", {Token(Token::Kind::NUMBER, "1.12e100")}, "");
testLex("number 1.1e-10_1", "1.1e-10_1", {Token(Token::Kind::NUMBER, "1.1e-101")}, "");
- testLex("number 123456_!", "123456_!", {}, "number 123456_!:1:1: couldn't lex number, junk after _: !");
+ testLex("number 123456_!",
+ "123456_!",
+ {},
+ "number 123456_!:1:1: couldn't lex number, junk after _: !");
testLex("number 123__456",
"123__456",
{},
"number 123__456:1:1: couldn't lex number, multiple consecutive _'s");
+ testLex("number 1_200_.0",
+ "1_200_.0",
+ {},
+ "number 1_200_.0:1:1: couldn't lex number, junk after _: .");
+ testLex("number 1_200._0",
+ "1_200._0",
+ {},
+ "number 1_200._0:1:1: couldn't lex number, junk after decimal point: _");
+ testLex("number 1_200_e2",
+ "1_200_e2",
+ {},
+ "number 1_200_e2:1:1: couldn't lex number, junk after _: e");
+ testLex("number 1_200e_2",
+ "1_200e_2",
+ {},
+ "number 1_200e_2:1:1: couldn't lex number, junk after 'E': _");
+ testLex("number 200e-_2",
+ "200e-_2",
+ {},
+ "number 200e-_2:1:1: couldn't lex number, junk after exponent sign: _");
}
TEST(Lexer, TestDoubleStrings)
From c78b2b3300712d5dd04c4c1429d38eaa8c47d637 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Wed, 19 Jun 2024 14:28:50 -0700
Subject: [PATCH 04/10] More tests.
---
core/lexer_test.cpp | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index d73b5448e..74d01672b 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -130,6 +130,7 @@ TEST(Lexer, TestNumbersWithSeparators)
testLex("number 2_3e1_2", "2_3e1_2", {Token(Token::Kind::NUMBER, "23e12")}, "");
testLex("number 1.1_2e100", "1.1_2e100", {Token(Token::Kind::NUMBER, "1.12e100")}, "");
testLex("number 1.1e-10_1", "1.1e-10_1", {Token(Token::Kind::NUMBER, "1.1e-101")}, "");
+ testLex("number 9.109_383_56e-31", "9.109_383_56e-31", {Token(Token::Kind::NUMBER, "9.10938356e-31")}, "");
testLex("number 123456_!",
"123456_!",
@@ -159,6 +160,10 @@ TEST(Lexer, TestNumbersWithSeparators)
"200e-_2",
{},
"number 200e-_2:1:1: couldn't lex number, junk after exponent sign: _");
+ testLex("number 200e+_2",
+ "200e+_2",
+ {},
+ "number 200e+_2:1:1: couldn't lex number, junk after exponent sign: _");
}
TEST(Lexer, TestDoubleStrings)
From aa2780b13303fe6e468c2d34c65a3c0958bf2f6d Mon Sep 17 00:00:00 2001
From: David Grant
Date: Sun, 23 Jun 2024 15:01:14 -0700
Subject: [PATCH 05/10] Simpler to not special-case consecutive _s.
---
core/lexer.cpp | 10 ----------
core/lexer_test.cpp | 2 +-
2 files changed, 1 insertion(+), 11 deletions(-)
diff --git a/core/lexer.cpp b/core/lexer.cpp
index e5316881c..0d6b698d3 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -339,12 +339,7 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case AFTER_UNDERSCORE:
switch (*c) {
- case '_': {
- throw StaticError(filename, begin, "couldn't lex number, multiple consecutive _'s");
- }
-
// The only valid transition from _ is to a digit.
-
case '0':
case '1':
case '2':
@@ -430,12 +425,7 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
case AFTER_EXP_UNDERSCORE:
switch (*c) {
- case '_': {
- throw StaticError(filename, begin, "couldn't lex number, multiple consecutive _'s");
- }
-
// The only valid transition from _ is to a digit.
-
case '0':
case '1':
case '2':
diff --git a/core/lexer_test.cpp b/core/lexer_test.cpp
index 74d01672b..e1fba13c3 100644
--- a/core/lexer_test.cpp
+++ b/core/lexer_test.cpp
@@ -139,7 +139,7 @@ TEST(Lexer, TestNumbersWithSeparators)
testLex("number 123__456",
"123__456",
{},
- "number 123__456:1:1: couldn't lex number, multiple consecutive _'s");
+ "number 123__456:1:1: couldn't lex number, junk after _: _");
testLex("number 1_200_.0",
"1_200_.0",
{},
From 60def1e31b74d5044940e81f43b1e249ce36c80b Mon Sep 17 00:00:00 2001
From: David Grant
Date: Mon, 24 Jun 2024 15:12:31 -0700
Subject: [PATCH 06/10] Update docs and tutorial to include digit separators.
---
core/lexer.cpp | 8 ++++--
doc/_includes/examples/syntax.jsonnet | 29 ++++++++++----------
doc/_includes/examples/syntax.jsonnet.golden | 28 ++++++++-----------
doc/learning/tutorial.html | 3 ++
doc/ref/spec.html | 15 ++++++++--
5 files changed, 47 insertions(+), 36 deletions(-)
diff --git a/core/lexer.cpp b/core/lexer.cpp
index 0d6b698d3..205b509a9 100644
--- a/core/lexer.cpp
+++ b/core/lexer.cpp
@@ -217,9 +217,11 @@ std::string lex_number(const char *&c, const std::string &filename, const Locati
// https://www.json.org/img/number.png
// Note, we deviate from the json.org documentation as follows:
- // There is no reason to lex negative numbers as atomic tokens, it is better to parse them
- // as a unary operator combined with a numeric literal. This avoids x-1 being tokenized as
- // <identifier> <number> instead of the intended <identifier> <binop> <number>.
+ // * There is no reason to lex negative numbers as atomic tokens, it is better to parse them
+ // as a unary operator combined with a numeric literal. This avoids x-1 being tokenized as
+ // <identifier> <number> instead of the intended <identifier> <binop> <number>.
+ // * We support digit separators using the _ character for readability in
+ // large numeric literals.
enum State {
BEGIN,
diff --git a/doc/_includes/examples/syntax.jsonnet b/doc/_includes/examples/syntax.jsonnet
index eb1ab0d21..8a5288e6c 100644
--- a/doc/_includes/examples/syntax.jsonnet
+++ b/doc/_includes/examples/syntax.jsonnet
@@ -3,21 +3,6 @@
{
cocktails: {
// Ingredient quantities are in fl oz.
- 'Tom Collins': {
- ingredients: [
- { kind: "Farmer's Gin", qty: 1.5 },
- { kind: 'Lemon', qty: 1 },
- { kind: 'Simple Syrup', qty: 0.5 },
- { kind: 'Soda', qty: 2 },
- { kind: 'Angostura', qty: 'dash' },
- ],
- garnish: 'Maraschino Cherry',
- served: 'Tall',
- description: |||
- The Tom Collins is essentially gin and
- lemonade. The bitters add complexity.
- |||,
- },
Manhattan: {
ingredients: [
{ kind: 'Rye', qty: 2.5 },
@@ -28,5 +13,19 @@
served: 'Straight Up',
description: @'A clear \ red drink.',
},
+ 'Trinidad Sour': {
+ ingredients: [
+ { kind: 'Angostura bitters', qty: 1.333_333 },
+ { kind: 'Rye whiskey', qty: 0.5 },
+ { kind: 'Fresh lemon juice', qty: 0.75 },
+ { kind: 'Orgeat syrup', qty: 1 },
+ ],
+ garnish: 'Lemon twist',
+ served: 'chilled Nick & Nora glass',
+ description: |||
+ Boldly balanced: 1 1/3 oz Angostura
+ transforms bitters into the star spirit.
+ |||,
+ },
},
}
diff --git a/doc/_includes/examples/syntax.jsonnet.golden b/doc/_includes/examples/syntax.jsonnet.golden
index 6108519fd..69ae27c07 100644
--- a/doc/_includes/examples/syntax.jsonnet.golden
+++ b/doc/_includes/examples/syntax.jsonnet.golden
@@ -19,32 +19,28 @@
],
"served": "Straight Up"
},
- "Tom Collins": {
- "description": "The Tom Collins is essentially gin and\nlemonade. The bitters add complexity.\n",
- "garnish": "Maraschino Cherry",
+ "Trinidad Sour": {
+ "description": "Boldly balanced: 1 1/3 oz Angostura\ntransforms bitters into the star spirit.\n",
+ "garnish": "Lemon twist",
"ingredients": [
{
- "kind": "Farmer's Gin",
- "qty": 1.5
- },
- {
- "kind": "Lemon",
- "qty": 1
+ "kind": "Angostura bitters",
+ "qty": 1.333333
},
{
- "kind": "Simple Syrup",
+ "kind": "Rye whiskey",
"qty": 0.5
},
{
- "kind": "Soda",
- "qty": 2
+ "kind": "Fresh lemon juice",
+ "qty": 0.75
},
{
- "kind": "Angostura",
- "qty": "dash"
+ "kind": "Orgeat syrup",
+ "qty": 1
}
],
- "served": "Tall"
+ "served": "chilled Nick & Nora glass"
}
}
-}
+}
\ No newline at end of file
diff --git a/doc/learning/tutorial.html b/doc/learning/tutorial.html
index 3ac5520bf..aa9b4927e 100644
--- a/doc/learning/tutorial.html
+++ b/doc/learning/tutorial.html
@@ -60,6 +60,9 @@ Syntax
Verbatim strings @'foo' and @"foo" are for single lines.
+
+ Large numeric literals may be rendered more readable by using underscores, e.g. 1_000_000.
+
Using the interactive demo below, try modifying the strings / quantities. Try adding a "Dry
diff --git a/doc/ref/spec.html b/doc/ref/spec.html
index f0221df4d..77187b5e0 100644
--- a/doc/ref/spec.html
+++ b/doc/ref/spec.html
@@ -142,8 +142,19 @@
Lexing
- number: As defined by JSON but without the leading
- minus.
+ number: As defined by JSON, with two exceptions:
+
+ - Numeric literals may be written with underscores (_) between any two adjacent 0-9 digits
+   to improve readability. The underscores are discarded by the lexer.
+
+   Examples: 1_000_000, 0.000_001, 6.022_140_76e23
+
+ - Negative numbers are lexed as the - unary operator applied to a positive number to
+   simplify parsing.
+
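What "discarded by the lexer" means downstream, as a small sketch under that assumption: the NUMBER token's text reaches the parser with the separators already removed, so ordinary numeric conversion is unchanged.

    #include <cassert>
    #include <string>

    int main()
    {
        // The source spelling 6.022_140_76e23 would reach the parser as this token text.
        std::string token_text = "6.02214076e23";
        assert(std::stod(token_text) == 6.02214076e23);
        return 0;
    }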
From 93f23b9b909c8e107c528c6215fd29389d7325d1 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Mon, 24 Jun 2024 15:16:14 -0700
Subject: [PATCH 07/10] newline
---
doc/_includes/examples/syntax.jsonnet.golden | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/doc/_includes/examples/syntax.jsonnet.golden b/doc/_includes/examples/syntax.jsonnet.golden
index 69ae27c07..1e9c7b5bf 100644
--- a/doc/_includes/examples/syntax.jsonnet.golden
+++ b/doc/_includes/examples/syntax.jsonnet.golden
@@ -43,4 +43,4 @@
"served": "chilled Nick & Nora glass"
}
}
-}
\ No newline at end of file
+}
From 5a4a251e75f0c9bbd1ce4d0dc077d2ff9f5afcf0 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Thu, 4 Jul 2024 13:34:57 -0700
Subject: [PATCH 08/10] Add test suite jsonnets for digit separators.
---
test_suite/digitsep.jsonnet | 19 +++++++++++++++++++
test_suite/digitsep.jsonnet.golden | 15 +++++++++++++++
.../error.std_parseInt.nodigitsep.jsonnet | 1 +
...ror.std_parseInt.nodigitsep.jsonnet.golden | 2 ++
4 files changed, 37 insertions(+)
create mode 100644 test_suite/digitsep.jsonnet
create mode 100644 test_suite/digitsep.jsonnet.golden
create mode 100644 test_suite/error.std_parseInt.nodigitsep.jsonnet
create mode 100644 test_suite/error.std_parseInt.nodigitsep.jsonnet.golden
diff --git a/test_suite/digitsep.jsonnet b/test_suite/digitsep.jsonnet
new file mode 100644
index 000000000..5237198ce
--- /dev/null
+++ b/test_suite/digitsep.jsonnet
@@ -0,0 +1,19 @@
+local cases = [
+ [123_456, "123_456"],
+ [1_750_000, "1_750_000"],
+ [1_2_3, "1_2_3"],
+ [3.141_592, "3.141_592"],
+ [1_200.0, "1_200.0"],
+ [0e1_01, "0e1_01"],
+ [10_10e3, "10_10e3"],
+ [2_3e1_2, "2_3e1_2"],
+ [1.1_2e100, "1.1_2e100"],
+ [1.1e-10_1, "1.1e-10_1"],
+ [9.109_383_56e-31, "9.109_383_56e-31"],
+];
+
+local sepParse(s) = std.parseJson(std.strReplace(s, "_", ""));
+
+{
+ test_results: [std.assertEqual(c[0], sepParse(c[1])) for c in cases],
+}
diff --git a/test_suite/digitsep.jsonnet.golden b/test_suite/digitsep.jsonnet.golden
new file mode 100644
index 000000000..e9795a491
--- /dev/null
+++ b/test_suite/digitsep.jsonnet.golden
@@ -0,0 +1,15 @@
+{
+ "test_results": [
+ true,
+ true,
+ true,
+ true,
+ true,
+ true,
+ true,
+ true,
+ true,
+ true,
+ true
+ ]
+}
diff --git a/test_suite/error.std_parseInt.nodigitsep.jsonnet b/test_suite/error.std_parseInt.nodigitsep.jsonnet
new file mode 100644
index 000000000..f50d4f63b
--- /dev/null
+++ b/test_suite/error.std_parseInt.nodigitsep.jsonnet
@@ -0,0 +1 @@
+std.parseJson("987_543")
diff --git a/test_suite/error.std_parseInt.nodigitsep.jsonnet.golden b/test_suite/error.std_parseInt.nodigitsep.jsonnet.golden
new file mode 100644
index 000000000..9e89e4e77
--- /dev/null
+++ b/test_suite/error.std_parseInt.nodigitsep.jsonnet.golden
@@ -0,0 +1,2 @@
+RUNTIME ERROR: [json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '987_'; expected end of input
+ error.std_parseInt.nodigitsep.jsonnet:1:1-25
From ed3144d315035aa7cc38583d7e65a22efdea0a01 Mon Sep 17 00:00:00 2001
From: David Grant
Date: Mon, 8 Jul 2024 22:21:52 -0700
Subject: [PATCH 09/10] parseInt->parseJson
---
....nodigitsep.jsonnet => error.std_parseJson.nodigitsep.jsonnet} | 0
...onnet.golden => error.std_parseJson.nodigitsep.jsonnet.golden} | 0
2 files changed, 0 insertions(+), 0 deletions(-)
rename test_suite/{error.std_parseInt.nodigitsep.jsonnet => error.std_parseJson.nodigitsep.jsonnet} (100%)
rename test_suite/{error.std_parseInt.nodigitsep.jsonnet.golden => error.std_parseJson.nodigitsep.jsonnet.golden} (100%)
diff --git a/test_suite/error.std_parseInt.nodigitsep.jsonnet b/test_suite/error.std_parseJson.nodigitsep.jsonnet
similarity index 100%
rename from test_suite/error.std_parseInt.nodigitsep.jsonnet
rename to test_suite/error.std_parseJson.nodigitsep.jsonnet
diff --git a/test_suite/error.std_parseInt.nodigitsep.jsonnet.golden b/test_suite/error.std_parseJson.nodigitsep.jsonnet.golden
similarity index 100%
rename from test_suite/error.std_parseInt.nodigitsep.jsonnet.golden
rename to test_suite/error.std_parseJson.nodigitsep.jsonnet.golden
From b7aa8bd9000a1fbfd1cb8f66f75d4c52980efd3a Mon Sep 17 00:00:00 2001
From: David Grant
Date: Mon, 8 Jul 2024 22:24:44 -0700
Subject: [PATCH 10/10] Regenerate golden file.
---
test_suite/error.std_parseJson.nodigitsep.jsonnet.golden | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test_suite/error.std_parseJson.nodigitsep.jsonnet.golden b/test_suite/error.std_parseJson.nodigitsep.jsonnet.golden
index 9e89e4e77..4ccf27d5f 100644
--- a/test_suite/error.std_parseJson.nodigitsep.jsonnet.golden
+++ b/test_suite/error.std_parseJson.nodigitsep.jsonnet.golden
@@ -1,2 +1,2 @@
RUNTIME ERROR: [json.exception.parse_error.101] parse error at line 1, column 4: syntax error while parsing value - invalid literal; last read: '987_'; expected end of input
- error.std_parseInt.nodigitsep.jsonnet:1:1-25
+ error.std_parseJson.nodigitsep.jsonnet:1:1-25