Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * jsonapi.c
4 : * JSON parser and lexer interfaces
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * IDENTIFICATION
10 : * src/common/jsonapi.c
11 : *
12 : *-------------------------------------------------------------------------
13 : */
14 : #ifndef FRONTEND
15 : #include "postgres.h"
16 : #else
17 : #include "postgres_fe.h"
18 : #endif
19 :
20 : #include "common/jsonapi.h"
21 : #include "mb/pg_wchar.h"
22 : #include "port/pg_lfind.h"
23 :
24 : #ifndef FRONTEND
25 : #include "miscadmin.h"
26 : #endif
27 :
28 : /*
29 : * The context of the parser is maintained by the recursive descent
30 : * mechanism, but is passed explicitly to the error reporting routine
31 : * for better diagnostics.
32 : */
33 : typedef enum /* contexts of JSON parser */
34 : {
35 : JSON_PARSE_VALUE, /* expecting a value */
36 : JSON_PARSE_STRING, /* expecting a string (for a field name) */
37 : JSON_PARSE_ARRAY_START, /* saw '[', expecting value or ']' */
38 : JSON_PARSE_ARRAY_NEXT, /* saw array element, expecting ',' or ']' */
39 : JSON_PARSE_OBJECT_START, /* saw '{', expecting label or '}' */
40 : JSON_PARSE_OBJECT_LABEL, /* saw object label, expecting ':' */
41 : JSON_PARSE_OBJECT_NEXT, /* saw object value, expecting ',' or '}' */
42 : JSON_PARSE_OBJECT_COMMA, /* saw object ',', expecting next label */
43 : JSON_PARSE_END, /* saw the end of a document, expect nothing */
44 : } JsonParseContext;
45 :
46 : /*
47 : * Setup for table-driven parser.
48 : * These enums need to be separate from the JsonTokenType and from each other
49 : * so we can have all of them on the prediction stack, which consists of
50 : * tokens, non-terminals, and semantic action markers.
51 : */
52 :
53 : typedef enum
54 : {
55 : JSON_NT_JSON = 32,
56 : JSON_NT_ARRAY_ELEMENTS,
57 : JSON_NT_MORE_ARRAY_ELEMENTS,
58 : JSON_NT_KEY_PAIRS,
59 : JSON_NT_MORE_KEY_PAIRS,
60 : } JsonNonTerminal;
61 :
62 : typedef enum
63 : {
64 : JSON_SEM_OSTART = 64,
65 : JSON_SEM_OEND,
66 : JSON_SEM_ASTART,
67 : JSON_SEM_AEND,
68 : JSON_SEM_OFIELD_INIT,
69 : JSON_SEM_OFIELD_START,
70 : JSON_SEM_OFIELD_END,
71 : JSON_SEM_AELEM_START,
72 : JSON_SEM_AELEM_END,
73 : JSON_SEM_SCALAR_INIT,
74 : JSON_SEM_SCALAR_CALL,
75 : } JsonParserSem;
76 :
77 : /*
78 : * struct containing the 3 stacks used in non-recursive parsing,
79 : * and the token and value for scalars that need to be preserved
80 : * across calls.
81 : *
82 : * typedef appears in jsonapi.h
83 : */
84 : struct JsonParserStack
85 : {
86 : int stack_size;
87 : char *prediction;
88 : int pred_index;
89 : /* these two are indexed by lex_level */
90 : char **fnames;
91 : bool *fnull;
92 : JsonTokenType scalar_tok;
93 : char *scalar_val;
94 : };
95 :
96 : /*
97 : * struct containing state used when there is a possible partial token at the
98 : * end of a json chunk when we are doing incremental parsing.
99 : *
100 : * typedef appears in jsonapi.h
101 : */
102 : struct JsonIncrementalState
103 : {
104 : bool is_last_chunk;
105 : bool partial_completed;
106 : StringInfoData partial_token;
107 : };
108 :
109 : /*
110 : * constants and macros used in the nonrecursive parser
111 : */
112 : #define JSON_NUM_TERMINALS 13
113 : #define JSON_NUM_NONTERMINALS 5
114 : #define JSON_NT_OFFSET JSON_NT_JSON
115 : /* for indexing the table */
116 : #define OFS(NT) (NT) - JSON_NT_OFFSET
117 : /* classify items we get off the stack */
118 : #define IS_SEM(x) ((x) & 0x40)
119 : #define IS_NT(x) ((x) & 0x20)
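/*
 * For illustration: the three symbol kinds occupy disjoint numeric ranges,
 * so a single byte popped off the prediction stack is self-describing.
 * JsonTokenType values are all below 32, non-terminals start at 32 (0x20),
 * and semantic action markers at 64 (0x40). Hence:
 *
 *     IS_NT(JSON_NT_JSON)      is nonzero (32 & 0x20)
 *     IS_SEM(JSON_SEM_OSTART)  is nonzero (64 & 0x40)
 *     IS_NT(JSON_TOKEN_STRING) and IS_SEM(JSON_TOKEN_STRING) are both 0
 */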
120 :
121 : /*
122 : * These productions are stored in reverse order right to left so that when
123 : * they are pushed on the stack what we expect next is at the top of the stack.
124 : */
125 : static char JSON_PROD_EPSILON[] = {0}; /* epsilon - an empty production */
126 :
127 : /* JSON -> string */
128 : static char JSON_PROD_SCALAR_STRING[] = {JSON_SEM_SCALAR_CALL, JSON_TOKEN_STRING, JSON_SEM_SCALAR_INIT, 0};
129 :
130 : /* JSON -> number */
131 : static char JSON_PROD_SCALAR_NUMBER[] = {JSON_SEM_SCALAR_CALL, JSON_TOKEN_NUMBER, JSON_SEM_SCALAR_INIT, 0};
132 :
133 : /* JSON -> 'true' */
134 : static char JSON_PROD_SCALAR_TRUE[] = {JSON_SEM_SCALAR_CALL, JSON_TOKEN_TRUE, JSON_SEM_SCALAR_INIT, 0};
135 :
136 : /* JSON -> 'false' */
137 : static char JSON_PROD_SCALAR_FALSE[] = {JSON_SEM_SCALAR_CALL, JSON_TOKEN_FALSE, JSON_SEM_SCALAR_INIT, 0};
138 :
139 : /* JSON -> 'null' */
140 : static char JSON_PROD_SCALAR_NULL[] = {JSON_SEM_SCALAR_CALL, JSON_TOKEN_NULL, JSON_SEM_SCALAR_INIT, 0};
141 :
142 : /* JSON -> '{' KEY_PAIRS '}' */
143 : static char JSON_PROD_OBJECT[] = {JSON_SEM_OEND, JSON_TOKEN_OBJECT_END, JSON_NT_KEY_PAIRS, JSON_TOKEN_OBJECT_START, JSON_SEM_OSTART, 0};
144 :
145 : /* JSON -> '[' ARRAY_ELEMENTS ']' */
146 : static char JSON_PROD_ARRAY[] = {JSON_SEM_AEND, JSON_TOKEN_ARRAY_END, JSON_NT_ARRAY_ELEMENTS, JSON_TOKEN_ARRAY_START, JSON_SEM_ASTART, 0};
147 :
148 : /* ARRAY_ELEMENTS -> JSON MORE_ARRAY_ELEMENTS */
149 : static char JSON_PROD_ARRAY_ELEMENTS[] = {JSON_NT_MORE_ARRAY_ELEMENTS, JSON_SEM_AELEM_END, JSON_NT_JSON, JSON_SEM_AELEM_START, 0};
150 :
151 : /* MORE_ARRAY_ELEMENTS -> ',' JSON MORE_ARRAY_ELEMENTS */
152 : static char JSON_PROD_MORE_ARRAY_ELEMENTS[] = {JSON_NT_MORE_ARRAY_ELEMENTS, JSON_SEM_AELEM_END, JSON_NT_JSON, JSON_SEM_AELEM_START, JSON_TOKEN_COMMA, 0};
153 :
154 : /* KEY_PAIRS -> string ':' JSON MORE_KEY_PAIRS */
155 : static char JSON_PROD_KEY_PAIRS[] = {JSON_NT_MORE_KEY_PAIRS, JSON_SEM_OFIELD_END, JSON_NT_JSON, JSON_SEM_OFIELD_START, JSON_TOKEN_COLON, JSON_TOKEN_STRING, JSON_SEM_OFIELD_INIT, 0};
156 :
157 : /* MORE_KEY_PAIRS -> ',' string ':' JSON MORE_KEY_PAIRS */
158 : static char JSON_PROD_MORE_KEY_PAIRS[] = {JSON_NT_MORE_KEY_PAIRS, JSON_SEM_OFIELD_END, JSON_NT_JSON, JSON_SEM_OFIELD_START, JSON_TOKEN_COLON, JSON_TOKEN_STRING, JSON_SEM_OFIELD_INIT, JSON_TOKEN_COMMA, 0};
159 :
160 : /*
161 : * Note: there are also epsilon productions for ARRAY_ELEMENTS,
162 : * MORE_ARRAY_ELEMENTS, KEY_PAIRS and MORE_KEY_PAIRS.
163 : * They are all the same as none require any semantic actions.
164 : */
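/*
 * For example, when JSON_PROD_ARRAY is pushed, push_prediction() (below)
 * copies its bytes in array order, so the last byte stored, JSON_SEM_ASTART,
 * ends up on top. Popping therefore replays the production left to right:
 * the ASTART action fires while the lookahead is still '[', then '[' is
 * matched, ARRAY_ELEMENTS is expanded, ']' is matched, and AEND fires last.
 */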
165 :
166 : /*
167 : * Table connecting the productions with their director sets of
168 : * terminal symbols.
169 : * Any combination not specified here represents an error.
170 : */
171 :
172 : typedef struct
173 : {
174 : size_t len;
175 : char *prod;
176 : } td_entry;
177 :
178 : #define TD_ENTRY(PROD) { sizeof(PROD) - 1, (PROD) }
179 :
180 : static td_entry td_parser_table[JSON_NUM_NONTERMINALS][JSON_NUM_TERMINALS] =
181 : {
182 : /* JSON */
183 : [OFS(JSON_NT_JSON)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_SCALAR_STRING),
184 : [OFS(JSON_NT_JSON)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_SCALAR_NUMBER),
185 : [OFS(JSON_NT_JSON)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_SCALAR_TRUE),
186 : [OFS(JSON_NT_JSON)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_SCALAR_FALSE),
187 : [OFS(JSON_NT_JSON)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_SCALAR_NULL),
188 : [OFS(JSON_NT_JSON)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY),
189 : [OFS(JSON_NT_JSON)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_OBJECT),
190 : /* ARRAY_ELEMENTS */
191 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
192 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_OBJECT_START] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
193 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
194 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NUMBER] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
195 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_TRUE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
196 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_FALSE] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
197 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_NULL] = TD_ENTRY(JSON_PROD_ARRAY_ELEMENTS),
198 : [OFS(JSON_NT_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
199 : /* MORE_ARRAY_ELEMENTS */
200 : [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_ARRAY_ELEMENTS),
201 : [OFS(JSON_NT_MORE_ARRAY_ELEMENTS)][JSON_TOKEN_ARRAY_END] = TD_ENTRY(JSON_PROD_EPSILON),
202 : /* KEY_PAIRS */
203 : [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_STRING] = TD_ENTRY(JSON_PROD_KEY_PAIRS),
204 : [OFS(JSON_NT_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
205 : /* MORE_KEY_PAIRS */
206 : [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_COMMA] = TD_ENTRY(JSON_PROD_MORE_KEY_PAIRS),
207 : [OFS(JSON_NT_MORE_KEY_PAIRS)][JSON_TOKEN_OBJECT_END] = TD_ENTRY(JSON_PROD_EPSILON),
208 : };
209 :
210 : /* the GOAL production. Not stored in the table, but will be the initial contents of the prediction stack */
211 : static char JSON_PROD_GOAL[] = {JSON_TOKEN_END, JSON_NT_JSON, 0};
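/*
 * A condensed trace of the table-driven loop on the input "[1]", starting
 * from the GOAL contents [JSON_NT_JSON, JSON_TOKEN_END] (top first):
 *
 *   1. lookahead '[': JSON expands via JSON_PROD_ARRAY; the ASTART action
 *      runs, then '[' is matched and the lookahead becomes '1'.
 *   2. ARRAY_ELEMENTS expands via JSON_PROD_ARRAY_ELEMENTS; AELEM_START
 *      runs, JSON expands via JSON_PROD_SCALAR_NUMBER, SCALAR_INIT runs,
 *      '1' is matched, then SCALAR_CALL and AELEM_END run.
 *   3. lookahead ']': MORE_ARRAY_ELEMENTS takes the epsilon production,
 *      ']' is matched, AEND runs, and JSON_TOKEN_END matches the last
 *      stack entry: empty stack, JSON_SUCCESS.
 */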
212 :
213 : static inline JsonParseErrorType json_lex_string(JsonLexContext *lex);
214 : static inline JsonParseErrorType json_lex_number(JsonLexContext *lex, char *s,
215 : bool *num_err, int *total_len);
216 : static inline JsonParseErrorType parse_scalar(JsonLexContext *lex, JsonSemAction *sem);
217 : static JsonParseErrorType parse_object_field(JsonLexContext *lex, JsonSemAction *sem);
218 : static JsonParseErrorType parse_object(JsonLexContext *lex, JsonSemAction *sem);
219 : static JsonParseErrorType parse_array_element(JsonLexContext *lex, JsonSemAction *sem);
220 : static JsonParseErrorType parse_array(JsonLexContext *lex, JsonSemAction *sem);
221 : static JsonParseErrorType report_parse_error(JsonParseContext ctx, JsonLexContext *lex);
222 :
223 : /* the null action object used for pure validation */
224 : JsonSemAction nullSemAction =
225 : {
226 : NULL, NULL, NULL, NULL, NULL,
227 : NULL, NULL, NULL, NULL, NULL
228 : };
229 :
230 : /* Parser support routines */
231 :
232 : /*
233 : * lex_peek
234 : *
235 : * what is the current look_ahead token?
236 : */
237 : static inline JsonTokenType
238 7214118 : lex_peek(JsonLexContext *lex)
239 : {
240 7214118 : return lex->token_type;
241 : }
242 :
243 : /*
244 : * lex_expect
245 : *
246 : * move the lexer to the next token if the current look_ahead token matches
247 : * the parameter token. Otherwise, report an error.
248 : */
249 : static inline JsonParseErrorType
250 358506 : lex_expect(JsonParseContext ctx, JsonLexContext *lex, JsonTokenType token)
251 : {
252 358506 : if (lex_peek(lex) == token)
253 358398 : return json_lex(lex);
254 : else
255 108 : return report_parse_error(ctx, lex);
256 : }
257 :
258 : /* chars to consider as part of an alphanumeric token */
259 : #define JSON_ALPHANUMERIC_CHAR(c) \
260 : (((c) >= 'a' && (c) <= 'z') || \
261 : ((c) >= 'A' && (c) <= 'Z') || \
262 : ((c) >= '0' && (c) <= '9') || \
263 : (c) == '_' || \
264 : IS_HIGHBIT_SET(c))
265 :
266 : /*
267 : * Utility function to check if a string is a valid JSON number.
268 : *
269 : * str is of length len, and need not be null-terminated.
270 : */
271 : bool
272 46 : IsValidJsonNumber(const char *str, int len)
273 : {
274 : bool numeric_error;
275 : int total_len;
276 : JsonLexContext dummy_lex;
277 :
278 46 : if (len <= 0)
279 0 : return false;
280 :
281 46 : dummy_lex.incremental = false;
282 46 : dummy_lex.inc_state = NULL;
283 46 : dummy_lex.pstack = NULL;
284 :
285 : /*
286 : * json_lex_number expects a leading '-' to have been eaten already.
287 : *
288 : * Having to cast away the constness of str is ugly, but there's no
289 : * easy alternative.
290 : */
291 46 : if (*str == '-')
292 : {
293 4 : dummy_lex.input = unconstify(char *, str) + 1;
294 4 : dummy_lex.input_length = len - 1;
295 : }
296 : else
297 : {
298 42 : dummy_lex.input = unconstify(char *, str);
299 42 : dummy_lex.input_length = len;
300 : }
301 :
302 46 : dummy_lex.token_start = dummy_lex.input;
303 :
304 46 : json_lex_number(&dummy_lex, dummy_lex.input, &numeric_error, &total_len);
305 :
306 46 : return (!numeric_error) && (total_len == dummy_lex.input_length);
307 : }
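/*
 * Usage sketch: because the interface is length-based, a caller can test a
 * substring in place. Per the JSON number grammar these hold:
 *
 *     IsValidJsonNumber("-1.5e2", 6)  -> true
 *     IsValidJsonNumber("007", 3)     -> false  (leading zeros disallowed)
 *     IsValidJsonNumber("1.", 2)      -> false  (digit required after '.')
 *     IsValidJsonNumber("NaN", 3)     -> false  (not part of JSON)
 */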
308 :
309 : /*
310 : * makeJsonLexContextCstringLen
311 : * Initialize the given JsonLexContext object, or create one
312 : *
313 : * If a valid 'lex' pointer is given, it is initialized. This can
314 : * be used for stack-allocated structs, saving overhead. If NULL is
315 : * given, a new struct is allocated.
316 : *
317 : * If need_escapes is true, ->strval stores the unescaped lexemes.
318 : * Unescaping is expensive, so only request it when necessary.
319 : *
320 : * If need_escapes is true or lex was given as NULL, then caller is
321 : * responsible for freeing the returned struct, either by calling
322 : * freeJsonLexContext() or (in backend environment) via memory context
323 : * cleanup.
324 : */
325 : JsonLexContext *
326 36158 : makeJsonLexContextCstringLen(JsonLexContext *lex, char *json,
327 : int len, int encoding, bool need_escapes)
328 : {
329 36158 : if (lex == NULL)
330 : {
331 5538 : lex = palloc0(sizeof(JsonLexContext));
332 5538 : lex->flags |= JSONLEX_FREE_STRUCT;
333 : }
334 : else
335 30620 : memset(lex, 0, sizeof(JsonLexContext));
336 :
337 36158 : lex->errormsg = NULL;
338 36158 : lex->input = lex->token_terminator = lex->line_start = json;
339 36158 : lex->line_number = 1;
340 36158 : lex->input_length = len;
341 36158 : lex->input_encoding = encoding;
342 36158 : if (need_escapes)
343 : {
344 28262 : lex->strval = makeStringInfo();
345 28262 : lex->flags |= JSONLEX_FREE_STRVAL;
346 : }
347 :
348 36158 : return lex;
349 : }
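/*
 * A minimal validation sketch, assuming a backend build and UTF-8 input:
 * with nullSemAction and need_escapes = false, a stack-allocated context
 * validates a document without per-token allocations and (per the comments
 * above) needs no explicit cleanup:
 *
 *     JsonLexContext lex;
 *
 *     makeJsonLexContextCstringLen(&lex, json, len, PG_UTF8, false);
 *     if (pg_parse_json(&lex, &nullSemAction) == JSON_SUCCESS)
 *         ... the input is well-formed JSON ...
 */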
350 :
351 :
352 : /*
353 : * makeJsonLexContextIncremental
354 : *
355 : * Similar to above but set up for use in incremental parsing. That means we
356 : * need explicit stacks for predictions, field names and null indicators, but
357 : * we don't need the input; that will be handed in bit by bit to the
358 : * parse routine. We also need an accumulator for partial tokens in case
359 : * the boundary between chunks happens to fall in the middle of a token.
360 : */
361 : #define JS_STACK_CHUNK_SIZE 64
362 : #define JS_MAX_PROD_LEN 10 /* more than we need */
363 : #define JSON_TD_MAX_STACK 6400 /* hard coded for now - this is a REALLY high
364 : * number */
365 :
366 : JsonLexContext *
367 1152 : makeJsonLexContextIncremental(JsonLexContext *lex, int encoding,
368 : bool need_escapes)
369 : {
370 1152 : if (lex == NULL)
371 : {
372 2 : lex = palloc0(sizeof(JsonLexContext));
373 2 : lex->flags |= JSONLEX_FREE_STRUCT;
374 : }
375 : else
376 1150 : memset(lex, 0, sizeof(JsonLexContext));
377 :
378 1152 : lex->line_number = 1;
379 1152 : lex->input_encoding = encoding;
380 1152 : lex->incremental = true;
381 1152 : lex->inc_state = palloc0(sizeof(JsonIncrementalState));
382 1152 : initStringInfo(&(lex->inc_state->partial_token));
383 1152 : lex->pstack = palloc(sizeof(JsonParserStack));
384 1152 : lex->pstack->stack_size = JS_STACK_CHUNK_SIZE;
385 1152 : lex->pstack->prediction = palloc(JS_STACK_CHUNK_SIZE * JS_MAX_PROD_LEN);
386 1152 : lex->pstack->pred_index = 0;
387 1152 : lex->pstack->fnames = palloc(JS_STACK_CHUNK_SIZE * sizeof(char *));
388 1152 : lex->pstack->fnull = palloc(JS_STACK_CHUNK_SIZE * sizeof(bool));
389 1152 : if (need_escapes)
390 : {
391 178 : lex->strval = makeStringInfo();
392 178 : lex->flags |= JSONLEX_FREE_STRVAL;
393 : }
394 1152 : return lex;
395 : }
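/*
 * A chunked-driver sketch; read_chunk() is a hypothetical callback standing
 * in for however the caller obtains data. Every chunk but the last should
 * yield JSON_INCOMPLETE:
 *
 *     JsonLexContext lex;
 *     JsonParseErrorType res = JSON_INCOMPLETE;
 *
 *     makeJsonLexContextIncremental(&lex, PG_UTF8, false);
 *     while (res == JSON_INCOMPLETE && read_chunk(&buf, &len, &is_last))
 *         res = pg_parse_json_incremental(&lex, &nullSemAction,
 *                                         buf, len, is_last);
 *     freeJsonLexContext(&lex);
 */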
396 :
397 : static inline void
398 1408556 : inc_lex_level(JsonLexContext *lex)
399 : {
400 1408556 : lex->lex_level += 1;
401 :
402 1408556 : if (lex->incremental && lex->lex_level >= lex->pstack->stack_size)
403 : {
404 19200 : lex->pstack->stack_size += JS_STACK_CHUNK_SIZE;
405 38400 : lex->pstack->prediction =
406 19200 : repalloc(lex->pstack->prediction,
407 19200 : lex->pstack->stack_size * JS_MAX_PROD_LEN);
408 19200 : if (lex->pstack->fnames)
409 19200 : lex->pstack->fnames =
410 19200 : repalloc(lex->pstack->fnames,
411 19200 : lex->pstack->stack_size * sizeof(char *));
412 19200 : if (lex->pstack->fnull)
413 19200 : lex->pstack->fnull =
414 19200 : repalloc(lex->pstack->fnull, lex->pstack->stack_size * sizeof(bool));
415 : }
416 1408556 : }
417 :
418 : static inline void
419 589104 : dec_lex_level(JsonLexContext *lex)
420 : {
421 589104 : lex->lex_level -= 1;
422 589104 : }
423 :
424 : static inline void
425 5165240 : push_prediction(JsonParserStack *pstack, td_entry entry)
426 : {
427 5165240 : memcpy(pstack->prediction + pstack->pred_index, entry.prod, entry.len);
428 5165240 : pstack->pred_index += entry.len;
429 5165240 : }
430 :
431 : static inline char
432 19078912 : pop_prediction(JsonParserStack *pstack)
433 : {
434 : Assert(pstack->pred_index > 0);
435 19078912 : return pstack->prediction[--pstack->pred_index];
436 : }
437 :
438 : static inline char
439 38 : next_prediction(JsonParserStack *pstack)
440 : {
441 : Assert(pstack->pred_index > 0);
442 38 : return pstack->prediction[pstack->pred_index - 1];
443 : }
444 :
445 : static inline bool
446 19230406 : have_prediction(JsonParserStack *pstack)
447 : {
448 19230406 : return pstack->pred_index > 0;
449 : }
450 :
451 : static inline void
452 882122 : set_fname(JsonLexContext *lex, char *fname)
453 : {
454 882122 : lex->pstack->fnames[lex->lex_level] = fname;
455 882122 : }
456 :
457 : static inline char *
458 862244 : get_fname(JsonLexContext *lex)
459 : {
460 862244 : return lex->pstack->fnames[lex->lex_level];
461 : }
462 :
463 : static inline void
464 2286980 : set_fnull(JsonLexContext *lex, bool fnull)
465 : {
466 2286980 : lex->pstack->fnull[lex->lex_level] = fnull;
467 2286980 : }
468 :
469 : static inline bool
470 372 : get_fnull(JsonLexContext *lex)
471 : {
472 372 : return lex->pstack->fnull[lex->lex_level];
473 : }
474 :
475 : /*
476 : * Free memory in a JsonLexContext.
477 : *
478 : * There's no need for this if a *lex pointer was given when the object was
479 : * made, need_escapes was false, and json_errdetail() was not called; or if (in
480 : * backend environment) a memory context delete/reset is imminent.
481 : */
482 : void
483 6468 : freeJsonLexContext(JsonLexContext *lex)
484 : {
485 6468 : if (lex->flags & JSONLEX_FREE_STRVAL)
486 6074 : destroyStringInfo(lex->strval);
487 :
488 6468 : if (lex->errormsg)
489 0 : destroyStringInfo(lex->errormsg);
490 :
491 6468 : if (lex->incremental)
492 : {
493 174 : pfree(lex->inc_state->partial_token.data);
494 174 : pfree(lex->inc_state);
495 174 : pfree(lex->pstack->prediction);
496 174 : pfree(lex->pstack->fnames);
497 174 : pfree(lex->pstack->fnull);
498 174 : pfree(lex->pstack);
499 : }
500 :
501 6468 : if (lex->flags & JSONLEX_FREE_STRUCT)
502 5318 : pfree(lex);
503 6468 : }
504 :
505 : /*
506 : * pg_parse_json
507 : *
508 : * Publicly visible entry point for the JSON parser.
509 : *
510 : * lex is a lexing context, set up for the json to be processed by calling
511 : * makeJsonLexContext(). sem is a structure of function pointers to semantic
512 : * action routines to be called at appropriate spots during parsing, and a
513 : * pointer to a state object to be passed to those routines.
514 : *
515 : * If FORCE_JSON_PSTACK is defined then the routine will call the non-recursive
516 : * JSON parser. This is a useful way to validate that it's doing the right
517 : * think at least for non-incremental cases. If this is on we expect to see
518 : * regression diffs relating to error messages about stack depth, but no
519 : * other differences.
520 : */
521 : JsonParseErrorType
522 35420 : pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
523 : {
524 : #ifdef FORCE_JSON_PSTACK
525 :
526 : lex->incremental = true;
527 : lex->inc_state = palloc0(sizeof(JsonIncrementalState));
528 :
529 : /*
530 : * We don't need partial token processing, there is only one chunk. But we
531 : * still need to init the partial token string so that freeJsonLexContext
532 : * works.
533 : */
534 : initStringInfo(&(lex->inc_state->partial_token));
535 : lex->pstack = palloc(sizeof(JsonParserStack));
536 : lex->pstack->stack_size = JS_STACK_CHUNK_SIZE;
537 : lex->pstack->prediction = palloc(JS_STACK_CHUNK_SIZE * JS_MAX_PROD_LEN);
538 : lex->pstack->pred_index = 0;
539 : lex->pstack->fnames = palloc(JS_STACK_CHUNK_SIZE * sizeof(char *));
540 : lex->pstack->fnull = palloc(JS_STACK_CHUNK_SIZE * sizeof(bool));
541 :
542 : return pg_parse_json_incremental(lex, sem, lex->input, lex->input_length, true);
543 :
544 : #else
545 :
546 : JsonTokenType tok;
547 : JsonParseErrorType result;
548 :
549 35420 : if (lex->incremental)
550 0 : return JSON_INVALID_LEXER_TYPE;
551 :
552 : /* get the initial token */
553 35420 : result = json_lex(lex);
554 35420 : if (result != JSON_SUCCESS)
555 246 : return result;
556 :
557 35174 : tok = lex_peek(lex);
558 :
559 : /* parse by recursive descent */
560 35174 : switch (tok)
561 : {
562 19978 : case JSON_TOKEN_OBJECT_START:
563 19978 : result = parse_object(lex, sem);
564 19910 : break;
565 6342 : case JSON_TOKEN_ARRAY_START:
566 6342 : result = parse_array(lex, sem);
567 6238 : break;
568 8854 : default:
569 8854 : result = parse_scalar(lex, sem); /* json can be a bare scalar */
570 : }
571 :
572 34930 : if (result == JSON_SUCCESS)
573 34488 : result = lex_expect(JSON_PARSE_END, lex, JSON_TOKEN_END);
574 :
575 34930 : return result;
576 : #endif
577 : }
578 :
579 : /*
580 : * json_count_array_elements
581 : *
582 : * Returns number of array elements in lex context at start of array token
583 : * until end of array token at same nesting level.
584 : *
585 : * Designed to be called from array_start routines.
586 : */
587 : JsonParseErrorType
588 6 : json_count_array_elements(JsonLexContext *lex, int *elements)
589 : {
590 : JsonLexContext copylex;
591 : int count;
592 : JsonParseErrorType result;
593 :
594 : /*
595 : * It's safe to do this with a shallow copy because the lexical routines
596 : * don't scribble on the input. They do scribble on the other pointers
597 : * etc, so doing this with a copy makes that safe.
598 : */
599 6 : memcpy(&copylex, lex, sizeof(JsonLexContext));
600 6 : copylex.strval = NULL; /* not interested in values here */
601 6 : copylex.lex_level++;
602 :
603 6 : count = 0;
604 6 : result = lex_expect(JSON_PARSE_ARRAY_START, ©lex,
605 : JSON_TOKEN_ARRAY_START);
606 6 : if (result != JSON_SUCCESS)
607 0 : return result;
608 6 : if (lex_peek(&copylex) != JSON_TOKEN_ARRAY_END)
609 : {
610 : while (1)
611 : {
612 48 : count++;
613 48 : result = parse_array_element(&copylex, &nullSemAction);
614 48 : if (result != JSON_SUCCESS)
615 0 : return result;
616 48 : if (copylex.token_type != JSON_TOKEN_COMMA)
617 6 : break;
618 42 : result = json_lex(&copylex);
619 42 : if (result != JSON_SUCCESS)
620 0 : return result;
621 : }
622 : }
623 6 : result = lex_expect(JSON_PARSE_ARRAY_NEXT, ©lex,
624 : JSON_TOKEN_ARRAY_END);
625 6 : if (result != JSON_SUCCESS)
626 0 : return result;
627 :
628 6 : *elements = count;
629 6 : return JSON_SUCCESS;
630 : }
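/*
 * Sketch of the intended call pattern, from inside a caller's array_start
 * callback (MyState and prealloc_elements are hypothetical):
 *
 *     static JsonParseErrorType
 *     my_array_start(void *semstate)
 *     {
 *         MyState    *state = (MyState *) semstate;
 *         int         nelems;
 *         JsonParseErrorType res;
 *
 *         res = json_count_array_elements(state->lex, &nelems);
 *         if (res == JSON_SUCCESS)
 *             prealloc_elements(state, nelems);
 *         return res;
 *     }
 */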
631 :
632 : /*
633 : * pg_parse_json_incremental
634 : *
635 : * Routine for incremental parsing of json. This uses the non-recursive top
636 : * down method of the Dragon Book Algorithm 4.3. It's somewhat slower than
637 : * the Recursive Descent pattern used above, so we only use it for incremental
638 : * parsing of JSON.
639 : *
640 : * The lexing context needs to be set up by a call to
641 : * makeJsonLexContextIncremental(). sem is a structure of function pointers
642 : * to semantic action routines, which should function exactly as those used
643 : * in the recursive descent parser.
644 : *
645 : * This routine can be called repeatedly with chunks of JSON. On the final
646 : * chunk is_last must be set to true. len is the length of the json chunk,
647 : * which does not need to be null terminated.
648 : */
649 : JsonParseErrorType
650 186694 : pg_parse_json_incremental(JsonLexContext *lex,
651 : JsonSemAction *sem,
652 : char *json,
653 : int len,
654 : bool is_last)
655 : {
656 : JsonTokenType tok;
657 : JsonParseErrorType result;
658 186694 : JsonParseContext ctx = JSON_PARSE_VALUE;
659 186694 : JsonParserStack *pstack = lex->pstack;
660 :
661 :
662 186694 : if (!lex->incremental)
663 0 : return JSON_INVALID_LEXER_TYPE;
664 :
665 186694 : lex->input = lex->token_terminator = lex->line_start = json;
666 186694 : lex->input_length = len;
667 186694 : lex->inc_state->is_last_chunk = is_last;
668 :
669 : /* get the initial token */
670 186694 : result = json_lex(lex);
671 186694 : if (result != JSON_SUCCESS)
672 35878 : return result;
673 :
674 150816 : tok = lex_peek(lex);
675 :
676 : /* use prediction stack for incremental parsing */
677 :
678 150816 : if (!have_prediction(pstack))
679 : {
680 1066 : td_entry goal = TD_ENTRY(JSON_PROD_GOAL);
681 :
682 1066 : push_prediction(pstack, goal);
683 : }
684 :
685 19079590 : while (have_prediction(pstack))
686 : {
687 19078912 : char top = pop_prediction(pstack);
688 : td_entry entry;
689 :
690 : /*
691 : * these first two branches are the guts of the Table Driven method
692 : */
693 19078912 : if (top == tok)
694 : {
695 : /*
696 : * tok can only be a terminal symbol, so top must be too. The
697 : * token matches the top of the stack, so get the next token.
698 : */
699 5521182 : if (tok < JSON_TOKEN_END)
700 : {
701 5520504 : result = json_lex(lex);
702 5520504 : if (result != JSON_SUCCESS)
703 150136 : return result;
704 5370718 : tok = lex_peek(lex);
705 : }
706 : }
707 13557730 : else if (IS_NT(top) && (entry = td_parser_table[OFS(top)][tok]).prod != NULL)
708 : {
709 : /*
710 : * the token is in the director set for a production of the
711 : * non-terminal at the top of the stack, so push the reversed RHS
712 : * of the production onto the stack.
713 : */
714 5164174 : push_prediction(pstack, entry);
715 : }
716 8393556 : else if (IS_SEM(top))
717 : {
718 : /*
719 : * top is a semantic action marker, so take action accordingly.
720 : * It's important to have these markers in the prediction stack
721 : * before any token they might need so we don't advance the token
722 : * prematurely. Note in a couple of cases we need to do something
723 : * both before and after the token.
724 : */
725 8393334 : switch (top)
726 : {
727 177816 : case JSON_SEM_OSTART:
728 : {
729 177816 : json_struct_action ostart = sem->object_start;
730 :
731 177816 : if (lex->lex_level >= JSON_TD_MAX_STACK)
732 0 : return JSON_NESTING_TOO_DEEP;
733 :
734 177816 : if (ostart != NULL)
735 : {
736 172478 : result = (*ostart) (sem->semstate);
737 172478 : if (result != JSON_SUCCESS)
738 0 : return result;
739 : }
740 177816 : inc_lex_level(lex);
741 : }
742 177816 : break;
743 177632 : case JSON_SEM_OEND:
744 : {
745 177632 : json_struct_action oend = sem->object_end;
746 :
747 177632 : dec_lex_level(lex);
748 177632 : if (oend != NULL)
749 : {
750 172476 : result = (*oend) (sem->semstate);
751 172476 : if (result != JSON_SUCCESS)
752 0 : return result;
753 : }
754 : }
755 177632 : break;
756 1230868 : case JSON_SEM_ASTART:
757 : {
758 1230868 : json_struct_action astart = sem->array_start;
759 :
760 1230868 : if (lex->lex_level >= JSON_TD_MAX_STACK)
761 128 : return JSON_NESTING_TOO_DEEP;
762 :
763 1230740 : if (astart != NULL)
764 : {
765 370 : result = (*astart) (sem->semstate);
766 370 : if (result != JSON_SUCCESS)
767 0 : return result;
768 : }
769 1230740 : inc_lex_level(lex);
770 : }
771 1230740 : break;
772 411472 : case JSON_SEM_AEND:
773 : {
774 411472 : json_struct_action aend = sem->array_end;
775 :
776 411472 : dec_lex_level(lex);
777 411472 : if (aend != NULL)
778 : {
779 370 : result = (*aend) (sem->semstate);
780 370 : if (result != JSON_SUCCESS)
781 0 : return result;
782 : }
783 : }
784 411472 : break;
785 882122 : case JSON_SEM_OFIELD_INIT:
786 : {
787 : /*
788 : * All we do here is save out the field name. We have
789 : * to wait to get past the ':' to see if the next
790 : * value is null so we can call the semantic routine.
791 : */
792 882122 : char *fname = NULL;
793 882122 : json_ofield_action ostart = sem->object_field_start;
794 882122 : json_ofield_action oend = sem->object_field_end;
795 :
796 882122 : if ((ostart != NULL || oend != NULL) && lex->strval != NULL)
797 : {
798 861932 : fname = pstrdup(lex->strval->data);
799 : }
800 882122 : set_fname(lex, fname);
801 : }
802 882122 : break;
803 882058 : case JSON_SEM_OFIELD_START:
804 : {
805 : /*
806 : * the current token should be the first token of the
807 : * value
808 : */
809 882058 : bool isnull = tok == JSON_TOKEN_NULL;
810 882058 : json_ofield_action ostart = sem->object_field_start;
811 :
812 882058 : set_fnull(lex, isnull);
813 :
814 882058 : if (ostart != NULL)
815 : {
816 861932 : char *fname = get_fname(lex);
817 :
818 861932 : result = (*ostart) (sem->semstate, fname, isnull);
819 861932 : if (result != JSON_SUCCESS)
820 0 : return result;
821 : }
822 : }
823 882058 : break;
824 882038 : case JSON_SEM_OFIELD_END:
825 : {
826 882038 : json_ofield_action oend = sem->object_field_end;
827 :
828 882038 : if (oend != NULL)
829 : {
830 312 : char *fname = get_fname(lex);
831 312 : bool isnull = get_fnull(lex);
832 :
833 312 : result = (*oend) (sem->semstate, fname, isnull);
834 312 : if (result != JSON_SUCCESS)
835 0 : return result;
836 : }
837 : }
838 882038 : break;
839 1404922 : case JSON_SEM_AELEM_START:
840 : {
841 1404922 : json_aelem_action astart = sem->array_element_start;
842 1404922 : bool isnull = tok == JSON_TOKEN_NULL;
843 :
844 1404922 : set_fnull(lex, isnull);
845 :
846 1404922 : if (astart != NULL)
847 : {
848 60 : result = (*astart) (sem->semstate, isnull);
849 60 : if (result != JSON_SUCCESS)
850 0 : return result;
851 : }
852 : }
853 1404922 : break;
854 585722 : case JSON_SEM_AELEM_END:
855 : {
856 585722 : json_aelem_action aend = sem->array_element_end;
857 :
858 585722 : if (aend != NULL)
859 : {
860 60 : bool isnull = get_fnull(lex);
861 :
862 60 : result = (*aend) (sem->semstate, isnull);
863 60 : if (result != JSON_SUCCESS)
864 0 : return result;
865 : }
866 : }
867 585722 : break;
868 879342 : case JSON_SEM_SCALAR_INIT:
869 : {
870 879342 : json_scalar_action sfunc = sem->scalar;
871 :
872 879342 : pstack->scalar_val = NULL;
873 :
874 879342 : if (sfunc != NULL)
875 : {
876 : /*
877 : * extract the de-escaped string value, or the raw
878 : * lexeme
879 : */
880 : /*
881 : * XXX copied from RD parser but looks like a
882 : * buglet
883 : */
884 861544 : if (tok == JSON_TOKEN_STRING)
885 : {
886 688910 : if (lex->strval != NULL)
887 688910 : pstack->scalar_val = pstrdup(lex->strval->data);
888 : }
889 : else
890 : {
891 172634 : int tlen = (lex->token_terminator - lex->token_start);
892 :
893 172634 : pstack->scalar_val = palloc(tlen + 1);
894 172634 : memcpy(pstack->scalar_val, lex->token_start, tlen);
895 172634 : pstack->scalar_val[tlen] = '\0';
896 : }
897 861544 : pstack->scalar_tok = tok;
898 : }
899 : }
900 879342 : break;
901 879342 : case JSON_SEM_SCALAR_CALL:
902 : {
903 : /*
904 : * We'd like to be able to get rid of this business of
905 : * two bits of scalar action, but we can't. It breaks
906 : * certain semantic actions which expect that when
907 : * called the lexer has consumed the item. See for
908 : * example get_scalar() in jsonfuncs.c.
909 : */
910 879342 : json_scalar_action sfunc = sem->scalar;
911 :
912 879342 : if (sfunc != NULL)
913 : {
914 861544 : result = (*sfunc) (sem->semstate, pstack->scalar_val, pstack->scalar_tok);
915 861542 : if (result != JSON_SUCCESS)
916 0 : return result;
917 : }
918 : }
919 879340 : break;
920 0 : default:
921 : /* should not happen */
922 0 : break;
923 : }
924 : }
925 : else
926 : {
927 : /*
928 : * The token matched neither the stack top (if it's a terminal)
929 : * nor any production for the stack top (if it's a non-terminal).
930 : *
931 : * Various cases here are Asserted to be not possible, as the
932 : * token would not appear at the top of the prediction stack
933 : * unless the lookahead matched.
934 : */
935 222 : switch (top)
936 : {
937 38 : case JSON_TOKEN_STRING:
938 38 : if (next_prediction(pstack) == JSON_TOKEN_COLON)
939 38 : ctx = JSON_PARSE_STRING;
940 : else
941 : {
942 : Assert(false);
943 0 : ctx = JSON_PARSE_VALUE;
944 : }
945 38 : break;
946 0 : case JSON_TOKEN_NUMBER:
947 : case JSON_TOKEN_TRUE:
948 : case JSON_TOKEN_FALSE:
949 : case JSON_TOKEN_NULL:
950 : case JSON_TOKEN_ARRAY_START:
951 : case JSON_TOKEN_OBJECT_START:
952 : Assert(false);
953 0 : ctx = JSON_PARSE_VALUE;
954 0 : break;
955 0 : case JSON_TOKEN_ARRAY_END:
956 : Assert(false);
957 0 : ctx = JSON_PARSE_ARRAY_NEXT;
958 0 : break;
959 0 : case JSON_TOKEN_OBJECT_END:
960 : Assert(false);
961 0 : ctx = JSON_PARSE_OBJECT_NEXT;
962 0 : break;
963 0 : case JSON_TOKEN_COMMA:
964 : Assert(false);
965 0 : if (next_prediction(pstack) == JSON_TOKEN_STRING)
966 0 : ctx = JSON_PARSE_OBJECT_NEXT;
967 : else
968 0 : ctx = JSON_PARSE_ARRAY_NEXT;
969 0 : break;
970 26 : case JSON_TOKEN_COLON:
971 26 : ctx = JSON_PARSE_OBJECT_LABEL;
972 26 : break;
973 6 : case JSON_TOKEN_END:
974 6 : ctx = JSON_PARSE_END;
975 6 : break;
976 18 : case JSON_NT_MORE_ARRAY_ELEMENTS:
977 18 : ctx = JSON_PARSE_ARRAY_NEXT;
978 18 : break;
979 14 : case JSON_NT_ARRAY_ELEMENTS:
980 14 : ctx = JSON_PARSE_ARRAY_START;
981 14 : break;
982 70 : case JSON_NT_MORE_KEY_PAIRS:
983 70 : ctx = JSON_PARSE_OBJECT_NEXT;
984 70 : break;
985 30 : case JSON_NT_KEY_PAIRS:
986 30 : ctx = JSON_PARSE_OBJECT_START;
987 30 : break;
988 20 : default:
989 20 : ctx = JSON_PARSE_VALUE;
990 : }
991 222 : return report_parse_error(ctx, lex);
992 : }
993 : }
994 :
995 678 : return JSON_SUCCESS;
996 : }
997 :
998 : /*
999 : * Recursive Descent parse routines. There is one for each structural
1000 : * element in a json document:
1001 : * - scalar (string, number, true, false, null)
1002 : * - array ( [ ] )
1003 : * - array element
1004 : * - object ( { } )
1005 : * - object field
1006 : */
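/*
 * For reference, the grammar both parsers implement, written compactly
 * (equivalent to the productions used by the table-driven parser above):
 *
 *     JSON   -> string | number | 'true' | 'false' | 'null' | array | object
 *     array  -> '[' [ JSON { ',' JSON } ] ']'
 *     object -> '{' [ field { ',' field } ] '}'
 *     field  -> string ':' JSON
 */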
1007 : static inline JsonParseErrorType
1008 231702 : parse_scalar(JsonLexContext *lex, JsonSemAction *sem)
1009 : {
1010 231702 : char *val = NULL;
1011 231702 : json_scalar_action sfunc = sem->scalar;
1012 231702 : JsonTokenType tok = lex_peek(lex);
1013 : JsonParseErrorType result;
1014 :
1015 : /* a scalar must be a string, a number, true, false, or null */
1016 231702 : if (tok != JSON_TOKEN_STRING && tok != JSON_TOKEN_NUMBER &&
1017 24216 : tok != JSON_TOKEN_TRUE && tok != JSON_TOKEN_FALSE &&
1018 : tok != JSON_TOKEN_NULL)
1019 188 : return report_parse_error(JSON_PARSE_VALUE, lex);
1020 :
1021 : /* if no semantic function, just consume the token */
1022 231514 : if (sfunc == NULL)
1023 11114 : return json_lex(lex);
1024 :
1025 : /* extract the de-escaped string value, or the raw lexeme */
1026 220400 : if (lex_peek(lex) == JSON_TOKEN_STRING)
1027 : {
1028 71102 : if (lex->strval != NULL)
1029 65294 : val = pstrdup(lex->strval->data);
1030 : }
1031 : else
1032 : {
1033 149298 : int len = (lex->token_terminator - lex->token_start);
1034 :
1035 149298 : val = palloc(len + 1);
1036 149298 : memcpy(val, lex->token_start, len);
1037 149298 : val[len] = '\0';
1038 : }
1039 :
1040 : /* consume the token */
1041 220400 : result = json_lex(lex);
1042 220400 : if (result != JSON_SUCCESS)
1043 0 : return result;
1044 :
1045 : /* invoke the callback */
1046 220400 : result = (*sfunc) (sem->semstate, val, tok);
1047 :
1048 220304 : return result;
1049 : }
1050 :
1051 : static JsonParseErrorType
1052 226254 : parse_object_field(JsonLexContext *lex, JsonSemAction *sem)
1053 : {
1054 : /*
1055 : * An object field is "fieldname" : value where value can be a scalar,
1056 : * object or array. Note: in user-facing docs and error messages, we
1057 : * generally call a field name a "key".
1058 : */
1059 :
1060 226254 : char *fname = NULL; /* keep compiler quiet */
1061 226254 : json_ofield_action ostart = sem->object_field_start;
1062 226254 : json_ofield_action oend = sem->object_field_end;
1063 : bool isnull;
1064 : JsonTokenType tok;
1065 : JsonParseErrorType result;
1066 :
1067 226254 : if (lex_peek(lex) != JSON_TOKEN_STRING)
1068 12 : return report_parse_error(JSON_PARSE_STRING, lex);
1069 226242 : if ((ostart != NULL || oend != NULL) && lex->strval != NULL)
1070 180138 : fname = pstrdup(lex->strval->data);
1071 226242 : result = json_lex(lex);
1072 226242 : if (result != JSON_SUCCESS)
1073 12 : return result;
1074 :
1075 226230 : result = lex_expect(JSON_PARSE_OBJECT_LABEL, lex, JSON_TOKEN_COLON);
1076 226230 : if (result != JSON_SUCCESS)
1077 90 : return result;
1078 :
1079 226140 : tok = lex_peek(lex);
1080 226140 : isnull = tok == JSON_TOKEN_NULL;
1081 :
1082 226140 : if (ostart != NULL)
1083 : {
1084 180054 : result = (*ostart) (sem->semstate, fname, isnull);
1085 180046 : if (result != JSON_SUCCESS)
1086 0 : return result;
1087 : }
1088 :
1089 226132 : switch (tok)
1090 : {
1091 11844 : case JSON_TOKEN_OBJECT_START:
1092 11844 : result = parse_object(lex, sem);
1093 4012 : break;
1094 14974 : case JSON_TOKEN_ARRAY_START:
1095 14974 : result = parse_array(lex, sem);
1096 14940 : break;
1097 199314 : default:
1098 199314 : result = parse_scalar(lex, sem);
1099 : }
1100 218260 : if (result != JSON_SUCCESS)
1101 42 : return result;
1102 :
1103 218218 : if (oend != NULL)
1104 : {
1105 113758 : result = (*oend) (sem->semstate, fname, isnull);
1106 113758 : if (result != JSON_SUCCESS)
1107 0 : return result;
1108 : }
1109 :
1110 218218 : return JSON_SUCCESS;
1111 : }
1112 :
1113 : static JsonParseErrorType
1114 48072 : parse_object(JsonLexContext *lex, JsonSemAction *sem)
1115 : {
1116 : /*
1117 : * an object is a possibly empty sequence of object fields, separated by
1118 : * commas and surrounded by curly braces.
1119 : */
1120 48072 : json_struct_action ostart = sem->object_start;
1121 48072 : json_struct_action oend = sem->object_end;
1122 : JsonTokenType tok;
1123 : JsonParseErrorType result;
1124 :
1125 : #ifndef FRONTEND
1126 44098 : check_stack_depth();
1127 : #endif
1128 :
1129 48060 : if (ostart != NULL)
1130 : {
1131 28328 : result = (*ostart) (sem->semstate);
1132 28308 : if (result != JSON_SUCCESS)
1133 0 : return result;
1134 : }
1135 :
1136 : /*
1137 : * Data inside an object is at a higher nesting level than the object
1138 : * itself. Note that we increment this after we call the semantic routine
1139 : * for the object start and restore it before we call the routine for the
1140 : * object end.
1141 : */
1142 48040 : lex->lex_level++;
1143 :
1144 : Assert(lex_peek(lex) == JSON_TOKEN_OBJECT_START);
1145 48040 : result = json_lex(lex);
1146 48040 : if (result != JSON_SUCCESS)
1147 60 : return result;
1148 :
1149 47980 : tok = lex_peek(lex);
1150 47980 : switch (tok)
1151 : {
1152 45218 : case JSON_TOKEN_STRING:
1153 45218 : result = parse_object_field(lex, sem);
1154 218374 : while (result == JSON_SUCCESS && lex_peek(lex) == JSON_TOKEN_COMMA)
1155 : {
1156 181036 : result = json_lex(lex);
1157 181036 : if (result != JSON_SUCCESS)
1158 0 : break;
1159 181036 : result = parse_object_field(lex, sem);
1160 : }
1161 37338 : break;
1162 2748 : case JSON_TOKEN_OBJECT_END:
1163 2748 : break;
1164 14 : default:
1165 : /* case of an invalid initial token inside the object */
1166 14 : result = report_parse_error(JSON_PARSE_OBJECT_START, lex);
1167 : }
1168 40100 : if (result != JSON_SUCCESS)
1169 170 : return result;
1170 :
1171 39930 : result = lex_expect(JSON_PARSE_OBJECT_NEXT, lex, JSON_TOKEN_OBJECT_END);
1172 39930 : if (result != JSON_SUCCESS)
1173 36 : return result;
1174 :
1175 39894 : lex->lex_level--;
1176 :
1177 39894 : if (oend != NULL)
1178 : {
1179 21866 : result = (*oend) (sem->semstate);
1180 21820 : if (result != JSON_SUCCESS)
1181 0 : return result;
1182 : }
1183 :
1184 39848 : return JSON_SUCCESS;
1185 : }
1186 :
1187 : static JsonParseErrorType
1188 51912 : parse_array_element(JsonLexContext *lex, JsonSemAction *sem)
1189 : {
1190 51912 : json_aelem_action astart = sem->array_element_start;
1191 51912 : json_aelem_action aend = sem->array_element_end;
1192 51912 : JsonTokenType tok = lex_peek(lex);
1193 : JsonParseErrorType result;
1194 : bool isnull;
1195 :
1196 51912 : isnull = tok == JSON_TOKEN_NULL;
1197 :
1198 51912 : if (astart != NULL)
1199 : {
1200 7780 : result = (*astart) (sem->semstate, isnull);
1201 7780 : if (result != JSON_SUCCESS)
1202 0 : return result;
1203 : }
1204 :
1205 : /* an array element is any object, array or scalar */
1206 51912 : switch (tok)
1207 : {
1208 16250 : case JSON_TOKEN_OBJECT_START:
1209 16250 : result = parse_object(lex, sem);
1210 16192 : break;
1211 12128 : case JSON_TOKEN_ARRAY_START:
1212 12128 : result = parse_array(lex, sem);
1213 3292 : break;
1214 23534 : default:
1215 23534 : result = parse_scalar(lex, sem);
1216 : }
1217 :
1218 43000 : if (result != JSON_SUCCESS)
1219 66 : return result;
1220 :
1221 42934 : if (aend != NULL)
1222 : {
1223 7270 : result = (*aend) (sem->semstate, isnull);
1224 7258 : if (result != JSON_SUCCESS)
1225 0 : return result;
1226 : }
1227 :
1228 42922 : return JSON_SUCCESS;
1229 : }
1230 :
1231 : static JsonParseErrorType
1232 33444 : parse_array(JsonLexContext *lex, JsonSemAction *sem)
1233 : {
1234 : /*
1235 : * an array is a possibly empty sequence of array elements, separated by
1236 : * commas and surrounded by square brackets.
1237 : */
1238 33444 : json_struct_action astart = sem->array_start;
1239 33444 : json_struct_action aend = sem->array_end;
1240 : JsonParseErrorType result;
1241 :
1242 : #ifndef FRONTEND
1243 33392 : check_stack_depth();
1244 : #endif
1245 :
1246 33432 : if (astart != NULL)
1247 : {
1248 15224 : result = (*astart) (sem->semstate);
1249 15210 : if (result != JSON_SUCCESS)
1250 0 : return result;
1251 : }
1252 :
1253 : /*
1254 : * Data inside an array is at a higher nesting level than the array
1255 : * itself. Note that we increment this after we call the semantic routine
1256 : * for the array start and restore it before we call the routine for the
1257 : * array end.
1258 : */
1259 33418 : lex->lex_level++;
1260 :
1261 33418 : result = lex_expect(JSON_PARSE_ARRAY_START, lex, JSON_TOKEN_ARRAY_START);
1262 33418 : if (result == JSON_SUCCESS && lex_peek(lex) != JSON_TOKEN_ARRAY_END)
1263 : {
1264 26284 : result = parse_array_element(lex, sem);
1265 :
1266 42940 : while (result == JSON_SUCCESS && lex_peek(lex) == JSON_TOKEN_COMMA)
1267 : {
1268 25580 : result = json_lex(lex);
1269 25580 : if (result != JSON_SUCCESS)
1270 0 : break;
1271 25580 : result = parse_array_element(lex, sem);
1272 : }
1273 : }
1274 24494 : if (result != JSON_SUCCESS)
1275 66 : return result;
1276 :
1277 24428 : result = lex_expect(JSON_PARSE_ARRAY_NEXT, lex, JSON_TOKEN_ARRAY_END);
1278 24428 : if (result != JSON_SUCCESS)
1279 24 : return result;
1280 :
1281 24404 : lex->lex_level--;
1282 :
1283 24404 : if (aend != NULL)
1284 : {
1285 8588 : result = (*aend) (sem->semstate);
1286 8564 : if (result != JSON_SUCCESS)
1287 0 : return result;
1288 : }
1289 :
1290 24380 : return JSON_SUCCESS;
1291 : }
1292 :
1293 : /*
1294 : * Lex one token from the input stream.
1295 : *
1296 : * When doing incremental parsing, we can reach the end of the input string
1297 : * without having (or knowing we have) a complete token. If it's not the
1298 : * final chunk of input, the partial token is then saved to the lex
1299 : * structure's partial_token StringInfo. On subsequent calls input is appended
1300 : * buffer until we have something that we think is a complete token,
1301 : * which is then lexed using a recursive call to json_lex. Processing then
1302 : * continues as normal on subsequent calls.
1303 : *
1304 : * Note that when doing incremental processing, the lex.prev_token_terminator
1305 : * should not be relied on. It could point into a previous input chunk or
1306 : * worse.
1307 : */
1308 : JsonParseErrorType
1309 6831622 : json_lex(JsonLexContext *lex)
1310 : {
1311 : char *s;
1312 6831622 : char *const end = lex->input + lex->input_length;
1313 : JsonParseErrorType result;
1314 :
1315 6831622 : if (lex->incremental && lex->inc_state->partial_completed)
1316 : {
1317 : /*
1318 : * We just lexed a completed partial token on the last call, so reset
1319 : * everything
1320 : */
1321 17266 : resetStringInfo(&(lex->inc_state->partial_token));
1322 17266 : lex->token_terminator = lex->input;
1323 17266 : lex->inc_state->partial_completed = false;
1324 : }
1325 :
1326 6831622 : s = lex->token_terminator;
1327 :
1328 6831622 : if (lex->incremental && lex->inc_state->partial_token.len)
1329 : {
1330 : /*
1331 : * We have a partial token. Extend it and if completed lex it by a
1332 : * recursive call
1333 : */
1334 41646 : StringInfo ptok = &(lex->inc_state->partial_token);
1335 41646 : int added = 0;
1336 41646 : bool tok_done = false;
1337 : JsonLexContext dummy_lex;
1338 : JsonParseErrorType partial_result;
1339 :
1340 41646 : if (ptok->data[0] == '"')
1341 : {
1342 : /*
1343 : * It's a string. Accumulate characters until we reach an
1344 : * unescaped '"'.
1345 : */
1346 40084 : int escapes = 0;
1347 :
1348 40722 : for (int i = ptok->len - 1; i > 0; i--)
1349 : {
1350 : /* count the trailing backslashes on the partial token */
1351 37838 : if (ptok->data[i] == '\\')
1352 638 : escapes++;
1353 : else
1354 37200 : break;
1355 : }
1356 :
1357 303798 : for (int i = 0; i < lex->input_length; i++)
1358 : {
1359 280018 : char c = lex->input[i];
1360 :
1361 280018 : appendStringInfoCharMacro(ptok, c);
1362 280018 : added++;
1363 280018 : if (c == '"' && escapes % 2 == 0)
1364 : {
1365 16304 : tok_done = true;
1366 16304 : break;
1367 : }
1368 263714 : if (c == '\\')
1369 1022 : escapes++;
1370 : else
1371 262692 : escapes = 0;
1372 : }
1373 : }
1374 : else
1375 : {
1376 : /* not a string */
1377 1562 : char c = ptok->data[0];
1378 :
1379 1562 : if (c == '-' || (c >= '0' && c <= '9'))
1380 : {
1381 : /* for numbers look for possible numeric continuations */
1382 :
1383 328 : bool numend = false;
1384 :
1385 920 : for (int i = 0; i < lex->input_length && !numend; i++)
1386 : {
1387 592 : char cc = lex->input[i];
1388 :
1389 592 : switch (cc)
1390 : {
1391 398 : case '+':
1392 : case '-':
1393 : case 'e':
1394 : case 'E':
1395 : case '0':
1396 : case '1':
1397 : case '2':
1398 : case '3':
1399 : case '4':
1400 : case '5':
1401 : case '6':
1402 : case '7':
1403 : case '8':
1404 : case '9':
1405 : {
1406 398 : appendStringInfoCharMacro(ptok, cc);
1407 398 : added++;
1408 : }
1409 398 : break;
1410 194 : default:
1411 194 : numend = true;
1412 : }
1413 : }
1414 : }
1415 :
1416 : /*
1417 : * Add any remaining alphanumeric chars. This takes care of the
1418 : * {null, false, true} literals as well as any trailing
1419 : * alphanumeric junk on non-string tokens.
1420 : */
1421 3224 : for (int i = added; i < lex->input_length; i++)
1422 : {
1423 2734 : char cc = lex->input[i];
1424 :
1425 2734 : if (JSON_ALPHANUMERIC_CHAR(cc))
1426 : {
1427 1662 : appendStringInfoCharMacro(ptok, cc);
1428 1662 : added++;
1429 : }
1430 : else
1431 : {
1432 1072 : tok_done = true;
1433 1072 : break;
1434 : }
1435 : }
1436 1562 : if (added == lex->input_length &&
1437 490 : lex->inc_state->is_last_chunk)
1438 : {
1439 38 : tok_done = true;
1440 : }
1441 : }
1442 :
1443 41646 : if (!tok_done)
1444 : {
1445 : /* We should have consumed the whole chunk in this case. */
1446 : Assert(added == lex->input_length);
1447 :
1448 24232 : if (!lex->inc_state->is_last_chunk)
1449 24216 : return JSON_INCOMPLETE;
1450 :
1451 : /* json_errdetail() needs access to the accumulated token. */
1452 16 : lex->token_start = ptok->data;
1453 16 : lex->token_terminator = ptok->data + ptok->len;
1454 16 : return JSON_INVALID_TOKEN;
1455 : }
1456 :
1457 : /*
1458 : * Everything up to lex->input[added] has been added to the partial
1459 : * token, so move the input past it.
1460 : */
1461 17414 : lex->input += added;
1462 17414 : lex->input_length -= added;
1463 :
1464 17414 : dummy_lex.input = dummy_lex.token_terminator =
1465 17414 : dummy_lex.line_start = ptok->data;
1466 17414 : dummy_lex.line_number = lex->line_number;
1467 17414 : dummy_lex.input_length = ptok->len;
1468 17414 : dummy_lex.input_encoding = lex->input_encoding;
1469 17414 : dummy_lex.incremental = false;
1470 17414 : dummy_lex.strval = lex->strval;
1471 :
1472 17414 : partial_result = json_lex(&dummy_lex);
1473 :
1474 : /*
1475 : * We either have a complete token or an error. In either case we need
1476 : * to point to the partial token data for the semantic or error
1477 : * routines. If it's not an error we'll readjust on the next call to
1478 : * json_lex.
1479 : */
1480 17414 : lex->token_type = dummy_lex.token_type;
1481 17414 : lex->line_number = dummy_lex.line_number;
1482 :
1483 : /*
1484 : * We know the prev_token_terminator must be back in some previous
1485 : * piece of input, so we just make it NULL.
1486 : */
1487 17414 : lex->prev_token_terminator = NULL;
1488 :
1489 : /*
1490 : * Normally token_start would be ptok->data, but it could be later,
1491 : * see json_lex_string's handling of invalid escapes.
1492 : */
1493 17414 : lex->token_start = dummy_lex.token_start;
1494 17414 : lex->token_terminator = dummy_lex.token_terminator;
1495 17414 : if (partial_result == JSON_SUCCESS)
1496 : {
1497 : /* make sure we've used all the input */
1498 17360 : if (lex->token_terminator - lex->token_start != ptok->len)
1499 : {
1500 : Assert(false);
1501 0 : return JSON_INVALID_TOKEN;
1502 : }
1503 :
1504 17360 : lex->inc_state->partial_completed = true;
1505 : }
1506 17414 : return partial_result;
1507 : /* end of partial token processing */
1508 : }
1509 :
1510 : /* Skip leading whitespace. */
1511 12381912 : while (s < end && (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r'))
1512 : {
1513 5591936 : if (*s++ == '\n')
1514 : {
1515 367358 : ++lex->line_number;
1516 367358 : lex->line_start = s;
1517 : }
1518 : }
1519 6789976 : lex->token_start = s;
1520 :
1521 : /* Determine token type. */
1522 6789976 : if (s >= end)
1523 : {
1524 213772 : lex->token_start = NULL;
1525 213772 : lex->prev_token_terminator = lex->token_terminator;
1526 213772 : lex->token_terminator = s;
1527 213772 : lex->token_type = JSON_TOKEN_END;
1528 : }
1529 : else
1530 : {
1531 6576204 : switch (*s)
1532 : {
1533 : /* Single-character token, some kind of punctuation mark. */
1534 226210 : case '{':
1535 226210 : lex->prev_token_terminator = lex->token_terminator;
1536 226210 : lex->token_terminator = s + 1;
1537 226210 : lex->token_type = JSON_TOKEN_OBJECT_START;
1538 226210 : break;
1539 217594 : case '}':
1540 217594 : lex->prev_token_terminator = lex->token_terminator;
1541 217594 : lex->token_terminator = s + 1;
1542 217594 : lex->token_type = JSON_TOKEN_OBJECT_END;
1543 217594 : break;
1544 1264450 : case '[':
1545 1264450 : lex->prev_token_terminator = lex->token_terminator;
1546 1264450 : lex->token_terminator = s + 1;
1547 1264450 : lex->token_type = JSON_TOKEN_ARRAY_START;
1548 1264450 : break;
1549 436032 : case ']':
1550 436032 : lex->prev_token_terminator = lex->token_terminator;
1551 436032 : lex->token_terminator = s + 1;
1552 436032 : lex->token_type = JSON_TOKEN_ARRAY_END;
1553 436032 : break;
1554 1086074 : case ',':
1555 1086074 : lex->prev_token_terminator = lex->token_terminator;
1556 1086074 : lex->token_terminator = s + 1;
1557 1086074 : lex->token_type = JSON_TOKEN_COMMA;
1558 1086074 : break;
1559 1108288 : case ':':
1560 1108288 : lex->prev_token_terminator = lex->token_terminator;
1561 1108288 : lex->token_terminator = s + 1;
1562 1108288 : lex->token_type = JSON_TOKEN_COLON;
1563 1108288 : break;
1564 1901894 : case '"':
1565 : /* string */
1566 1901894 : result = json_lex_string(lex);
1567 1901894 : if (result != JSON_SUCCESS)
1568 16520 : return result;
1569 1885374 : lex->token_type = JSON_TOKEN_STRING;
1570 1885374 : break;
1571 184 : case '-':
1572 : /* Negative number. */
1573 184 : result = json_lex_number(lex, s + 1, NULL, NULL);
1574 184 : if (result != JSON_SUCCESS)
1575 0 : return result;
1576 184 : lex->token_type = JSON_TOKEN_NUMBER;
1577 184 : break;
1578 298776 : case '0':
1579 : case '1':
1580 : case '2':
1581 : case '3':
1582 : case '4':
1583 : case '5':
1584 : case '6':
1585 : case '7':
1586 : case '8':
1587 : case '9':
1588 : /* Positive number. */
1589 298776 : result = json_lex_number(lex, s, NULL, NULL);
1590 298776 : if (result != JSON_SUCCESS)
1591 262 : return result;
1592 298514 : lex->token_type = JSON_TOKEN_NUMBER;
1593 298514 : break;
1594 36702 : default:
1595 : {
1596 : char *p;
1597 :
1598 : /*
1599 : * We're not dealing with a string, number, legal
1600 : * punctuation mark, or end of string. The only legal
1601 : * tokens we might find here are true, false, and null,
1602 : * but for error reporting purposes we scan until we see a
1603 : * non-alphanumeric character. That way, we can report
1604 : * the whole word as an unexpected token, rather than just
1605 : * some unintuitive prefix thereof.
1606 : */
1607 201338 : for (p = s; p < end && JSON_ALPHANUMERIC_CHAR(*p); p++)
1608 : /* skip */ ;
1609 :
1610 : /*
1611 : * We got some sort of unexpected punctuation or an
1612 : * otherwise unexpected character, so just complain about
1613 : * that one character.
1614 : */
1615 36702 : if (p == s)
1616 : {
1617 38 : lex->prev_token_terminator = lex->token_terminator;
1618 38 : lex->token_terminator = s + 1;
1619 38 : return JSON_INVALID_TOKEN;
1620 : }
1621 :
1622 36664 : if (lex->incremental && !lex->inc_state->is_last_chunk &&
1623 3916 : p == lex->input + lex->input_length)
1624 : {
1625 916 : appendBinaryStringInfo(
1626 916 : &(lex->inc_state->partial_token), s, end - s);
1627 916 : return JSON_INCOMPLETE;
1628 : }
1629 :
1630 : /*
1631 : * We've got a real alphanumeric token here. If it
1632 : * happens to be true, false, or null, all is well. If
1633 : * not, error out.
1634 : */
1635 35748 : lex->prev_token_terminator = lex->token_terminator;
1636 35748 : lex->token_terminator = p;
1637 35748 : if (p - s == 4)
1638 : {
1639 15902 : if (memcmp(s, "true", 4) == 0)
1640 7496 : lex->token_type = JSON_TOKEN_TRUE;
1641 8406 : else if (memcmp(s, "null", 4) == 0)
1642 8394 : lex->token_type = JSON_TOKEN_NULL;
1643 : else
1644 12 : return JSON_INVALID_TOKEN;
1645 : }
1646 19846 : else if (p - s == 5 && memcmp(s, "false", 5) == 0)
1647 19656 : lex->token_type = JSON_TOKEN_FALSE;
1648 : else
1649 190 : return JSON_INVALID_TOKEN;
1650 : }
1651 : } /* end of switch */
1652 : }
1653 :
1654 6772038 : if (lex->incremental && lex->token_type == JSON_TOKEN_END && !lex->inc_state->is_last_chunk)
1655 143896 : return JSON_INCOMPLETE;
1656 : else
1657 6628142 : return JSON_SUCCESS;
1658 : }
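/*
 * A worked example of the partial-token path: if {"a": "hello"} arrives as
 * the two chunks {"a": "hel and lo"}, the first call runs out of input in
 * the middle of the string, stashes "hel in inc_state->partial_token, and
 * reports JSON_INCOMPLETE. The second call finds the saved prefix, appends
 * input up to the first unescaped '"' (reassembling "hello"), lexes that
 * through the recursive dummy_lex call above, and then resumes ordinary
 * lexing at the '}'.
 */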
1659 :
1660 : /*
1661 : * The next token in the input stream is known to be a string; lex it.
1662 : *
1663 : * If lex->strval isn't NULL, fill it with the decoded string.
1664 : * Set lex->token_terminator to the end of the decoded input, and in
1665 : * success cases, transfer its previous value to lex->prev_token_terminator.
1666 : * Return JSON_SUCCESS or an error code.
1667 : *
1668 : * Note: be careful that all error exits advance lex->token_terminator
1669 : * to the point after the character we detected the error on.
1670 : */
1671 : static inline JsonParseErrorType
1672 1901894 : json_lex_string(JsonLexContext *lex)
1673 : {
1674 : char *s;
1675 1901894 : char *const end = lex->input + lex->input_length;
1676 1901894 : int hi_surrogate = -1;
1677 :
1678 : /* Convenience macros for error exits */
1679 : #define FAIL_OR_INCOMPLETE_AT_CHAR_START(code) \
1680 : do { \
1681 : if (lex->incremental && !lex->inc_state->is_last_chunk) \
1682 : { \
1683 : appendBinaryStringInfo(&lex->inc_state->partial_token, \
1684 : lex->token_start, end - lex->token_start); \
1685 : return JSON_INCOMPLETE; \
1686 : } \
1687 : lex->token_terminator = s; \
1688 : return code; \
1689 : } while (0)
1690 : #define FAIL_AT_CHAR_END(code) \
1691 : do { \
1692 : lex->token_terminator = \
1693 : s + pg_encoding_mblen_bounded(lex->input_encoding, s); \
1694 : return code; \
1695 : } while (0)
1696 :
1697 1901894 : if (lex->strval != NULL)
1698 1804922 : resetStringInfo(lex->strval);
1699 :
1700 : Assert(lex->input_length > 0);
1701 1901894 : s = lex->token_start;
1702 : for (;;)
1703 : {
1704 3804492 : s++;
1705 : /* Premature end of the string. */
1706 3804492 : if (s >= end)
1707 16224 : FAIL_OR_INCOMPLETE_AT_CHAR_START(JSON_INVALID_TOKEN);
1708 3788268 : else if (*s == '"')
1709 1885374 : break;
1710 1902894 : else if (*s == '\\')
1711 : {
1712 : /* OK, we have an escape character. */
1713 3178 : s++;
1714 3178 : if (s >= end)
1715 46 : FAIL_OR_INCOMPLETE_AT_CHAR_START(JSON_INVALID_TOKEN);
1716 3132 : else if (*s == 'u')
1717 : {
1718 : int i;
1719 1232 : int ch = 0;
1720 :
1721 5912 : for (i = 1; i <= 4; i++)
1722 : {
1723 4780 : s++;
1724 4780 : if (s >= end)
1725 64 : FAIL_OR_INCOMPLETE_AT_CHAR_START(JSON_INVALID_TOKEN);
1726 4716 : else if (*s >= '0' && *s <= '9')
1727 2942 : ch = (ch * 16) + (*s - '0');
1728 1774 : else if (*s >= 'a' && *s <= 'f')
1729 1714 : ch = (ch * 16) + (*s - 'a') + 10;
1730 60 : else if (*s >= 'A' && *s <= 'F')
1731 24 : ch = (ch * 16) + (*s - 'A') + 10;
1732 : else
1733 36 : FAIL_AT_CHAR_END(JSON_UNICODE_ESCAPE_FORMAT);
1734 : }
1735 1132 : if (lex->strval != NULL)
1736 : {
1737 : /*
1738 : * Combine surrogate pairs.
1739 : */
1740 218 : if (is_utf16_surrogate_first(ch))
1741 : {
1742 72 : if (hi_surrogate != -1)
1743 12 : FAIL_AT_CHAR_END(JSON_UNICODE_HIGH_SURROGATE);
1744 60 : hi_surrogate = ch;
1745 60 : continue;
1746 : }
1747 146 : else if (is_utf16_surrogate_second(ch))
1748 : {
1749 60 : if (hi_surrogate == -1)
1750 24 : FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
1751 36 : ch = surrogate_pair_to_codepoint(hi_surrogate, ch);
1752 36 : hi_surrogate = -1;
1753 : }
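/*
 * Worked example (illustrative): the input "\uD83D\uDE00" arrives as
 * the surrogate pair (0xD83D, 0xDE00), which combines to
 * ((0xD83D - 0xD800) << 10) + (0xDE00 - 0xDC00) + 0x10000 = 0x1F600,
 * i.e. the single code point U+1F600.
 */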
1754 :
1755 122 : if (hi_surrogate != -1)
1756 0 : FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
1757 :
1758 : /*
1759 : * Reject invalid cases. We can't have a value above
1760 : * 0xFFFF here (since we only accepted 4 hex digits
1761 : * above), so no need to test for out-of-range chars.
1762 : */
1763 122 : if (ch == 0)
1764 : {
1765 : /* We can't allow this, since our TEXT type can't store NUL bytes */

1766 24 : FAIL_AT_CHAR_END(JSON_UNICODE_CODE_POINT_ZERO);
1767 : }
1768 :
1769 : /*
1770 : * Add the represented character to lex->strval. In the
1771 : * backend, we can let pg_unicode_to_server_noerror()
1772 : * handle any required character set conversion; in
1773 : * frontend, we can only deal with trivial conversions.
1774 : */
1775 : #ifndef FRONTEND
1776 : {
1777 : char cbuf[MAX_UNICODE_EQUIVALENT_STRING + 1];
1778 :
1779 84 : if (!pg_unicode_to_server_noerror(ch, (unsigned char *) cbuf))
1780 0 : FAIL_AT_CHAR_END(JSON_UNICODE_UNTRANSLATABLE);
1781 84 : appendStringInfoString(lex->strval, cbuf);
1782 : }
1783 : #else
1784 14 : if (lex->input_encoding == PG_UTF8)
1785 : {
1786 : /* OK, we can map the code point to UTF8 easily */
1787 : char utf8str[5];
1788 : int utf8len;
1789 :
1790 14 : unicode_to_utf8(ch, (unsigned char *) utf8str);
1791 14 : utf8len = pg_utf_mblen((unsigned char *) utf8str);
1792 14 : appendBinaryStringInfo(lex->strval, utf8str, utf8len);
1793 : }
1794 0 : else if (ch <= 0x007f)
1795 : {
1796 : /* The ASCII range is the same in all encodings */
1797 0 : appendStringInfoChar(lex->strval, (char) ch);
1798 : }
1799 : else
1800 0 : FAIL_AT_CHAR_END(JSON_UNICODE_HIGH_ESCAPE);
1801 : #endif /* FRONTEND */
1802 : }
1803 : }
1804 1900 : else if (lex->strval != NULL)
1805 : {
1806 396 : if (hi_surrogate != -1)
1807 0 : FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
1808 :
1809 396 : switch (*s)
1810 : {
1811 290 : case '"':
1812 : case '\\':
1813 : case '/':
1814 290 : appendStringInfoChar(lex->strval, *s);
1815 290 : break;
1816 38 : case 'b':
1817 38 : appendStringInfoChar(lex->strval, '\b');
1818 38 : break;
1819 2 : case 'f':
1820 2 : appendStringInfoChar(lex->strval, '\f');
1821 2 : break;
1822 56 : case 'n':
1823 56 : appendStringInfoChar(lex->strval, '\n');
1824 56 : break;
1825 2 : case 'r':
1826 2 : appendStringInfoChar(lex->strval, '\r');
1827 2 : break;
1828 2 : case 't':
1829 2 : appendStringInfoChar(lex->strval, '\t');
1830 2 : break;
1831 6 : default:
1832 :
1833 : /*
1834 : * Not a valid string escape, so signal error. We
1835 : * adjust token_start so that just the escape sequence
1836 : * is reported, not the whole string.
1837 : */
1838 6 : lex->token_start = s;
1839 6 : FAIL_AT_CHAR_END(JSON_ESCAPING_INVALID);
1840 : }
1841 : }
1842 1504 : else if (strchr("\"\\/bfnrt", *s) == NULL)
1843 : {
1844 : /*
1845 : * Simpler processing if we're not bothered about de-escaping
1846 : *
1847 : * It's very tempting to remove the strchr() call here and
1848 : * replace it with a switch statement, but testing so far has
1849 : * shown it's not a performance win.
1850 : */
1851 34 : lex->token_start = s;
1852 34 : FAIL_AT_CHAR_END(JSON_ESCAPING_INVALID);
1853 : }
1854 : }
1855 : else
1856 : {
1857 1899716 : char *p = s;
1858 :
1859 1899716 : if (hi_surrogate != -1)
1860 12 : FAIL_AT_CHAR_END(JSON_UNICODE_LOW_SURROGATE);
1861 :
1862 : /*
1863 : * Skip to the first byte that requires special handling, so we
1864 : * can batch calls to appendBinaryStringInfo.
1865 : */
1866 2385184 : while (p < end - sizeof(Vector8) &&
1867 2316538 : !pg_lfind8('\\', (uint8 *) p, sizeof(Vector8)) &&
1868 2315260 : !pg_lfind8('"', (uint8 *) p, sizeof(Vector8)) &&
1869 485480 : !pg_lfind8_le(31, (uint8 *) p, sizeof(Vector8)))
1870 485480 : p += sizeof(Vector8);
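/*
 * Illustrative note: Vector8 is 16 bytes on SSE2/NEON builds (8 bytes
 * in the generic fallback), so a long run of ordinary characters is
 * skipped a whole vector at a time; the byte-at-a-time loop below then
 * only has to examine the remainder up to the quote, backslash, or
 * control character that stopped the vector scan.
 */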
1871 :
1872 14945728 : for (; p < end; p++)
1873 : {
1874 14932344 : if (*p == '\\' || *p == '"')
1875 : break;
1876 13046062 : else if ((unsigned char) *p <= 31)
1877 : {
1878 : /*
1879 :  * Per RFC4627, these characters MUST be escaped.  Since
1880 :  * *p isn't printable, exclude it from the context
1881 :  * string.
1882 :  */
1883 38 : lex->token_terminator = p;
1884 38 : return JSON_ESCAPING_REQUIRED;
1885 : }
1886 : }
1887 :
1888 1899666 : if (lex->strval != NULL)
1889 1804978 : appendBinaryStringInfo(lex->strval, s, p - s);
1890 :
1891 : /*
1892 : * s will be incremented at the top of the loop, so set it to just
1893 : * behind our lookahead position
1894 : */
1895 1899666 : s = p - 1;
1896 : }
1897 : }
1898 :
1899 1885374 : if (hi_surrogate != -1)
1900 : {
1901 0 : lex->token_terminator = s + 1;
1902 0 : return JSON_UNICODE_LOW_SURROGATE;
1903 : }
1904 :
1905 : /* Hooray, we found the end of the string! */
1906 1885374 : lex->prev_token_terminator = lex->token_terminator;
1907 1885374 : lex->token_terminator = s + 1;
1908 1885374 : return JSON_SUCCESS;
1909 :
1910 : #undef FAIL_OR_INCOMPLETE_AT_CHAR_START
1911 : #undef FAIL_AT_CHAR_END
1912 : }
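/*
 * Worked example (illustrative): with de-escaping enabled, lexing the
 * body a\u0041\tb inside quotes leaves strval holding 'a', 'A', a tab,
 * and 'b': the plain runs are appended in batches, the \u0041 escape
 * decodes to 'A', and \t becomes a literal tab byte.
 */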
1913 :
1914 : /*
1915 : * The next token in the input stream is known to be a number; lex it.
1916 : *
1917 : * In JSON, a number consists of four parts:
1918 : *
1919 : * (1) An optional minus sign ('-').
1920 : *
1921 : * (2) Either a single '0', or a string of one or more digits that does not
1922 : * begin with a '0'.
1923 : *
1924 : * (3) An optional decimal part, consisting of a period ('.') followed by
1925 : * one or more digits. (Note: While this part can be omitted
1926 : * completely, it's not OK to have only the decimal point without
1927 : * any digits afterwards.)
1928 : *
1929 : * (4) An optional exponent part, consisting of 'e' or 'E', optionally
1930 : * followed by '+' or '-', followed by one or more digits. (Note:
1931 : * As with the decimal part, if 'e' or 'E' is present, it must be
1932 : * followed by at least one digit.)
1933 : *
1934 : * The 's' argument to this function points to the ostensible beginning
1935 : * of part 2 - i.e. the character after any optional minus sign, or the
1936 : * first character of the string if there is none.
1937 : *
1938 : * If num_err is not NULL, we store an error flag in *num_err rather than
1939 : * failing on a badly-formed number. If total_len is not NULL, the distance
1940 : * from lex->input to one past the end of the token is stored in *total_len.
1941 : */
1942 : static inline JsonParseErrorType
1943 299006 : json_lex_number(JsonLexContext *lex, char *s,
1944 : bool *num_err, int *total_len)
1945 : {
1946 299006 : bool error = false;
1947 299006 : int len = s - lex->input;
1948 :
1949 : /* Part (1): leading sign indicator. */
1950 : /* Caller already did this for us; so do nothing. */
1951 :
1952 : /* Part (2): parse main digit string. */
1953 299006 : if (len < lex->input_length && *s == '0')
1954 : {
1955 46700 : s++;
1956 46700 : len++;
1957 : }
1958 252306 : else if (len < lex->input_length && *s >= '1' && *s <= '9')
1959 : {
1960 : do
1961 : {
1962 841704 : s++;
1963 841704 : len++;
1964 841704 : } while (len < lex->input_length && *s >= '0' && *s <= '9');
1965 : }
1966 : else
1967 2 : error = true;
1968 :
1969 : /* Part (3): parse optional decimal portion. */
1970 299006 : if (len < lex->input_length && *s == '.')
1971 : {
1972 37558 : s++;
1973 37558 : len++;
1974 37558 : if (len == lex->input_length || *s < '0' || *s > '9')
1975 12 : error = true;
1976 : else
1977 : {
1978 : do
1979 : {
1980 92408 : s++;
1981 92408 : len++;
1982 92408 : } while (len < lex->input_length && *s >= '0' && *s <= '9');
1983 : }
1984 : }
1985 :
1986 : /* Part (4): parse optional exponent. */
1987 299006 : if (len < lex->input_length && (*s == 'e' || *s == 'E'))
1988 : {
1989 94 : s++;
1990 94 : len++;
1991 94 : if (len < lex->input_length && (*s == '+' || *s == '-'))
1992 : {
1993 10 : s++;
1994 10 : len++;
1995 : }
1996 94 : if (len == lex->input_length || *s < '0' || *s > '9')
1997 12 : error = true;
1998 : else
1999 : {
2000 : do
2001 : {
2002 284 : s++;
2003 284 : len++;
2004 284 : } while (len < lex->input_length && *s >= '0' && *s <= '9');
2005 : }
2006 : }
2007 :
2008 : /*
2009 : * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
2010 : * here should be considered part of the token for error-reporting
2011 : * purposes.
2012 : */
2013 299210 : for (; len < lex->input_length && JSON_ALPHANUMERIC_CHAR(*s); s++, len++)
2014 204 : error = true;
2015 :
2016 299006 : if (total_len != NULL)
2017 46 : *total_len = len;
2018 :
2019 299006 : if (lex->incremental && !lex->inc_state->is_last_chunk &&
2020 91954 : len >= lex->input_length)
2021 : {
2022 194 : appendBinaryStringInfo(&lex->inc_state->partial_token,
2023 194 : lex->token_start, s - lex->token_start);
2024 194 : if (num_err != NULL)
2025 0 : *num_err = error;
2026 :
2027 194 : return JSON_INCOMPLETE;
2028 : }
2029 298812 : else if (num_err != NULL)
2030 : {
2031 : /* let the caller handle any error */
2032 46 : *num_err = error;
2033 : }
2034 : else
2035 : {
2036 : /* return token endpoint */
2037 298766 : lex->prev_token_terminator = lex->token_terminator;
2038 298766 : lex->token_terminator = s;
2039 : /* handle error if any */
2040 298766 : if (error)
2041 68 : return JSON_INVALID_TOKEN;
2042 : }
2043 :
2044 298744 : return JSON_SUCCESS;
2045 : }
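/*
 * Illustrative examples (not from the source): under the rules above,
 * "0", "-1", "2.5", "4e2" and "4E+2" all lex as valid numbers, while
 * "-.5" (missing integer part), "01" (leading zero), "1." (digit-less
 * decimal part) and "1e+" (digit-less exponent) set the error flag.
 */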
2046 :
2047 : /*
2048 : * Report a parse error.
2049 : *
2050 : * lex->token_start and lex->token_terminator must identify the current token.
2051 : */
2052 : static JsonParseErrorType
2053 544 : report_parse_error(JsonParseContext ctx, JsonLexContext *lex)
2054 : {
2055 : /* Handle case where the input ended prematurely. */
2056 544 : if (lex->token_start == NULL || lex->token_type == JSON_TOKEN_END)
2057 170 : return JSON_EXPECTED_MORE;
2058 :
2059 : /* Otherwise choose the error type based on the parsing context. */
2060 374 : switch (ctx)
2061 : {
2062 30 : case JSON_PARSE_END:
2063 30 : return JSON_EXPECTED_END;
2064 120 : case JSON_PARSE_VALUE:
2065 120 : return JSON_EXPECTED_JSON;
2066 50 : case JSON_PARSE_STRING:
2067 50 : return JSON_EXPECTED_STRING;
2068 14 : case JSON_PARSE_ARRAY_START:
2069 14 : return JSON_EXPECTED_ARRAY_FIRST;
2070 18 : case JSON_PARSE_ARRAY_NEXT:
2071 18 : return JSON_EXPECTED_ARRAY_NEXT;
2072 40 : case JSON_PARSE_OBJECT_START:
2073 40 : return JSON_EXPECTED_OBJECT_FIRST;
2074 50 : case JSON_PARSE_OBJECT_LABEL:
2075 50 : return JSON_EXPECTED_COLON;
2076 52 : case JSON_PARSE_OBJECT_NEXT:
2077 52 : return JSON_EXPECTED_OBJECT_NEXT;
2078 0 : case JSON_PARSE_OBJECT_COMMA:
2079 0 : return JSON_EXPECTED_STRING;
2080 : }
2081 :
2082 : /*
2083 : * We don't use a default: case, so that the compiler will warn about
2084 : * unhandled enum values.
2085 : */
2086 : Assert(false);
2087 0 : return JSON_SUCCESS; /* silence stupider compilers */
2088 : }
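/*
 * Illustrative example: for the input {"a" 1}, the parser fails while
 * in JSON_PARSE_OBJECT_LABEL (a field name was just seen, so ':' is
 * expected), which the mapping above turns into JSON_EXPECTED_COLON;
 * json_errdetail() below then renders it as: Expected ":", but found
 * "1".
 */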
2089 :
2090 : /*
2091 : * Construct an (already translated) detail message for a JSON error.
2092 : *
2093 : * The returned pointer should not be freed; the allocation is either
2094 : * static or owned by the JsonLexContext.
2095 : */
2096 : char *
2097 922 : json_errdetail(JsonParseErrorType error, JsonLexContext *lex)
2098 : {
2099 922 : if (lex->errormsg)
2100 0 : resetStringInfo(lex->errormsg);
2101 : else
2102 922 : lex->errormsg = makeStringInfo();
2103 :
2104 : /*
2105 : * A helper for error messages that should print the current token. The
2106 : * format must contain exactly one %.*s specifier.
2107 : */
2108 : #define token_error(lex, format) \
2109 : appendStringInfo((lex)->errormsg, _(format), \
2110 : (int) ((lex)->token_terminator - (lex)->token_start), \
2111 : (lex)->token_start)
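/*
 * For example (illustrative): with token_start and token_terminator
 * bracketing the three bytes tru, token_error(lex, "Token \"%.*s\" is
 * invalid.") appends: Token "tru" is invalid.
 */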
2112 :
2113 922 : switch (error)
2114 : {
2115 0 : case JSON_INCOMPLETE:
2116 : case JSON_SUCCESS:
2117 : /* fall through to the error code after switch */
2118 0 : break;
2119 0 : case JSON_INVALID_LEXER_TYPE:
2120 0 : if (lex->incremental)
2121 0 : return _("Recursive descent parser cannot use incremental lexer");
2122 : else
2123 0 : return _("Incremental parser requires incremental lexer");
2124 128 : case JSON_NESTING_TOO_DEEP:
2125 128 : return _("JSON nested too deep, maximum permitted depth is 6400");
2126 40 : case JSON_ESCAPING_INVALID:
2127 40 : token_error(lex, "Escape sequence \"\\%.*s\" is invalid.");
2128 40 : break;
2129 38 : case JSON_ESCAPING_REQUIRED:
2130 38 : appendStringInfo(lex->errormsg,
2131 38 : _("Character with value 0x%02x must be escaped."),
2132 38 : (unsigned char) *(lex->token_terminator));
2133 38 : break;
2134 30 : case JSON_EXPECTED_END:
2135 30 : token_error(lex, "Expected end of input, but found \"%.*s\".");
2136 30 : break;
2137 14 : case JSON_EXPECTED_ARRAY_FIRST:
2138 14 : token_error(lex, "Expected array element or \"]\", but found \"%.*s\".");
2139 14 : break;
2140 18 : case JSON_EXPECTED_ARRAY_NEXT:
2141 18 : token_error(lex, "Expected \",\" or \"]\", but found \"%.*s\".");
2142 18 : break;
2143 50 : case JSON_EXPECTED_COLON:
2144 50 : token_error(lex, "Expected \":\", but found \"%.*s\".");
2145 50 : break;
2146 66 : case JSON_EXPECTED_JSON:
2147 66 : token_error(lex, "Expected JSON value, but found \"%.*s\".");
2148 66 : break;
2149 94 : case JSON_EXPECTED_MORE:
2150 94 : return _("The input string ended unexpectedly.");
2151 40 : case JSON_EXPECTED_OBJECT_FIRST:
2152 40 : token_error(lex, "Expected string or \"}\", but found \"%.*s\".");
2153 40 : break;
2154 52 : case JSON_EXPECTED_OBJECT_NEXT:
2155 52 : token_error(lex, "Expected \",\" or \"}\", but found \"%.*s\".");
2156 52 : break;
2157 50 : case JSON_EXPECTED_STRING:
2158 50 : token_error(lex, "Expected string, but found \"%.*s\".");
2159 50 : break;
2160 194 : case JSON_INVALID_TOKEN:
2161 194 : token_error(lex, "Token \"%.*s\" is invalid.");
2162 194 : break;
2163 24 : case JSON_UNICODE_CODE_POINT_ZERO:
2164 24 : return _("\\u0000 cannot be converted to text.");
2165 36 : case JSON_UNICODE_ESCAPE_FORMAT:
2166 36 : return _("\"\\u\" must be followed by four hexadecimal digits.");
2167 0 : case JSON_UNICODE_HIGH_ESCAPE:
2168 : /* note: this case is only reachable in frontend, not backend */
2169 0 : return _("Unicode escape values cannot be used for code point values above 007F when the encoding is not UTF8.");
2170 0 : case JSON_UNICODE_UNTRANSLATABLE:
2171 :
2172 : /*
2173 : * Note: this case is only reachable in backend and not frontend.
2174 : * #ifdef it away so the frontend doesn't try to link against
2175 : * backend functionality.
2176 : */
2177 : #ifndef FRONTEND
2178 0 : return psprintf(_("Unicode escape value could not be translated to the server's encoding %s."),
2179 : GetDatabaseEncodingName());
2180 : #else
2181 : Assert(false);
2182 0 : break;
2183 : #endif
2184 12 : case JSON_UNICODE_HIGH_SURROGATE:
2185 12 : return _("Unicode high surrogate must not follow a high surrogate.");
2186 36 : case JSON_UNICODE_LOW_SURROGATE:
2187 36 : return _("Unicode low surrogate must follow a high surrogate.");
2188 0 : case JSON_SEM_ACTION_FAILED:
2189 : /* fall through to the error code after switch */
2190 0 : break;
2191 : }
2192 : #undef token_error
2193 :
2194 : /*
2195 : * We don't use a default: case, so that the compiler will warn about
2196 : * unhandled enum values. But this needs to be here anyway to cover the
2197 : * possibility of an incorrect input.
2198 : */
2199 592 : if (lex->errormsg->len == 0)
2200 0 : appendStringInfo(lex->errormsg,
2201 0 : _("unexpected json parse error type: %d"),
2202 : (int) error);
2203 :
2204 592 : return lex->errormsg->data;
2205 : }
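/*
 * Minimal end-to-end usage sketch (illustrative, not part of this file):
 * validate a NUL-terminated JSON string and print the detail message on
 * failure.  Assumes the public API declared in common/jsonapi.h and the
 * no-op semantic action set nullSemAction.
 */
#if 0							/* example only; never compiled */
static bool
json_is_valid_example(const char *json)
{
	JsonLexContext lex;
	JsonParseErrorType err;

	makeJsonLexContextCstringLen(&lex, json, strlen(json), PG_UTF8, false);
	err = pg_parse_json(&lex, &nullSemAction);
	if (err != JSON_SUCCESS)
		fprintf(stderr, "invalid JSON: %s\n", json_errdetail(err, &lex));
	freeJsonLexContext(&lex);
	return err == JSON_SUCCESS;
}
#endif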